Diffstat (limited to 'include')
1150 files changed, 37689 insertions, 16871 deletions
diff --git a/include/Kbuild b/include/Kbuild new file mode 100644 index 000000000000..ffba79483cc5 --- /dev/null +++ b/include/Kbuild @@ -0,0 +1,1185 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# Add header-test-$(CONFIG_...) guard to headers that are only compiled +# for particular architectures. +# +# Headers listed in header-test- are excluded from the test coverage. +# Many headers are excluded for now because they fail to build. Please +# consider to fix headers first before adding new ones to the blacklist. +# +# Sorted alphabetically. +header-test- += acpi/acbuffer.h +header-test- += acpi/acpi.h +header-test- += acpi/acpi_bus.h +header-test- += acpi/acpi_drivers.h +header-test- += acpi/acpi_io.h +header-test- += acpi/acpi_lpat.h +header-test- += acpi/acpiosxf.h +header-test- += acpi/acpixf.h +header-test- += acpi/acrestyp.h +header-test- += acpi/actbl.h +header-test- += acpi/actbl1.h +header-test- += acpi/actbl2.h +header-test- += acpi/actbl3.h +header-test- += acpi/actypes.h +header-test- += acpi/battery.h +header-test- += acpi/cppc_acpi.h +header-test- += acpi/nfit.h +header-test- += acpi/platform/acenv.h +header-test- += acpi/platform/acenvex.h +header-test- += acpi/platform/acintel.h +header-test- += acpi/platform/aclinux.h +header-test- += acpi/platform/aclinuxex.h +header-test- += acpi/processor.h +header-test-$(CONFIG_X86) += clocksource/hyperv_timer.h +header-test- += clocksource/timer-sp804.h +header-test- += crypto/cast_common.h +header-test- += crypto/internal/cryptouser.h +header-test- += crypto/pkcs7.h +header-test- += crypto/poly1305.h +header-test- += crypto/sha3.h +header-test- += drm/ati_pcigart.h +header-test- += drm/bridge/dw_hdmi.h +header-test- += drm/bridge/dw_mipi_dsi.h +header-test- += drm/drm_audio_component.h +header-test- += drm/drm_auth.h +header-test- += drm/drm_debugfs.h +header-test- += drm/drm_debugfs_crc.h +header-test- += drm/drm_displayid.h +header-test- += drm/drm_encoder_slave.h +header-test- += drm/drm_fb_cma_helper.h +header-test- += drm/drm_fb_helper.h +header-test- += drm/drm_fixed.h +header-test- += drm/drm_format_helper.h +header-test- += drm/drm_lease.h +header-test- += drm/drm_legacy.h +header-test- += drm/drm_panel.h +header-test- += drm/drm_plane_helper.h +header-test- += drm/drm_rect.h +header-test- += drm/i915_component.h +header-test- += drm/intel-gtt.h +header-test- += drm/tinydrm/tinydrm-helpers.h +header-test- += drm/ttm/ttm_debug.h +header-test- += keys/asymmetric-parser.h +header-test- += keys/asymmetric-subtype.h +header-test- += keys/asymmetric-type.h +header-test- += keys/big_key-type.h +header-test- += keys/request_key_auth-type.h +header-test- += keys/trusted.h +header-test- += kvm/arm_arch_timer.h +header-test- += kvm/arm_pmu.h +header-test-$(CONFIG_ARM) += kvm/arm_psci.h +header-test-$(CONFIG_ARM64) += kvm/arm_psci.h +header-test- += kvm/arm_vgic.h +header-test- += linux/8250_pci.h +header-test- += linux/a.out.h +header-test- += linux/adxl.h +header-test- += linux/agpgart.h +header-test- += linux/alcor_pci.h +header-test- += linux/amba/clcd.h +header-test- += linux/amba/pl080.h +header-test- += linux/amd-iommu.h +header-test-$(CONFIG_ARM) += linux/arm-cci.h +header-test-$(CONFIG_ARM64) += linux/arm-cci.h +header-test- += linux/arm_sdei.h +header-test- += linux/asn1_decoder.h +header-test- += linux/ata_platform.h +header-test- += linux/ath9k_platform.h +header-test- += linux/atm_tcp.h +header-test- += linux/atomic-fallback.h +header-test- += linux/avf/virtchnl.h +header-test- += linux/bcm47xx_sprom.h +header-test- += 
linux/bcma/bcma_driver_gmac_cmn.h +header-test- += linux/bcma/bcma_driver_mips.h +header-test- += linux/bcma/bcma_driver_pci.h +header-test- += linux/bcma/bcma_driver_pcie2.h +header-test- += linux/bit_spinlock.h +header-test- += linux/blk-mq-rdma.h +header-test- += linux/blk-mq.h +header-test- += linux/blktrace_api.h +header-test- += linux/blockgroup_lock.h +header-test- += linux/bma150.h +header-test- += linux/bpf_lirc.h +header-test- += linux/bpf_types.h +header-test- += linux/bsg-lib.h +header-test- += linux/bsg.h +header-test- += linux/btf.h +header-test- += linux/btree-128.h +header-test- += linux/btree-type.h +header-test-$(CONFIG_CPU_BIG_ENDIAN) += linux/byteorder/big_endian.h +header-test- += linux/byteorder/generic.h +header-test-$(CONFIG_CPU_LITTLE_ENDIAN) += linux/byteorder/little_endian.h +header-test- += linux/c2port.h +header-test- += linux/can/dev/peak_canfd.h +header-test- += linux/can/platform/cc770.h +header-test- += linux/can/platform/sja1000.h +header-test- += linux/ceph/ceph_features.h +header-test- += linux/ceph/ceph_frag.h +header-test- += linux/ceph/ceph_fs.h +header-test- += linux/ceph/debugfs.h +header-test- += linux/ceph/msgr.h +header-test- += linux/ceph/rados.h +header-test- += linux/cgroup_subsys.h +header-test- += linux/clk/sunxi-ng.h +header-test- += linux/clk/ti.h +header-test- += linux/cn_proc.h +header-test- += linux/coda_psdev.h +header-test- += linux/compaction.h +header-test- += linux/console_struct.h +header-test- += linux/count_zeros.h +header-test- += linux/cs5535.h +header-test- += linux/cuda.h +header-test- += linux/cyclades.h +header-test- += linux/dcookies.h +header-test- += linux/delayacct.h +header-test- += linux/delayed_call.h +header-test- += linux/device-mapper.h +header-test- += linux/devpts_fs.h +header-test- += linux/dio.h +header-test- += linux/dirent.h +header-test- += linux/dlm_plock.h +header-test- += linux/dm-dirty-log.h +header-test- += linux/dm-region-hash.h +header-test- += linux/dma-debug.h +header-test- += linux/dma/mmp-pdma.h +header-test- += linux/dma/sprd-dma.h +header-test- += linux/dns_resolver.h +header-test- += linux/drbd_genl.h +header-test- += linux/drbd_genl_api.h +header-test- += linux/dw_apb_timer.h +header-test- += linux/dynamic_debug.h +header-test- += linux/dynamic_queue_limits.h +header-test- += linux/ecryptfs.h +header-test- += linux/edma.h +header-test- += linux/eeprom_93cx6.h +header-test- += linux/efs_vh.h +header-test- += linux/elevator.h +header-test- += linux/elfcore-compat.h +header-test- += linux/error-injection.h +header-test- += linux/errseq.h +header-test- += linux/eventpoll.h +header-test- += linux/ext2_fs.h +header-test- += linux/f75375s.h +header-test- += linux/falloc.h +header-test- += linux/fault-inject.h +header-test- += linux/fbcon.h +header-test- += linux/firmware/intel/stratix10-svc-client.h +header-test- += linux/firmware/meson/meson_sm.h +header-test- += linux/firmware/trusted_foundations.h +header-test- += linux/firmware/xlnx-zynqmp.h +header-test- += linux/fixp-arith.h +header-test- += linux/flat.h +header-test- += linux/fs_types.h +header-test- += linux/fs_uart_pd.h +header-test- += linux/fsi-occ.h +header-test- += linux/fsi-sbefifo.h +header-test- += linux/fsl/bestcomm/ata.h +header-test- += linux/fsl/bestcomm/bestcomm.h +header-test- += linux/fsl/bestcomm/bestcomm_priv.h +header-test- += linux/fsl/bestcomm/fec.h +header-test- += linux/fsl/bestcomm/gen_bd.h +header-test- += linux/fsl/bestcomm/sram.h +header-test- += linux/fsl_hypervisor.h +header-test- += linux/fsldma.h +header-test- 
+= linux/ftrace_irq.h +header-test- += linux/gameport.h +header-test- += linux/genl_magic_func.h +header-test- += linux/genl_magic_struct.h +header-test- += linux/gpio/aspeed.h +header-test- += linux/gpio/gpio-reg.h +header-test- += linux/hid-debug.h +header-test- += linux/hiddev.h +header-test- += linux/hippidevice.h +header-test- += linux/hmm.h +header-test- += linux/hp_sdc.h +header-test- += linux/huge_mm.h +header-test- += linux/hugetlb_cgroup.h +header-test- += linux/hugetlb_inline.h +header-test- += linux/hwmon-vid.h +header-test- += linux/hyperv.h +header-test- += linux/i2c-algo-pca.h +header-test- += linux/i2c-algo-pcf.h +header-test- += linux/i3c/ccc.h +header-test- += linux/i3c/device.h +header-test- += linux/i3c/master.h +header-test- += linux/i8042.h +header-test- += linux/ide.h +header-test- += linux/idle_inject.h +header-test- += linux/if_frad.h +header-test- += linux/if_rmnet.h +header-test- += linux/if_tap.h +header-test- += linux/iio/accel/kxcjk_1013.h +header-test- += linux/iio/adc/ad_sigma_delta.h +header-test- += linux/iio/buffer-dma.h +header-test- += linux/iio/buffer_impl.h +header-test- += linux/iio/common/st_sensors.h +header-test- += linux/iio/common/st_sensors_i2c.h +header-test- += linux/iio/common/st_sensors_spi.h +header-test- += linux/iio/dac/ad5421.h +header-test- += linux/iio/dac/ad5504.h +header-test- += linux/iio/dac/ad5791.h +header-test- += linux/iio/dac/max517.h +header-test- += linux/iio/dac/mcp4725.h +header-test- += linux/iio/frequency/ad9523.h +header-test- += linux/iio/frequency/adf4350.h +header-test- += linux/iio/hw-consumer.h +header-test- += linux/iio/imu/adis.h +header-test- += linux/iio/sysfs.h +header-test- += linux/iio/timer/stm32-timer-trigger.h +header-test- += linux/iio/trigger.h +header-test- += linux/iio/triggered_event.h +header-test- += linux/imx-media.h +header-test- += linux/inet_diag.h +header-test- += linux/init_ohci1394_dma.h +header-test- += linux/initrd.h +header-test- += linux/input/adp5589.h +header-test- += linux/input/bu21013.h +header-test- += linux/input/cma3000.h +header-test- += linux/input/kxtj9.h +header-test- += linux/input/lm8333.h +header-test- += linux/input/sparse-keymap.h +header-test- += linux/input/touchscreen.h +header-test- += linux/input/tps6507x-ts.h +header-test-$(CONFIG_X86) += linux/intel-iommu.h +header-test- += linux/intel-ish-client-if.h +header-test- += linux/intel-pti.h +header-test- += linux/intel-svm.h +header-test- += linux/interconnect-provider.h +header-test- += linux/ioc3.h +header-test-$(CONFIG_BLOCK) += linux/iomap.h +header-test- += linux/ipack.h +header-test- += linux/irq_cpustat.h +header-test- += linux/irq_poll.h +header-test- += linux/irqchip/arm-gic-v3.h +header-test- += linux/irqchip/arm-gic-v4.h +header-test- += linux/irqchip/irq-madera.h +header-test- += linux/irqchip/irq-sa11x0.h +header-test- += linux/irqchip/mxs.h +header-test- += linux/irqchip/versatile-fpga.h +header-test- += linux/irqdesc.h +header-test- += linux/irqflags.h +header-test- += linux/iscsi_boot_sysfs.h +header-test- += linux/isdn/capiutil.h +header-test- += linux/isdn/hdlc.h +header-test- += linux/isdn_ppp.h +header-test- += linux/jbd2.h +header-test- += linux/jump_label.h +header-test- += linux/jump_label_ratelimit.h +header-test- += linux/jz4740-adc.h +header-test- += linux/kasan.h +header-test- += linux/kcore.h +header-test- += linux/kdev_t.h +header-test- += linux/kernelcapi.h +header-test- += linux/khugepaged.h +header-test- += linux/kobj_map.h +header-test- += linux/kobject_ns.h +header-test- += 
linux/kvm_host.h +header-test- += linux/kvm_irqfd.h +header-test- += linux/kvm_para.h +header-test- += linux/lantiq.h +header-test- += linux/lapb.h +header-test- += linux/latencytop.h +header-test- += linux/led-lm3530.h +header-test- += linux/leds-bd2802.h +header-test- += linux/leds-lp3944.h +header-test- += linux/leds-lp3952.h +header-test- += linux/leds_pwm.h +header-test- += linux/libata.h +header-test- += linux/license.h +header-test- += linux/lightnvm.h +header-test- += linux/lis3lv02d.h +header-test- += linux/list_bl.h +header-test- += linux/list_lru.h +header-test- += linux/list_nulls.h +header-test- += linux/lockd/share.h +header-test- += linux/lzo.h +header-test- += linux/mailbox/zynqmp-ipi-message.h +header-test- += linux/maple.h +header-test- += linux/mbcache.h +header-test- += linux/mbus.h +header-test- += linux/mc146818rtc.h +header-test- += linux/mc6821.h +header-test- += linux/mdev.h +header-test- += linux/mem_encrypt.h +header-test- += linux/memfd.h +header-test- += linux/mfd/88pm80x.h +header-test- += linux/mfd/88pm860x.h +header-test- += linux/mfd/abx500/ab8500-bm.h +header-test- += linux/mfd/abx500/ab8500-gpadc.h +header-test- += linux/mfd/adp5520.h +header-test- += linux/mfd/arizona/pdata.h +header-test- += linux/mfd/as3711.h +header-test- += linux/mfd/as3722.h +header-test- += linux/mfd/da903x.h +header-test- += linux/mfd/da9055/pdata.h +header-test- += linux/mfd/db8500-prcmu.h +header-test- += linux/mfd/dbx500-prcmu.h +header-test- += linux/mfd/dln2.h +header-test- += linux/mfd/dm355evm_msp.h +header-test- += linux/mfd/ds1wm.h +header-test- += linux/mfd/ezx-pcap.h +header-test- += linux/mfd/intel_msic.h +header-test- += linux/mfd/janz.h +header-test- += linux/mfd/kempld.h +header-test- += linux/mfd/lm3533.h +header-test- += linux/mfd/lp8788-isink.h +header-test- += linux/mfd/lpc_ich.h +header-test- += linux/mfd/max77693.h +header-test- += linux/mfd/max8998-private.h +header-test- += linux/mfd/menelaus.h +header-test- += linux/mfd/mt6397/core.h +header-test- += linux/mfd/palmas.h +header-test- += linux/mfd/pcf50633/backlight.h +header-test- += linux/mfd/rc5t583.h +header-test- += linux/mfd/retu.h +header-test- += linux/mfd/samsung/core.h +header-test- += linux/mfd/si476x-platform.h +header-test- += linux/mfd/si476x-reports.h +header-test- += linux/mfd/sky81452.h +header-test- += linux/mfd/smsc.h +header-test- += linux/mfd/sta2x11-mfd.h +header-test- += linux/mfd/stmfx.h +header-test- += linux/mfd/tc3589x.h +header-test- += linux/mfd/tc6387xb.h +header-test- += linux/mfd/tc6393xb.h +header-test- += linux/mfd/tps65090.h +header-test- += linux/mfd/tps6586x.h +header-test- += linux/mfd/tps65910.h +header-test- += linux/mfd/tps80031.h +header-test- += linux/mfd/ucb1x00.h +header-test- += linux/mfd/viperboard.h +header-test- += linux/mfd/wm831x/core.h +header-test- += linux/mfd/wm831x/otp.h +header-test- += linux/mfd/wm831x/pdata.h +header-test- += linux/mfd/wm8994/core.h +header-test- += linux/mfd/wm8994/pdata.h +header-test- += linux/mlx4/doorbell.h +header-test- += linux/mlx4/srq.h +header-test- += linux/mlx5/doorbell.h +header-test- += linux/mlx5/eq.h +header-test- += linux/mlx5/fs_helpers.h +header-test- += linux/mlx5/mlx5_ifc.h +header-test- += linux/mlx5/mlx5_ifc_fpga.h +header-test- += linux/mm-arch-hooks.h +header-test- += linux/mm_inline.h +header-test- += linux/mmu_context.h +header-test- += linux/mpage.h +header-test- += linux/mtd/bbm.h +header-test- += linux/mtd/cfi.h +header-test- += linux/mtd/doc2000.h +header-test- += linux/mtd/flashchip.h +header-test- += 
linux/mtd/ftl.h +header-test- += linux/mtd/gen_probe.h +header-test- += linux/mtd/jedec.h +header-test- += linux/mtd/nand_bch.h +header-test- += linux/mtd/nand_ecc.h +header-test- += linux/mtd/ndfc.h +header-test- += linux/mtd/onenand.h +header-test- += linux/mtd/pismo.h +header-test- += linux/mtd/plat-ram.h +header-test- += linux/mtd/spi-nor.h +header-test- += linux/mv643xx.h +header-test- += linux/mv643xx_eth.h +header-test- += linux/mvebu-pmsu.h +header-test- += linux/mxm-wmi.h +header-test- += linux/n_r3964.h +header-test- += linux/ndctl.h +header-test- += linux/nfs.h +header-test- += linux/nfs_fs_i.h +header-test- += linux/nfs_fs_sb.h +header-test- += linux/nfs_page.h +header-test- += linux/nfs_xdr.h +header-test- += linux/nfsacl.h +header-test- += linux/nl802154.h +header-test- += linux/ns_common.h +header-test- += linux/nsc_gpio.h +header-test- += linux/ntb_transport.h +header-test- += linux/nubus.h +header-test- += linux/nvme-fc-driver.h +header-test- += linux/nvme-fc.h +header-test- += linux/nvme-rdma.h +header-test- += linux/nvram.h +header-test- += linux/objagg.h +header-test- += linux/of_clk.h +header-test- += linux/of_net.h +header-test- += linux/of_pdt.h +header-test- += linux/olpc-ec.h +header-test- += linux/omap-dma.h +header-test- += linux/omap-dmaengine.h +header-test- += linux/omap-gpmc.h +header-test- += linux/omap-iommu.h +header-test- += linux/omap-mailbox.h +header-test- += linux/once.h +header-test- += linux/osq_lock.h +header-test- += linux/overflow.h +header-test- += linux/page-flags-layout.h +header-test- += linux/page-isolation.h +header-test- += linux/page_ext.h +header-test- += linux/page_owner.h +header-test- += linux/parport_pc.h +header-test- += linux/parser.h +header-test- += linux/pci-acpi.h +header-test- += linux/pci-dma-compat.h +header-test- += linux/pci_hotplug.h +header-test- += linux/pda_power.h +header-test- += linux/perf/arm_pmu.h +header-test- += linux/perf_regs.h +header-test- += linux/phy/omap_control_phy.h +header-test- += linux/phy/tegra/xusb.h +header-test- += linux/phy/ulpi_phy.h +header-test- += linux/phy_fixed.h +header-test- += linux/pipe_fs_i.h +header-test- += linux/pktcdvd.h +header-test- += linux/pl320-ipc.h +header-test- += linux/pl353-smc.h +header-test- += linux/platform_data/ad5449.h +header-test- += linux/platform_data/ad5755.h +header-test- += linux/platform_data/ad7266.h +header-test- += linux/platform_data/ad7291.h +header-test- += linux/platform_data/ad7298.h +header-test- += linux/platform_data/ad7303.h +header-test- += linux/platform_data/ad7791.h +header-test- += linux/platform_data/ad7793.h +header-test- += linux/platform_data/ad7887.h +header-test- += linux/platform_data/adau17x1.h +header-test- += linux/platform_data/adp8870.h +header-test- += linux/platform_data/ads1015.h +header-test- += linux/platform_data/ads7828.h +header-test- += linux/platform_data/apds990x.h +header-test- += linux/platform_data/arm-ux500-pm.h +header-test- += linux/platform_data/asoc-s3c.h +header-test- += linux/platform_data/at91_adc.h +header-test- += linux/platform_data/ata-pxa.h +header-test- += linux/platform_data/atmel.h +header-test- += linux/platform_data/bh1770glc.h +header-test- += linux/platform_data/brcmfmac.h +header-test- += linux/platform_data/cros_ec_commands.h +header-test- += linux/platform_data/clk-u300.h +header-test- += linux/platform_data/cyttsp4.h +header-test- += linux/platform_data/dma-coh901318.h +header-test- += linux/platform_data/dma-imx-sdma.h +header-test- += linux/platform_data/dma-mcf-edma.h +header-test- += 
linux/platform_data/dma-s3c24xx.h +header-test- += linux/platform_data/dmtimer-omap.h +header-test- += linux/platform_data/dsa.h +header-test- += linux/platform_data/edma.h +header-test- += linux/platform_data/elm.h +header-test- += linux/platform_data/emif_plat.h +header-test- += linux/platform_data/fsa9480.h +header-test- += linux/platform_data/g762.h +header-test- += linux/platform_data/gpio-ath79.h +header-test- += linux/platform_data/gpio-davinci.h +header-test- += linux/platform_data/gpio-dwapb.h +header-test- += linux/platform_data/gpio-htc-egpio.h +header-test- += linux/platform_data/gpmc-omap.h +header-test- += linux/platform_data/hsmmc-omap.h +header-test- += linux/platform_data/hwmon-s3c.h +header-test- += linux/platform_data/i2c-davinci.h +header-test- += linux/platform_data/i2c-imx.h +header-test- += linux/platform_data/i2c-mux-reg.h +header-test- += linux/platform_data/i2c-ocores.h +header-test- += linux/platform_data/i2c-xiic.h +header-test- += linux/platform_data/intel-spi.h +header-test- += linux/platform_data/invensense_mpu6050.h +header-test- += linux/platform_data/irda-pxaficp.h +header-test- += linux/platform_data/irda-sa11x0.h +header-test- += linux/platform_data/itco_wdt.h +header-test- += linux/platform_data/jz4740/jz4740_nand.h +header-test- += linux/platform_data/keyboard-pxa930_rotary.h +header-test- += linux/platform_data/keypad-omap.h +header-test- += linux/platform_data/leds-lp55xx.h +header-test- += linux/platform_data/leds-omap.h +header-test- += linux/platform_data/lp855x.h +header-test- += linux/platform_data/lp8727.h +header-test- += linux/platform_data/max197.h +header-test- += linux/platform_data/max3421-hcd.h +header-test- += linux/platform_data/max732x.h +header-test- += linux/platform_data/mcs.h +header-test- += linux/platform_data/mdio-bcm-unimac.h +header-test- += linux/platform_data/mdio-gpio.h +header-test- += linux/platform_data/media/si4713.h +header-test- += linux/platform_data/mlxreg.h +header-test- += linux/platform_data/mmc-omap.h +header-test- += linux/platform_data/mmc-sdhci-s3c.h +header-test- += linux/platform_data/mmp_audio.h +header-test- += linux/platform_data/mtd-orion_nand.h +header-test- += linux/platform_data/mv88e6xxx.h +header-test- += linux/platform_data/net-cw1200.h +header-test- += linux/platform_data/omap-twl4030.h +header-test- += linux/platform_data/omapdss.h +header-test- += linux/platform_data/pcf857x.h +header-test- += linux/platform_data/pixcir_i2c_ts.h +header-test- += linux/platform_data/pwm_omap_dmtimer.h +header-test- += linux/platform_data/pxa2xx_udc.h +header-test- += linux/platform_data/pxa_sdhci.h +header-test- += linux/platform_data/remoteproc-omap.h +header-test- += linux/platform_data/sa11x0-serial.h +header-test- += linux/platform_data/sc18is602.h +header-test- += linux/platform_data/sdhci-pic32.h +header-test- += linux/platform_data/serial-sccnxp.h +header-test- += linux/platform_data/sht3x.h +header-test- += linux/platform_data/shtc1.h +header-test- += linux/platform_data/si5351.h +header-test- += linux/platform_data/sky81452-backlight.h +header-test- += linux/platform_data/spi-davinci.h +header-test- += linux/platform_data/spi-ep93xx.h +header-test- += linux/platform_data/spi-mt65xx.h +header-test- += linux/platform_data/st_sensors_pdata.h +header-test- += linux/platform_data/ti-sysc.h +header-test- += linux/platform_data/timer-ixp4xx.h +header-test- += linux/platform_data/touchscreen-s3c2410.h +header-test- += linux/platform_data/tsc2007.h +header-test- += linux/platform_data/tsl2772.h +header-test- += 
linux/platform_data/uio_pruss.h +header-test- += linux/platform_data/usb-davinci.h +header-test- += linux/platform_data/usb-ehci-mxc.h +header-test- += linux/platform_data/usb-ehci-orion.h +header-test- += linux/platform_data/usb-mx2.h +header-test- += linux/platform_data/usb-ohci-s3c2410.h +header-test- += linux/platform_data/usb-omap.h +header-test- += linux/platform_data/usb-s3c2410_udc.h +header-test- += linux/platform_data/usb3503.h +header-test- += linux/platform_data/ux500_wdt.h +header-test- += linux/platform_data/video-clcd-versatile.h +header-test- += linux/platform_data/video-imxfb.h +header-test- += linux/platform_data/video-pxafb.h +header-test- += linux/platform_data/video_s3c.h +header-test- += linux/platform_data/voltage-omap.h +header-test- += linux/platform_data/x86/apple.h +header-test- += linux/platform_data/x86/clk-pmc-atom.h +header-test- += linux/platform_data/x86/pmc_atom.h +header-test- += linux/platform_data/xtalk-bridge.h +header-test- += linux/pm2301_charger.h +header-test- += linux/pm_wakeirq.h +header-test- += linux/pm_wakeup.h +header-test- += linux/pmbus.h +header-test- += linux/pmu.h +header-test- += linux/posix_acl.h +header-test- += linux/posix_acl_xattr.h +header-test- += linux/power/ab8500.h +header-test- += linux/power/bq27xxx_battery.h +header-test- += linux/power/generic-adc-battery.h +header-test- += linux/power/jz4740-battery.h +header-test- += linux/power/max17042_battery.h +header-test- += linux/power/max8903_charger.h +header-test- += linux/ppp-comp.h +header-test- += linux/pps-gpio.h +header-test- += linux/pr.h +header-test- += linux/proc_ns.h +header-test- += linux/processor.h +header-test- += linux/psi.h +header-test- += linux/psp-sev.h +header-test- += linux/pstore.h +header-test- += linux/ptr_ring.h +header-test- += linux/ptrace.h +header-test- += linux/qcom-geni-se.h +header-test- += linux/qed/eth_common.h +header-test- += linux/qed/fcoe_common.h +header-test- += linux/qed/iscsi_common.h +header-test- += linux/qed/iwarp_common.h +header-test- += linux/qed/qed_eth_if.h +header-test- += linux/qed/qed_fcoe_if.h +header-test- += linux/qed/rdma_common.h +header-test- += linux/qed/storage_common.h +header-test- += linux/qed/tcp_common.h +header-test- += linux/qnx6_fs.h +header-test- += linux/quicklist.h +header-test- += linux/ramfs.h +header-test- += linux/range.h +header-test- += linux/rcu_node_tree.h +header-test- += linux/rculist_bl.h +header-test- += linux/rculist_nulls.h +header-test- += linux/rcutiny.h +header-test- += linux/rcutree.h +header-test- += linux/reboot-mode.h +header-test- += linux/regulator/fixed.h +header-test- += linux/regulator/gpio-regulator.h +header-test- += linux/regulator/max8973-regulator.h +header-test- += linux/regulator/of_regulator.h +header-test- += linux/regulator/tps51632-regulator.h +header-test- += linux/regulator/tps62360.h +header-test- += linux/regulator/tps6507x.h +header-test- += linux/regulator/userspace-consumer.h +header-test- += linux/remoteproc/st_slim_rproc.h +header-test- += linux/reset/socfpga.h +header-test- += linux/reset/sunxi.h +header-test- += linux/rtc/m48t59.h +header-test- += linux/rtc/rtc-omap.h +header-test- += linux/rtc/sirfsoc_rtciobrg.h +header-test- += linux/rwlock.h +header-test- += linux/rwlock_types.h +header-test- += linux/scc.h +header-test- += linux/sched/deadline.h +header-test- += linux/sched/smt.h +header-test- += linux/sched/sysctl.h +header-test- += linux/sched_clock.h +header-test- += linux/scpi_protocol.h +header-test- += linux/scx200_gpio.h +header-test- += 
linux/seccomp.h +header-test- += linux/sed-opal.h +header-test- += linux/seg6_iptunnel.h +header-test- += linux/selection.h +header-test- += linux/set_memory.h +header-test- += linux/shrinker.h +header-test- += linux/sirfsoc_dma.h +header-test- += linux/skb_array.h +header-test- += linux/slab_def.h +header-test- += linux/slub_def.h +header-test- += linux/sm501.h +header-test- += linux/smc91x.h +header-test- += linux/static_key.h +header-test- += linux/soc/actions/owl-sps.h +header-test- += linux/soc/amlogic/meson-canvas.h +header-test- += linux/soc/brcmstb/brcmstb.h +header-test- += linux/soc/ixp4xx/npe.h +header-test- += linux/soc/mediatek/infracfg.h +header-test- += linux/soc/qcom/smd-rpm.h +header-test- += linux/soc/qcom/smem.h +header-test- += linux/soc/qcom/smem_state.h +header-test- += linux/soc/qcom/wcnss_ctrl.h +header-test- += linux/soc/renesas/rcar-rst.h +header-test- += linux/soc/samsung/exynos-pmu.h +header-test- += linux/soc/sunxi/sunxi_sram.h +header-test- += linux/soc/ti/ti-msgmgr.h +header-test- += linux/soc/ti/ti_sci_inta_msi.h +header-test- += linux/soc/ti/ti_sci_protocol.h +header-test- += linux/soundwire/sdw.h +header-test- += linux/soundwire/sdw_intel.h +header-test- += linux/soundwire/sdw_type.h +header-test- += linux/spi/ad7877.h +header-test- += linux/spi/ads7846.h +header-test- += linux/spi/at86rf230.h +header-test- += linux/spi/ds1305.h +header-test- += linux/spi/libertas_spi.h +header-test- += linux/spi/lms283gf05.h +header-test- += linux/spi/max7301.h +header-test- += linux/spi/mcp23s08.h +header-test- += linux/spi/rspi.h +header-test- += linux/spi/s3c24xx.h +header-test- += linux/spi/sh_msiof.h +header-test- += linux/spi/spi-fsl-dspi.h +header-test- += linux/spi/spi_bitbang.h +header-test- += linux/spi/spi_gpio.h +header-test- += linux/spi/xilinx_spi.h +header-test- += linux/spinlock_api_smp.h +header-test- += linux/spinlock_api_up.h +header-test- += linux/spinlock_types.h +header-test- += linux/splice.h +header-test- += linux/sram.h +header-test- += linux/srcutiny.h +header-test- += linux/srcutree.h +header-test- += linux/ssb/ssb_driver_chipcommon.h +header-test- += linux/ssb/ssb_driver_extif.h +header-test- += linux/ssb/ssb_driver_mips.h +header-test- += linux/ssb/ssb_driver_pci.h +header-test- += linux/ssbi.h +header-test- += linux/stackdepot.h +header-test- += linux/stmp3xxx_rtc_wdt.h +header-test- += linux/string_helpers.h +header-test- += linux/sungem_phy.h +header-test- += linux/sunrpc/msg_prot.h +header-test- += linux/sunrpc/rpc_pipe_fs.h +header-test- += linux/sunrpc/xprtmultipath.h +header-test- += linux/sunrpc/xprtsock.h +header-test- += linux/sunxi-rsb.h +header-test- += linux/svga.h +header-test- += linux/sw842.h +header-test- += linux/swapfile.h +header-test- += linux/swapops.h +header-test- += linux/swiotlb.h +header-test- += linux/sysv_fs.h +header-test- += linux/t10-pi.h +header-test- += linux/task_io_accounting.h +header-test- += linux/tick.h +header-test- += linux/timb_dma.h +header-test- += linux/timekeeping.h +header-test- += linux/timekeeping32.h +header-test- += linux/ts-nbus.h +header-test- += linux/tsacct_kern.h +header-test- += linux/tty_flip.h +header-test- += linux/tty_ldisc.h +header-test- += linux/ucb1400.h +header-test- += linux/usb/association.h +header-test- += linux/usb/cdc-wdm.h +header-test- += linux/usb/cdc_ncm.h +header-test- += linux/usb/ezusb.h +header-test- += linux/usb/gadget_configfs.h +header-test- += linux/usb/gpio_vbus.h +header-test- += linux/usb/hcd.h +header-test- += linux/usb/iowarrior.h +header-test- += 
linux/usb/irda.h +header-test- += linux/usb/isp116x.h +header-test- += linux/usb/isp1362.h +header-test- += linux/usb/musb.h +header-test- += linux/usb/net2280.h +header-test- += linux/usb/ohci_pdriver.h +header-test- += linux/usb/otg-fsm.h +header-test- += linux/usb/pd_ado.h +header-test- += linux/usb/r8a66597.h +header-test- += linux/usb/rndis_host.h +header-test- += linux/usb/serial.h +header-test- += linux/usb/sl811.h +header-test- += linux/usb/storage.h +header-test- += linux/usb/uas.h +header-test- += linux/usb/usb338x.h +header-test- += linux/usb/usbnet.h +header-test- += linux/usb/wusb-wa.h +header-test- += linux/usb/xhci-dbgp.h +header-test- += linux/usb_usual.h +header-test- += linux/user-return-notifier.h +header-test- += linux/userfaultfd_k.h +header-test- += linux/verification.h +header-test- += linux/vgaarb.h +header-test- += linux/via_core.h +header-test- += linux/via_i2c.h +header-test- += linux/virtio_byteorder.h +header-test- += linux/virtio_ring.h +header-test- += linux/visorbus.h +header-test- += linux/vme.h +header-test- += linux/vmstat.h +header-test- += linux/vmw_vmci_api.h +header-test- += linux/vmw_vmci_defs.h +header-test- += linux/vringh.h +header-test- += linux/vt_buffer.h +header-test- += linux/zorro.h +header-test- += linux/zpool.h +header-test- += math-emu/double.h +header-test- += math-emu/op-common.h +header-test- += math-emu/quad.h +header-test- += math-emu/single.h +header-test- += math-emu/soft-fp.h +header-test- += media/davinci/dm355_ccdc.h +header-test- += media/davinci/dm644x_ccdc.h +header-test- += media/davinci/isif.h +header-test- += media/davinci/vpbe_osd.h +header-test- += media/davinci/vpbe_types.h +header-test- += media/davinci/vpif_types.h +header-test- += media/demux.h +header-test- += media/drv-intf/soc_mediabus.h +header-test- += media/dvb_net.h +header-test- += media/fwht-ctrls.h +header-test- += media/i2c/ad9389b.h +header-test- += media/i2c/adv7343.h +header-test- += media/i2c/adv7511.h +header-test- += media/i2c/adv7842.h +header-test- += media/i2c/m5mols.h +header-test- += media/i2c/mt9m032.h +header-test- += media/i2c/mt9t112.h +header-test- += media/i2c/mt9v032.h +header-test- += media/i2c/ov2659.h +header-test- += media/i2c/ov7670.h +header-test- += media/i2c/rj54n1cb0c.h +header-test- += media/i2c/saa6588.h +header-test- += media/i2c/saa7115.h +header-test- += media/i2c/sr030pc30.h +header-test- += media/i2c/tc358743.h +header-test- += media/i2c/tda1997x.h +header-test- += media/i2c/ths7303.h +header-test- += media/i2c/tvaudio.h +header-test- += media/i2c/tvp514x.h +header-test- += media/i2c/tvp7002.h +header-test- += media/i2c/wm8775.h +header-test- += media/imx.h +header-test- += media/media-dev-allocator.h +header-test- += media/mpeg2-ctrls.h +header-test- += media/rcar-fcp.h +header-test- += media/tuner-types.h +header-test- += media/tveeprom.h +header-test- += media/v4l2-flash-led-class.h +header-test- += misc/altera.h +header-test- += misc/cxl-base.h +header-test- += misc/cxllib.h +header-test- += net/9p/9p.h +header-test- += net/9p/client.h +header-test- += net/9p/transport.h +header-test- += net/af_vsock.h +header-test- += net/ax88796.h +header-test- += net/bluetooth/hci.h +header-test- += net/bluetooth/hci_core.h +header-test- += net/bluetooth/hci_mon.h +header-test- += net/bluetooth/hci_sock.h +header-test- += net/bluetooth/l2cap.h +header-test- += net/bluetooth/mgmt.h +header-test- += net/bluetooth/rfcomm.h +header-test- += net/bluetooth/sco.h +header-test- += net/bond_options.h +header-test- += net/caif/cfsrvl.h 
+header-test- += net/codel_impl.h +header-test- += net/codel_qdisc.h +header-test- += net/compat.h +header-test- += net/datalink.h +header-test- += net/dcbevent.h +header-test- += net/dcbnl.h +header-test- += net/dn_dev.h +header-test- += net/dn_fib.h +header-test- += net/dn_neigh.h +header-test- += net/dn_nsp.h +header-test- += net/dn_route.h +header-test- += net/erspan.h +header-test- += net/esp.h +header-test- += net/ethoc.h +header-test- += net/firewire.h +header-test- += net/flow_offload.h +header-test- += net/fq.h +header-test- += net/fq_impl.h +header-test- += net/garp.h +header-test- += net/gtp.h +header-test- += net/gue.h +header-test- += net/hwbm.h +header-test- += net/ila.h +header-test- += net/inet6_connection_sock.h +header-test- += net/inet_common.h +header-test- += net/inet_frag.h +header-test- += net/ip6_route.h +header-test- += net/ip_vs.h +header-test- += net/ipcomp.h +header-test- += net/ipconfig.h +header-test- += net/iucv/af_iucv.h +header-test- += net/iucv/iucv.h +header-test- += net/lapb.h +header-test- += net/llc_c_ac.h +header-test- += net/llc_c_st.h +header-test- += net/llc_s_ac.h +header-test- += net/llc_s_ev.h +header-test- += net/llc_s_st.h +header-test- += net/mpls_iptunnel.h +header-test- += net/mrp.h +header-test- += net/ncsi.h +header-test- += net/netevent.h +header-test- += net/netns/can.h +header-test- += net/netns/generic.h +header-test- += net/netns/ieee802154_6lowpan.h +header-test- += net/netns/ipv4.h +header-test- += net/netns/ipv6.h +header-test- += net/netns/mpls.h +header-test- += net/netns/nftables.h +header-test- += net/netns/sctp.h +header-test- += net/netrom.h +header-test- += net/p8022.h +header-test- += net/phonet/pep.h +header-test- += net/phonet/phonet.h +header-test- += net/phonet/pn_dev.h +header-test- += net/pptp.h +header-test- += net/psample.h +header-test- += net/psnap.h +header-test- += net/regulatory.h +header-test- += net/rose.h +header-test- += net/sctp/auth.h +header-test- += net/sctp/stream_interleave.h +header-test- += net/sctp/stream_sched.h +header-test- += net/sctp/tsnmap.h +header-test- += net/sctp/ulpevent.h +header-test- += net/sctp/ulpqueue.h +header-test- += net/secure_seq.h +header-test- += net/smc.h +header-test- += net/stp.h +header-test- += net/transp_v6.h +header-test- += net/tun_proto.h +header-test- += net/udplite.h +header-test- += net/xdp.h +header-test- += net/xdp_priv.h +header-test- += pcmcia/cistpl.h +header-test- += pcmcia/ds.h +header-test- += rdma/tid_rdma_defs.h +header-test- += scsi/fc/fc_encaps.h +header-test- += scsi/fc/fc_fc2.h +header-test- += scsi/fc/fc_fcoe.h +header-test- += scsi/fc/fc_fip.h +header-test- += scsi/fc_encode.h +header-test- += scsi/fc_frame.h +header-test- += scsi/iser.h +header-test- += scsi/libfc.h +header-test- += scsi/libfcoe.h +header-test- += scsi/libsas.h +header-test- += scsi/sas_ata.h +header-test- += scsi/scsi_cmnd.h +header-test- += scsi/scsi_dbg.h +header-test- += scsi/scsi_device.h +header-test- += scsi/scsi_dh.h +header-test- += scsi/scsi_eh.h +header-test- += scsi/scsi_host.h +header-test- += scsi/scsi_ioctl.h +header-test- += scsi/scsi_request.h +header-test- += scsi/scsi_tcq.h +header-test- += scsi/scsi_transport.h +header-test- += scsi/scsi_transport_fc.h +header-test- += scsi/scsi_transport_sas.h +header-test- += scsi/scsi_transport_spi.h +header-test- += scsi/scsi_transport_srp.h +header-test- += scsi/scsicam.h +header-test- += scsi/sg.h +header-test- += soc/arc/aux.h +header-test- += soc/arc/mcip.h +header-test- += soc/arc/timers.h +header-test- += 
soc/brcmstb/common.h +header-test- += soc/fsl/bman.h +header-test- += soc/fsl/qe/qe.h +header-test- += soc/fsl/qe/qe_ic.h +header-test- += soc/fsl/qe/qe_tdm.h +header-test- += soc/fsl/qe/ucc.h +header-test- += soc/fsl/qe/ucc_fast.h +header-test- += soc/fsl/qe/ucc_slow.h +header-test- += soc/fsl/qman.h +header-test- += soc/nps/common.h +header-test-$(CONFIG_ARC) += soc/nps/mtm.h +header-test- += soc/qcom/cmd-db.h +header-test- += soc/qcom/rpmh.h +header-test- += soc/qcom/tcs.h +header-test- += soc/tegra/ahb.h +header-test- += soc/tegra/bpmp-abi.h +header-test- += soc/tegra/common.h +header-test- += soc/tegra/flowctrl.h +header-test- += soc/tegra/fuse.h +header-test- += soc/tegra/mc.h +header-test- += sound/ac97/compat.h +header-test- += sound/aci.h +header-test- += sound/ad1843.h +header-test- += sound/adau1373.h +header-test- += sound/ak4113.h +header-test- += sound/ak4114.h +header-test- += sound/ak4117.h +header-test- += sound/cs35l33.h +header-test- += sound/cs35l34.h +header-test- += sound/cs35l35.h +header-test- += sound/cs35l36.h +header-test- += sound/cs4271.h +header-test- += sound/cs42l52.h +header-test- += sound/cs8427.h +header-test- += sound/da7218.h +header-test- += sound/da7219-aad.h +header-test- += sound/da7219.h +header-test- += sound/da9055.h +header-test- += sound/emu8000.h +header-test- += sound/emux_synth.h +header-test- += sound/hda_component.h +header-test- += sound/hda_hwdep.h +header-test- += sound/hda_i915.h +header-test- += sound/hwdep.h +header-test- += sound/i2c.h +header-test- += sound/l3.h +header-test- += sound/max98088.h +header-test- += sound/max98095.h +header-test- += sound/mixer_oss.h +header-test- += sound/omap-hdmi-audio.h +header-test- += sound/pcm_drm_eld.h +header-test- += sound/pcm_iec958.h +header-test- += sound/pcm_oss.h +header-test- += sound/pxa2xx-lib.h +header-test- += sound/rt286.h +header-test- += sound/rt298.h +header-test- += sound/rt5645.h +header-test- += sound/rt5659.h +header-test- += sound/rt5660.h +header-test- += sound/rt5665.h +header-test- += sound/rt5670.h +header-test- += sound/s3c24xx_uda134x.h +header-test- += sound/seq_device.h +header-test- += sound/seq_kernel.h +header-test- += sound/seq_midi_emul.h +header-test- += sound/seq_oss.h +header-test- += sound/soc-acpi-intel-match.h +header-test- += sound/soc-dai.h +header-test- += sound/soc-dapm.h +header-test- += sound/soc-dpcm.h +header-test- += sound/sof/control.h +header-test- += sound/sof/dai-intel.h +header-test- += sound/sof/dai.h +header-test- += sound/sof/header.h +header-test- += sound/sof/info.h +header-test- += sound/sof/pm.h +header-test- += sound/sof/stream.h +header-test- += sound/sof/topology.h +header-test- += sound/sof/trace.h +header-test- += sound/sof/xtensa.h +header-test- += sound/spear_spdif.h +header-test- += sound/sta32x.h +header-test- += sound/sta350.h +header-test- += sound/tea6330t.h +header-test- += sound/tlv320aic32x4.h +header-test- += sound/tlv320dac33-plat.h +header-test- += sound/uda134x.h +header-test- += sound/wavefront.h +header-test- += sound/wm8903.h +header-test- += sound/wm8904.h +header-test- += sound/wm8960.h +header-test- += sound/wm8962.h +header-test- += sound/wm8993.h +header-test- += sound/wm8996.h +header-test- += sound/wm9081.h +header-test- += sound/wm9090.h +header-test- += target/iscsi/iscsi_target_stat.h +header-test- += trace/bpf_probe.h +header-test- += trace/events/9p.h +header-test- += trace/events/afs.h +header-test- += trace/events/asoc.h +header-test- += trace/events/bcache.h +header-test- += trace/events/block.h 
+header-test- += trace/events/cachefiles.h +header-test- += trace/events/cgroup.h +header-test- += trace/events/clk.h +header-test- += trace/events/cma.h +header-test- += trace/events/ext4.h +header-test- += trace/events/f2fs.h +header-test- += trace/events/fs_dax.h +header-test- += trace/events/fscache.h +header-test- += trace/events/fsi.h +header-test- += trace/events/fsi_master_ast_cf.h +header-test- += trace/events/fsi_master_gpio.h +header-test- += trace/events/huge_memory.h +header-test- += trace/events/ib_mad.h +header-test- += trace/events/ib_umad.h +header-test- += trace/events/iscsi.h +header-test- += trace/events/jbd2.h +header-test- += trace/events/kvm.h +header-test- += trace/events/kyber.h +header-test- += trace/events/libata.h +header-test- += trace/events/mce.h +header-test- += trace/events/mdio.h +header-test- += trace/events/migrate.h +header-test- += trace/events/mmflags.h +header-test- += trace/events/nbd.h +header-test- += trace/events/nilfs2.h +header-test- += trace/events/pwc.h +header-test- += trace/events/rdma.h +header-test- += trace/events/rpcgss.h +header-test- += trace/events/rpcrdma.h +header-test- += trace/events/rxrpc.h +header-test- += trace/events/scsi.h +header-test- += trace/events/siox.h +header-test- += trace/events/spi.h +header-test- += trace/events/swiotlb.h +header-test- += trace/events/syscalls.h +header-test- += trace/events/target.h +header-test- += trace/events/thermal_power_allocator.h +header-test- += trace/events/timer.h +header-test- += trace/events/wbt.h +header-test- += trace/events/xen.h +header-test- += trace/perf.h +header-test- += trace/trace_events.h +header-test- += uapi/drm/vmwgfx_drm.h +header-test- += uapi/linux/a.out.h +header-test- += uapi/linux/coda.h +header-test- += uapi/linux/coda_psdev.h +header-test- += uapi/linux/errqueue.h +header-test- += uapi/linux/eventpoll.h +header-test- += uapi/linux/hdlc/ioctl.h +header-test- += uapi/linux/input.h +header-test- += uapi/linux/kvm.h +header-test- += uapi/linux/kvm_para.h +header-test- += uapi/linux/lightnvm.h +header-test- += uapi/linux/mic_common.h +header-test- += uapi/linux/mman.h +header-test- += uapi/linux/nilfs2_ondisk.h +header-test- += uapi/linux/patchkey.h +header-test- += uapi/linux/ptrace.h +header-test- += uapi/linux/scc.h +header-test- += uapi/linux/seg6_iptunnel.h +header-test- += uapi/linux/smc_diag.h +header-test- += uapi/linux/timex.h +header-test- += uapi/linux/videodev2.h +header-test- += uapi/scsi/scsi_bsg_fc.h +header-test- += uapi/sound/asound.h +header-test- += uapi/sound/sof/eq.h +header-test- += uapi/sound/sof/fw.h +header-test- += uapi/sound/sof/header.h +header-test- += uapi/sound/sof/manifest.h +header-test- += uapi/sound/sof/trace.h +header-test- += uapi/xen/evtchn.h +header-test- += uapi/xen/gntdev.h +header-test- += uapi/xen/privcmd.h +header-test- += vdso/vsyscall.h +header-test- += video/broadsheetfb.h +header-test- += video/cvisionppc.h +header-test- += video/gbe.h +header-test- += video/kyro.h +header-test- += video/maxinefb.h +header-test- += video/metronomefb.h +header-test- += video/neomagic.h +header-test- += video/of_display_timing.h +header-test- += video/omapvrfb.h +header-test- += video/s1d13xxxfb.h +header-test- += video/sstfb.h +header-test- += video/tgafb.h +header-test- += video/udlfb.h +header-test- += video/uvesafb.h +header-test- += video/vga.h +header-test- += video/w100fb.h +header-test- += xen/acpi.h +header-test- += xen/arm/hypercall.h +header-test- += xen/arm/page-coherent.h +header-test- += xen/arm/page.h +header-test- += 
xen/balloon.h +header-test- += xen/events.h +header-test- += xen/features.h +header-test- += xen/grant_table.h +header-test- += xen/hvm.h +header-test- += xen/interface/callback.h +header-test- += xen/interface/event_channel.h +header-test- += xen/interface/grant_table.h +header-test- += xen/interface/hvm/dm_op.h +header-test- += xen/interface/hvm/hvm_op.h +header-test- += xen/interface/hvm/hvm_vcpu.h +header-test- += xen/interface/hvm/params.h +header-test- += xen/interface/hvm/start_info.h +header-test- += xen/interface/io/9pfs.h +header-test- += xen/interface/io/blkif.h +header-test- += xen/interface/io/console.h +header-test- += xen/interface/io/displif.h +header-test- += xen/interface/io/fbif.h +header-test- += xen/interface/io/kbdif.h +header-test- += xen/interface/io/netif.h +header-test- += xen/interface/io/pciif.h +header-test- += xen/interface/io/protocols.h +header-test- += xen/interface/io/pvcalls.h +header-test- += xen/interface/io/ring.h +header-test- += xen/interface/io/sndif.h +header-test- += xen/interface/io/tpmif.h +header-test- += xen/interface/io/vscsiif.h +header-test- += xen/interface/io/xs_wire.h +header-test- += xen/interface/memory.h +header-test- += xen/interface/nmi.h +header-test- += xen/interface/physdev.h +header-test- += xen/interface/platform.h +header-test- += xen/interface/sched.h +header-test- += xen/interface/vcpu.h +header-test- += xen/interface/version.h +header-test- += xen/interface/xen-mca.h +header-test- += xen/interface/xen.h +header-test- += xen/interface/xenpmu.h +header-test- += xen/mem-reservation.h +header-test- += xen/page.h +header-test- += xen/platform_pci.h +header-test- += xen/swiotlb-xen.h +header-test- += xen/xen-front-pgdir-shbuf.h +header-test- += xen/xen-ops.h +header-test- += xen/xen.h +header-test- += xen/xenbus.h + +# Do not include directly +header-test- += linux/compiler-clang.h +header-test- += linux/compiler-gcc.h +header-test- += linux/patchkey.h +header-test- += linux/rwlock_api_smp.h +header-test- += linux/spinlock_types_up.h +header-test- += linux/spinlock_up.h +header-test- += linux/wimax/debug.h +header-test- += rdma/uverbs_named_ioctl.h + +# asm-generic/*.h is used by asm/*.h, and should not be included directly +header-test- += asm-generic/% uapi/asm-generic/% + +# Timestamp files touched by Kconfig +header-test- += config/% + +# Timestamp files touched by scripts/adjust_autoksyms.sh +header-test- += ksym/% + +# You could compile-test these, but maybe not so useful... +header-test- += dt-bindings/% + +# Do not test generated headers. Stale headers are often left over when you +# traverse the git history without cleaning. +header-test- += generated/% + +# The rest are compile-tested +header-test-pattern-y += */*.h */*/*.h */*/*/*.h */*/*/*/*.h diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index 16a83959e616..42d573172e53 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -95,9 +95,9 @@ #define ACPI_DEFAULT_PAGE_SIZE 4096 /* Must be power of 2 */ -/* owner_id tracking. 8 entries allows for 255 owner_ids */ +/* owner_id tracking. 
128 entries allows for 4095 owner_ids */
-#define ACPI_NUM_OWNERID_MASKS 8
+#define ACPI_NUM_OWNERID_MASKS 128
 
 /* Size of the root table array is increased by this increment */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 31b6c87d6240..175f7b40c585 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -506,13 +506,16 @@ int acpi_bus_get_status(struct acpi_device *device);
 int acpi_bus_set_power(acpi_handle handle, int state);
 const char *acpi_power_state_string(int state);
-int acpi_device_get_power(struct acpi_device *device, int *state);
 int acpi_device_set_power(struct acpi_device *device, int state);
 int acpi_bus_init_power(struct acpi_device *device);
 int acpi_device_fix_up_power(struct acpi_device *device);
 int acpi_bus_update_power(acpi_handle handle, int *state_p);
 int acpi_device_update_power(struct acpi_device *device, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
+int acpi_device_power_add_dependent(struct acpi_device *adev,
+				    struct device *dev);
+void acpi_device_power_remove_dependent(struct acpi_device *adev,
+					struct device *dev);
 
 #ifdef CONFIG_PM
 bool acpi_bus_can_wakeup(acpi_handle handle);
@@ -651,6 +654,12 @@ static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
 }
 #endif
 
+#ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT
+bool acpi_sleep_state_supported(u8 sleep_state);
+#else
+static inline bool acpi_sleep_state_supported(u8 sleep_state) { return false; }
+#endif
+
 #ifdef CONFIG_ACPI_SLEEP
 u32 acpi_target_system_state(void);
 #else
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 84f2b3642ab0..5eb175933a5b 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -12,7 +12,7 @@
 #define ACPI_MAX_STRING 80
 
 /*
- * Please update drivers/acpi/debug.c and Documentation/acpi/debug.txt
+ * Please update drivers/acpi/debug.c and Documentation/firmware-guide/acpi/debug.rst
 * if you add to this list.
 */
 
 #define ACPI_BUS_COMPONENT 0x00010000
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index d0633fc1fc15..12d8bd333fe7 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -16,8 +16,8 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
 extern bool acpi_permanent_mmap;
 
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 1b59fb61f67d..2e63b7b390f5 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -330,6 +330,7 @@ acpi_status acpi_os_enter_sleep(u8 sleep_state, u32 rega_value, u32 regb_value);
 * Debug print routines
 */
 #ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_printf
+ACPI_PRINTF_LIKE(1)
 void ACPI_INTERNAL_VAR_XFACE
 acpi_os_printf(const char *format, ...);
 #endif
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 4a8a05401fb5..e5e041413581 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION 0x20190509
+#define ACPI_CA_VERSION 0x20190816
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -297,6 +297,9 @@ ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running);
 #define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
 	ACPI_EXTERNAL_RETURN_OK(prototype)
 
+#define ACPI_HW_DEPENDENT_RETURN_UINT32(prototype) \
+	ACPI_EXTERNAL_RETURN_UINT32(prototype)
+
 #define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
 	ACPI_EXTERNAL_RETURN_VOID(prototype)
@@ -307,6 +310,9 @@ ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running);
 #define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
 	static ACPI_INLINE prototype {return(AE_OK);}
 
+#define ACPI_HW_DEPENDENT_RETURN_UINT32(prototype) \
+	static ACPI_INLINE prototype {return(0);}
+
 #define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
 	static ACPI_INLINE prototype {return;}
 
@@ -738,7 +744,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
 				u32 gpe_number,
 				acpi_event_status *event_status))
-ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
+ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index ad6892a24015..2f3f28c7cea3 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -442,8 +442,8 @@ typedef void *acpi_handle; /* Actually a ptr to a NS Node */
 
 /* Owner IDs are used to track namespace nodes for selective deletion */
 
-typedef u8 acpi_owner_id;
-#define ACPI_OWNER_ID_MAX 0xFF
+typedef u16 acpi_owner_id;
+#define ACPI_OWNER_ID_MAX 0xFFF /* 4095 possible owner IDs */
 
 #define ACPI_INTEGER_BIT_SIZE 64
 #define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */
@@ -506,7 +506,7 @@ typedef u64 acpi_integer;
 
 /* Pointer/Integer type conversions */
 
-#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) 0, (acpi_size) (i))
+#define ACPI_TO_POINTER(i) ACPI_CAST_PTR (void, (acpi_size) (i))
 #define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0)
 #define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0)
 #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
@@ -1265,12 +1265,14 @@ typedef enum {
 #define ACPI_OSI_WIN_VISTA_SP2 0x0A
 #define ACPI_OSI_WIN_7 0x0B
 #define ACPI_OSI_WIN_8 0x0C
-#define ACPI_OSI_WIN_10 0x0D
-#define ACPI_OSI_WIN_10_RS1 0x0E
-#define ACPI_OSI_WIN_10_RS2 0x0F
-#define ACPI_OSI_WIN_10_RS3 0x10
-#define ACPI_OSI_WIN_10_RS4 0x11
-#define ACPI_OSI_WIN_10_RS5 0x12
+#define ACPI_OSI_WIN_8_1 0x0D
+#define ACPI_OSI_WIN_10 0x0E
+#define ACPI_OSI_WIN_10_RS1 0x0F
+#define ACPI_OSI_WIN_10_RS2 0x10
+#define ACPI_OSI_WIN_10_RS3 0x11
+#define ACPI_OSI_WIN_10_RS4 0x12
+#define ACPI_OSI_WIN_10_RS5 0x13
+#define ACPI_OSI_WIN_10_19H1 0x14
 
 /* Definitions of getopt */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 1194a4c78d55..47805172e73d 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -4,6 +4,8 @@
 #include <linux/kernel.h>
 #include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/pm_qos.h>
 #include <linux/thermal.h>
 #include <asm/acpi.h>
@@ -230,6 +232,8 @@ struct acpi_processor {
 	struct acpi_processor_limit limit;
 	struct thermal_cooling_device *cdev;
 	struct device *dev;	/* Processor device. */
+	struct freq_qos_request perflib_req;
+	struct freq_qos_request thermal_req;
 };
 
 struct acpi_processor_errata {
@@ -296,16 +300,22 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
 /* in processor_perflib.c */
 
 #ifdef CONFIG_CPU_FREQ
-void acpi_processor_ppc_init(void);
-void acpi_processor_ppc_exit(void);
+extern bool acpi_processor_cpufreq_init;
+void acpi_processor_ignore_ppc_init(void);
+void acpi_processor_ppc_init(struct cpufreq_policy *policy);
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy);
 void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
 extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
 #else
-static inline void acpi_processor_ppc_init(void)
+static inline void acpi_processor_ignore_ppc_init(void)
 {
 	return;
 }
-static inline void acpi_processor_ppc_exit(void)
+static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy)
+{
+	return;
+}
+static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
 {
 	return;
 }
@@ -421,14 +431,14 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
 #if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
-void acpi_thermal_cpufreq_init(void);
-void acpi_thermal_cpufreq_exit(void);
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
 #else
-static inline void acpi_thermal_cpufreq_init(void)
+static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
 {
 	return;
 }
-static inline void acpi_thermal_cpufreq_exit(void)
+static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	return;
 }
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index bb6cb347018c..f6947da70d71 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -19,9 +19,24 @@
 #define p4d_alloc(mm, pgd, address) (pgd)
 #define p4d_offset(pgd, start) (pgd)
-#define p4d_none(p4d) 0
-#define p4d_bad(p4d) 0
-#define p4d_present(p4d) 1
+
+#ifndef __ASSEMBLY__
+static inline int p4d_none(p4d_t p4d)
+{
+	return 0;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+ return 0; +} + +static inline int p4d_present(p4d_t p4d) +{ + return 1; +} +#endif + #define p4d_ERROR(p4d) do { } while (0) #define p4d_clear(p4d) pgd_clear(p4d) #define p4d_val(p4d) pgd_val(p4d) diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 6f4536d70b8e..adff14fcb8e4 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -3,3 +3,5 @@ # asm headers that all architectures except um should have # (This file is not included when SRCARCH=um since UML borrows several # asm headers from the host architecutre.) + +mandatory-y += simd.h diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index d7a15096fb3b..370f01d4450f 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -10,24 +10,24 @@ #include <linux/types.h> typedef struct { - long long counter; + s64 counter; } atomic64_t; #define ATOMIC64_INIT(i) { (i) } -extern long long atomic64_read(const atomic64_t *v); -extern void atomic64_set(atomic64_t *v, long long i); +extern s64 atomic64_read(const atomic64_t *v); +extern void atomic64_set(atomic64_t *v, s64 i); #define atomic64_set_release(v, i) atomic64_set((v), (i)) #define ATOMIC64_OP(op) \ -extern void atomic64_##op(long long a, atomic64_t *v); +extern void atomic64_##op(s64 a, atomic64_t *v); #define ATOMIC64_OP_RETURN(op) \ -extern long long atomic64_##op##_return(long long a, atomic64_t *v); +extern s64 atomic64_##op##_return(s64 a, atomic64_t *v); #define ATOMIC64_FETCH_OP(op) \ -extern long long atomic64_fetch_##op(long long a, atomic64_t *v); +extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v); #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) @@ -46,11 +46,11 @@ ATOMIC64_OPS(xor) #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -extern long long atomic64_dec_if_positive(atomic64_t *v); +extern s64 atomic64_dec_if_positive(atomic64_t *v); #define atomic64_dec_if_positive atomic64_dec_if_positive -extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n); -extern long long atomic64_xchg(atomic64_t *v, long long new); -extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u); +extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); +extern s64 atomic64_xchg(atomic64_t *v, s64 new); +extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u); #define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif /* _ASM_GENERIC_ATOMIC64_H */ diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h new file mode 100644 index 000000000000..ddd1c6d9d8db --- /dev/null +++ b/include/asm-generic/bitops-instrumented.h @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This file provides wrappers with sanitizer instrumentation for bit + * operations. + * + * To use this functionality, an arch's bitops.h file needs to define each of + * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), + * arch___set_bit(), etc.). + */ +#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H +#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H + +#include <linux/kasan-checks.h> + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This is a relaxed atomic operation (no implied memory barriers). + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. 
+ */ +static inline void set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_set_bit(nr, addr); +} + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic. If it is called on the same + * region of memory concurrently, the effect may be that only one operation + * succeeds. + */ +static inline void __set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___set_bit(nr, addr); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * This is a relaxed atomic operation (no implied memory barriers). + */ +static inline void clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_clear_bit(nr, addr); +} + +/** + * __clear_bit - Clears a bit in memory + * @nr: the bit to clear + * @addr: the address to start counting from + * + * Unlike clear_bit(), this function is non-atomic. If it is called on the same + * region of memory concurrently, the effect may be that only one operation + * succeeds. + */ +static inline void __clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___clear_bit(nr, addr); +} + +/** + * clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to set + * @addr: the address to start counting from + * + * This operation is atomic and provides release barrier semantics. + */ +static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_clear_bit_unlock(nr, addr); +} + +/** + * __clear_bit_unlock - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * This is a non-atomic operation but implies a release barrier before the + * memory operation. It can be used for an unlock if no other CPUs can + * concurrently modify other bits in the word. + */ +static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___clear_bit_unlock(nr, addr); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * This is a relaxed atomic operation (no implied memory barriers). + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_change_bit(nr, addr); +} + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic. If it is called on the same + * region of memory concurrently, the effect may be that only one operation + * succeeds. + */ +static inline void __change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___change_bit(nr, addr); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This is an atomic fully-ordered operation (implied full memory barrier). 
+ */ +static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_set_bit(nr, addr); +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic. If two instances of this operation race, one + * can appear to succeed but actually fail. + */ +static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch___test_and_set_bit(nr, addr); +} + +/** + * test_and_set_bit_lock - Set a bit and return its old value, for lock + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and provides acquire barrier semantics if + * the returned value is 0. + * It can be used to implement bit locks. + */ +static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_set_bit_lock(nr, addr); +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This is an atomic fully-ordered operation (implied full memory barrier). + */ +static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_clear_bit(nr, addr); +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic. If two instances of this operation race, one + * can appear to succeed but actually fail. + */ +static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch___test_and_clear_bit(nr, addr); +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This is an atomic fully-ordered operation (implied full memory barrier). + */ +static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_change_bit(nr, addr); +} + +/** + * __test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is non-atomic. If two instances of this operation race, one + * can appear to succeed but actually fail. + */ +static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch___test_and_change_bit(nr, addr); +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline bool test_bit(long nr, const volatile unsigned long *addr) +{ + kasan_check_read(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_bit(nr, addr); +} + +#if defined(arch_clear_bit_unlock_is_negative_byte) +/** + * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom + * byte is negative, for unlock. + * @nr: the bit to clear + * @addr: the address to start counting from + * + * This operation is atomic and provides release barrier semantics. 
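Because test_and_set_bit_lock() pairs acquire semantics with the release semantics of clear_bit_unlock(), the two can back a simple one-bit lock. A minimal sketch, with EXAMPLE_LOCK_BIT and example_flags as illustrative names; real code would usually reach for the existing bit_spin_lock helpers instead.

#define EXAMPLE_LOCK_BIT	0

static unsigned long example_flags;

static void example_lock(void)
{
	/* spin until the previous owner drops the bit; acquire on success */
	while (test_and_set_bit_lock(EXAMPLE_LOCK_BIT, &example_flags))
		cpu_relax();
}

static void example_unlock(void)
{
	/* release semantics pair with the acquire in example_lock() */
	clear_bit_unlock(EXAMPLE_LOCK_BIT, &example_flags);
}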
+ * + * This is a bit of a one-trick-pony for the filemap code, which clears + * PG_locked and tests PG_waiters, + */ +static inline bool +clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_clear_bit_unlock_is_negative_byte(nr, addr); +} +/* Let everybody know we have it. */ +#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte +#endif + +#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 0e9bd9c83870..384b5c835ced 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -10,6 +10,7 @@ #define BUGFLAG_WARNING (1 << 0) #define BUGFLAG_ONCE (1 << 1) #define BUGFLAG_DONE (1 << 2) +#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */ #define BUGFLAG_TAINT(taint) ((taint) << 8) #define BUG_GET_TAINT(bug) ((bug)->flags >> 8) #endif @@ -61,18 +62,6 @@ struct bug_entry { #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) #endif -#ifdef __WARN_FLAGS -#define __WARN_TAINT(taint) __WARN_FLAGS(BUGFLAG_TAINT(taint)) -#define __WARN_ONCE_TAINT(taint) __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint)) - -#define WARN_ON_ONCE(condition) ({ \ - int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) \ - __WARN_ONCE_TAINT(TAINT_WARN); \ - unlikely(__ret_warn_on); \ -}) -#endif - /* * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report * significant kernel issues that need prompt attention if they should ever @@ -89,25 +78,27 @@ struct bug_entry { * * Use the versions with printk format strings to provide better diagnostics. */ -#ifndef __WARN_TAINT -extern __printf(3, 4) -void warn_slowpath_fmt(const char *file, const int line, - const char *fmt, ...); +#ifndef __WARN_FLAGS extern __printf(4, 5) -void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, - const char *fmt, ...); -extern void warn_slowpath_null(const char *file, const int line); -#define WANT_WARN_ON_SLOWPATH -#define __WARN() warn_slowpath_null(__FILE__, __LINE__) -#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) -#define __WARN_printf_taint(taint, arg...) \ - warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg) +void warn_slowpath_fmt(const char *file, const int line, unsigned taint, + const char *fmt, ...); +#define __WARN() __WARN_printf(TAINT_WARN, NULL) +#define __WARN_printf(taint, arg...) \ + warn_slowpath_fmt(__FILE__, __LINE__, taint, arg) #else extern __printf(1, 2) void __warn_printk(const char *fmt, ...); -#define __WARN() __WARN_TAINT(TAINT_WARN) -#define __WARN_printf(arg...) do { __warn_printk(arg); __WARN(); } while (0) -#define __WARN_printf_taint(taint, arg...) \ - do { __warn_printk(arg); __WARN_TAINT(taint); } while (0) +#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN)) +#define __WARN_printf(taint, arg...) do { \ + __warn_printk(arg); \ + __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\ + } while (0) +#define WARN_ON_ONCE(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_FLAGS(BUGFLAG_ONCE | \ + BUGFLAG_TAINT(TAINT_WARN)); \ + unlikely(__ret_warn_on); \ +}) #endif /* used internally by panic.c */ @@ -130,7 +121,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint, #define WARN(condition, format...) 
({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ - __WARN_printf(format); \ + __WARN_printf(TAINT_WARN, format); \ unlikely(__ret_warn_on); \ }) #endif @@ -138,7 +129,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint, #define WARN_TAINT(condition, taint, format...) ({ \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ - __WARN_printf_taint(taint, format); \ + __WARN_printf(taint, format); \ unlikely(__ret_warn_on); \ }) @@ -183,7 +174,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint, #endif #ifndef HAVE_ARCH_BUG_ON -#define BUG_ON(condition) do { if (condition) BUG(); } while (0) +#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) #endif #ifndef HAVE_ARCH_WARN_ON diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index 0dd47a6db2cf..a950a22c4890 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -5,24 +5,70 @@ /* Keep includes the same across arches. */ #include <linux/mm.h> +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 + /* * The cache doesn't need to be flushed when TLB entries change when * the cache is mapped to physical memory, not virtual memory */ -#define flush_cache_all() do { } while (0) -#define flush_cache_mm(mm) do { } while (0) -#define flush_cache_dup_mm(mm) do { } while (0) -#define flush_cache_range(vma, start, end) do { } while (0) -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 -#define flush_dcache_page(page) do { } while (0) -#define flush_dcache_mmap_lock(mapping) do { } while (0) -#define flush_dcache_mmap_unlock(mapping) do { } while (0) -#define flush_icache_range(start, end) do { } while (0) -#define flush_icache_page(vma,pg) do { } while (0) -#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) -#define flush_cache_vmap(start, end) do { } while (0) -#define flush_cache_vunmap(start, end) do { } while (0) +static inline void flush_cache_all(void) +{ +} + +static inline void flush_cache_mm(struct mm_struct *mm) +{ +} + +static inline void flush_cache_dup_mm(struct mm_struct *mm) +{ +} + +static inline void flush_cache_range(struct vm_area_struct *vma, + unsigned long start, + unsigned long end) +{ +} + +static inline void flush_cache_page(struct vm_area_struct *vma, + unsigned long vmaddr, + unsigned long pfn) +{ +} + +static inline void flush_dcache_page(struct page *page) +{ +} + +static inline void flush_dcache_mmap_lock(struct address_space *mapping) +{ +} + +static inline void flush_dcache_mmap_unlock(struct address_space *mapping) +{ +} + +static inline void flush_icache_range(unsigned long start, unsigned long end) +{ +} + +static inline void flush_icache_page(struct vm_area_struct *vma, + struct page *page) +{ +} + +static inline void flush_icache_user_range(struct vm_area_struct *vma, + struct page *page, + unsigned long addr, int len) +{ +} + +static inline void flush_cache_vmap(unsigned long start, unsigned long end) +{ +} + +static inline void flush_cache_vunmap(unsigned long start, unsigned long end) +{ +} #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index dc9726fdac8f..a3b98c86f077 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -28,12 +28,12 @@ /** * do_div - returns 2 values: calculate remainder and update new dividend - * @n: pointer to uint64_t dividend (will be updated) + * @n: 
uint64_t dividend (will be updated) * @base: uint32_t divisor * * Summary: - * ``uint32_t remainder = *n % base;`` - * ``*n = *n / base;`` + * ``uint32_t remainder = n % base;`` + * ``n = n / base;`` * * Return: (uint32_t)remainder * @@ -178,7 +178,8 @@ static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) uint32_t m_hi = m >> 32; uint32_t n_lo = n; uint32_t n_hi = n >> 32; - uint64_t res, tmp; + uint64_t res; + uint32_t res_lo, res_hi, tmp; if (!bias) { res = ((uint64_t)m_lo * n_lo) >> 32; @@ -187,8 +188,9 @@ static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) res = (m + (uint64_t)m_lo * n_lo) >> 32; } else { res = m + (uint64_t)m_lo * n_lo; - tmp = (res < m) ? (1ULL << 32) : 0; - res = (res >> 32) + tmp; + res_lo = res >> 32; + res_hi = (res_lo < m_hi); + res = res_lo | ((uint64_t)res_hi << 32); } if (!(m & ((1ULL << 63) | (1ULL << 31)))) { @@ -197,10 +199,12 @@ static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) res += (uint64_t)m_hi * n_lo; res >>= 32; } else { - tmp = res += (uint64_t)m_lo * n_hi; + res += (uint64_t)m_lo * n_hi; + tmp = res >> 32; res += (uint64_t)m_hi * n_lo; - tmp = (res < tmp) ? (1ULL << 32) : 0; - res = (res >> 32) + tmp; + res_lo = res >> 32; + res_hi = (res_lo < tmp); + res = res_lo | ((uint64_t)res_hi << 32); } res += (uint64_t)m_hi * n_hi; diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h index 95a159a4137f..80ca61058dd2 100644 --- a/include/asm-generic/error-injection.h +++ b/include/asm-generic/error-injection.h @@ -16,6 +16,8 @@ struct error_injection_entry { int etype; }; +struct pt_regs; + #ifdef CONFIG_FUNCTION_ERROR_INJECTION /* * Whitelist ganerating macro. Specify functions which can be @@ -28,8 +30,12 @@ static struct error_injection_entry __used \ .addr = (unsigned long)fname, \ .etype = EI_ETYPE_##_etype, \ }; + +void override_function_with_return(struct pt_regs *regs); #else #define ALLOW_ERROR_INJECTION(fname, _etype) + +static inline void override_function_with_return(struct pt_regs *regs) { } #endif #endif diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 294d6ae785d4..fa577978fbbd 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -4,26 +4,24 @@ #ifndef KSYM_FUNC #define KSYM_FUNC(x) x #endif -#ifdef CONFIG_64BIT -#ifndef KSYM_ALIGN +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +#define KSYM_ALIGN 4 +#elif defined(CONFIG_64BIT) #define KSYM_ALIGN 8 -#endif #else -#ifndef KSYM_ALIGN #define KSYM_ALIGN 4 #endif -#endif #ifndef KCRC_ALIGN #define KCRC_ALIGN 4 #endif .macro __put, val, name #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS - .long \val - ., \name - . + .long \val - ., \name - ., 0 #elif defined(CONFIG_64BIT) - .quad \val, \name + .quad \val, \name, 0 #else - .long \val, \name + .long \val, \name, 0 #endif .endm @@ -57,7 +55,6 @@ __kcrctab_\name: #endif #endif .endm -#undef __put #if defined(CONFIG_TRIM_UNUSED_KSYMS) diff --git a/include/asm-generic/flat.h b/include/asm-generic/flat.h new file mode 100644 index 000000000000..1928a3596938 --- /dev/null +++ b/include/asm-generic/flat.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_FLAT_H +#define _ASM_GENERIC_FLAT_H + +#include <linux/uaccess.h> + +static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags, + u32 *addr) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + return copy_from_user(addr, rp, 4) ? 
-EFAULT : 0; +#else + return get_user(*addr, rp); +#endif +} + +static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + return copy_to_user(rp, &addr, 4) ? -EFAULT : 0; +#else + return put_user(addr, rp); +#endif +} + +#endif /* _ASM_GENERIC_FLAT_H */ diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index 8666fe7f35d7..02970b11f71f 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -118,26 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) { - int oldval = 0, ret; - - pagefault_disable(); - - switch (op) { - case FUTEX_OP_SET: - case FUTEX_OP_ADD: - case FUTEX_OP_OR: - case FUTEX_OP_ANDN: - case FUTEX_OP_XOR: - default: - ret = -ENOSYS; - } - - pagefault_enable(); - - if (!ret) - *oval = oldval; - - return ret; + return -ENOSYS; } static inline int diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h index c64bea7a52be..e9f20b813a69 100644 --- a/include/asm-generic/getorder.h +++ b/include/asm-generic/getorder.h @@ -7,24 +7,6 @@ #include <linux/compiler.h> #include <linux/log2.h> -/* - * Runtime evaluation of get_order() - */ -static inline __attribute_const__ -int __get_order(unsigned long size) -{ - int order; - - size--; - size >>= PAGE_SHIFT; -#if BITS_PER_LONG == 32 - order = fls(size); -#else - order = fls64(size); -#endif - return order; -} - /** * get_order - Determine the allocation order of a memory size * @size: The size for which to get the order @@ -43,19 +25,27 @@ int __get_order(unsigned long size) * to hold an object of the specified size. * * The result is undefined if the size is 0. - * - * This function may be used to initialise variables with compile time - * evaluations of constants. */ -#define get_order(n) \ -( \ - __builtin_constant_p(n) ? ( \ - ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \ - (((n) < (1UL << PAGE_SHIFT)) ? 0 : \ - ilog2((n) - 1) - PAGE_SHIFT + 1) \ - ) : \ - __get_order(n) \ -) +static inline __attribute_const__ int get_order(unsigned long size) +{ + if (__builtin_constant_p(size)) { + if (!size) + return BITS_PER_LONG - PAGE_SHIFT; + + if (size < (1UL << PAGE_SHIFT)) + return 0; + + return ilog2((size) - 1) - PAGE_SHIFT + 1; + } + + size--; + size >>= PAGE_SHIFT; +#if BITS_PER_LONG == 32 + return fls(size); +#else + return fls64(size); +#endif +} #endif /* __ASSEMBLY__ */ diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index b83e2802c969..d02806513670 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -963,15 +963,6 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size) } #endif -#ifndef __ioremap -#define __ioremap __ioremap -static inline void __iomem *__ioremap(phys_addr_t offset, size_t size, - unsigned long flags) -{ - return ioremap(offset, size); -} -#endif - #ifndef iounmap #define iounmap iounmap diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h new file mode 100644 index 000000000000..18d8e2d8210f --- /dev/null +++ b/include/asm-generic/mshyperv.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Linux-specific definitions for managing interactions with Microsoft's + * Hyper-V hypervisor. The definitions in this file are architecture + * independent. See arch/<arch>/include/asm/mshyperv.h for definitions + * that are specific to architecture <arch>. 
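As a quick illustration of the get_order() semantics documented above, assuming 4 KiB pages (PAGE_SHIFT == 12); example_pages_for() is just an illustrative helper name.

/* get_order() rounds the size up to the next power-of-two number of pages:
 *
 *   get_order(4096)  -> 0   exactly one page
 *   get_order(4097)  -> 1   needs two pages, so order 1
 *   get_order(65536) -> 4   sixteen pages
 */
static inline unsigned long example_pages_for(unsigned long size)
{
	/* pages an order-based allocator would actually reserve */
	return 1UL << get_order(size);
}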
+ * + * Definitions that are specified in the Hyper-V Top Level Functional + * Spec (TLFS) should not go in this file, but should instead go in + * hyperv-tlfs.h. + * + * Copyright (C) 2019, Microsoft, Inc. + * + * Author : Michael Kelley <mikelley@microsoft.com> + */ + +#ifndef _ASM_GENERIC_MSHYPERV_H +#define _ASM_GENERIC_MSHYPERV_H + +#include <linux/types.h> +#include <linux/atomic.h> +#include <linux/bitops.h> +#include <linux/cpumask.h> +#include <asm/ptrace.h> +#include <asm/hyperv-tlfs.h> + +struct ms_hyperv_info { + u32 features; + u32 misc_features; + u32 hints; + u32 nested_features; + u32 max_vp_index; + u32 max_lp_index; +}; +extern struct ms_hyperv_info ms_hyperv; + +extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr); +extern u64 hv_do_fast_hypercall8(u16 control, u64 input8); + + +/* Generate the guest OS identifier as described in the Hyper-V TLFS */ +static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version, + __u64 d_info2) +{ + __u64 guest_id = 0; + + guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48); + guest_id |= (d_info1 << 48); + guest_id |= (kernel_version << 16); + guest_id |= d_info2; + + return guest_id; +} + + +/* Free the message slot and signal end-of-message if required */ +static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) +{ + /* + * On crash we're reading some other CPU's message page and we need + * to be careful: this other CPU may already had cleared the header + * and the host may already had delivered some other message there. + * In case we blindly write msg->header.message_type we're going + * to lose it. We can still lose a message of the same type but + * we count on the fact that there can only be one + * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages + * on crash. + */ + if (cmpxchg(&msg->header.message_type, old_msg_type, + HVMSG_NONE) != old_msg_type) + return; + + /* + * The cmxchg() above does an implicit memory barrier to + * ensure the write to MessageType (ie set to + * HVMSG_NONE) happens before we read the + * MessagePending and EOMing. Otherwise, the EOMing + * will not deliver any more messages since there is + * no empty slot + */ + if (msg->header.message_flags.msg_pending) { + /* + * This will cause message queue rescan to + * possibly deliver another msg from the + * hypervisor + */ + hv_signal_eom(); + } +} + +void hv_setup_vmbus_irq(void (*handler)(void)); +void hv_remove_vmbus_irq(void); +void hv_enable_vmbus_irq(void); +void hv_disable_vmbus_irq(void); + +void hv_setup_kexec_handler(void (*handler)(void)); +void hv_remove_kexec_handler(void); +void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)); +void hv_remove_crash_handler(void); + +#if IS_ENABLED(CONFIG_HYPERV) +/* + * Hypervisor's notion of virtual processor ID is different from + * Linux' notion of CPU ID. This information can only be retrieved + * in the context of the calling CPU. Setup a map for easy access + * to this information. + */ +extern u32 *hv_vp_index; +extern u32 hv_max_vp_index; + +/* Sentinel value for an uninitialized entry in hv_vp_index array */ +#define VP_INVAL U32_MAX + +/** + * hv_cpu_number_to_vp_number() - Map CPU to VP. + * @cpu_number: CPU number in Linux terms + * + * This function returns the mapping between the Linux processor + * number and the hypervisor's virtual processor number, useful + * in making hypercalls and such that talk about specific + * processors. 
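For reference, the guest OS identifier helper above is typically fed the running kernel's version code; leaving both distro-specific fields at zero here is an assumption made purely for illustration.

/* Illustrative composition of a guest ID (needs <linux/version.h> for
 * LINUX_VERSION_CODE); the d_info fields are left at zero.
 */
static inline u64 example_hv_guest_id(void)
{
	return generate_guest_id(0, LINUX_VERSION_CODE, 0);
}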
+ * + * Return: Virtual processor number in Hyper-V terms + */ +static inline int hv_cpu_number_to_vp_number(int cpu_number) +{ + return hv_vp_index[cpu_number]; +} + +static inline int cpumask_to_vpset(struct hv_vpset *vpset, + const struct cpumask *cpus) +{ + int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; + + /* valid_bank_mask can represent up to 64 banks */ + if (hv_max_vp_index / 64 >= 64) + return 0; + + /* + * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex + * structs are not cleared between calls, we risk flushing unneeded + * vCPUs otherwise. + */ + for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++) + vpset->bank_contents[vcpu_bank] = 0; + + /* + * Some banks may end up being empty but this is acceptable. + */ + for_each_cpu(cpu, cpus) { + vcpu = hv_cpu_number_to_vp_number(cpu); + if (vcpu == VP_INVAL) + return -1; + vcpu_bank = vcpu / 64; + vcpu_offset = vcpu % 64; + __set_bit(vcpu_offset, (unsigned long *) + &vpset->bank_contents[vcpu_bank]); + if (vcpu_bank >= nr_bank) + nr_bank = vcpu_bank + 1; + } + vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0); + return nr_bank; +} + +void hyperv_report_panic(struct pt_regs *regs, long err); +void hyperv_report_panic_msg(phys_addr_t pa, size_t size); +bool hv_is_hyperv_initialized(void); +void hyperv_cleanup(void); +void hv_setup_sched_clock(void *sched_clock); +#else /* CONFIG_HYPERV */ +static inline bool hv_is_hyperv_initialized(void) { return false; } +static inline void hyperv_cleanup(void) {} +#endif /* CONFIG_HYPERV */ + +#if IS_ENABLED(CONFIG_HYPERV) +extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void)); +extern void hv_remove_stimer0_irq(int irq); +#endif + +#endif diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 948714c1535a..73f7421413cb 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -1,13 +1,107 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_GENERIC_PGALLOC_H #define __ASM_GENERIC_PGALLOC_H -/* - * an empty file is enough for a nommu architecture - */ + #ifdef CONFIG_MMU -#error need to implement an architecture specific asm/pgalloc.h + +#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO) +#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT) + +/** + * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table + * @mm: the mm_struct of the current context + * + * This function is intended for architectures that need + * anything beyond simple page allocation. 
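A minimal sketch of how a hypercall path might use cpumask_to_vpset(); example_build_vpset() is a hypothetical helper, and real callers additionally fill in whatever vpset format fields the specific hypercall requires.

static int example_build_vpset(struct hv_vpset *vpset,
			       const struct cpumask *cpus)
{
	int nr_bank;

	/* translate Linux CPU numbers into sparse VP banks */
	nr_bank = cpumask_to_vpset(vpset, cpus);
	if (nr_bank < 0)
		return -EINVAL;	/* some CPU's VP index was still VP_INVAL */

	/* a return of 0 means the mask needs more than 64 banks and
	 * cannot be represented; nonzero is the number of banks populated */
	return nr_bank;
}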
+ * + * Return: pointer to the allocated memory or %NULL on error + */ +static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm) +{ + return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL); +} + +#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL +/** + * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table + * @mm: the mm_struct of the current context + * + * Return: pointer to the allocated memory or %NULL on error + */ +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +{ + return __pte_alloc_one_kernel(mm); +} +#endif + +/** + * pte_free_kernel - free PTE-level kernel page table page + * @mm: the mm_struct of the current context + * @pte: pointer to the memory containing the page table + */ +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long)pte); +} + +/** + * __pte_alloc_one - allocate a page for PTE-level user page table + * @mm: the mm_struct of the current context + * @gfp: GFP flags to use for the allocation + * + * Allocates a page and runs the pgtable_pte_page_ctor(). + * + * This function is intended for architectures that need + * anything beyond simple page allocation or must have custom GFP flags. + * + * Return: `struct page` initialized as page table or %NULL on error + */ +static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) +{ + struct page *pte; + + pte = alloc_page(gfp); + if (!pte) + return NULL; + if (!pgtable_pte_page_ctor(pte)) { + __free_page(pte); + return NULL; + } + + return pte; +} + +#ifndef __HAVE_ARCH_PTE_ALLOC_ONE +/** + * pte_alloc_one - allocate a page for PTE-level user page table + * @mm: the mm_struct of the current context + * + * Allocates a page and runs the pgtable_pte_page_ctor(). + * + * Return: `struct page` initialized as page table or %NULL on error + */ +static inline pgtable_t pte_alloc_one(struct mm_struct *mm) +{ + return __pte_alloc_one(mm, GFP_PGTABLE_USER); +} #endif -#define check_pgt_cache() do { } while (0) +/* + * Should really implement gc for free page table pages. This could be + * done with a reference count in struct page. + */ + +/** + * pte_free - free PTE-level user page table page + * @mm: the mm_struct of the current context + * @pte_page: the `struct page` representing the page table + */ +static inline void pte_free(struct mm_struct *mm, struct page *pte_page) +{ + pgtable_pte_page_dtor(pte_page); + __free_page(pte_page); +} + +#endif /* CONFIG_MMU */ #endif /* __ASM_GENERIC_PGALLOC_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 75d9d68a6de7..818691846c90 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -1002,9 +1002,8 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) * need this). If THP is not enabled, the pmd can't go away under the * code even if MADV_DONTNEED runs, but if THP is enabled we need to * run a pmd_trans_unstable before walking the ptes after - * split_huge_page_pmd returns (because it may have run when the pmd - * become null, but then a page fault can map in a THP and not a - * regular page). + * split_huge_pmd returns (because it may have run when the pmd become + * null, but then a page fault can map in a THP and not a regular page). 
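The split into __pte_alloc_one() and pte_alloc_one() is meant to let an architecture override just the GFP flags while reusing the generic constructor and accounting. A hedged sketch of such an override; the extra __GFP_DMA32 constraint is purely illustrative, not taken from any real architecture.

/* Hypothetical arch/<arch>/include/asm/pgalloc.h */
#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h>	/* provides __pte_alloc_one() */

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	/* same ctor and accounting as the generic helper, custom GFP mask */
	return __pte_alloc_one(mm, GFP_PGTABLE_USER | __GFP_DMA32);
}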
*/ static inline int pmd_trans_unstable(pmd_t *pmd) { @@ -1126,7 +1125,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, static inline void init_espfix_bsp(void) { } #endif -extern void __init pgd_cache_init(void); +extern void __init pgtable_cache_init(void); #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index c3046c920063..d683f5e6d791 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -78,11 +78,11 @@ static __always_inline bool should_resched(int preempt_offset) tif_need_resched()); } -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION extern asmlinkage void preempt_schedule(void); #define __preempt_schedule() preempt_schedule() extern asmlinkage void preempt_schedule_notrace(void); #define __preempt_schedule_notrace() preempt_schedule_notrace() -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ #endif /* __ASM_PREEMPT_H */ diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h deleted file mode 100644 index ab16b6cb1028..000000000000 --- a/include/asm-generic/ptrace.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Common low level (register) ptrace helpers - * - * Copyright 2004-2011 Analog Devices Inc. - */ - -#ifndef __ASM_GENERIC_PTRACE_H__ -#define __ASM_GENERIC_PTRACE_H__ - -#ifndef __ASSEMBLY__ - -/* Helpers for working with the instruction pointer */ -#ifndef GET_IP -#define GET_IP(regs) ((regs)->pc) -#endif -#ifndef SET_IP -#define SET_IP(regs, val) (GET_IP(regs) = (val)) -#endif - -static inline unsigned long instruction_pointer(struct pt_regs *regs) -{ - return GET_IP(regs); -} -static inline void instruction_pointer_set(struct pt_regs *regs, - unsigned long val) -{ - SET_IP(regs, val); -} - -#ifndef profile_pc -#define profile_pc(regs) instruction_pointer(regs) -#endif - -/* Helpers for working with the user stack pointer */ -#ifndef GET_USP -#define GET_USP(regs) ((regs)->usp) -#endif -#ifndef SET_USP -#define SET_USP(regs, val) (GET_USP(regs) = (val)) -#endif - -static inline unsigned long user_stack_pointer(struct pt_regs *regs) -{ - return GET_USP(regs); -} -static inline void user_stack_pointer_set(struct pt_regs *regs, - unsigned long val) -{ - SET_USP(regs, val); -} - -/* Helpers for working with the frame pointer */ -#ifndef GET_FP -#define GET_FP(regs) ((regs)->fp) -#endif -#ifndef SET_FP -#define SET_FP(regs, val) (GET_FP(regs) = (val)) -#endif - -static inline unsigned long frame_pointer(struct pt_regs *regs) -{ - return GET_FP(regs); -} -static inline void frame_pointer_set(struct pt_regs *regs, - unsigned long val) -{ - SET_FP(regs, val); -} - -#endif /* __ASSEMBLY__ */ - -#endif diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h new file mode 100644 index 000000000000..e94b19782c92 --- /dev/null +++ b/include/asm-generic/vdso/vsyscall.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_VSYSCALL_H +#define __ASM_GENERIC_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#ifndef __arch_get_k_vdso_data +static __always_inline struct vdso_data *__arch_get_k_vdso_data(void) +{ + return NULL; +} +#endif /* __arch_get_k_vdso_data */ + +#ifndef __arch_update_vdso_data +static __always_inline int __arch_update_vdso_data(void) +{ + return 0; +} +#endif /* __arch_update_vdso_data */ + +#ifndef __arch_get_clock_mode +static __always_inline int 
__arch_get_clock_mode(struct timekeeper *tk) +{ + return 0; +} +#endif /* __arch_get_clock_mode */ + +#ifndef __arch_use_vsyscall +static __always_inline int __arch_use_vsyscall(struct vdso_data *vdata) +{ + return 1; +} +#endif /* __arch_use_vsyscall */ + +#ifndef __arch_update_vsyscall +static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata, + struct timekeeper *tk) +{ +} +#endif /* __arch_update_vsyscall */ + +#ifndef __arch_sync_vdso_data +static __always_inline void __arch_sync_vdso_data(struct vdso_data *vdata) +{ +} +#endif /* __arch_sync_vdso_data */ + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_GENERIC_VSYSCALL_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 088987e9a3ea..dae64600ccbf 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -110,10 +110,17 @@ #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD +#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY +#define MCOUNT_REC() . = ALIGN(8); \ + __start_mcount_loc = .; \ + KEEP(*(__patchable_function_entries)) \ + __stop_mcount_loc = .; +#else #define MCOUNT_REC() . = ALIGN(8); \ __start_mcount_loc = .; \ KEEP(*(__mcount_loc)) \ __stop_mcount_loc = .; +#endif #else #define MCOUNT_REC() #endif @@ -208,8 +215,13 @@ __start_lsm_info = .; \ KEEP(*(.lsm_info.init)) \ __end_lsm_info = .; +#define EARLY_LSM_TABLE() . = ALIGN(8); \ + __start_early_lsm_info = .; \ + KEEP(*(.early_lsm_info.init)) \ + __end_early_lsm_info = .; #else #define LSM_TABLE() +#define EARLY_LSM_TABLE() #endif #define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name) @@ -239,6 +251,16 @@ #define ACPI_PROBE_TABLE(name) #endif +#ifdef CONFIG_THERMAL +#define THERMAL_TABLE(name) \ + . = ALIGN(8); \ + __##name##_thermal_table = .; \ + KEEP(*(__##name##_thermal_table)) \ + __##name##_thermal_table_end = .; +#else +#define THERMAL_TABLE(name) +#endif + #define KERNEL_DTB() \ STRUCT_ALIGN(); \ __dtb_start = .; \ @@ -608,8 +630,10 @@ IRQCHIP_OF_MATCH_TABLE() \ ACPI_PROBE_TABLE(irqchip) \ ACPI_PROBE_TABLE(timer) \ + THERMAL_TABLE(governor) \ EARLYCON_TABLE() \ - LSM_TABLE() + LSM_TABLE() \ + EARLY_LSM_TABLE() #define INIT_TEXT \ *(.init.text .init.text.*) \ diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h new file mode 100644 index 000000000000..422f5e5237be --- /dev/null +++ b/include/clocksource/hyperv_timer.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Definitions for the clocksource provided by the Hyper-V + * hypervisor to guest VMs, as described in the Hyper-V Top + * Level Functional Spec (TLFS). + * + * Copyright (C) 2019, Microsoft, Inc. 
+ * + * Author: Michael Kelley <mikelley@microsoft.com> + */ + +#ifndef __CLKSOURCE_HYPERV_TIMER_H +#define __CLKSOURCE_HYPERV_TIMER_H + +#include <linux/clocksource.h> +#include <linux/math64.h> +#include <asm/mshyperv.h> + +#define HV_MAX_MAX_DELTA_TICKS 0xffffffff +#define HV_MIN_DELTA_TICKS 1 + +/* Routines called by the VMbus driver */ +extern int hv_stimer_alloc(int sint); +extern void hv_stimer_free(void); +extern void hv_stimer_init(unsigned int cpu); +extern void hv_stimer_cleanup(unsigned int cpu); +extern void hv_stimer_global_cleanup(void); +extern void hv_stimer0_isr(void); + +#ifdef CONFIG_HYPERV_TIMER +extern struct clocksource *hyperv_cs; +extern void hv_init_clocksource(void); + +extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void); + +static inline notrace u64 +hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc) +{ + u64 scale, offset; + u32 sequence; + + /* + * The protocol for reading Hyper-V TSC page is specified in Hypervisor + * Top-Level Functional Specification ver. 3.0 and above. To get the + * reference time we must do the following: + * - READ ReferenceTscSequence + * A special '0' value indicates the time source is unreliable and we + * need to use something else. The currently published specification + * versions (up to 4.0b) contain a mistake and wrongly claim '-1' + * instead of '0' as the special value, see commit c35b82ef0294. + * - ReferenceTime = + * ((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset + * - READ ReferenceTscSequence again. In case its value has changed + * since our first reading we need to discard ReferenceTime and repeat + * the whole sequence as the hypervisor was updating the page in + * between. + */ + do { + sequence = READ_ONCE(tsc_pg->tsc_sequence); + if (!sequence) + return U64_MAX; + /* + * Make sure we read sequence before we read other values from + * TSC page. + */ + smp_rmb(); + + scale = READ_ONCE(tsc_pg->tsc_scale); + offset = READ_ONCE(tsc_pg->tsc_offset); + *cur_tsc = hv_get_raw_timer(); + + /* + * Make sure we read sequence after we read all other values + * from TSC page. 
+ */ + smp_rmb(); + + } while (READ_ONCE(tsc_pg->tsc_sequence) != sequence); + + return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset; +} + +static inline notrace u64 +hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg) +{ + u64 cur_tsc; + + return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc); +} + +#else /* CONFIG_HYPERV_TIMER */ +static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void) +{ + return NULL; +} + +static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, + u64 *cur_tsc) +{ + return U64_MAX; +} +#endif /* CONFIG_HYPERV_TIMER */ + +#endif diff --git a/include/clocksource/timer-davinci.h b/include/clocksource/timer-davinci.h new file mode 100644 index 000000000000..1dcc1333fbc8 --- /dev/null +++ b/include/clocksource/timer-davinci.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * TI DaVinci clocksource driver + * + * Copyright (C) 2019 Texas Instruments + * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com> + */ + +#ifndef __TIMER_DAVINCI_H__ +#define __TIMER_DAVINCI_H__ + +#include <linux/clk.h> +#include <linux/ioport.h> + +enum { + DAVINCI_TIMER_CLOCKEVENT_IRQ, + DAVINCI_TIMER_CLOCKSOURCE_IRQ, + DAVINCI_TIMER_NUM_IRQS, +}; + +/** + * struct davinci_timer_cfg - davinci clocksource driver configuration struct + * @reg: register range resource + * @irq: clockevent and clocksource interrupt resources + * @cmp_off: if set - it specifies the compare register used for clockevent + * + * Note: if the compare register is specified, the driver will use the bottom + * clock half for both clocksource and clockevent and the compare register + * to generate event irqs. The user must supply the correct compare register + * interrupt number. + * + * This is only used by da830 the DSP of which uses the top half. The timer + * driver still configures the top half to run in free-run mode. + */ +struct davinci_timer_cfg { + struct resource reg; + struct resource irq[DAVINCI_TIMER_NUM_IRQS]; + unsigned int cmp_off; +}; + +int __init davinci_timer_register(struct clk *clk, + const struct davinci_timer_cfg *data); + +#endif /* __TIMER_DAVINCI_H__ */ diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 61bb10490492..3c245b1859e7 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -317,21 +317,7 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) * * Return: 0 if the cipher operation was successful; < 0 if an error occurred */ -static inline int crypto_aead_encrypt(struct aead_request *req) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_alg *alg = aead->base.__crt_alg; - unsigned int cryptlen = req->cryptlen; - int ret; - - crypto_stats_get(alg); - if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else - ret = crypto_aead_alg(aead)->encrypt(req); - crypto_stats_aead_encrypt(cryptlen, alg, ret); - return ret; -} +int crypto_aead_encrypt(struct aead_request *req); /** * crypto_aead_decrypt() - decrypt ciphertext @@ -355,23 +341,7 @@ static inline int crypto_aead_encrypt(struct aead_request *req) * integrity of the ciphertext or the associated data was violated); * < 0 if an error occurred. 
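With crypto_aead_encrypt() and crypto_aead_decrypt() becoming out-of-line below, the calling convention itself does not change. A condensed sketch of the usual synchronous-wait pattern around these entry points; "gcm(aes)" is only an example algorithm name, the function name is hypothetical, and the scatterlist is assumed to already cover assoclen + cryptlen bytes plus room for the tag.

static int example_aead_seal(struct scatterlist *sg, unsigned int assoclen,
			     unsigned int cryptlen, u8 *iv,
			     const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	/* crypto_aead_encrypt() may return -EINPROGRESS; wait for completion */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}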
*/ -static inline int crypto_aead_decrypt(struct aead_request *req) -{ - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct crypto_alg *alg = aead->base.__crt_alg; - unsigned int cryptlen = req->cryptlen; - int ret; - - crypto_stats_get(alg); - if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else if (req->cryptlen < crypto_aead_authsize(aead)) - ret = -EINVAL; - else - ret = crypto_aead_alg(aead)->decrypt(req); - crypto_stats_aead_decrypt(cryptlen, alg, ret); - return ret; -} +int crypto_aead_decrypt(struct aead_request *req); /** * DOC: Asynchronous AEAD Request Handle diff --git a/include/crypto/aes.h b/include/crypto/aes.h index 0fdb542c70cd..2090729701ab 100644 --- a/include/crypto/aes.h +++ b/include/crypto/aes.h @@ -29,12 +29,62 @@ struct crypto_aes_ctx { }; extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned; -extern const u32 crypto_fl_tab[4][256] ____cacheline_aligned; extern const u32 crypto_it_tab[4][256] ____cacheline_aligned; -extern const u32 crypto_il_tab[4][256] ____cacheline_aligned; + +/* + * validate key length for AES algorithms + */ +static inline int aes_check_keylen(unsigned int keylen) +{ + switch (keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_192: + case AES_KEYSIZE_256: + break; + default: + return -EINVAL; + } + + return 0; +} int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len); -int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, - unsigned int key_len); + +/** + * aes_expandkey - Expands the AES key as described in FIPS-197 + * @ctx: The location where the computed key will be stored. + * @in_key: The supplied key. + * @key_len: The length of the supplied key. + * + * Returns 0 on success. The function fails only if an invalid key size (or + * pointer) is supplied. + * The expanded key size is 240 bytes (max of 14 rounds with a unique 16 bytes + * key schedule plus a 16 bytes key which is used before the first round). + * The decryption key is prepared for the "Equivalent Inverse Cipher" as + * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is + * for the initial combination, the second slot for the first round and so on. 
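A minimal sketch of the AES library flow these helpers enable: expand the key, encrypt one block in place, then wipe the schedule. The function name is hypothetical and the snippet assumes <crypto/aes.h> plus <linux/string.h> for memzero_explicit().

static int example_aes_encrypt_block(const u8 *key, unsigned int key_len,
				     u8 block[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx ctx;
	int err;

	err = aes_expandkey(&ctx, key, key_len);
	if (err)
		return err;	/* invalid key length */

	aes_encrypt(&ctx, block, block);
	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}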
+ */ +int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, + unsigned int key_len); + +/** + * aes_encrypt - Encrypt a single AES block + * @ctx: Context struct containing the key schedule + * @out: Buffer to store the ciphertext + * @in: Buffer containing the plaintext + */ +void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); + +/** + * aes_decrypt - Decrypt a single AES block + * @ctx: Context struct containing the key schedule + * @out: Buffer to store the plaintext + * @in: Buffer containing the ciphertext + */ +void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); + +extern const u8 crypto_aes_sbox[]; +extern const u8 crypto_aes_inv_sbox[]; + #endif diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 743d626479ef..e5bd302f2c49 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -189,7 +189,6 @@ void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); int crypto_enqueue_request(struct crypto_queue *queue, struct crypto_async_request *request); struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); -int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); static inline unsigned int crypto_queue_len(struct crypto_queue *queue) { return queue->qlen; @@ -371,12 +370,6 @@ static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) return req->__ctx; } -static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue, - struct crypto_ablkcipher *tfm) -{ - return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); -} - static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask) { @@ -416,10 +409,8 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size) static inline void crypto_yield(u32 flags) { -#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY) if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) cond_resched(); -#endif } int crypto_register_notifier(struct notifier_block *nb); diff --git a/include/crypto/arc4.h b/include/crypto/arc4.h index 5b2c24ab0139..f3c22fe01704 100644 --- a/include/crypto/arc4.h +++ b/include/crypto/arc4.h @@ -6,8 +6,18 @@ #ifndef _CRYPTO_ARC4_H #define _CRYPTO_ARC4_H +#include <linux/types.h> + #define ARC4_MIN_KEY_SIZE 1 #define ARC4_MAX_KEY_SIZE 256 #define ARC4_BLOCK_SIZE 1 +struct arc4_ctx { + u32 S[256]; + u32 x, y; +}; + +int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len); +void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len); + #endif /* _CRYPTO_ARC4_H */ diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index 1fc70a69d550..d1e723c6a37d 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h @@ -41,7 +41,7 @@ static inline void chacha20_block(u32 *state, u8 *stream) } void hchacha_block(const u32 *in, u32 *out, int nrounds); -void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv); +void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv); int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keysize); diff --git a/include/crypto/crypto_wq.h b/include/crypto/crypto_wq.h deleted file mode 100644 index 23114746ac08..000000000000 --- a/include/crypto/crypto_wq.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef CRYPTO_WQ_H -#define CRYPTO_WQ_H - -#include <linux/workqueue.h> - -extern struct workqueue_struct *kcrypto_wq; -#endif diff --git a/include/crypto/ctr.h 
b/include/crypto/ctr.h index 06984a26c8cf..a1c66d1001af 100644 --- a/include/crypto/ctr.h +++ b/include/crypto/ctr.h @@ -8,8 +8,58 @@ #ifndef _CRYPTO_CTR_H #define _CRYPTO_CTR_H +#include <crypto/algapi.h> +#include <crypto/internal/skcipher.h> +#include <linux/string.h> +#include <linux/types.h> + #define CTR_RFC3686_NONCE_SIZE 4 #define CTR_RFC3686_IV_SIZE 8 #define CTR_RFC3686_BLOCK_SIZE 16 +static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req, + void (*fn)(struct crypto_skcipher *, + const u8 *, u8 *)) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + int blocksize = crypto_skcipher_chunksize(tfm); + u8 buf[MAX_CIPHER_BLOCKSIZE]; + struct skcipher_walk walk; + int err; + + /* avoid integer division due to variable blocksize parameter */ + if (WARN_ON_ONCE(!is_power_of_2(blocksize))) + return -EINVAL; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes > 0) { + u8 *dst = walk.dst.virt.addr; + u8 *src = walk.src.virt.addr; + int nbytes = walk.nbytes; + int tail = 0; + + if (nbytes < walk.total) { + tail = walk.nbytes & (blocksize - 1); + nbytes -= tail; + } + + do { + int bsize = min(nbytes, blocksize); + + fn(tfm, walk.iv, buf); + + crypto_xor_cpy(dst, src, buf, bsize); + crypto_inc(walk.iv, blocksize); + + dst += bsize; + src += bsize; + nbytes -= bsize; + } while (nbytes > 0); + + err = skcipher_walk_done(&walk, tail); + } + return err; +} + #endif /* _CRYPTO_CTR_H */ diff --git a/include/crypto/des.h b/include/crypto/des.h index 72c7c8e5a5a7..7812b4331ae4 100644 --- a/include/crypto/des.h +++ b/include/crypto/des.h @@ -6,10 +6,7 @@ #ifndef __CRYPTO_DES_H #define __CRYPTO_DES_H -#include <crypto/skcipher.h> -#include <linux/compiler.h> -#include <linux/fips.h> -#include <linux/string.h> +#include <linux/types.h> #define DES_KEY_SIZE 8 #define DES_EXPKEY_WORDS 32 @@ -19,48 +16,42 @@ #define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) #define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE -static inline int __des3_verify_key(u32 *flags, const u8 *key) -{ - int err = -EINVAL; - u32 K[6]; +struct des_ctx { + u32 expkey[DES_EXPKEY_WORDS]; +}; - memcpy(K, key, DES3_EDE_KEY_SIZE); +struct des3_ede_ctx { + u32 expkey[DES3_EDE_EXPKEY_WORDS]; +}; - if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || - !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && - (fips_enabled || - (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS))) - goto bad; +void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src); +void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src); - if (unlikely(!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled) - goto bad; +void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); +void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src); - err = 0; - -out: - memzero_explicit(K, DES3_EDE_KEY_SIZE); - - return err; - -bad: - *flags |= CRYPTO_TFM_RES_WEAK_KEY; - goto out; -} - -static inline int des3_verify_key(struct crypto_skcipher *tfm, const u8 *key) -{ - u32 flags; - int err; - - flags = crypto_skcipher_get_flags(tfm); - err = __des3_verify_key(&flags, key); - crypto_skcipher_set_flags(tfm, flags); - return err; -} - -extern unsigned long des_ekey(u32 *pe, const u8 *k); - -extern int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, - unsigned int keylen); +/** + * des_expand_key - Expand a DES input key into a key schedule + * @ctx: the key schedule + * @key: buffer containing the input key + * @len: size of the buffer contents + * + * Returns 0 on success, -EINVAL if the input key is rejected 
and -ENOKEY if + * the key is accepted but has been found to be weak. + */ +int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen); + +/** + * des3_ede_expand_key - Expand a triple DES input key into a key schedule + * @ctx: the key schedule + * @key: buffer containing the input key + * @len: size of the buffer contents + * + * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if + * the key is accepted but has been found to be weak. Note that weak keys will + * be rejected (and -EINVAL will be returned) when running in FIPS mode. + */ +int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key, + unsigned int keylen); #endif /* __CRYPTO_DES_H */ diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 3fb581bf3b87..8c9af21efce1 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -129,6 +129,8 @@ struct drbg_state { bool seeded; /* DRBG fully seeded? */ bool pr; /* Prediction resistance enabled? */ + bool fips_primed; /* Continuous test primed? */ + unsigned char *prev; /* FIPS 140-2 continuous test value */ struct work_struct seed_work; /* asynchronous seeding support */ struct crypto_rng *jent; const struct drbg_state_ops *d_ops; diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h index c50e057ea17e..9d7eff04f224 100644 --- a/include/crypto/gcm.h +++ b/include/crypto/gcm.h @@ -1,8 +1,63 @@ #ifndef _CRYPTO_GCM_H #define _CRYPTO_GCM_H +#include <linux/errno.h> + #define GCM_AES_IV_SIZE 12 #define GCM_RFC4106_IV_SIZE 8 #define GCM_RFC4543_IV_SIZE 8 +/* + * validate authentication tag for GCM + */ +static inline int crypto_gcm_check_authsize(unsigned int authsize) +{ + switch (authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * validate authentication tag for RFC4106 + */ +static inline int crypto_rfc4106_check_authsize(unsigned int authsize) +{ + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * validate assoclen for RFC4106/RFC4543 + */ +static inline int crypto_ipsec_check_assoclen(unsigned int assoclen) +{ + switch (assoclen) { + case 16: + case 20: + break; + default: + return -EINVAL; + } + + return 0; +} #endif diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h index 9136301062a5..f832c9f2aca3 100644 --- a/include/crypto/ghash.h +++ b/include/crypto/ghash.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Common values for GHASH algorithms + * Common values for the GHASH hash function */ #ifndef __CRYPTO_GHASH_H__ diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h index 8c602b187c58..fd54074332f5 100644 --- a/include/crypto/internal/cryptouser.h +++ b/include/crypto/internal/cryptouser.h @@ -1,14 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/cryptouser.h> #include <net/netlink.h> -extern struct sock *crypto_nlsk; - struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact); #ifdef CONFIG_CRYPTO_STATS int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs); #else -static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs) +static inline int crypto_reportstat(struct sk_buff *in_skb, + struct nlmsghdr *in_nlh, + struct nlattr **attrs) { return -ENOTSUPP; } diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h new file mode 100644 index 
000000000000..81ea1a425e9c --- /dev/null +++ b/include/crypto/internal/des.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DES & Triple DES EDE key verification helpers + */ + +#ifndef __CRYPTO_INTERNAL_DES_H +#define __CRYPTO_INTERNAL_DES_H + +#include <linux/crypto.h> +#include <linux/fips.h> +#include <crypto/des.h> +#include <crypto/aead.h> +#include <crypto/skcipher.h> + +/** + * crypto_des_verify_key - Check whether a DES key is weak + * @tfm: the crypto algo + * @key: the key buffer + * + * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak + * keys. Otherwise, 0 is returned. + * + * It is the job of the caller to ensure that the size of the key equals + * DES_KEY_SIZE. + */ +static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key) +{ + struct des_ctx tmp; + int err; + + err = des_expand_key(&tmp, key, DES_KEY_SIZE); + if (err == -ENOKEY) { + if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) + err = -EINVAL; + else + err = 0; + } + + if (err) + crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + + memzero_explicit(&tmp, sizeof(tmp)); + return err; +} + +/* + * RFC2451: + * + * For DES-EDE3, there is no known need to reject weak or + * complementation keys. Any weakness is obviated by the use of + * multiple keys. + * + * However, if the first two or last two independent 64-bit keys are + * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the + * same as DES. Implementers MUST reject keys that exhibit this + * property. + * + */ +static inline int des3_ede_verify_key(const u8 *key, unsigned int key_len, + bool check_weak) +{ + int ret = fips_enabled ? -EINVAL : -ENOKEY; + u32 K[6]; + + memcpy(K, key, DES3_EDE_KEY_SIZE); + + if ((!((K[0] ^ K[2]) | (K[1] ^ K[3])) || + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (fips_enabled || check_weak)) + goto bad; + + if ((!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled) + goto bad; + + ret = 0; +bad: + memzero_explicit(K, DES3_EDE_KEY_SIZE); + + return ret; +} + +/** + * crypto_des3_ede_verify_key - Check whether a DES3-EDE key is weak + * @tfm: the crypto algo + * @key: the key buffer + * + * Returns -EINVAL if the key is weak and the crypto TFM does not permit weak + * keys or when running in FIPS mode. Otherwise, 0 is returned. Note that some + * keys are rejected in FIPS mode even if weak keys are permitted by the TFM + * flags. + * + * It is the job of the caller to ensure that the size of the key equals + * DES3_EDE_KEY_SIZE. 
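A hedged sketch of how a hardware driver's ->setkey() might use these verification helpers; struct example_des3_ctx and the function name are illustrative, and the raw key is simply retained because the hardware is assumed to do its own key scheduling.

struct example_des3_ctx {
	u8 key[DES3_EDE_KEY_SIZE];
};

static int example_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct example_des3_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	/* applies the weak-key policy according to the tfm's request flags */
	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
	return 0;
}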
+ */ +static inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm, + const u8 *key) +{ + int err; + + err = des3_ede_verify_key(key, DES3_EDE_KEY_SIZE, + crypto_tfm_get_flags(tfm) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + if (err) + crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + return err; +} + +static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm, + const u8 *key) +{ + return crypto_des_verify_key(crypto_skcipher_tfm(tfm), key); +} + +static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm, + const u8 *key) +{ + return crypto_des3_ede_verify_key(crypto_skcipher_tfm(tfm), key); +} + +static inline int verify_ablkcipher_des_key(struct crypto_ablkcipher *tfm, + const u8 *key) +{ + return crypto_des_verify_key(crypto_ablkcipher_tfm(tfm), key); +} + +static inline int verify_ablkcipher_des3_key(struct crypto_ablkcipher *tfm, + const u8 *key) +{ + return crypto_des3_ede_verify_key(crypto_ablkcipher_tfm(tfm), key); +} + +static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key, + int keylen) +{ + if (keylen != DES_KEY_SIZE) { + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + return crypto_des_verify_key(crypto_aead_tfm(tfm), key); +} + +static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key, + int keylen) +{ + if (keylen != DES3_EDE_KEY_SIZE) { + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key); +} + +#endif /* __CRYPTO_INTERNAL_DES_H */ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 31e0662fa429..bfc9db7b100d 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -196,12 +196,6 @@ static inline struct ahash_request *ahash_dequeue_request( return ahash_request_cast(crypto_dequeue_request(queue)); } -static inline int ahash_tfm_in_queue(struct crypto_queue *queue, - struct crypto_ahash *tfm) -{ - return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm)); -} - static inline void *crypto_shash_ctx(struct crypto_shash *tfm) { return crypto_tfm_ctx(&tfm->base); diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index fe0376d5a471..734b6f7081b8 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -148,6 +148,11 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic); void skcipher_walk_complete(struct skcipher_walk *walk, int err); +static inline void skcipher_walk_abort(struct skcipher_walk *walk) +{ + skcipher_walk_done(walk, -ECANCELED); +} + static inline void ablkcipher_request_complete(struct ablkcipher_request *req, int err) { @@ -200,6 +205,66 @@ static inline unsigned int crypto_skcipher_alg_max_keysize( return alg->max_keysize; } +static inline unsigned int crypto_skcipher_alg_chunksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->chunksize; +} + +static inline unsigned int crypto_skcipher_alg_walksize( + struct skcipher_alg *alg) +{ + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_BLKCIPHER) + return alg->base.cra_blocksize; + + if (alg->base.cra_ablkcipher.encrypt) + return alg->base.cra_blocksize; + + return alg->walksize; +} + +/** + * crypto_skcipher_chunksize() - obtain chunk size + 
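An illustrative sketch, assuming a hypothetical hardware driver context, of how a skcipher ->setkey() could use the verification helpers above; the skcipher core already guarantees keylen equals DES3_EDE_KEY_SIZE through the algorithm's min/max keysize, so only the weak-key policy needs checking here.

#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <linux/string.h>

struct example_des3_ctx {
	u8 key[DES3_EDE_KEY_SIZE];	/* hypothetical raw key copy for the HW */
};

static int example_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct example_des3_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	/* Applies the k1 == k2 / k2 == k3 and FIPS rules described above. */
	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
	return 0;
}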
* @tfm: cipher handle + * + * The block size is set to one for ciphers such as CTR. However, + * you still need to provide incremental updates in multiples of + * the underlying block size as the IV does not have sub-block + * granularity. This is known in this API as the chunk size. + * + * Return: chunk size in bytes + */ +static inline unsigned int crypto_skcipher_chunksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); +} + +/** + * crypto_skcipher_walksize() - obtain walk size + * @tfm: cipher handle + * + * In some cases, algorithms can only perform optimally when operating on + * multiple blocks in parallel. This is reflected by the walksize, which + * must be a multiple of the chunksize (or equal if the concern does not + * apply) + * + * Return: walk size in bytes + */ +static inline unsigned int crypto_skcipher_walksize( + struct crypto_skcipher *tfm) +{ + return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); +} + /* Helpers for simple block cipher modes of operation */ struct skcipher_ctx_simple { struct crypto_cipher *cipher; /* underlying block cipher */ diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h deleted file mode 100644 index 5cefddb1991f..000000000000 --- a/include/crypto/morus1280_glue.h +++ /dev/null @@ -1,97 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * The MORUS-1280 Authenticated-Encryption Algorithm - * Common glue skeleton -- header file - * - * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com> - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - */ - -#ifndef _CRYPTO_MORUS1280_GLUE_H -#define _CRYPTO_MORUS1280_GLUE_H - -#include <linux/module.h> -#include <linux/types.h> -#include <crypto/algapi.h> -#include <crypto/aead.h> -#include <crypto/morus_common.h> - -#define MORUS1280_WORD_SIZE 8 -#define MORUS1280_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS1280_WORD_SIZE) - -struct morus1280_block { - u8 bytes[MORUS1280_BLOCK_SIZE]; -}; - -struct morus1280_glue_ops { - void (*init)(void *state, const void *key, const void *iv); - void (*ad)(void *state, const void *data, unsigned int length); - void (*enc)(void *state, const void *src, void *dst, unsigned int length); - void (*dec)(void *state, const void *src, void *dst, unsigned int length); - void (*enc_tail)(void *state, const void *src, void *dst, unsigned int length); - void (*dec_tail)(void *state, const void *src, void *dst, unsigned int length); - void (*final)(void *state, void *tag_xor, u64 assoclen, u64 cryptlen); -}; - -struct morus1280_ctx { - const struct morus1280_glue_ops *ops; - struct morus1280_block key; -}; - -void crypto_morus1280_glue_init_ops(struct crypto_aead *aead, - const struct morus1280_glue_ops *ops); -int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen); -int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm, - unsigned int authsize); -int crypto_morus1280_glue_encrypt(struct aead_request *req); -int crypto_morus1280_glue_decrypt(struct aead_request *req); - -#define MORUS1280_DECLARE_ALG(id, driver_name, priority) \ - static const struct morus1280_glue_ops crypto_morus1280_##id##_ops = {\ - .init = crypto_morus1280_##id##_init, \ - .ad = crypto_morus1280_##id##_ad, \ - .enc = crypto_morus1280_##id##_enc, \ - .enc_tail = crypto_morus1280_##id##_enc_tail, \ - .dec = crypto_morus1280_##id##_dec, \ - .dec_tail = crypto_morus1280_##id##_dec_tail, \ - .final = crypto_morus1280_##id##_final, \ - }; \ - \ - static 
int crypto_morus1280_##id##_init_tfm(struct crypto_aead *tfm) \ - { \ - crypto_morus1280_glue_init_ops(tfm, &crypto_morus1280_##id##_ops); \ - return 0; \ - } \ - \ - static void crypto_morus1280_##id##_exit_tfm(struct crypto_aead *tfm) \ - { \ - } \ - \ - static struct aead_alg crypto_morus1280_##id##_alg = { \ - .setkey = crypto_morus1280_glue_setkey, \ - .setauthsize = crypto_morus1280_glue_setauthsize, \ - .encrypt = crypto_morus1280_glue_encrypt, \ - .decrypt = crypto_morus1280_glue_decrypt, \ - .init = crypto_morus1280_##id##_init_tfm, \ - .exit = crypto_morus1280_##id##_exit_tfm, \ - \ - .ivsize = MORUS_NONCE_SIZE, \ - .maxauthsize = MORUS_MAX_AUTH_SIZE, \ - .chunksize = MORUS1280_BLOCK_SIZE, \ - \ - .base = { \ - .cra_flags = CRYPTO_ALG_INTERNAL, \ - .cra_blocksize = 1, \ - .cra_ctxsize = sizeof(struct morus1280_ctx), \ - .cra_alignmask = 0, \ - .cra_priority = priority, \ - \ - .cra_name = "__morus1280", \ - .cra_driver_name = "__"driver_name, \ - \ - .cra_module = THIS_MODULE, \ - } \ - } - -#endif /* _CRYPTO_MORUS1280_GLUE_H */ diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h deleted file mode 100644 index 0ee6266cb26c..000000000000 --- a/include/crypto/morus640_glue.h +++ /dev/null @@ -1,97 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * The MORUS-640 Authenticated-Encryption Algorithm - * Common glue skeleton -- header file - * - * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com> - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - */ - -#ifndef _CRYPTO_MORUS640_GLUE_H -#define _CRYPTO_MORUS640_GLUE_H - -#include <linux/module.h> -#include <linux/types.h> -#include <crypto/algapi.h> -#include <crypto/aead.h> -#include <crypto/morus_common.h> - -#define MORUS640_WORD_SIZE 4 -#define MORUS640_BLOCK_SIZE (MORUS_BLOCK_WORDS * MORUS640_WORD_SIZE) - -struct morus640_block { - u8 bytes[MORUS640_BLOCK_SIZE]; -}; - -struct morus640_glue_ops { - void (*init)(void *state, const void *key, const void *iv); - void (*ad)(void *state, const void *data, unsigned int length); - void (*enc)(void *state, const void *src, void *dst, unsigned int length); - void (*dec)(void *state, const void *src, void *dst, unsigned int length); - void (*enc_tail)(void *state, const void *src, void *dst, unsigned int length); - void (*dec_tail)(void *state, const void *src, void *dst, unsigned int length); - void (*final)(void *state, void *tag_xor, u64 assoclen, u64 cryptlen); -}; - -struct morus640_ctx { - const struct morus640_glue_ops *ops; - struct morus640_block key; -}; - -void crypto_morus640_glue_init_ops(struct crypto_aead *aead, - const struct morus640_glue_ops *ops); -int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key, - unsigned int keylen); -int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm, - unsigned int authsize); -int crypto_morus640_glue_encrypt(struct aead_request *req); -int crypto_morus640_glue_decrypt(struct aead_request *req); - -#define MORUS640_DECLARE_ALG(id, driver_name, priority) \ - static const struct morus640_glue_ops crypto_morus640_##id##_ops = {\ - .init = crypto_morus640_##id##_init, \ - .ad = crypto_morus640_##id##_ad, \ - .enc = crypto_morus640_##id##_enc, \ - .enc_tail = crypto_morus640_##id##_enc_tail, \ - .dec = crypto_morus640_##id##_dec, \ - .dec_tail = crypto_morus640_##id##_dec_tail, \ - .final = crypto_morus640_##id##_final, \ - }; \ - \ - static int crypto_morus640_##id##_init_tfm(struct crypto_aead *tfm) \ - { \ - crypto_morus640_glue_init_ops(tfm, 
&crypto_morus640_##id##_ops); \ - return 0; \ - } \ - \ - static void crypto_morus640_##id##_exit_tfm(struct crypto_aead *tfm) \ - { \ - } \ - \ - static struct aead_alg crypto_morus640_##id##_alg = {\ - .setkey = crypto_morus640_glue_setkey, \ - .setauthsize = crypto_morus640_glue_setauthsize, \ - .encrypt = crypto_morus640_glue_encrypt, \ - .decrypt = crypto_morus640_glue_decrypt, \ - .init = crypto_morus640_##id##_init_tfm, \ - .exit = crypto_morus640_##id##_exit_tfm, \ - \ - .ivsize = MORUS_NONCE_SIZE, \ - .maxauthsize = MORUS_MAX_AUTH_SIZE, \ - .chunksize = MORUS640_BLOCK_SIZE, \ - \ - .base = { \ - .cra_flags = CRYPTO_ALG_INTERNAL, \ - .cra_blocksize = 1, \ - .cra_ctxsize = sizeof(struct morus640_ctx), \ - .cra_alignmask = 0, \ - .cra_priority = priority, \ - \ - .cra_name = "__morus640", \ - .cra_driver_name = "__"driver_name, \ - \ - .cra_module = THIS_MODULE, \ - } \ - } - -#endif /* _CRYPTO_MORUS640_GLUE_H */ diff --git a/include/crypto/morus_common.h b/include/crypto/morus_common.h deleted file mode 100644 index 969510a9a56c..000000000000 --- a/include/crypto/morus_common.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * The MORUS Authenticated-Encryption Algorithm - * Common definitions - * - * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com> - * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - */ - -#ifndef _CRYPTO_MORUS_COMMON_H -#define _CRYPTO_MORUS_COMMON_H - -#define MORUS_BLOCK_WORDS 4 -#define MORUS_STATE_BLOCKS 5 -#define MORUS_NONCE_SIZE 16 -#define MORUS_MAX_AUTH_SIZE 16 - -#endif /* _CRYPTO_MORUS_COMMON_H */ diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h index 96071bee03ac..38ec7f5f9041 100644 --- a/include/crypto/pkcs7.h +++ b/include/crypto/pkcs7.h @@ -9,6 +9,7 @@ #define _CRYPTO_PKCS7_H #include <linux/verification.h> +#include <linux/hash_info.h> #include <crypto/public_key.h> struct key; @@ -40,4 +41,7 @@ extern int pkcs7_verify(struct pkcs7_message *pkcs7, extern int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7, const void *data, size_t datalen); +extern int pkcs7_get_digest(struct pkcs7_message *pkcs7, const u8 **buf, + u32 *len, enum hash_algo *hash_algo); + #endif /* _CRYPTO_PKCS7_H */ diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 8a46202b1857..5c2132c71900 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -112,4 +112,51 @@ extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data, extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash); + +/* + * Stand-alone implementation of the SHA256 algorithm. It is designed to + * have as little dependencies as possible so it can be used in the + * kexec_file purgatory. In other cases you should generally use the + * hash APIs from include/crypto/hash.h. Especially when hashing large + * amounts of data as those APIs may be hw-accelerated. 
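A minimal sketch of the stand-alone interface described above, digesting a buffer in one pass; the function name is illustrative only.

#include <crypto/sha.h>

static void example_sha256_digest(const u8 *data, unsigned int len,
				  u8 digest[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, digest);
}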
+ * + * For details see lib/crypto/sha256.c + */ + +static inline int sha256_init(struct sha256_state *sctx) +{ + sctx->state[0] = SHA256_H0; + sctx->state[1] = SHA256_H1; + sctx->state[2] = SHA256_H2; + sctx->state[3] = SHA256_H3; + sctx->state[4] = SHA256_H4; + sctx->state[5] = SHA256_H5; + sctx->state[6] = SHA256_H6; + sctx->state[7] = SHA256_H7; + sctx->count = 0; + + return 0; +} +extern int sha256_update(struct sha256_state *sctx, const u8 *input, + unsigned int length); +extern int sha256_final(struct sha256_state *sctx, u8 *hash); + +static inline int sha224_init(struct sha256_state *sctx) +{ + sctx->state[0] = SHA224_H0; + sctx->state[1] = SHA224_H1; + sctx->state[2] = SHA224_H2; + sctx->state[3] = SHA224_H3; + sctx->state[4] = SHA224_H4; + sctx->state[5] = SHA224_H5; + sctx->state[6] = SHA224_H6; + sctx->state[7] = SHA224_H7; + sctx->count = 0; + + return 0; +} +extern int sha224_update(struct sha256_state *sctx, const u8 *input, + unsigned int length); +extern int sha224_final(struct sha256_state *sctx, u8 *hash); + #endif diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h index 63c14f2dc7bd..20fd1f7468af 100644 --- a/include/crypto/sha1_base.h +++ b/include/crypto/sha1_base.h @@ -5,6 +5,9 @@ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> */ +#ifndef _CRYPTO_SHA1_BASE_H +#define _CRYPTO_SHA1_BASE_H + #include <crypto/internal/hash.h> #include <crypto/sha.h> #include <linux/crypto.h> @@ -101,3 +104,5 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out) *sctx = (struct sha1_state){}; return 0; } + +#endif /* _CRYPTO_SHA1_BASE_H */ diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h index 59159bc944f5..cea60cff80bd 100644 --- a/include/crypto/sha256_base.h +++ b/include/crypto/sha256_base.h @@ -5,6 +5,9 @@ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> */ +#ifndef _CRYPTO_SHA256_BASE_H +#define _CRYPTO_SHA256_BASE_H + #include <crypto/internal/hash.h> #include <crypto/sha.h> #include <linux/crypto.h> @@ -19,34 +22,14 @@ static inline int sha224_base_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); - sctx->state[0] = SHA224_H0; - sctx->state[1] = SHA224_H1; - sctx->state[2] = SHA224_H2; - sctx->state[3] = SHA224_H3; - sctx->state[4] = SHA224_H4; - sctx->state[5] = SHA224_H5; - sctx->state[6] = SHA224_H6; - sctx->state[7] = SHA224_H7; - sctx->count = 0; - - return 0; + return sha224_init(sctx); } static inline int sha256_base_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); - sctx->state[0] = SHA256_H0; - sctx->state[1] = SHA256_H1; - sctx->state[2] = SHA256_H2; - sctx->state[3] = SHA256_H3; - sctx->state[4] = SHA256_H4; - sctx->state[5] = SHA256_H5; - sctx->state[6] = SHA256_H6; - sctx->state[7] = SHA256_H7; - sctx->count = 0; - - return 0; + return sha256_init(sctx); } static inline int sha256_base_do_update(struct shash_desc *desc, @@ -123,3 +106,5 @@ static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) *sctx = (struct sha256_state){}; return 0; } + +#endif /* _CRYPTO_SHA256_BASE_H */ diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h index 099be8027f3f..fb19c77494dc 100644 --- a/include/crypto/sha512_base.h +++ b/include/crypto/sha512_base.h @@ -5,6 +5,9 @@ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org> */ +#ifndef _CRYPTO_SHA512_BASE_H +#define _CRYPTO_SHA512_BASE_H + #include <crypto/internal/hash.h> #include <crypto/sha.h> #include <linux/crypto.h> @@ -126,3 +129,5 @@ static 
inline int sha512_base_finish(struct shash_desc *desc, u8 *out) *sctx = (struct sha512_state){}; return 0; } + +#endif /* _CRYPTO_SHA512_BASE_H */ diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index ce7fa0973580..37c164234d97 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -288,66 +288,6 @@ static inline unsigned int crypto_sync_skcipher_ivsize( return crypto_skcipher_ivsize(&tfm->base); } -static inline unsigned int crypto_skcipher_alg_chunksize( - struct skcipher_alg *alg) -{ - if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_BLKCIPHER) - return alg->base.cra_blocksize; - - if (alg->base.cra_ablkcipher.encrypt) - return alg->base.cra_blocksize; - - return alg->chunksize; -} - -static inline unsigned int crypto_skcipher_alg_walksize( - struct skcipher_alg *alg) -{ - if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_BLKCIPHER) - return alg->base.cra_blocksize; - - if (alg->base.cra_ablkcipher.encrypt) - return alg->base.cra_blocksize; - - return alg->walksize; -} - -/** - * crypto_skcipher_chunksize() - obtain chunk size - * @tfm: cipher handle - * - * The block size is set to one for ciphers such as CTR. However, - * you still need to provide incremental updates in multiples of - * the underlying block size as the IV does not have sub-block - * granularity. This is known in this API as the chunk size. - * - * Return: chunk size in bytes - */ -static inline unsigned int crypto_skcipher_chunksize( - struct crypto_skcipher *tfm) -{ - return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); -} - -/** - * crypto_skcipher_walksize() - obtain walk size - * @tfm: cipher handle - * - * In some cases, algorithms can only perform optimally when operating on - * multiple blocks in parallel. 
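Although the chunk/walk size queries move to the internal header, the user-facing crypto_skcipher_encrypt()/decrypt() calls below keep the same calling convention after being un-inlined; a minimal synchronous usage sketch, with the helper name and GFP choice assumed:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_encrypt(struct crypto_skcipher *tfm,
			   struct scatterlist *src, struct scatterlist *dst,
			   unsigned int len, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* Waits for completion if the implementation is asynchronous. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	return err;
}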
This is reflected by the walksize, which - * must be a multiple of the chunksize (or equal if the concern does not - * apply) - * - * Return: walk size in bytes - */ -static inline unsigned int crypto_skcipher_walksize( - struct crypto_skcipher *tfm) -{ - return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); -} - /** * crypto_skcipher_blocksize() - obtain block size of cipher * @tfm: cipher handle @@ -479,21 +419,7 @@ static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm( * * Return: 0 if the cipher operation was successful; < 0 if an error occurred */ -static inline int crypto_skcipher_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_alg *alg = tfm->base.__crt_alg; - unsigned int cryptlen = req->cryptlen; - int ret; - - crypto_stats_get(alg); - if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else - ret = tfm->encrypt(req); - crypto_stats_skcipher_encrypt(cryptlen, ret, alg); - return ret; -} +int crypto_skcipher_encrypt(struct skcipher_request *req); /** * crypto_skcipher_decrypt() - decrypt ciphertext @@ -506,21 +432,7 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req) * * Return: 0 if the cipher operation was successful; < 0 if an error occurred */ -static inline int crypto_skcipher_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_alg *alg = tfm->base.__crt_alg; - unsigned int cryptlen = req->cryptlen; - int ret; - - crypto_stats_get(alg); - if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - ret = -ENOKEY; - else - ret = tfm->decrypt(req); - crypto_stats_skcipher_decrypt(cryptlen, ret, alg); - return ret; -} +int crypto_skcipher_decrypt(struct skcipher_request *req); /** * DOC: Symmetric Key Cipher Request Handle diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h index 31891b0dc7e3..1cbf9aa1fe52 100644 --- a/include/crypto/sm3_base.h +++ b/include/crypto/sm3_base.h @@ -6,6 +6,9 @@ * Written by Gilad Ben-Yossef <gilad@benyossef.com> */ +#ifndef _CRYPTO_SM3_BASE_H +#define _CRYPTO_SM3_BASE_H + #include <crypto/internal/hash.h> #include <crypto/sm3.h> #include <linux/crypto.h> @@ -104,3 +107,5 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out) *sctx = (struct sm3_state){}; return 0; } + +#endif /* _CRYPTO_SM3_BASE_H */ diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index dd63d08cc54e..296aab724677 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -49,6 +49,11 @@ enum amd_asic_type { CHIP_VEGA12, CHIP_VEGA20, CHIP_RAVEN, + CHIP_ARCTURUS, + CHIP_RENOIR, + CHIP_NAVI10, + CHIP_NAVI14, + CHIP_NAVI12, CHIP_LAST, }; diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h index e56046cf4d04..7aa2f93da49c 100644 --- a/include/drm/bridge/analogix_dp.h +++ b/include/drm/bridge/analogix_dp.h @@ -38,10 +38,6 @@ struct analogix_dp_plat_data { struct drm_connector *); }; -int analogix_dp_psr_enabled(struct analogix_dp_device *dp); -int analogix_dp_enable_psr(struct analogix_dp_device *dp); -int analogix_dp_disable_psr(struct analogix_dp_device *dp); - int analogix_dp_resume(struct analogix_dp_device *dp); int analogix_dp_suspend(struct analogix_dp_device *dp); diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index b4ca970a5b75..cf528c289857 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -150,9 +150,13 @@ struct 
dw_hdmi *dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder, const struct dw_hdmi_plat_data *plat_data); +void dw_hdmi_resume(struct dw_hdmi *hdmi); + void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense); void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); +void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt); +void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca); void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); void dw_hdmi_audio_disable(struct dw_hdmi *hdmi); void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi); diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h index 0c33b9e9e0f0..94cc64a342e1 100644 --- a/include/drm/bridge/dw_mipi_dsi.h +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -9,10 +9,20 @@ #ifndef __DW_MIPI_DSI__ #define __DW_MIPI_DSI__ +#include <linux/types.h> + +#include <drm/drm_modes.h> + +struct drm_display_mode; +struct drm_encoder; struct dw_mipi_dsi; +struct mipi_dsi_device; +struct platform_device; struct dw_mipi_dsi_phy_ops { int (*init)(void *priv_data); + void (*power_on)(void *priv_data); + void (*power_off)(void *priv_data); int (*get_lane_mbps)(void *priv_data, const struct drm_display_mode *mode, unsigned long mode_flags, u32 lanes, u32 format, diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 94aae87b1138..037b1f7a87a5 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -87,7 +87,7 @@ struct module; struct device_node; struct videomode; -struct reservation_object; +struct dma_resv; struct dma_buf_attachment; struct pci_dev; diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h index b05e462276d5..664e120b93e6 100644 --- a/include/drm/drm_agpsupport.h +++ b/include/drm/drm_agpsupport.h @@ -31,11 +31,6 @@ struct drm_agp_head { void drm_free_agp(struct agp_memory * handle, int pages); int drm_bind_agp(struct agp_memory * handle, unsigned int start); int drm_unbind_agp(struct agp_memory * handle); -struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, - struct page **pages, - unsigned long num_pages, - uint32_t gtt_offset, - uint32_t type); struct drm_agp_head *drm_agp_init(struct drm_device *dev); void drm_legacy_agp_clear(struct drm_device *dev); @@ -80,15 +75,6 @@ static inline int drm_unbind_agp(struct agp_memory * handle) return -ENODEV; } -static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, - struct page **pages, - unsigned long num_pages, - uint32_t gtt_offset, - uint32_t type) -{ - return NULL; -} - static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev) { return NULL; diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index e937ff2beb04..927e1205d7aa 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -459,6 +459,13 @@ struct drm_private_state * drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state, struct drm_private_obj *obj); +struct drm_connector * +drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder); +struct drm_connector * +drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state, + struct drm_encoder *encoder); + /** * drm_atomic_get_existing_crtc_state - get crtc state, if it exists * @state: global atomic state object @@ -950,4 +957,19 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) state->connectors_changed; } +/** + * drm_atomic_crtc_effectively_active - compute whether crtc is actually active + 
* @state: &drm_crtc_state for the CRTC + * + * When in self refresh mode, the crtc_state->active value will be false, since + * the crtc is off. However in some cases we're interested in whether the crtc + * is active, or effectively active (ie: it's connected to an active display). + * In these cases, use this function instead of just checking active. + */ +static inline bool +drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state) +{ + return state->active || state->self_refresh_active; +} + #endif /* DRM_ATOMIC_H_ */ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 58214be3bf3d..bf4e07141d81 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -117,12 +117,8 @@ int drm_atomic_helper_update_plane(struct drm_plane *plane, struct drm_modeset_acquire_ctx *ctx); int drm_atomic_helper_disable_plane(struct drm_plane *plane, struct drm_modeset_acquire_ctx *ctx); -int __drm_atomic_helper_disable_plane(struct drm_plane *plane, - struct drm_plane_state *plane_state); int drm_atomic_helper_set_config(struct drm_mode_set *set, struct drm_modeset_acquire_ctx *ctx); -int __drm_atomic_helper_set_config(struct drm_mode_set *set, - struct drm_atomic_state *state); int drm_atomic_helper_disable_all(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h index 66c92cbd8e16..e4577cc11689 100644 --- a/include/drm/drm_atomic_state_helper.h +++ b/include/drm/drm_atomic_state_helper.h @@ -37,6 +37,8 @@ struct drm_private_state; struct drm_modeset_acquire_ctx; struct drm_device; +void __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc, + struct drm_crtc_state *state); void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, struct drm_crtc_state *state); @@ -60,6 +62,7 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, void __drm_atomic_helper_connector_reset(struct drm_connector *connector, struct drm_connector_state *conn_state); void drm_atomic_helper_connector_reset(struct drm_connector *connector); +void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector); void __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, struct drm_connector_state *state); diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index 871008118bab..6bf8b2b78991 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -1,3 +1,6 @@ +#ifndef _DRM_AUTH_H_ +#define _DRM_AUTH_H_ + /* * Internal Header for the Direct Rendering Manager * @@ -25,8 +28,12 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#ifndef _DRM_AUTH_H_ -#define _DRM_AUTH_H_ +#include <linux/idr.h> +#include <linux/kref.h> +#include <linux/wait.h> + +struct drm_file; +struct drm_hw_lock; /* * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index d4428913a4e1..7616f6562fe4 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -237,6 +237,103 @@ struct drm_bridge_funcs { * The enable callback is optional. */ void (*enable)(struct drm_bridge *bridge); + + /** + * @atomic_pre_enable: + * + * This callback should enable the bridge. It is called right before + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @atomic_pre_enable or @pre_enable function. 
If the preceding + * element is a &drm_encoder it's called right before the encoder's + * &drm_encoder_helper_funcs.atomic_enable hook. + * + * The display pipe (i.e. clocks and timing signals) feeding this bridge + * will not yet be running when this callback is called. The bridge must + * not enable the display link feeding the next bridge in the chain (if + * there is one) when this callback is called. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from &drm_bridge_pre_enable. It + * would be prudent to also provide an implementation of @pre_enable if + * you are expecting driver calls into &drm_bridge_pre_enable. + * + * The @atomic_pre_enable callback is optional. + */ + void (*atomic_pre_enable)(struct drm_bridge *bridge, + struct drm_atomic_state *state); + + /** + * @atomic_enable: + * + * This callback should enable the bridge. It is called right after + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called after that + * bridge's @atomic_enable or @enable function. If the preceding element + * is a &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.atomic_enable hook. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is running when this callback is called. This + * callback must enable the display link feeding the next bridge in the + * chain if there is one. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from &drm_bridge_enable. It + * would be prudent to also provide an implementation of @enable if + * you are expecting driver calls into &drm_bridge_enable. + * + * The enable callback is optional. + */ + void (*atomic_enable)(struct drm_bridge *bridge, + struct drm_atomic_state *state); + /** + * @atomic_disable: + * + * This callback should disable the bridge. It is called right before + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called before that + * bridge's @atomic_disable or @disable vfunc. If the preceding element + * is a &drm_encoder it's called right before the + * &drm_encoder_helper_funcs.atomic_disable hook. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is still running when this callback is called. + * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from &drm_bridge_disable. It + * would be prudent to also provide an implementation of @disable if + * you are expecting driver calls into &drm_bridge_disable. + * + * The disable callback is optional. + */ + void (*atomic_disable)(struct drm_bridge *bridge, + struct drm_atomic_state *state); + + /** + * @atomic_post_disable: + * + * This callback should disable the bridge. It is called right after the + * preceding element in the display pipe is disabled. If the preceding + * element is a bridge this means it's called after that bridge's + * @atomic_post_disable or @post_disable function. If the preceding + * element is a &drm_encoder it's called right after the encoder's + * &drm_encoder_helper_funcs.atomic_disable hook. + * + * The bridge must assume that the display pipe (i.e. clocks and timing + * signals) feeding it is no longer running when this callback is + * called. 
+ * + * Note that this function will only be invoked in the context of an + * atomic commit. It will not be invoked from &drm_bridge_post_disable. + * It would be prudent to also provide an implementation of + * @post_disable if you are expecting driver calls into + * &drm_bridge_post_disable. + * + * The post_disable callback is optional. + */ + void (*atomic_post_disable)(struct drm_bridge *bridge, + struct drm_atomic_state *state); }; /** @@ -265,6 +362,14 @@ struct drm_bridge_timings { * input signal after the clock edge. */ u32 hold_time_ps; + /** + * @dual_link: + * + * True if the bus operates in dual-link mode. The exact meaning is + * dependent on the bus type. For LVDS buses, this indicates that even- + * and odd-numbered pixels are received on separate links. + */ + bool dual_link; }; /** @@ -314,6 +419,15 @@ void drm_bridge_mode_set(struct drm_bridge *bridge, void drm_bridge_pre_enable(struct drm_bridge *bridge); void drm_bridge_enable(struct drm_bridge *bridge); +void drm_atomic_bridge_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state); +void drm_atomic_bridge_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state); +void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state); +void drm_atomic_bridge_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state); + #ifdef CONFIG_DRM_PANEL_BRIDGE struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel, u32 connector_type); diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index 268b2cf0052a..5cf2c5dd8b1e 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -3,8 +3,13 @@ #ifndef _DRM_CLIENT_H_ #define _DRM_CLIENT_H_ +#include <linux/lockdep.h> +#include <linux/mutex.h> #include <linux/types.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> + struct drm_client_dev; struct drm_device; struct drm_file; @@ -85,6 +90,16 @@ struct drm_client_dev { * @file: DRM file */ struct drm_file *file; + + /** + * @modeset_mutex: Protects @modesets. 
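A minimal sketch of a bridge driver wiring up the new atomic hooks; the driver and function names are hypothetical, and per the kernel-doc above a driver that also expects the legacy entry points would fill in @enable/@disable as well.

#include <drm/drm_bridge.h>

static void example_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_atomic_state *state)
{
	/* Bring up the link to the next element; the pipe is already running. */
}

static void example_bridge_atomic_disable(struct drm_bridge *bridge,
					  struct drm_atomic_state *state)
{
	/* Shut the link down while the pipe is still running. */
}

static const struct drm_bridge_funcs example_bridge_funcs = {
	.atomic_enable	= example_bridge_atomic_enable,
	.atomic_disable	= example_bridge_atomic_disable,
};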
+ */ + struct mutex modeset_mutex; + + /** + * @modesets: CRTC configurations + */ + struct drm_mode_set *modesets; }; int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, @@ -134,6 +149,39 @@ struct drm_client_buffer { struct drm_client_buffer * drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); +void *drm_client_buffer_vmap(struct drm_client_buffer *buffer); +void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); + +int drm_client_modeset_create(struct drm_client_dev *client); +void drm_client_modeset_free(struct drm_client_dev *client); +int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height); +bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation); +int drm_client_modeset_commit_force(struct drm_client_dev *client); +int drm_client_modeset_commit(struct drm_client_dev *client); +int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); + +/** + * drm_client_for_each_modeset() - Iterate over client modesets + * @modeset: &drm_mode_set loop cursor + * @client: DRM client + */ +#define drm_client_for_each_modeset(modeset, client) \ + for (({ lockdep_assert_held(&(client)->modeset_mutex); }), \ + modeset = (client)->modesets; modeset->crtc; modeset++) + +/** + * drm_client_for_each_connector_iter - connector_list iterator macro + * @connector: &struct drm_connector pointer used as cursor + * @iter: &struct drm_connector_list_iter + * + * This iterates the connectors that are useable for internal clients (excludes + * writeback connectors). + * + * For more info see drm_for_each_connector_iter(). + */ +#define drm_client_for_each_connector_iter(connector, iter) \ + drm_for_each_connector_iter(connector, iter) \ + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) int drm_client_debugfs_init(struct drm_minor *minor); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 02a131202add..681cb590f952 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -41,6 +41,7 @@ struct drm_property; struct drm_property_blob; struct drm_printer; struct edid; +struct i2c_adapter; enum drm_connector_force { DRM_FORCE_UNSPECIFIED, @@ -323,6 +324,8 @@ enum drm_panel_orientation { * edge of the pixel clock * @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE: Sync signals are sampled on the falling * edge of the pixel clock + * @DRM_BUS_FLAG_SHARP_SIGNALS: Set if the Sharp-specific signals + * (SPL, CLS, PS, REV) must be used */ enum drm_bus_flags { DRM_BUS_FLAG_DE_LOW = BIT(0), @@ -341,6 +344,7 @@ enum drm_bus_flags { DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE, DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE, DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_POSEDGE, + DRM_BUS_FLAG_SHARP_SIGNALS = BIT(8), }; /** @@ -464,13 +468,37 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info, unsigned int num_formats); /** + * struct drm_connector_tv_margins - TV connector related margins + * + * Describes the margins in pixels to put around the image on TV + * connectors to deal with overscan. + */ +struct drm_connector_tv_margins { + /** + * @bottom: Bottom margin in pixels. + */ + unsigned int bottom; + + /** + * @left: Left margin in pixels. + */ + unsigned int left; + + /** + * @right: Right margin in pixels. + */ + unsigned int right; + + /** + * @top: Top margin in pixels. 
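A minimal sketch of walking a client's modesets under the new @modeset_mutex, which the macro's lockdep assertion expects to be held; the function name is illustrative.

#include <drm/drm_client.h>

static unsigned int example_count_active_modesets(struct drm_client_dev *client)
{
	struct drm_mode_set *modeset;
	unsigned int count = 0;

	mutex_lock(&client->modeset_mutex);
	drm_client_for_each_modeset(modeset, client) {
		if (modeset->mode)
			count++;
	}
	mutex_unlock(&client->modeset_mutex);

	return count;
}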
+ */ + unsigned int top; +}; + +/** * struct drm_tv_connector_state - TV connector related states * @subconnector: selected subconnector - * @margins: margins (all margins are expressed in pixels) - * @margins.left: left margin - * @margins.right: right margin - * @margins.top: top margin - * @margins.bottom: bottom margin + * @margins: TV margins * @mode: TV mode * @brightness: brightness in percent * @contrast: contrast in percent @@ -481,12 +509,7 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info, */ struct drm_tv_connector_state { enum drm_mode_subconnector subconnector; - struct { - unsigned int left; - unsigned int right; - unsigned int top; - unsigned int bottom; - } margins; + struct drm_connector_tv_margins margins; unsigned int mode; unsigned int brightness; unsigned int contrast; @@ -517,6 +540,15 @@ struct drm_connector_state { * Used by the atomic helpers to select the encoder, through the * &drm_connector_helper_funcs.atomic_best_encoder or * &drm_connector_helper_funcs.best_encoder callbacks. + * + * This is also used in the atomic helpers to map encoders to their + * current and previous connectors, see + * drm_atomic_get_old_connector_for_encoder() and + * drm_atomic_get_new_connector_for_encoder(). + * + * NOTE: Atomic drivers must fill this out (either themselves or through + * helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will + * not return correct data to userspace. */ struct drm_encoder *best_encoder; @@ -540,6 +572,20 @@ struct drm_connector_state { struct drm_tv_connector_state tv; /** + * @self_refresh_aware: + * + * This tracks whether a connector is aware of the self refresh state. + * It should be set to true for those connector implementations which + * understand the self refresh state. This is needed since the crtc + * registers the self refresh helpers and it doesn't know if the + * connectors downstream have implemented self refresh entry/exit. + * + * Drivers should set this to true in atomic_check if they know how to + * handle self_refresh requests. + */ + bool self_refresh_aware; + + /** * @picture_aspect_ratio: Connector property to control the * HDMI infoframe aspect ratio setting. * @@ -557,6 +603,12 @@ struct drm_connector_state { unsigned int content_type; /** + * @hdcp_content_type: Connector property to pass the type of + * protected content. This is most commonly used for HDCP. + */ + unsigned int hdcp_content_type; + + /** * @scaling_mode: Connector property to control the * upscaling, mostly used for built-in panels. */ @@ -599,6 +651,12 @@ struct drm_connector_state { * and the connector bpc limitations obtained from edid. */ u8 max_bpc; + + /** + * @hdr_output_metadata: + * DRM blob property for HDR output metadata + */ + struct drm_property_blob *hdr_output_metadata; }; /** @@ -894,19 +952,123 @@ struct drm_connector_funcs { const struct drm_connector_state *state); }; -/* mode specified on the command line */ +/** + * struct drm_cmdline_mode - DRM Mode passed through the kernel command-line + * + * Each connector can have an initial mode with additional options + * passed through the kernel command line. This structure allows to + * express those parameters and will be filled by the command-line + * parser. + */ struct drm_cmdline_mode { + /** + * @name: + * + * Name of the mode. + */ + char name[DRM_DISPLAY_MODE_LEN]; + + /** + * @specified: + * + * Has a mode been read from the command-line? + */ bool specified; + + /** + * @refresh_specified: + * + * Did the mode have a preferred refresh rate? 
+ */ bool refresh_specified; + + /** + * @bpp_specified: + * + * Did the mode have a preferred BPP? + */ bool bpp_specified; - int xres, yres; + + /** + * @xres: + * + * Active resolution on the X axis, in pixels. + */ + int xres; + + /** + * @yres: + * + * Active resolution on the Y axis, in pixels. + */ + int yres; + + /** + * @bpp: + * + * Bits per pixels for the mode. + */ int bpp; + + /** + * @refresh: + * + * Refresh rate, in Hertz. + */ int refresh; + + /** + * @rb: + * + * Do we need to use reduced blanking? + */ bool rb; + + /** + * @interlace: + * + * The mode is interlaced. + */ bool interlace; + + /** + * @cvt: + * + * The timings will be calculated using the VESA Coordinated + * Video Timings instead of looking up the mode from a table. + */ bool cvt; + + /** + * @margins: + * + * Add margins to the mode calculation (1.8% of xres rounded + * down to 8 pixels and 1.8% of yres). + */ bool margins; + + /** + * @force: + * + * Ignore the hotplug state of the connector, and force its + * state to one of the DRM_FORCE_* values. + */ enum drm_connector_force force; + + /** + * @rotation_reflection: + * + * Initial rotation and reflection of the mode setup from the + * command line. See DRM_MODE_ROTATE_* and + * DRM_MODE_REFLECT_*. The only rotations supported are + * DRM_MODE_ROTATE_0 and DRM_MODE_ROTATE_180. + */ + unsigned int rotation_reflection; + + /** + * @tv_margins: TV margins to apply to the mode. + */ + struct drm_connector_tv_margins tv_margins; }; /** @@ -1062,12 +1224,6 @@ struct drm_connector { struct drm_property *vrr_capable_property; /** - * @content_protection_property: DRM ENUM property for content - * protection. See drm_connector_attach_content_protection_property(). - */ - struct drm_property *content_protection_property; - - /** * @colorspace_property: Connector property to set the suitable * colorspace supported by the sink. */ @@ -1162,6 +1318,18 @@ struct drm_connector { * [0]: progressive, [1]: interlaced */ int audio_latency[2]; + + /** + * @ddc: associated ddc adapter. + * A connector usually has its associated ddc adapter. If a driver uses + * this field, then an appropriate symbolic link is created in connector + * sysfs directory to make it easy for the user to tell which i2c + * adapter is for a particular display. + * + * The field should be set by calling drm_connector_init_with_ddc(). + */ + struct i2c_adapter *ddc; + /** * @null_edid_counter: track sinks that give us all zeros for the EDID. * Needed to workaround some HW bugs where we get all 0s @@ -1239,6 +1407,9 @@ struct drm_connector { * &drm_mode_config.connector_free_work. 
*/ struct llist_node free_node; + + /** @hdr_sink_metadata: HDR Metadata Information read from sink */ + struct hdr_sink_metadata hdr_sink_metadata; }; #define obj_to_connector(x) container_of(x, struct drm_connector, base) @@ -1247,6 +1418,11 @@ int drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type); +int drm_connector_init_with_ddc(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type, + struct i2c_adapter *ddc); void drm_connector_attach_edid_property(struct drm_connector *connector); int drm_connector_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); @@ -1332,6 +1508,7 @@ const char *drm_get_dvi_i_select_name(int val); const char *drm_get_tv_subconnector_name(int val); const char *drm_get_tv_select_name(int val); const char *drm_get_content_protection_name(int val); +const char *drm_get_hdcp_content_type_name(int val); int drm_mode_create_dvi_i_properties(struct drm_device *dev); int drm_mode_create_tv_margin_properties(struct drm_device *dev); @@ -1345,8 +1522,6 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, u32 scaling_mode_mask); int drm_connector_attach_vrr_capable_property( struct drm_connector *connector); -int drm_connector_attach_content_protection_property( - struct drm_connector *connector); int drm_mode_create_aspect_ratio_property(struct drm_device *dev); int drm_mode_create_colorspace_property(struct drm_connector *connector); int drm_mode_create_content_type_property(struct drm_device *dev); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 58ad983d7cd6..408b6f4e63c0 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -39,6 +39,7 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_modes.h> #include <drm/drm_connector.h> +#include <drm/drm_device.h> #include <drm/drm_property.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> @@ -53,6 +54,7 @@ struct drm_mode_set; struct drm_file; struct drm_clip_rect; struct drm_printer; +struct drm_self_refresh_data; struct device_node; struct dma_fence; struct edid; @@ -283,12 +285,12 @@ struct drm_crtc_state { u32 target_vblank; /** - * @pageflip_flags: + * @async_flip: * - * DRM_MODE_PAGE_FLIP_* flags, as passed to the page flip ioctl. - * Zero in any other case. + * This is set when DRM_MODE_PAGE_FLIP_ASYNC is set in the legacy + * PAGE_FLIP IOCTL. It's not wired up for the atomic IOCTL itself yet. */ - u32 pageflip_flags; + bool async_flip; /** * @vrr_enabled: @@ -300,6 +302,17 @@ struct drm_crtc_state { bool vrr_enabled; /** + * @self_refresh_active: + * + * Used by the self refresh helpers to denote when a self refresh + * transition is occurring. This will be set on enable/disable callbacks + * when self refresh is being enabled or disabled. In some cases, it may + * not be desirable to fully shut off the crtc during self refresh. + * CRTC's can inspect this flag and determine the best course of action. + */ + bool self_refresh_active; + + /** * @event: * * Optional pointer to a DRM event to signal upon completion of the @@ -743,6 +756,9 @@ struct drm_crtc_funcs { * provided from the configured source. Drivers must accept an "auto" * source name that will select a default source for this CRTC. * + * This may trigger an atomic modeset commit if necessary, to enable CRC + * generation. 
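A minimal sketch of registering a connector together with its DDC adapter through the new helper, so the sysfs symlink described above gets created; the wrapper, funcs table and connector type are assumptions for illustration.

#include <drm/drm_connector.h>

static int example_register_hdmi_connector(struct drm_device *dev,
					   struct drm_connector *connector,
					   const struct drm_connector_funcs *funcs,
					   struct i2c_adapter *ddc)
{
	return drm_connector_init_with_ddc(dev, connector, funcs,
					   DRM_MODE_CONNECTOR_HDMIA, ddc);
}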
+ * * Note that "auto" can depend upon the current modeset configuration, * e.g. it could pick an encoder or output specific CRC sampling point. * @@ -754,6 +770,7 @@ struct drm_crtc_funcs { * 0 on success or a negative error code on failure. */ int (*set_crc_source)(struct drm_crtc *crtc, const char *source); + /** * @verify_crc_source: * @@ -1087,6 +1104,13 @@ struct drm_crtc { * The name of the CRTC's fence timeline. */ char timeline_name[32]; + + /** + * @self_refresh_data: Holds the state for the self refresh helpers + * + * Initialized via drm_self_refresh_helper_init(). + */ + struct drm_self_refresh_data *self_refresh_data; }; /** diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h index ac0f75df1ac9..7501e323d383 100644 --- a/include/drm/drm_debugfs.h +++ b/include/drm/drm_debugfs.h @@ -32,6 +32,8 @@ #ifndef _DRM_DEBUGFS_H_ #define _DRM_DEBUGFS_H_ +#include <linux/types.h> +#include <linux/seq_file.h> /** * struct drm_info_list - debugfs info list entry * diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 7f9ef709b2b6..1acfc3bbd3fb 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -17,6 +17,7 @@ struct drm_vblank_crtc; struct drm_sg_mem; struct drm_local_map; struct drm_vma_offset_manager; +struct drm_vram_mm; struct drm_fb_helper; struct inode; @@ -286,6 +287,9 @@ struct drm_device { /** @vma_offset_manager: GEM information */ struct drm_vma_offset_manager *vma_offset_manager; + /** @vram_mm: VRAM MM memory manager */ + struct drm_vram_mm *vram_mm; + /** * @switch_power_state: * diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h index c0d4df6a606f..9d3b745c3107 100644 --- a/include/drm/drm_displayid.h +++ b/include/drm/drm_displayid.h @@ -40,6 +40,7 @@ #define DATA_BLOCK_DISPLAY_INTERFACE 0x0f #define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10 #define DATA_BLOCK_TILED_DISPLAY 0x12 +#define DATA_BLOCK_CTA 0x81 #define DATA_BLOCK_VENDOR_SPECIFIC 0x7f @@ -90,4 +91,13 @@ struct displayid_detailed_timing_block { struct displayid_block base; struct displayid_detailed_timings_1 timings[0]; }; + +#define for_each_displayid_db(displayid, block, idx, length) \ + for ((block) = (struct displayid_block *)&(displayid)[idx]; \ + (idx) + sizeof(struct displayid_block) <= (length) && \ + (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \ + (block)->num_bytes > 0; \ + (idx) += (block)->num_bytes + sizeof(struct displayid_block), \ + (block) = (struct displayid_block *)&(displayid)[idx]) + #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 97ce790a5b5a..8364502f92cf 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -249,6 +249,7 @@ #define DP_DSC_PEAK_THROUGHPUT 0x06B # define DP_DSC_THROUGHPUT_MODE_0_MASK (0xf << 0) # define DP_DSC_THROUGHPUT_MODE_0_SHIFT 0 +# define DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED 0 # define DP_DSC_THROUGHPUT_MODE_0_340 (1 << 0) # define DP_DSC_THROUGHPUT_MODE_0_400 (2 << 0) # define DP_DSC_THROUGHPUT_MODE_0_450 (3 << 0) @@ -263,8 +264,10 @@ # define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0) # define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0) # define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0) +# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 4) # define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4) # define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4 +# define DP_DSC_THROUGHPUT_MODE_1_UPSUPPORTED 0 # define DP_DSC_THROUGHPUT_MODE_1_340 (1 << 4) # define DP_DSC_THROUGHPUT_MODE_1_400 (2 << 4) # define DP_DSC_THROUGHPUT_MODE_1_450 (3 << 4) @@ 
-279,6 +282,7 @@ # define DP_DSC_THROUGHPUT_MODE_1_900 (12 << 4) # define DP_DSC_THROUGHPUT_MODE_1_950 (13 << 4) # define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4) +# define DP_DSC_THROUGHPUT_MODE_1_170 (15 << 4) #define DP_DSC_MAX_SLICE_WIDTH 0x06C #define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560 @@ -352,6 +356,11 @@ # define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2) # define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3) +/* DP Extended DSC Capabilities */ +#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */ +#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1 +#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2 + /* link configuration */ #define DP_LINK_BW_SET 0x100 # define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */ @@ -1083,17 +1092,30 @@ struct dp_sdp_header { #define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F #define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F -struct edp_vsc_psr { +/** + * struct dp_sdp - DP secondary data packet + * @sdp_header: DP secondary data packet header + * @db: DP secondaray data packet data blocks + * VSC SDP Payload for PSR + * db[0]: Stereo Interface + * db[1]: 0 - PSR State; 1 - Update RFB; 2 - CRC Valid + * db[2]: CRC value bits 7:0 of the R or Cr component + * db[3]: CRC value bits 15:8 of the R or Cr component + * db[4]: CRC value bits 7:0 of the G or Y component + * db[5]: CRC value bits 15:8 of the G or Y component + * db[6]: CRC value bits 7:0 of the B or Cb component + * db[7]: CRC value bits 15:8 of the B or Cb component + * db[8] - db[31]: Reserved + * VSC SDP Payload for Pixel Encoding/Colorimetry Format + * db[0] - db[15]: Reserved + * db[16]: Pixel Encoding and Colorimetry Formats + * db[17]: Dynamic Range and Component Bit Depth + * db[18]: Content Type + * db[19] - db[31]: Reserved + */ +struct dp_sdp { struct dp_sdp_header sdp_header; - u8 DB0; /* Stereo Interface */ - u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */ - u8 DB2; /* CRC value bits 7:0 of the R or Cr component */ - u8 DB3; /* CRC value bits 15:8 of the R or Cr component */ - u8 DB4; /* CRC value bits 7:0 of the G or Y component */ - u8 DB5; /* CRC value bits 15:8 of the G or Y component */ - u8 DB6; /* CRC value bits 7:0 of the B or Cb component */ - u8 DB7; /* CRC value bits 15:8 of the B or Cb component */ - u8 DB8_31[24]; /* Reserved */ + u8 db[32]; } __packed; #define EDP_VSC_PSR_STATE_ACTIVE (1<<0) @@ -1287,6 +1309,10 @@ struct drm_dp_aux { * @cec: struct containing fields used for CEC-Tunneling-over-AUX. */ struct drm_dp_aux_cec cec; + /** + * @is_remote: Is this AUX CH actually using sideband messaging. + */ + bool is_remote; }; ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, @@ -1401,6 +1427,13 @@ enum drm_dp_quirk { * driver still need to implement proper handling for such device. */ DP_DPCD_QUIRK_NO_PSR, + /** + * @DP_DPCD_QUIRK_NO_SINK_COUNT: + * + * The device does not set SINK_COUNT to a non-zero value. + * The driver should ignore SINK_COUNT during detection. 
+ */ + DP_DPCD_QUIRK_NO_SINK_COUNT, }; /** diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 8c97a5f92c47..2ba6253ea6d3 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -643,6 +643,17 @@ void drm_dp_mst_dump_topology(struct seq_file *m, void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); int __must_check drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); + +ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, + unsigned int offset, void *buffer, size_t size); +ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, + unsigned int offset, void *buffer, size_t size); + +int drm_dp_mst_connector_late_register(struct drm_connector *connector, + struct drm_dp_mst_port *port); +void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, + struct drm_dp_mst_port *port); + struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); int __must_check diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 68ca736c548d..8976afe48c1c 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -62,12 +62,6 @@ enum drm_driver_feature { */ DRIVER_MODESET = BIT(1), /** - * @DRIVER_PRIME: - * - * Driver implements DRM PRIME buffer sharing. - */ - DRIVER_PRIME = BIT(2), - /** * @DRIVER_RENDER: * * Driver supports dedicated render nodes. See also the :ref:`section on @@ -502,21 +496,25 @@ struct drm_driver { * @gem_free_object: deconstructor for drm_gem_objects * * This is deprecated and should not be used by new drivers. Use - * @gem_free_object_unlocked instead. + * &drm_gem_object_funcs.free instead. */ void (*gem_free_object) (struct drm_gem_object *obj); /** * @gem_free_object_unlocked: deconstructor for drm_gem_objects * - * This is for drivers which are not encumbered with &drm_device.struct_mutex - * legacy locking schemes. Use this hook instead of @gem_free_object. + * This is deprecated and should not be used by new drivers. Use + * &drm_gem_object_funcs.free instead. + * Compared to @gem_free_object this is not encumbered with + * &drm_device.struct_mutex legacy locking schemes. */ void (*gem_free_object_unlocked) (struct drm_gem_object *obj); /** * @gem_open_object: * + * This callback is deprecated in favour of &drm_gem_object_funcs.open. + * * Driver hook called upon gem handle creation */ int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); @@ -524,6 +522,8 @@ struct drm_driver { /** * @gem_close_object: * + * This callback is deprecated in favour of &drm_gem_object_funcs.close. + * * Driver hook called upon gem handle release */ void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); @@ -531,6 +531,9 @@ struct drm_driver { /** * @gem_print_info: * + * This callback is deprecated in favour of + * &drm_gem_object_funcs.print_info. + * * If driver subclasses struct &drm_gem_object, it can implement this * optional hook for printing additional driver specific info. * @@ -545,56 +548,108 @@ struct drm_driver { /** * @gem_create_object: constructor for gem objects * - * Hook for allocating the GEM object struct, for use by core - * helpers. + * Hook for allocating the GEM object struct, for use by the CMA and + * SHMEM GEM helpers. 
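A minimal sketch of filling the new generic struct dp_sdp for a PSR VSC packet, relying only on the db[] layout documented above; programming of the SDP header and the hardware DIP registers stays driver specific and is omitted.

#include <drm/drm_dp_helper.h>
#include <linux/string.h>

static void example_fill_psr_vsc(struct dp_sdp *sdp, bool psr_active)
{
	memset(sdp, 0, sizeof(*sdp));

	/* db[0]: Stereo Interface, left at zero here. */
	/* db[1]: PSR state. */
	if (psr_active)
		sdp->db[1] = EDP_VSC_PSR_STATE_ACTIVE;
}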
*/ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, size_t size); - - /* prime: */ /** * @prime_handle_to_fd: * - * export handle -> fd (see drm_gem_prime_handle_to_fd() helper) + * Main PRIME export function. Should be implemented with + * drm_gem_prime_handle_to_fd() for GEM based drivers. + * + * For an in-depth discussion see :ref:`PRIME buffer sharing + * documentation <prime_buffer_sharing>`. */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); /** * @prime_fd_to_handle: * - * import fd -> handle (see drm_gem_prime_fd_to_handle() helper) + * Main PRIME import function. Should be implemented with + * drm_gem_prime_fd_to_handle() for GEM based drivers. + * + * For an in-depth discussion see :ref:`PRIME buffer sharing + * documentation <prime_buffer_sharing>`. */ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); /** * @gem_prime_export: * - * export GEM -> dmabuf - * - * This defaults to drm_gem_prime_export() if not set. + * Export hook for GEM drivers. Deprecated in favour of + * &drm_gem_object_funcs.export. */ - struct dma_buf * (*gem_prime_export)(struct drm_device *dev, - struct drm_gem_object *obj, int flags); + struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj, + int flags); /** * @gem_prime_import: * - * import dmabuf -> GEM + * Import hook for GEM drivers. * * This defaults to drm_gem_prime_import() if not set. */ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, struct dma_buf *dma_buf); + + /** + * @gem_prime_pin: + * + * Deprecated hook in favour of &drm_gem_object_funcs.pin. + */ int (*gem_prime_pin)(struct drm_gem_object *obj); + + /** + * @gem_prime_unpin: + * + * Deprecated hook in favour of &drm_gem_object_funcs.unpin. + */ void (*gem_prime_unpin)(struct drm_gem_object *obj); - struct reservation_object * (*gem_prime_res_obj)( - struct drm_gem_object *obj); + + + /** + * @gem_prime_get_sg_table: + * + * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table. + */ struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); + + /** + * @gem_prime_import_sg_table: + * + * Optional hook used by the PRIME helper functions + * drm_gem_prime_import() respectively drm_gem_prime_import_dev(). + */ struct drm_gem_object *(*gem_prime_import_sg_table)( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); + /** + * @gem_prime_vmap: + * + * Deprecated vmap hook for GEM drivers. Please use + * &drm_gem_object_funcs.vmap instead. + */ void *(*gem_prime_vmap)(struct drm_gem_object *obj); + + /** + * @gem_prime_vunmap: + * + * Deprecated vunmap hook for GEM drivers. Please use + * &drm_gem_object_funcs.vunmap instead. + */ void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); + + /** + * @gem_prime_mmap: + * + * mmap hook for GEM drivers, used to implement dma-buf mmap in the + * PRIME helpers. + * + * FIXME: There's way too much duplication going on here, and also moved + * to &drm_gem_object_funcs. + */ int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); @@ -662,6 +717,9 @@ struct drm_driver { /** * @gem_vm_ops: Driver private ops for this object + * + * For GEM drivers this is deprecated in favour of + * &drm_gem_object_funcs.vm_ops. 
*/ const struct vm_operations_struct *gem_vm_ops; diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index c9ca0be54d9a..b9719418c3d2 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -25,6 +25,7 @@ #include <linux/types.h> #include <linux/hdmi.h> +#include <drm/drm_mode.h> struct drm_device; struct i2c_adapter; @@ -176,21 +177,23 @@ struct detailed_timing { #define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4) #define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5) #define DRM_EDID_INPUT_DIGITAL (1 << 7) -#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) -#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) -#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) -#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) -#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) -#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) -#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) -#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) -#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) -#define DRM_EDID_DIGITAL_TYPE_UNDEF (0) -#define DRM_EDID_DIGITAL_TYPE_DVI (1) -#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2) -#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3) -#define DRM_EDID_DIGITAL_TYPE_MDDI (4) -#define DRM_EDID_DIGITAL_TYPE_DP (5) +#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_MASK (7 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_UNDEF (0 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_DVI (1 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_MDDI (4 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_TYPE_DP (5 << 0) /* 1.4 */ +#define DRM_EDID_DIGITAL_DFP_1_X (1 << 0) /* 1.3 */ #define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) #define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1) @@ -370,6 +373,10 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, const struct drm_display_mode *mode, enum hdmi_quantization_range rgb_quant_range); +int +drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, + const struct drm_connector_state *conn_state); + /** * drm_eld_mnl - Get ELD monitor name length in bytes. 
* @eld: pointer to an eld memory structure with mnl set diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 40af2866c26a..c8a8ae2a678a 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -43,17 +43,6 @@ enum mode_set_atomic { ENTER_ATOMIC_MODE_SET, }; -struct drm_fb_offset { - int x, y; -}; - -struct drm_fb_helper_crtc { - struct drm_mode_set mode_set; - struct drm_display_mode *desired_mode; - int x, y; - int rotation; -}; - /** * struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size * @fb_width: fbdev width @@ -104,18 +93,10 @@ struct drm_fb_helper_funcs { struct drm_fb_helper_surface_size *sizes); }; -struct drm_fb_helper_connector { - struct drm_connector *connector; -}; - /** * struct drm_fb_helper - main structure to emulate fbdev on top of KMS * @fb: Scanout framebuffer object * @dev: DRM device - * @crtc_count: number of possible CRTCs - * @crtc_info: per-CRTC helper state (mode, x/y offset, etc) - * @connector_count: number of connected connectors - * @connector_info_alloc_count: size of connector_info * @funcs: driver callbacks for fb helper * @fbdev: emulated fbdev device info struct * @pseudo_palette: fake palette of 16 colors @@ -147,24 +128,6 @@ struct drm_fb_helper { struct drm_framebuffer *fb; struct drm_device *dev; - int crtc_count; - struct drm_fb_helper_crtc *crtc_info; - int connector_count; - int connector_info_alloc_count; - /** - * @sw_rotations: - * Bitmask of all rotations requested for panel-orientation which - * could not be handled in hardware. If only one bit is set - * fbdev->fbcon_rotate_hint gets set to the requested rotation. - */ - int sw_rotations; - /** - * @connector_info: - * - * Array of per-connector information. Do not iterate directly, but use - * drm_fb_helper_for_each_connector. 
- */ - struct drm_fb_helper_connector **connector_info; const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; u32 pseudo_palette[17]; @@ -304,18 +267,8 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); -int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); int drm_fb_helper_debug_enter(struct fb_info *info); int drm_fb_helper_debug_leave(struct fb_info *info); -struct drm_display_mode * -drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, - int width, int height); -struct drm_display_mode * -drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn); - -int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); -int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, - struct drm_connector *connector); int drm_fb_helper_fbdev_setup(struct drm_device *dev, struct drm_fb_helper *fb_helper, @@ -490,12 +443,6 @@ static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, return 0; } -static inline int -drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) -{ - return 0; -} - static inline int drm_fb_helper_debug_enter(struct fb_info *info) { return 0; @@ -506,34 +453,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info) return 0; } -static inline struct drm_display_mode * -drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, - int width, int height) -{ - return NULL; -} - -static inline struct drm_display_mode * -drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, - int width, int height) -{ - return NULL; -} - -static inline int -drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, - struct drm_connector *connector) -{ - return 0; -} - -static inline int -drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, - struct drm_connector *connector) -{ - return 0; -} - static inline int drm_fb_helper_fbdev_setup(struct drm_device *dev, struct drm_fb_helper *fb_helper, @@ -575,6 +494,27 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) #endif +/* TODO: There's a todo entry to remove these three */ +static inline int +drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline int +drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + return 0; +} + +static inline int +drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + return 0; +} + /** * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers * @a: memory range, users of which are to be removed diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index b3d9d88ab290..306d1efeb5e0 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -260,6 +260,50 @@ drm_format_info_is_yuv_sampling_444(const struct drm_format_info *info) return info->is_yuv && info->hsub == 1 && info->vsub == 1; } +/** + * drm_format_info_plane_width - width of the plane given the first plane + * @info: pixel format info + * @width: width of the first plane + * @plane: plane index + * + * Returns: + * The width of @plane, given that the width of the first plane is @width. 
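+ *
+ * A worked example: for a 2x2 chroma-subsampled format such as NV12
+ * (hsub = vsub = 2), a first plane that is 1920 pixels wide gives
+ * drm_format_info_plane_width(info, 1920, 1) == 1920 / 2 == 960.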
+ */ +static inline +int drm_format_info_plane_width(const struct drm_format_info *info, int width, + int plane) +{ + if (!info || plane >= info->num_planes) + return 0; + + if (plane == 0) + return width; + + return width / info->hsub; +} + +/** + * drm_format_info_plane_height - height of the plane given the first plane + * @info: pixel format info + * @height: height of the first plane + * @plane: plane index + * + * Returns: + * The height of @plane, given that the height of the first plane is @height. + */ +static inline +int drm_format_info_plane_height(const struct drm_format_info *info, int height, + int plane) +{ + if (!info || plane >= info->num_planes) + return 0; + + if (plane == 0) + return height; + + return height / info->vsub; +} + const struct drm_format_info *__drm_format_info(u32 format); const struct drm_format_info *drm_format_info(u32 format); const struct drm_format_info * @@ -268,12 +312,6 @@ drm_get_format_info(struct drm_device *dev, uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, uint32_t bpp, uint32_t depth); -int drm_format_num_planes(uint32_t format); -int drm_format_plane_cpp(uint32_t format, int plane); -int drm_format_horz_chroma_subsampling(uint32_t format); -int drm_format_vert_chroma_subsampling(uint32_t format); -int drm_format_plane_width(int width, uint32_t format, int plane); -int drm_format_plane_height(int height, uint32_t format, int plane); unsigned int drm_format_info_block_width(const struct drm_format_info *info, int plane); unsigned int drm_format_info_block_height(const struct drm_format_info *info, diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index c23016748e3f..c0e0256e3e98 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -87,6 +87,9 @@ struct drm_framebuffer_funcs { * for more information as all the semantics and arguments have a one to * one mapping on this function. * + * Atomic drivers should use drm_atomic_helper_dirtyfb() to implement + * this hook. + * * RETURNS: * * 0 on success or a negative error code on failure. diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 5047c7ee25f5..6aaba14f5972 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -35,7 +35,7 @@ */ #include <linux/kref.h> -#include <linux/reservation.h> +#include <linux/dma-resv.h> #include <drm/drm_vma_manager.h> @@ -101,7 +101,7 @@ struct drm_gem_object_funcs { /** * @pin: * - * Pin backing buffer in memory. + * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper. * * This callback is optional. */ @@ -110,7 +110,7 @@ struct drm_gem_object_funcs { /** * @unpin: * - * Unpin backing buffer. + * Unpin backing buffer. Used by the drm_gem_map_detach() helper. * * This callback is optional. */ @@ -120,16 +120,21 @@ struct drm_gem_object_funcs { * @get_sg_table: * * Returns a Scatter-Gather table representation of the buffer. - * Used when exporting a buffer. + * Used when exporting a buffer by the drm_gem_map_dma_buf() helper. + * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table() + * in drm_gem_unmap_buf(), therefore these helpers and this callback + * here cannot be used for sg tables pointing at driver private memory + * ranges. * - * This callback is mandatory if buffer export is supported. + * See also drm_prime_pages_to_sg(). */ struct sg_table *(*get_sg_table)(struct drm_gem_object *obj); /** * @vmap: * - * Returns a virtual address for the buffer. 
+ * Returns a virtual address for the buffer. Used by the + * drm_gem_dmabuf_vmap() helper. * * This callback is optional. */ @@ -138,7 +143,8 @@ struct drm_gem_object_funcs { /** * @vunmap: * - * Releases the the address previously returned by @vmap. + * Releases the address previously returned by @vmap. Used by the + * drm_gem_dmabuf_vunmap() helper. * * This callback is optional. */ @@ -270,7 +276,7 @@ struct drm_gem_object { * * Normally (@resv == &@_resv) except for imported GEM objects. */ - struct reservation_object *resv; + struct dma_resv *resv; /** * @_resv: @@ -279,7 +285,7 @@ struct drm_gem_object { * * This is unused for imported GEM objects. */ - struct reservation_object _resv; + struct dma_resv _resv; /** * @funcs: @@ -384,7 +390,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out); struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); -long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, +long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, bool wait_all, unsigned long timeout); int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, struct ww_acquire_ctx *acquire_ctx); @@ -401,9 +407,4 @@ int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint32_t handle); -int drm_gem_pin(struct drm_gem_object *obj); -void drm_gem_unpin(struct drm_gem_object *obj); -void *drm_gem_vmap(struct drm_gem_object *obj); -void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr); - #endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h index 7f307e834eef..d9f13fd25b0a 100644 --- a/include/drm/drm_gem_framebuffer_helper.h +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -33,11 +33,4 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state); int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); - -struct drm_framebuffer * -drm_gem_fbdev_fb_create(struct drm_device *dev, - struct drm_fb_helper_surface_size *sizes, - unsigned int pitch_align, struct drm_gem_object *obj, - const struct drm_framebuffer_funcs *funcs); - #endif diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 038b6d313447..7865e6b5d36c 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -45,6 +45,22 @@ struct drm_gem_shmem_object { unsigned int pages_use_count; /** + * @madv: State for madvise + * + * 0 is active/inuse. + * A negative value means the object is purged. + * Positive values are driver specific and not used by the helpers. + */ + int madv; + + /** + * @madv_list: List entry for madvise tracking + * + * Typically used by drivers to track purgeable objects + */ + struct list_head madv_list; + + /** + * @pages_mark_dirty_on_put: + * + * Mark pages as dirty when they are put.
@@ -121,6 +137,18 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj); void *drm_gem_shmem_vmap(struct drm_gem_object *obj); void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr); +int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv); + +static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) +{ + return (shmem->madv > 0) && + !shmem->vmap_use_count && shmem->sgt && + !shmem->base.dma_buf && !shmem->base.import_attach; +} + +void drm_gem_shmem_purge_locked(struct drm_gem_object *obj); +bool drm_gem_shmem_purge(struct drm_gem_object *obj); + struct drm_gem_shmem_object * drm_gem_shmem_create_with_handle(struct drm_file *file_priv, struct drm_device *dev, size_t size, diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h new file mode 100644 index 000000000000..ac217d768456 --- /dev/null +++ b/include/drm/drm_gem_vram_helper.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef DRM_GEM_VRAM_HELPER_H +#define DRM_GEM_VRAM_HELPER_H + +#include <drm/drm_gem.h> +#include <drm/ttm/ttm_bo_api.h> +#include <drm/ttm/ttm_placement.h> +#include <linux/kernel.h> /* for container_of() */ + +struct drm_mode_create_dumb; +struct drm_vram_mm_funcs; +struct filp; +struct vm_area_struct; + +#define DRM_GEM_VRAM_PL_FLAG_VRAM TTM_PL_FLAG_VRAM +#define DRM_GEM_VRAM_PL_FLAG_SYSTEM TTM_PL_FLAG_SYSTEM + +/* + * Buffer-object helpers + */ + +/** + * struct drm_gem_vram_object - GEM object backed by VRAM + * @gem: GEM object + * @bo: TTM buffer object + * @kmap: Mapping information for @bo + * @placement: TTM placement information. Supported placements are \ + %TTM_PL_VRAM and %TTM_PL_SYSTEM + * @placements: TTM placement information. + * @pin_count: Pin counter + * + * The type struct drm_gem_vram_object represents a GEM object that is + * backed by VRAM. It can be used for simple framebuffer devices with + * dedicated memory. The buffer object can be evicted to system memory if + * video memory becomes scarce. + */ +struct drm_gem_vram_object { + struct ttm_buffer_object bo; + struct ttm_bo_kmap_obj kmap; + + /* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */ + struct ttm_placement placement; + struct ttm_place placements[2]; + + int pin_count; +}; + +/** + * Returns the container of type &struct drm_gem_vram_object + * for field bo. + * @bo: the VRAM buffer object + * Returns: The containing GEM VRAM object + */ +static inline struct drm_gem_vram_object *drm_gem_vram_of_bo( + struct ttm_buffer_object *bo) +{ + return container_of(bo, struct drm_gem_vram_object, bo); +} + +/** + * Returns the container of type &struct drm_gem_vram_object + * for field gem. 
+ * @gem: the GEM object + * Returns: The containing GEM VRAM object + */ +static inline struct drm_gem_vram_object *drm_gem_vram_of_gem( + struct drm_gem_object *gem) +{ + return container_of(gem, struct drm_gem_vram_object, bo.base); +} + +struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, + struct ttm_bo_device *bdev, + size_t size, + unsigned long pg_align, + bool interruptible); +void drm_gem_vram_put(struct drm_gem_vram_object *gbo); +u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo); +s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); +int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); +int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); +void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map, + bool *is_iomem); +void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo); + +int drm_gem_vram_fill_create_dumb(struct drm_file *file, + struct drm_device *dev, + struct ttm_bo_device *bdev, + unsigned long pg_align, + bool interruptible, + struct drm_mode_create_dumb *args); + +/* + * Helpers for struct ttm_bo_driver + */ + +void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo, + struct ttm_placement *pl); + +int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo, + struct file *filp); + +extern const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs; + +/* + * Helpers for struct drm_driver + */ + +int drm_gem_vram_driver_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args); +int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + uint32_t handle, uint64_t *offset); + +/** + * define DRM_GEM_VRAM_DRIVER - default callback functions for \ + &struct drm_driver + * + * Drivers that use VRAM MM and GEM VRAM can use this macro to initialize + * &struct drm_driver with default functions. 
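+ *
+ * An illustrative sketch (driver and variable names are placeholders);
+ * the macro fills in the dumb-buffer and PRIME mmap callbacks listed
+ * below:
+ *
+ *	static struct drm_driver hypothetical_vram_driver = {
+ *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
+ *		DRM_GEM_VRAM_DRIVER,
+ *	};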
+ */ +#define DRM_GEM_VRAM_DRIVER \ + .dumb_create = drm_gem_vram_driver_dumb_create, \ + .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \ + .gem_prime_mmap = drm_gem_prime_mmap + +#endif diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h index f243408ecf26..06a11202a097 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/drm_hdcp.h @@ -252,17 +252,51 @@ struct hdcp2_rep_stream_ready { * host format and back */ static inline -u32 drm_hdcp2_seq_num_to_u32(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN]) +u32 drm_hdcp_be24_to_cpu(const u8 seq_num[HDCP_2_2_SEQ_NUM_LEN]) { return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16); } static inline -void drm_hdcp2_u32_to_seq_num(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val) +void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val) { seq_num[0] = val >> 16; seq_num[1] = val >> 8; seq_num[2] = val; } +#define DRM_HDCP_SRM_GEN1_MAX_BYTES (5 * 1024) +#define DRM_HDCP_1_4_SRM_ID 0x8 +#define DRM_HDCP_SRM_ID_MASK (0xF << 4) +#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3 +#define DRM_HDCP_1_4_DCP_SIG_SIZE 40 +#define DRM_HDCP_2_SRM_ID 0x9 +#define DRM_HDCP_2_INDICATOR 0x1 +#define DRM_HDCP_2_INDICATOR_MASK 0xF +#define DRM_HDCP_2_VRL_LENGTH_SIZE 3 +#define DRM_HDCP_2_DCP_SIG_SIZE 384 +#define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ 4 +#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC) >> 6) + +struct hdcp_srm_header { + u8 srm_id; + u8 reserved; + __be16 srm_version; + u8 srm_gen_no; +} __packed; + +struct drm_device; +struct drm_connector; + +bool drm_hdcp_check_ksvs_revoked(struct drm_device *dev, + u8 *ksvs, u32 ksv_count); +int drm_connector_attach_content_protection_property( + struct drm_connector *connector, bool hdcp_content_type); +void drm_hdcp_update_content_protection(struct drm_connector *connector, + u64 val); + +/* Content Type classification for HDCP2.2 vs others */ +#define DRM_MODE_HDCP_CONTENT_TYPE0 0 +#define DRM_MODE_HDCP_CONTENT_TYPE1 1 + #endif diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h index fafb6f592c4b..10100a4bbe2a 100644 --- a/include/drm/drm_ioctl.h +++ b/include/drm/drm_ioctl.h @@ -114,6 +114,9 @@ enum drm_ioctl_flags { * Whether &drm_ioctl_desc.func should be called with the DRM BKL held * or not. Enforced as the default for all modern drivers, hence there * should never be a need to set this flag. + * + * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the + * only legacy IOCTL which needs this. */ DRM_UNLOCKED = BIT(4), /** diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h index 2182a56ac421..58dc0c04bf99 100644 --- a/include/drm/drm_legacy.h +++ b/include/drm/drm_legacy.h @@ -1,11 +1,5 @@ #ifndef __DRM_DRM_LEGACY_H__ #define __DRM_DRM_LEGACY_H__ - -#include <drm/drm_auth.h> -#include <drm/drm_hashtab.h> - -struct drm_device; - /* * Legacy driver interfaces for the Direct Rendering Manager * @@ -39,6 +33,12 @@ struct drm_device; * OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include <drm/drm.h> +#include <drm/drm_auth.h> +#include <drm/drm_hashtab.h> + +struct drm_device; +struct file; /* * Legacy Support for palateontologic DRM drivers diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h new file mode 100644 index 000000000000..67c66f5ee591 --- /dev/null +++ b/include/drm/drm_mipi_dbi.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * MIPI Display Bus Interface (DBI) LCD controller support + * + * Copyright 2016 Noralf Trønnes + */ + +#ifndef __LINUX_MIPI_DBI_H +#define __LINUX_MIPI_DBI_H + +#include <linux/mutex.h> +#include <drm/drm_device.h> +#include <drm/drm_simple_kms_helper.h> + +struct drm_rect; +struct spi_device; +struct gpio_desc; +struct regulator; + +/** + * struct mipi_dbi - MIPI DBI interface + */ +struct mipi_dbi { + /** + * @cmdlock: Command lock + */ + struct mutex cmdlock; + + /** + * @command: Bus specific callback executing commands. + */ + int (*command)(struct mipi_dbi *dbi, u8 *cmd, u8 *param, size_t num); + + /** + * @read_commands: Array of read commands terminated by a zero entry. + * Reading is disabled if this is NULL. + */ + const u8 *read_commands; + + /** + * @swap_bytes: Swap bytes in buffer before transfer + */ + bool swap_bytes; + + /** + * @reset: Optional reset gpio + */ + struct gpio_desc *reset; + + /* Type C specific */ + + /** + * @spi: SPI device + */ + struct spi_device *spi; + + /** + * @dc: Optional D/C gpio. + */ + struct gpio_desc *dc; + + /** + * @tx_buf9: Buffer used for Option 1 9-bit conversion + */ + void *tx_buf9; + + /** + * @tx_buf9_len: Size of tx_buf9. + */ + size_t tx_buf9_len; +}; + +/** + * struct mipi_dbi_dev - MIPI DBI device + */ +struct mipi_dbi_dev { + /** + * @drm: DRM device + */ + struct drm_device drm; + + /** + * @pipe: Display pipe structure + */ + struct drm_simple_display_pipe pipe; + + /** + * @connector: Connector + */ + struct drm_connector connector; + + /** + * @mode: Fixed display mode + */ + struct drm_display_mode mode; + + /** + * @enabled: Pipeline is enabled + */ + bool enabled; + + /** + * @tx_buf: Buffer used for transfer (copy clip rect area) + */ + u16 *tx_buf; + + /** + * @rotation: initial rotation in degrees Counter Clock Wise + */ + unsigned int rotation; + + /** + * @backlight: backlight device (optional) + */ + struct backlight_device *backlight; + + /** + * @regulator: power regulator (optional) + */ + struct regulator *regulator; + + /** + * @dbi: MIPI DBI interface + */ + struct mipi_dbi dbi; +}; + +static inline struct mipi_dbi_dev *drm_to_mipi_dbi_dev(struct drm_device *drm) +{ + return container_of(drm, struct mipi_dbi_dev, drm); +} + +int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi, + struct gpio_desc *dc); +int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev, + const struct drm_simple_display_pipe_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + const struct drm_display_mode *mode, + unsigned int rotation, size_t tx_buf_size); +int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev, + const struct drm_simple_display_pipe_funcs *funcs, + const struct drm_display_mode *mode, unsigned int rotation); +void mipi_dbi_release(struct drm_device *drm); +void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state); +void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev, + struct drm_crtc_state *crtc_state, + struct drm_plane_state *plan_state); +void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); +void 
mipi_dbi_hw_reset(struct mipi_dbi *dbi); +bool mipi_dbi_display_is_on(struct mipi_dbi *dbi); +int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev); +int mipi_dbi_poweron_conditional_reset(struct mipi_dbi_dev *dbidev); + +u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len); +int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz, + u8 bpw, const void *buf, size_t len); + +int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val); +int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len); +int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len); +int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, + struct drm_rect *clip, bool swap); +/** + * mipi_dbi_command - MIPI DCS command with optional parameter(s) + * @dbi: MIPI DBI structure + * @cmd: Command + * @seq...: Optional parameter(s) + * + * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for + * get/read. + * + * Returns: + * Zero on success, negative error code on failure. + */ +#define mipi_dbi_command(dbi, cmd, seq...) \ +({ \ + u8 d[] = { seq }; \ + mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \ +}) + +#ifdef CONFIG_DEBUG_FS +int mipi_dbi_debugfs_init(struct drm_minor *minor); +#else +#define mipi_dbi_debugfs_init NULL +#endif + +#endif /* __LINUX_MIPI_DBI_H */ diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 7f60e8eb269a..3bcbe30339f0 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -836,10 +836,36 @@ struct drm_mode_config { */ struct drm_property *writeback_out_fence_ptr_property; + /** + * @hdr_output_metadata_property: Connector property containing hdr + * metadata. This will be provided by userspace compositors based + * on HDR content + */ + struct drm_property *hdr_output_metadata_property; + + /** + * @content_protection_property: DRM ENUM property for content + * protection. See drm_connector_attach_content_protection_property(). + */ + struct drm_property *content_protection_property; + + /** + * @hdcp_content_type_property: DRM ENUM property for type of + * Protected Content. + */ + struct drm_property *hdcp_content_type_property; + /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; /** + * @prefer_shadow_fbdev: + * + * Hint to framebuffer emulation to prefer shadow-fb rendering. + */ + bool prefer_shadow_fbdev; + + /** * @quirk_addfb_prefer_xbgr_30bpp: * * Special hack for legacy ADDFB to keep nouveau userspace happy. Should diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 083f16747369..e946e20c61d8 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -537,7 +537,7 @@ void drm_connector_list_update(struct drm_connector *connector); /* parsing cmdline modes */ bool drm_mode_parse_command_line_for_connector(const char *mode_option, - struct drm_connector *connector, + const struct drm_connector *connector, struct drm_cmdline_mode *mode); struct drm_display_mode * drm_mode_create_from_cmdline_mode(struct drm_device *dev, diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index f7bbd0b0ecd1..6b18c8adfe9d 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -680,6 +680,52 @@ struct drm_encoder_helper_funcs { struct drm_connector *connector); /** + * @atomic_disable: + * + * This callback should be used to disable the encoder.
With the atomic + * drivers it is called before this encoder's CRTC has been shut off + * using their own &drm_crtc_helper_funcs.atomic_disable hook. If that + * sequence is too simple drivers can just add their own driver private + * encoder hooks and call them from CRTC's callback by looping over all + * encoders connected to it using for_each_encoder_on_crtc(). + * + * This callback is a variant of @disable that provides the atomic state + * to the driver. If @atomic_disable is implemented, @disable is not + * called by the helpers. + * + * This hook is only used by atomic helpers. Atomic drivers don't need + * to implement it if there's no need to disable anything at the encoder + * level. To ensure that runtime PM handling (using either DPMS or the + * new "ACTIVE" property) works @atomic_disable must be the inverse of + * @atomic_enable. + */ + void (*atomic_disable)(struct drm_encoder *encoder, + struct drm_atomic_state *state); + + /** + * @atomic_enable: + * + * This callback should be used to enable the encoder. It is called + * after this encoder's CRTC has been enabled using their own + * &drm_crtc_helper_funcs.atomic_enable hook. If that sequence is + * too simple drivers can just add their own driver private encoder + * hooks and call them from CRTC's callback by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). + * + * This callback is a variant of @enable that provides the atomic state + * to the driver. If @atomic_enable is implemented, @enable is not + * called by the helpers. + * + * This hook is only used by atomic helpers, it is the opposite of + * @atomic_disable. Atomic drivers don't need to implement it if there's + * no need to enable anything at the encoder level. To ensure that + * runtime PM handling works @atomic_enable must be the inverse of + * @atomic_disable. + */ + void (*atomic_enable)(struct drm_encoder *encoder, + struct drm_atomic_state *state); + + /** * @disable: * * This callback should be used to disable the encoder. With the atomic @@ -695,6 +741,9 @@ struct drm_encoder_helper_funcs { * handling (using either DPMS or the new "ACTIVE" property) works * @disable must be the inverse of @enable for atomic drivers. * + * For atomic drivers also consider @atomic_disable and save yourself + * from having to read the NOTE below! + * * NOTE: * * With legacy CRTC helpers there's a big semantic difference between @@ -719,11 +768,11 @@ struct drm_encoder_helper_funcs { * hooks and call them from CRTC's callback by looping over all encoders * connected to it using for_each_encoder_on_crtc(). * - * This hook is used only by atomic helpers, for symmetry with @disable. - * Atomic drivers don't need to implement it if there's no need to - * enable anything at the encoder level. To ensure that runtime PM handling - * (using either DPMS or the new "ACTIVE" property) works - * @enable must be the inverse of @disable for atomic drivers. + * This hook is only used by atomic helpers, it is the opposite of + * @disable. Atomic drivers don't need to implement it if there's no + * need to enable anything at the encoder level. To ensure that + * runtime PM handling (using either DPMS or the new "ACTIVE" property) + * works @enable must be the inverse of @disable for atomic drivers. */ void (*enable)(struct drm_encoder *encoder); @@ -979,7 +1028,7 @@ struct drm_connector_helper_funcs { * deadlock. 
*/ int (*atomic_check)(struct drm_connector *connector, - struct drm_connector_state *state); + struct drm_atomic_state *state); /** * @atomic_commit: diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 8c738c0e6e9f..624bd15ecfab 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -24,6 +24,7 @@ #ifndef __DRM_PANEL_H__ #define __DRM_PANEL_H__ +#include <linux/err.h> #include <linux/errno.h> #include <linux/list.h> @@ -35,14 +36,6 @@ struct display_timing; /** * struct drm_panel_funcs - perform operations on a given panel - * @disable: disable panel (turn off back light, etc.) - * @unprepare: turn off panel - * @prepare: turn on panel and perform set up - * @enable: enable panel (turn on back light, etc.) - * @get_modes: add modes to the connector that the panel is attached to and - * return the number of modes added - * @get_timings: copy display timings into the provided array and return - * the number of display timings available * * The .prepare() function is typically called before the display controller * starts to transmit video data. Panel drivers can use this to turn the panel @@ -68,132 +61,107 @@ struct display_timing; * the panel. This is the job of the .unprepare() function. */ struct drm_panel_funcs { - int (*disable)(struct drm_panel *panel); - int (*unprepare)(struct drm_panel *panel); + /** + * @prepare: + * + * Turn on panel and perform set up. + */ int (*prepare)(struct drm_panel *panel); + + /** + * @enable: + * + * Enable panel (turn on back light, etc.). + */ int (*enable)(struct drm_panel *panel); + + /** + * @disable: + * + * Disable panel (turn off back light, etc.). + */ + int (*disable)(struct drm_panel *panel); + + /** + * @unprepare: + * + * Turn off panel. + */ + int (*unprepare)(struct drm_panel *panel); + + /** + * @get_modes: + * + * Add modes to the connector that the panel is attached to and + * return the number of modes added. + */ int (*get_modes)(struct drm_panel *panel); + + /** + * @get_timings: + * + * Copy display timings into the provided array and return + * the number of display timings available. + */ int (*get_timings)(struct drm_panel *panel, unsigned int num_timings, struct display_timing *timings); }; /** * struct drm_panel - DRM panel object - * @drm: DRM device owning the panel - * @connector: DRM connector that the panel is attached to - * @dev: parent device of the panel - * @link: link from panel device (supplier) to DRM device (consumer) - * @funcs: operations that can be performed on the panel - * @list: panel entry in registry */ struct drm_panel { + /** + * @drm: + * + * DRM device owning the panel. + */ struct drm_device *drm; + + /** + * @connector: + * + * DRM connector that the panel is attached to. + */ struct drm_connector *connector; + + /** + * @dev: + * + * Parent device of the panel. + */ struct device *dev; + /** + * @funcs: + * + * Operations that can be performed on the panel. + */ const struct drm_panel_funcs *funcs; + /** + * @list: + * + * Panel entry in registry. + */ struct list_head list; }; -/** - * drm_disable_unprepare - power off a panel - * @panel: DRM panel - * - * Calling this function will completely power off a panel (assert the panel's - * reset, turn off power supplies, ...). After this function has completed, it - * is usually no longer possible to communicate with the panel until another - * call to drm_panel_prepare(). - * - * Return: 0 on success or a negative error code on failure. 
- */ -static inline int drm_panel_unprepare(struct drm_panel *panel) -{ - if (panel && panel->funcs && panel->funcs->unprepare) - return panel->funcs->unprepare(panel); - - return panel ? -ENOSYS : -EINVAL; -} - -/** - * drm_panel_disable - disable a panel - * @panel: DRM panel - * - * This will typically turn off the panel's backlight or disable the display - * drivers. For smart panels it should still be possible to communicate with - * the integrated circuitry via any command bus after this call. - * - * Return: 0 on success or a negative error code on failure. - */ -static inline int drm_panel_disable(struct drm_panel *panel) -{ - if (panel && panel->funcs && panel->funcs->disable) - return panel->funcs->disable(panel); - - return panel ? -ENOSYS : -EINVAL; -} - -/** - * drm_panel_prepare - power on a panel - * @panel: DRM panel - * - * Calling this function will enable power and deassert any reset signals to - * the panel. After this has completed it is possible to communicate with any - * integrated circuitry via a command bus. - * - * Return: 0 on success or a negative error code on failure. - */ -static inline int drm_panel_prepare(struct drm_panel *panel) -{ - if (panel && panel->funcs && panel->funcs->prepare) - return panel->funcs->prepare(panel); - - return panel ? -ENOSYS : -EINVAL; -} - -/** - * drm_panel_enable - enable a panel - * @panel: DRM panel - * - * Calling this function will cause the panel display drivers to be turned on - * and the backlight to be enabled. Content will be visible on screen after - * this call completes. - * - * Return: 0 on success or a negative error code on failure. - */ -static inline int drm_panel_enable(struct drm_panel *panel) -{ - if (panel && panel->funcs && panel->funcs->enable) - return panel->funcs->enable(panel); - - return panel ? -ENOSYS : -EINVAL; -} - -/** - * drm_panel_get_modes - probe the available display modes of a panel - * @panel: DRM panel - * - * The modes probed from the panel are automatically added to the connector - * that the panel is attached to. - * - * Return: The number of modes available from the panel on success or a - * negative error code on failure. - */ -static inline int drm_panel_get_modes(struct drm_panel *panel) -{ - if (panel && panel->funcs && panel->funcs->get_modes) - return panel->funcs->get_modes(panel); - - return panel ? -ENOSYS : -EINVAL; -} - void drm_panel_init(struct drm_panel *panel); int drm_panel_add(struct drm_panel *panel); void drm_panel_remove(struct drm_panel *panel); int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); -int drm_panel_detach(struct drm_panel *panel); +void drm_panel_detach(struct drm_panel *panel); + +int drm_panel_prepare(struct drm_panel *panel); +int drm_panel_unprepare(struct drm_panel *panel); + +int drm_panel_enable(struct drm_panel *panel); +int drm_panel_disable(struct drm_panel *panel); + +int drm_panel_get_modes(struct drm_panel *panel); #if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL) struct drm_panel *of_drm_find_panel(const struct device_node *np); diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 6078c700d9ba..cd5903ad33f7 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -69,7 +69,7 @@ struct drm_plane_state { * * Optional fence to wait for before scanning out @fb. The core atomic * code will set this when userspace is using explicit fencing. 
Do not - * write this directly for a driver's implicit fence, use + * write this field directly for a driver's implicit fence, use * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is * preserved. * diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index b03731a3f079..d89311b822d5 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -42,7 +42,6 @@ * This just contains the internal &struct dma_buf and handle caches for each * &struct drm_file used by the PRIME core code. */ - struct drm_prime_file_private { /* private: */ struct mutex lock; @@ -64,25 +63,18 @@ struct drm_file; struct device; -struct dma_buf *drm_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, - int flags); +/* core prime functions */ +struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, + struct dma_buf_export_info *exp_info); +void drm_gem_dmabuf_release(struct dma_buf *dma_buf); + +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, uint32_t *handle); int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); -int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf); -struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, - struct dma_buf *dma_buf, - struct device *attach_dev); - -int drm_gem_prime_fd_to_handle(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, uint32_t *handle); -struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, - struct dma_buf_export_info *exp_info); -void drm_gem_dmabuf_release(struct dma_buf *dma_buf); +/* helper functions for exporting */ int drm_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach); void drm_gem_map_detach(struct dma_buf *dma_buf, @@ -94,12 +86,25 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir); void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf); void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); + +int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); -int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, - dma_addr_t *addrs, int max_pages); struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); +struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, + int flags); + +/* helper functions for importing */ +struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, + struct dma_buf *dma_buf, + struct device *attach_dev); +struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); +int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, + dma_addr_t *addrs, int max_pages); + #endif /* __DRM_PRIME_H__ */ diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index 3a4247319e63..a5d6f2f3e430 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -32,6 +32,8 @@ #include <linux/device.h> #include <linux/debugfs.h> +#include <drm/drm.h> + /** * DOC: print * diff --git a/include/drm/drm_self_refresh_helper.h b/include/drm/drm_self_refresh_helper.h new file mode 100644 index 000000000000..520235c20708 --- /dev/null 
+++ b/include/drm/drm_self_refresh_helper.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2019 Google, Inc. + * + * Authors: + * Sean Paul <seanpaul@chromium.org> + */ +#ifndef DRM_SELF_REFRESH_HELPER_H_ +#define DRM_SELF_REFRESH_HELPER_H_ + +struct drm_atomic_state; +struct drm_crtc; + +void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state); +void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, + unsigned int commit_time_ms, + unsigned int new_self_refresh_mask); + +int drm_self_refresh_helper_init(struct drm_crtc *crtc); +void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc); +#endif diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h index 4f311e836cdc..d454ef617b2c 100644 --- a/include/drm/drm_sysfs.h +++ b/include/drm/drm_sysfs.h @@ -4,10 +4,13 @@ struct drm_device; struct device; +struct drm_connector; +struct drm_property; int drm_class_device_register(struct device *dev); void drm_class_device_unregister(struct device *dev); void drm_sysfs_hotplug_event(struct drm_device *dev); - +void drm_sysfs_connector_status_event(struct drm_connector *connector, + struct drm_property *property); #endif diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index e528bb2f659d..9fe4ba8bc622 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -30,7 +30,6 @@ #include <drm/drm_file.h> #include <drm/drm_modes.h> -#include <uapi/drm/drm.h> struct drm_device; struct drm_crtc; diff --git a/include/drm/drm_vram_mm_helper.h b/include/drm/drm_vram_mm_helper.h new file mode 100644 index 000000000000..2aacfb1ccfae --- /dev/null +++ b/include/drm/drm_vram_mm_helper.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef DRM_VRAM_MM_HELPER_H +#define DRM_VRAM_MM_HELPER_H + +#include <drm/drm_file.h> +#include <drm/drm_ioctl.h> +#include <drm/ttm/ttm_bo_driver.h> + +struct drm_device; + +/** + * struct drm_vram_mm_funcs - Callback functions for &struct drm_vram_mm + * @evict_flags: Provides an implementation for struct \ + &ttm_bo_driver.evict_flags + * @verify_access: Provides an implementation for \ + struct &ttm_bo_driver.verify_access + * + * These callback functions integrate VRAM MM with TTM buffer objects. New + * functions can be added if necessary. + */ +struct drm_vram_mm_funcs { + void (*evict_flags)(struct ttm_buffer_object *bo, + struct ttm_placement *placement); + int (*verify_access)(struct ttm_buffer_object *bo, struct file *filp); +}; + +/** + * struct drm_vram_mm - An instance of VRAM MM + * @vram_base: Base address of the managed video memory + * @vram_size: Size of the managed video memory in bytes + * @bdev: The TTM BO device. + * @funcs: TTM BO functions + * + * The fields &struct drm_vram_mm.vram_base and + * &struct drm_vram_mm.vram_size are managed by VRAM MM, but are + * available for public read access. Use the field + * &struct drm_vram_mm.bdev to access the TTM BO device. + */ +struct drm_vram_mm { + uint64_t vram_base; + size_t vram_size; + + struct ttm_bo_device bdev; + + const struct drm_vram_mm_funcs *funcs; +}; + +/** + * drm_vram_mm_of_bdev() - \ + Returns the container of type &struct ttm_bo_device for field bdev.
+ * @bdev: the TTM BO device + * + * Returns: + * The containing instance of &struct drm_vram_mm + */ +static inline struct drm_vram_mm *drm_vram_mm_of_bdev( + struct ttm_bo_device *bdev) +{ + return container_of(bdev, struct drm_vram_mm, bdev); +} + +int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, + uint64_t vram_base, size_t vram_size, + const struct drm_vram_mm_funcs *funcs); +void drm_vram_mm_cleanup(struct drm_vram_mm *vmm); + +int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma, + struct drm_vram_mm *vmm); + +/* + * Helpers for integration with struct drm_device + */ + +struct drm_vram_mm *drm_vram_helper_alloc_mm( + struct drm_device *dev, uint64_t vram_base, size_t vram_size, + const struct drm_vram_mm_funcs *funcs); +void drm_vram_helper_release_mm(struct drm_device *dev); + +/* + * Helpers for &struct file_operations + */ + +int drm_vram_mm_file_operations_mmap( + struct file *filp, struct vm_area_struct *vma); + +/** + * define DRM_VRAM_MM_FILE_OPERATIONS - default callback functions for \ + &struct file_operations + * + * Drivers that use VRAM MM can use this macro to initialize + * &struct file_operations with default functions. + */ +#define DRM_VRAM_MM_FILE_OPERATIONS \ + .llseek = no_llseek, \ + .read = drm_read, \ + .poll = drm_poll, \ + .unlocked_ioctl = drm_ioctl, \ + .compat_ioctl = drm_compat_ioctl, \ + .mmap = drm_vram_mm_file_operations_mmap, \ + .open = drm_open, \ + .release = drm_release \ + +#endif diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 0daca4d8dad9..57b4121c750a 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -167,9 +167,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); * @sched: the scheduler instance on which this job is scheduled. * @s_fence: contains the fences for the scheduling of job. * @finish_cb: the callback for the finished fence. - * @finish_work: schedules the function @drm_sched_job_finish once the job has - * finished to remove the job from the - * @drm_gpu_scheduler.ring_mirror_list. * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list. * @id: a unique id assigned to each job scheduled on the scheduler. * @karma: increment on every hang caused by this job. If this exceeds the hang @@ -188,7 +185,6 @@ struct drm_sched_job { struct drm_gpu_scheduler *sched; struct drm_sched_fence *s_fence; struct dma_fence_cb finish_cb; - struct work_struct finish_work; struct list_head node; uint64_t id; atomic_t karma; @@ -263,6 +259,7 @@ struct drm_sched_backend_ops { * guilty and it will be considered for scheduling further. * @num_jobs: the number of jobs in queue in the scheduler * @ready: marks if the underlying HW is ready to work + * @free_guilty: A hint to the timeout handler to free the guilty job. * * One scheduler is implemented for each hardware ring.
*/ @@ -283,6 +280,7 @@ struct drm_gpu_scheduler { int hang_limit; atomic_t num_jobs; bool ready; + bool free_guilty; }; int drm_sched_init(struct drm_gpu_scheduler *sched, @@ -296,7 +294,7 @@ int drm_sched_job_init(struct drm_sched_job *job, void *owner); void drm_sched_job_cleanup(struct drm_sched_job *job); void drm_sched_wakeup(struct drm_gpu_scheduler *sched); -void drm_sched_stop(struct drm_gpu_scheduler *sched); +void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery); void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched); void drm_sched_increase_karma(struct drm_sched_job *bad); diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index dcb95bd9dee6..55c3b123581b 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -34,7 +34,7 @@ enum i915_component_type { /* MAX_PORT is the number of port * It must be sync with I915_MAX_PORTS defined i915_drv.h */ -#define MAX_PORTS 6 +#define MAX_PORTS 9 /** * struct i915_audio_component - Used for direct communication between i915 and hda drivers diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 7523e9a7b6e2..23274cf92712 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -30,11 +30,11 @@ #include <uapi/drm/i915_drm.h> /* For use by IPS driver */ -extern unsigned long i915_read_mch_val(void); -extern bool i915_gpu_raise(void); -extern bool i915_gpu_lower(void); -extern bool i915_gpu_busy(void); -extern bool i915_gpu_turbo_disable(void); +unsigned long i915_read_mch_val(void); +bool i915_gpu_raise(void); +bool i915_gpu_lower(void); +bool i915_gpu_busy(void); +bool i915_gpu_turbo_disable(void); /* Exported from arch/x86/kernel/early-quirks.c */ extern struct resource intel_graphics_stolen_res; @@ -109,6 +109,9 @@ enum port { PORT_D, PORT_E, PORT_F, + PORT_G, + PORT_H, + PORT_I, I915_MAX_PORTS }; diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 6477da22af28..b1f66b117c74 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -466,7 +466,10 @@ INTEL_VGA_DEVICE(0x9BC5, info), \ INTEL_VGA_DEVICE(0x9BC8, info), \ INTEL_VGA_DEVICE(0x9BC4, info), \ - INTEL_VGA_DEVICE(0x9BC2, info) + INTEL_VGA_DEVICE(0x9BC2, info), \ + INTEL_VGA_DEVICE(0x9BC6, info), \ + INTEL_VGA_DEVICE(0x9BE6, info), \ + INTEL_VGA_DEVICE(0x9BF6, info) #define INTEL_KBL_IDS(info) \ INTEL_KBL_GT1_IDS(info), \ @@ -559,7 +562,6 @@ #define INTEL_ICL_PORT_F_IDS(info) \ INTEL_VGA_DEVICE(0x8A50, info), \ INTEL_VGA_DEVICE(0x8A5C, info), \ - INTEL_VGA_DEVICE(0x8A5D, info), \ INTEL_VGA_DEVICE(0x8A59, info), \ INTEL_VGA_DEVICE(0x8A58, info), \ INTEL_VGA_DEVICE(0x8A52, info), \ @@ -569,11 +571,13 @@ INTEL_VGA_DEVICE(0x8A56, info), \ INTEL_VGA_DEVICE(0x8A71, info), \ INTEL_VGA_DEVICE(0x8A70, info), \ - INTEL_VGA_DEVICE(0x8A53, info) + INTEL_VGA_DEVICE(0x8A53, info), \ + INTEL_VGA_DEVICE(0x8A54, info) #define INTEL_ICL_11_IDS(info) \ INTEL_ICL_PORT_F_IDS(info), \ - INTEL_VGA_DEVICE(0x8A51, info) + INTEL_VGA_DEVICE(0x8A51, info), \ + INTEL_VGA_DEVICE(0x8A5D, info) /* EHL */ #define INTEL_EHL_IDS(info) \ @@ -582,4 +586,14 @@ INTEL_VGA_DEVICE(0x4551, info), \ INTEL_VGA_DEVICE(0x4541, info) +/* TGL */ +#define INTEL_TGL_12_IDS(info) \ + INTEL_VGA_DEVICE(0x9A49, info), \ + INTEL_VGA_DEVICE(0x9A40, info), \ + INTEL_VGA_DEVICE(0x9A59, info), \ + INTEL_VGA_DEVICE(0x9A60, info), \ + INTEL_VGA_DEVICE(0x9A68, info), \ + INTEL_VGA_DEVICE(0x9A70, info), \ + INTEL_VGA_DEVICE(0x9A78, info) 
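+
+/*
+ * Illustrative use of the ID groups above (a sketch; pciidlist and
+ * hypothetical_device_info are placeholder names): a driver's PCI ID
+ * table expands them into struct pci_device_id entries, e.g.
+ *
+ *	static const struct pci_device_id pciidlist[] = {
+ *		INTEL_TGL_12_IDS(&hypothetical_device_info),
+ *		{ }
+ *	};
+ */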
+ #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h deleted file mode 100644 index 51fc667beef7..000000000000 --- a/include/drm/tinydrm/mipi-dbi.h +++ /dev/null @@ -1,117 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * MIPI Display Bus Interface (DBI) LCD controller support - * - * Copyright 2016 Noralf Trønnes - */ - -#ifndef __LINUX_MIPI_DBI_H -#define __LINUX_MIPI_DBI_H - -#include <linux/mutex.h> -#include <drm/drm_device.h> -#include <drm/drm_simple_kms_helper.h> - -struct drm_rect; -struct spi_device; -struct gpio_desc; -struct regulator; - -/** - * struct mipi_dbi - MIPI DBI controller - * @spi: SPI device - * @enabled: Pipeline is enabled - * @cmdlock: Command lock - * @command: Bus specific callback executing commands. - * @read_commands: Array of read commands terminated by a zero entry. - * Reading is disabled if this is NULL. - * @dc: Optional D/C gpio. - * @tx_buf: Buffer used for transfer (copy clip rect area) - * @tx_buf9: Buffer used for Option 1 9-bit conversion - * @tx_buf9_len: Size of tx_buf9. - * @swap_bytes: Swap bytes in buffer before transfer - * @reset: Optional reset gpio - * @rotation: initial rotation in degrees Counter Clock Wise - * @backlight: backlight device (optional) - * @regulator: power regulator (optional) - */ -struct mipi_dbi { - /** - * @drm: DRM device - */ - struct drm_device drm; - - /** - * @pipe: Display pipe structure - */ - struct drm_simple_display_pipe pipe; - - struct spi_device *spi; - bool enabled; - struct mutex cmdlock; - int (*command)(struct mipi_dbi *mipi, u8 *cmd, u8 *param, size_t num); - const u8 *read_commands; - struct gpio_desc *dc; - u16 *tx_buf; - void *tx_buf9; - size_t tx_buf9_len; - bool swap_bytes; - struct gpio_desc *reset; - unsigned int rotation; - struct backlight_device *backlight; - struct regulator *regulator; -}; - -static inline struct mipi_dbi *drm_to_mipi_dbi(struct drm_device *drm) -{ - return container_of(drm, struct mipi_dbi, drm); -} - -int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi, - struct gpio_desc *dc); -int mipi_dbi_init(struct mipi_dbi *mipi, - const struct drm_simple_display_pipe_funcs *funcs, - const struct drm_display_mode *mode, unsigned int rotation); -void mipi_dbi_release(struct drm_device *drm); -void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state); -void mipi_dbi_enable_flush(struct mipi_dbi *mipi, - struct drm_crtc_state *crtc_state, - struct drm_plane_state *plan_state); -void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); -void mipi_dbi_hw_reset(struct mipi_dbi *mipi); -bool mipi_dbi_display_is_on(struct mipi_dbi *mipi); -int mipi_dbi_poweron_reset(struct mipi_dbi *mipi); -int mipi_dbi_poweron_conditional_reset(struct mipi_dbi *mipi); -u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len); - -int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val); -int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); -int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); -int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, - struct drm_rect *clip, bool swap); -/** - * mipi_dbi_command - MIPI DCS command with optional parameter(s) - * @mipi: MIPI structure - * @cmd: Command - * @seq...: Optional parameter(s) - * - * Send MIPI DCS command to the controller. Use mipi_dbi_command_read() for - * get/read. 
- * - * Returns: - * Zero on success, negative error code on failure. - */ -#define mipi_dbi_command(mipi, cmd, seq...) \ -({ \ - u8 d[] = { seq }; \ - mipi_dbi_command_stackbuf(mipi, cmd, d, ARRAY_SIZE(d)); \ -}) - -#ifdef CONFIG_DEBUG_FS -int mipi_dbi_debugfs_init(struct drm_minor *minor); -#else -#define mipi_dbi_debugfs_init NULL -#endif - -#endif /* __LINUX_MIPI_DBI_H */ diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h deleted file mode 100644 index f8bcadf48cb1..000000000000 --- a/include/drm/tinydrm/tinydrm-helpers.h +++ /dev/null @@ -1,75 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2016 Noralf Trønnes - */ - -#ifndef __LINUX_TINYDRM_HELPERS_H -#define __LINUX_TINYDRM_HELPERS_H - -struct backlight_device; -struct drm_device; -struct drm_display_mode; -struct drm_framebuffer; -struct drm_rect; -struct drm_simple_display_pipe; -struct drm_simple_display_pipe_funcs; -struct spi_transfer; -struct spi_message; -struct spi_device; -struct device; - -/** - * tinydrm_machine_little_endian - Machine is little endian - * - * Returns: - * true if *defined(__LITTLE_ENDIAN)*, false otherwise - */ -static inline bool tinydrm_machine_little_endian(void) -{ -#if defined(__LITTLE_ENDIAN) - return true; -#else - return false; -#endif -} - -int tinydrm_display_pipe_init(struct drm_device *drm, - struct drm_simple_display_pipe *pipe, - const struct drm_simple_display_pipe_funcs *funcs, - int connector_type, - const uint32_t *formats, - unsigned int format_count, - const struct drm_display_mode *mode, - unsigned int rotation); - -size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len); -bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw); -int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz, - struct spi_transfer *header, u8 bpw, const void *buf, - size_t len); -void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m); - -#ifdef DEBUG -/** - * tinydrm_dbg_spi_message - Dump SPI message - * @spi: SPI device - * @m: SPI message - * - * Dumps info about the transfers in a SPI message including buffer content. - * DEBUG has to be defined for this function to be enabled alongside setting - * the DRM_UT_DRIVER bit of &drm_debug. - */ -static inline void tinydrm_dbg_spi_message(struct spi_device *spi, - struct spi_message *m) -{ - if (drm_debug & DRM_UT_DRIVER) - _tinydrm_dbg_spi_message(spi, m); -} -#else -static inline void tinydrm_dbg_spi_message(struct spi_device *spi, - struct spi_message *m) -{ -} -#endif /* DEBUG */ - -#endif /* __LINUX_TINYDRM_HELPERS_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 49d9cdfc58f2..43c4929a2171 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -31,6 +31,7 @@ #ifndef _TTM_BO_API_H_ #define _TTM_BO_API_H_ +#include <drm/drm_gem.h> #include <drm/drm_hashtab.h> #include <drm/drm_vma_manager.h> #include <linux/kref.h> @@ -39,7 +40,7 @@ #include <linux/mutex.h> #include <linux/mm.h> #include <linux/bitmap.h> -#include <linux/reservation.h> +#include <linux/dma-resv.h> struct ttm_bo_global; @@ -127,6 +128,7 @@ struct ttm_tt; /** * struct ttm_buffer_object * + * @base: drm_gem_object superclass data. * @bdev: Pointer to the buffer object device structure. * @type: The bo type. * @destroy: Destruction function. If NULL, kfree is used. @@ -150,7 +152,6 @@ struct ttm_tt; * @ddestroy: List head for the delayed destroy list. * @swap: List head for swap LRU list. 
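For reference, the varargs mipi_dbi_command() macro deleted above was typically used as below; this is purely illustrative, with the MIPI_DCS_* opcodes coming from <video/mipi_display.h> and "mipi" being a driver-owned struct mipi_dbi:

#include <video/mipi_display.h>

static void example_display_on(struct mipi_dbi *mipi)
{
	/* command byte only */
	mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
	/* command byte plus one parameter byte */
	mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
	mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
}

Because the parameters are collected into a stack array (u8 d[] = { seq }), the parameter count is fixed at compile time and the macro falls back to mipi_dbi_command_stackbuf() with ARRAY_SIZE(d).
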
* @moving: Fence set when BO is moving - * @vma_node: Address space manager node. * @offset: The current GPU offset, which can have different meanings * depending on the memory type. For SYSTEM type memory, it should be 0. * @cur_placement: Hint of current placement. @@ -169,6 +170,8 @@ struct ttm_tt; */ struct ttm_buffer_object { + struct drm_gem_object base; + /** * Members constant at init. */ @@ -215,9 +218,6 @@ struct ttm_buffer_object { */ struct dma_fence *moving; - - struct drm_vma_offset_node vma_node; - unsigned priority; /** @@ -230,8 +230,6 @@ struct ttm_buffer_object { struct sg_table *sg; - struct reservation_object *resv; - struct reservation_object ttm_resv; struct mutex wu_mutex; }; @@ -275,7 +273,7 @@ struct ttm_bo_kmap_obj { struct ttm_operation_ctx { bool interruptible; bool no_wait_gpu; - struct reservation_object *resv; + struct dma_resv *resv; uint64_t bytes_moved; uint32_t flags; }; @@ -495,7 +493,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * @page_alignment: Data alignment in pages. * @ctx: TTM operation context for memory allocation. * @acc_size: Accounted size for this object. - * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. + * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * * This function initializes a pre-allocated struct ttm_buffer_object. @@ -528,7 +526,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, struct ttm_operation_ctx *ctx, size_t acc_size, struct sg_table *sg, - struct reservation_object *resv, + struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)); /** @@ -547,7 +545,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, * point to the shmem object backing a GEM object if TTM is used to back a * GEM user interface. * @acc_size: Accounted size for this object. - * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. + * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * * This function initializes a pre-allocated struct ttm_buffer_object. @@ -572,7 +570,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, bool interrubtible, size_t acc_size, - struct sg_table *sg, struct reservation_object *resv, + struct sg_table *sg, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)); /** @@ -768,4 +766,23 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx); void ttm_bo_swapout_all(struct ttm_bo_device *bdev); int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); + +/** + * ttm_bo_uses_embedded_gem_object - check if the given bo uses the + * embedded drm_gem_object. + * + * Most ttm drivers are using gem too, so the embedded + * ttm_buffer_object.base will be initialized by the driver (before + * calling ttm_bo_init). It is also possible to use ttm without gem + * though (vmwgfx does that). + * + * This helper will figure whenever a given ttm bo is a gem object too + * or not. + * + * @bo: The bo to check. 
+ */ +static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo) +{ + return bo->base.dev != NULL; +} #endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 129dabbc002d..6f536caea368 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -35,7 +35,7 @@ #include <linux/workqueue.h> #include <linux/fs.h> #include <linux/spinlock.h> -#include <linux/reservation.h> +#include <linux/dma-resv.h> #include "ttm_bo_api.h" #include "ttm_memory.h" @@ -390,6 +390,16 @@ struct ttm_bo_driver { * notify driver that a BO was deleted from LRU. */ void (*del_from_lru_notify)(struct ttm_buffer_object *bo); + + /** + * Notify the driver that we're about to release a BO + * + * @bo: BO that is about to be released + * + * Gives the driver a chance to do any cleanup, including + * adding fences that may force a delayed delete + */ + void (*release_notify)(struct ttm_buffer_object *bo); }; /** @@ -654,14 +664,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, if (WARN_ON(ticket)) return -EBUSY; - success = reservation_object_trylock(bo->resv); + success = dma_resv_trylock(bo->base.resv); return success ? 0 : -EBUSY; } if (interruptible) - ret = reservation_object_lock_interruptible(bo->resv, ticket); + ret = dma_resv_lock_interruptible(bo->base.resv, ticket); else - ret = reservation_object_lock(bo->resv, ticket); + ret = dma_resv_lock(bo->base.resv, ticket); if (ret == -EINTR) return -ERESTARTSYS; return ret; @@ -745,10 +755,10 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, WARN_ON(!kref_read(&bo->kref)); if (interruptible) - ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, - ticket); + ret = dma_resv_lock_slow_interruptible(bo->base.resv, + ticket); else - ww_mutex_lock_slow(&bo->resv->lock, ticket); + dma_resv_lock_slow(bo->base.resv, ticket); if (likely(ret == 0)) ttm_bo_del_sub_from_lru(bo); @@ -767,12 +777,13 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, */ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) { - if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { - spin_lock(&bo->bdev->glob->lru_lock); + spin_lock(&bo->bdev->glob->lru_lock); + if (list_empty(&bo->lru)) ttm_bo_add_to_lru(bo); - spin_unlock(&bo->bdev->glob->lru_lock); - } - reservation_object_unlock(bo->resv); + else + ttm_bo_move_to_lru_tail(bo, NULL); + spin_unlock(&bo->bdev->glob->lru_lock); + dma_resv_unlock(bo->base.resv); } /* diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index 621615fa7728..7e46cc678e7e 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -70,6 +70,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, * @list: thread private list of ttm_validate_buffer structs. * @intr: should the wait be interruptible * @dups: [out] optional list of duplicates. + * @del_lru: true if BOs should be removed from the LRU. * * Tries to reserve bos pointed to by the list entries for validation. * If the function returns 0, all buffers are marked as "unfenced", @@ -98,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, struct list_head *list, bool intr, - struct list_head *dups); + struct list_head *dups, bool del_lru); /** * function ttm_eu_fence_buffer_objects. 
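The ttm_bo_api.h and ttm_bo_driver.h hunks above replace struct reservation_object with struct dma_resv and move the reservation into the embedded GEM object (bo->base.resv). A rough sketch of what that looks like from a driver's point of view, assuming a hypothetical helper my_inspect_bo(); only calls visible in the hunks (ttm_bo_uses_embedded_gem_object(), dma_resv_lock_interruptible(), dma_resv_unlock()) are used:

#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

static int my_inspect_bo(struct ttm_buffer_object *bo)
{
	int ret;

	/* vmwgfx-style BOs may not initialize the embedded GEM object */
	if (!ttm_bo_uses_embedded_gem_object(bo))
		return -EINVAL;

	ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
	if (ret)
		return ret;	/* interrupted by a signal */

	/* ... inspect fences / placement under the reservation ... */

	dma_resv_unlock(bo->base.resv);
	return 0;
}
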
diff --git a/include/dt-bindings/bus/moxtet.h b/include/dt-bindings/bus/moxtet.h new file mode 100644 index 000000000000..dc9345440ebe --- /dev/null +++ b/include/dt-bindings/bus/moxtet.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Constant for device tree bindings for Turris Mox module configuration bus + * + * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> + */ + +#ifndef _DT_BINDINGS_BUS_MOXTET_H +#define _DT_BINDINGS_BUS_MOXTET_H + +#define MOXTET_IRQ_PCI 0 +#define MOXTET_IRQ_USB3 4 +#define MOXTET_IRQ_PERIDOT(n) (8 + (n)) +#define MOXTET_IRQ_TOPAZ 12 + +#endif /* _DT_BINDINGS_BUS_MOXTET_H */ diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h index 7138384e2ef9..babd08a1d226 100644 --- a/include/dt-bindings/bus/ti-sysc.h +++ b/include/dt-bindings/bus/ti-sysc.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* TI sysc interconnect target module defines */ /* Generic sysc found on omap2 and later, also known as type1 */ diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h new file mode 100644 index 000000000000..38074a5f7296 --- /dev/null +++ b/include/dt-bindings/clock/ast2600-clock.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later OR MIT */ +#ifndef DT_BINDINGS_AST2600_CLOCK_H +#define DT_BINDINGS_AST2600_CLOCK_H + +#define ASPEED_CLK_GATE_ECLK 0 +#define ASPEED_CLK_GATE_GCLK 1 + +#define ASPEED_CLK_GATE_MCLK 2 + +#define ASPEED_CLK_GATE_VCLK 3 +#define ASPEED_CLK_GATE_BCLK 4 +#define ASPEED_CLK_GATE_DCLK 5 + +#define ASPEED_CLK_GATE_LCLK 6 +#define ASPEED_CLK_GATE_LHCCLK 7 + +#define ASPEED_CLK_GATE_D1CLK 8 +#define ASPEED_CLK_GATE_YCLK 9 + +#define ASPEED_CLK_GATE_REF0CLK 10 +#define ASPEED_CLK_GATE_REF1CLK 11 + +#define ASPEED_CLK_GATE_ESPICLK 12 + +#define ASPEED_CLK_GATE_USBUHCICLK 13 +#define ASPEED_CLK_GATE_USBPORT1CLK 14 +#define ASPEED_CLK_GATE_USBPORT2CLK 15 + +#define ASPEED_CLK_GATE_RSACLK 16 +#define ASPEED_CLK_GATE_RVASCLK 17 + +#define ASPEED_CLK_GATE_MAC1CLK 18 +#define ASPEED_CLK_GATE_MAC2CLK 19 +#define ASPEED_CLK_GATE_MAC3CLK 20 +#define ASPEED_CLK_GATE_MAC4CLK 21 + +#define ASPEED_CLK_GATE_UART1CLK 22 +#define ASPEED_CLK_GATE_UART2CLK 23 +#define ASPEED_CLK_GATE_UART3CLK 24 +#define ASPEED_CLK_GATE_UART4CLK 25 +#define ASPEED_CLK_GATE_UART5CLK 26 +#define ASPEED_CLK_GATE_UART6CLK 27 +#define ASPEED_CLK_GATE_UART7CLK 28 +#define ASPEED_CLK_GATE_UART8CLK 29 +#define ASPEED_CLK_GATE_UART9CLK 30 +#define ASPEED_CLK_GATE_UART10CLK 31 +#define ASPEED_CLK_GATE_UART11CLK 32 +#define ASPEED_CLK_GATE_UART12CLK 33 +#define ASPEED_CLK_GATE_UART13CLK 34 + +#define ASPEED_CLK_GATE_SDCLK 35 +#define ASPEED_CLK_GATE_EMMCCLK 36 + +#define ASPEED_CLK_GATE_I3C0CLK 37 +#define ASPEED_CLK_GATE_I3C1CLK 38 +#define ASPEED_CLK_GATE_I3C2CLK 39 +#define ASPEED_CLK_GATE_I3C3CLK 40 +#define ASPEED_CLK_GATE_I3C4CLK 41 +#define ASPEED_CLK_GATE_I3C5CLK 42 +#define ASPEED_CLK_GATE_I3C6CLK 43 +#define ASPEED_CLK_GATE_I3C7CLK 44 + +#define ASPEED_CLK_GATE_FSICLK 45 + +#define ASPEED_CLK_HPLL 46 +#define ASPEED_CLK_MPLL 47 +#define ASPEED_CLK_DPLL 48 +#define ASPEED_CLK_EPLL 49 +#define ASPEED_CLK_APLL 50 +#define ASPEED_CLK_AHB 51 +#define ASPEED_CLK_APB1 52 +#define ASPEED_CLK_APB2 53 +#define ASPEED_CLK_BCLK 54 +#define ASPEED_CLK_D1CLK 55 +#define ASPEED_CLK_VCLK 56 +#define ASPEED_CLK_LHCLK 57 +#define ASPEED_CLK_UART 58 +#define ASPEED_CLK_UARTX 59 +#define ASPEED_CLK_SDIO 60 +#define ASPEED_CLK_EMMC 61 +#define ASPEED_CLK_ECLK 62 +#define ASPEED_CLK_ECLK_MUX 63 
+#define ASPEED_CLK_MAC12 64 +#define ASPEED_CLK_MAC34 65 +#define ASPEED_CLK_USBPHY_40M 66 + +/* Only list resets here that are not part of a gate */ +#define ASPEED_RESET_ADC 55 +#define ASPEED_RESET_JTAG_MASTER2 54 +#define ASPEED_RESET_I3C_DMA 39 +#define ASPEED_RESET_PWM 37 +#define ASPEED_RESET_PECI 36 +#define ASPEED_RESET_MII 35 +#define ASPEED_RESET_I2C 34 +#define ASPEED_RESET_H2X 31 +#define ASPEED_RESET_GP_MCU 30 +#define ASPEED_RESET_DP_MCU 29 +#define ASPEED_RESET_DP 28 +#define ASPEED_RESET_RC_XDMA 27 +#define ASPEED_RESET_GRAPHICS 26 +#define ASPEED_RESET_DEV_XDMA 25 +#define ASPEED_RESET_DEV_MCTP 24 +#define ASPEED_RESET_RC_MCTP 23 +#define ASPEED_RESET_JTAG_MASTER 22 +#define ASPEED_RESET_PCIE_DEV_O 21 +#define ASPEED_RESET_PCIE_DEV_OEN 20 +#define ASPEED_RESET_PCIE_RC_O 19 +#define ASPEED_RESET_PCIE_RC_OEN 18 +#define ASPEED_RESET_PCI_DP 5 +#define ASPEED_RESET_AHB 1 +#define ASPEED_RESET_SDRAM 0 + +#endif diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h index 2cec01f96897..b60c03430cf1 100644 --- a/include/dt-bindings/clock/bcm2835.h +++ b/include/dt-bindings/clock/bcm2835.h @@ -58,3 +58,5 @@ #define BCM2835_CLOCK_DSI1E 48 #define BCM2835_CLOCK_DSI0P 49 #define BCM2835_CLOCK_DSI1P 50 + +#define BCM2711_CLOCK_EMMC2 51 diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h index a0439ce8e8d3..88ec3968b90a 100644 --- a/include/dt-bindings/clock/exynos4.h +++ b/include/dt-bindings/clock/exynos4.h @@ -187,6 +187,7 @@ #define CLK_MIPI_HSI 349 /* Exynos4210 only */ #define CLK_PIXELASYNCM0 351 #define CLK_PIXELASYNCM1 352 +#define CLK_ASYNC_G3D 353 /* Exynos4x12 only */ #define CLK_PWM_ISP_SCLK 379 /* Exynos4x12 only */ #define CLK_SPI0_ISP_SCLK 380 /* Exynos4x12 only */ #define CLK_SPI1_ISP_SCLK 381 /* Exynos4x12 only */ diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h index 355f469943f1..02d5ac469a3d 100644 --- a/include/dt-bindings/clock/exynos5420.h +++ b/include/dt-bindings/clock/exynos5420.h @@ -60,6 +60,7 @@ #define CLK_MAU_EPLL 159 #define CLK_SCLK_HSIC_12M 160 #define CLK_SCLK_MPHY_IXTAL24 161 +#define CLK_SCLK_BPLL 162 /* gate clocks */ #define CLK_UART0 257 @@ -195,6 +196,16 @@ #define CLK_ACLK432_CAM 518 #define CLK_ACLK_FL1550_CAM 519 #define CLK_ACLK550_CAM 520 +#define CLK_CLKM_PHY0 521 +#define CLK_CLKM_PHY1 522 +#define CLK_ACLK_PPMU_DREX0_0 523 +#define CLK_ACLK_PPMU_DREX0_1 524 +#define CLK_ACLK_PPMU_DREX1_0 525 +#define CLK_ACLK_PPMU_DREX1_1 526 +#define CLK_PCLK_PPMU_DREX0_0 527 +#define CLK_PCLK_PPMU_DREX0_1 528 +#define CLK_PCLK_PPMU_DREX1_0 529 +#define CLK_PCLK_PPMU_DREX1_1 530 /* mux clocks */ #define CLK_MOUT_HDMI 640 @@ -217,6 +228,8 @@ #define CLK_MOUT_EPLL 657 #define CLK_MOUT_MAU_EPLL 658 #define CLK_MOUT_USER_MAU_EPLL 659 +#define CLK_MOUT_SCLK_SPLL 660 +#define CLK_MOUT_MX_MSPLL_CCORE_PHY 661 /* divider clocks */ #define CLK_DOUT_PIXEL 768 @@ -248,8 +261,11 @@ #define CLK_DOUT_CCLK_DREX0 794 #define CLK_DOUT_CLK2X_PHY0 795 #define CLK_DOUT_PCLK_CORE_MEM 796 +#define CLK_FF_DOUT_SPLL2 797 +#define CLK_DOUT_PCLK_DREX0 798 +#define CLK_DOUT_PCLK_DREX1 799 /* must be greater than maximal clock id */ -#define CLK_NR_CLKS 797 +#define CLK_NR_CLKS 800 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */ diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h index e10470ed7c4f..0837c1a7ae49 100644 --- a/include/dt-bindings/clock/g12a-clkc.h +++ b/include/dt-bindings/clock/g12a-clkc.h @@ -136,5 +136,12 @@ #define 
CLKID_VDEC_1 204 #define CLKID_VDEC_HEVC 207 #define CLKID_VDEC_HEVCF 210 +#define CLKID_TS 212 +#define CLKID_CPUB_CLK 224 +#define CLKID_GP1_PLL 243 +#define CLKID_DSU_CLK 252 +#define CLKID_CPU1_CLK 253 +#define CLKID_CPU2_CLK 254 +#define CLKID_CPU3_CLK 255 #endif /* __G12A_CLKC_H */ diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h index 4236818e3be5..673a8c662340 100644 --- a/include/dt-bindings/clock/imx8-clock.h +++ b/include/dt-bindings/clock/imx8-clock.h @@ -283,7 +283,11 @@ #define IMX_ADMA_LPCG_PWM_IPG_CLK 38 #define IMX_ADMA_LPCG_LCD_PIX_CLK 39 #define IMX_ADMA_LPCG_LCD_APB_CLK 40 +#define IMX_ADMA_LPCG_DSP_ADB_CLK 41 +#define IMX_ADMA_LPCG_DSP_IPG_CLK 42 +#define IMX_ADMA_LPCG_DSP_CORE_CLK 43 +#define IMX_ADMA_LPCG_OCRAM_IPG_CLK 44 -#define IMX_ADMA_LPCG_CLK_END 41 +#define IMX_ADMA_LPCG_CLK_END 45 #endif /* __DT_BINDINGS_CLOCK_IMX_H */ diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h index 1b4353e7b486..07e6c686f3ef 100644 --- a/include/dt-bindings/clock/imx8mm-clock.h +++ b/include/dt-bindings/clock/imx8mm-clock.h @@ -239,6 +239,15 @@ #define IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK 222 -#define IMX8MM_CLK_END 223 +#define IMX8MM_CLK_GPIO1_ROOT 223 +#define IMX8MM_CLK_GPIO2_ROOT 224 +#define IMX8MM_CLK_GPIO3_ROOT 225 +#define IMX8MM_CLK_GPIO4_ROOT 226 +#define IMX8MM_CLK_GPIO5_ROOT 227 + +#define IMX8MM_CLK_SNVS_ROOT 228 +#define IMX8MM_CLK_GIC 229 + +#define IMX8MM_CLK_END 230 #endif diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h new file mode 100644 index 000000000000..d7b201652f4c --- /dev/null +++ b/include/dt-bindings/clock/imx8mn-clock.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2018-2019 NXP + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX8MN_H +#define __DT_BINDINGS_CLOCK_IMX8MN_H + +#define IMX8MN_CLK_DUMMY 0 +#define IMX8MN_CLK_32K 1 +#define IMX8MN_CLK_24M 2 +#define IMX8MN_OSC_HDMI_CLK 3 +#define IMX8MN_CLK_EXT1 4 +#define IMX8MN_CLK_EXT2 5 +#define IMX8MN_CLK_EXT3 6 +#define IMX8MN_CLK_EXT4 7 +#define IMX8MN_AUDIO_PLL1_REF_SEL 8 +#define IMX8MN_AUDIO_PLL2_REF_SEL 9 +#define IMX8MN_VIDEO_PLL1_REF_SEL 10 +#define IMX8MN_DRAM_PLL_REF_SEL 11 +#define IMX8MN_GPU_PLL_REF_SEL 12 +#define IMX8MN_VPU_PLL_REF_SEL 13 +#define IMX8MN_ARM_PLL_REF_SEL 14 +#define IMX8MN_SYS_PLL1_REF_SEL 15 +#define IMX8MN_SYS_PLL2_REF_SEL 16 +#define IMX8MN_SYS_PLL3_REF_SEL 17 +#define IMX8MN_AUDIO_PLL1 18 +#define IMX8MN_AUDIO_PLL2 19 +#define IMX8MN_VIDEO_PLL1 20 +#define IMX8MN_DRAM_PLL 21 +#define IMX8MN_GPU_PLL 22 +#define IMX8MN_VPU_PLL 23 +#define IMX8MN_ARM_PLL 24 +#define IMX8MN_SYS_PLL1 25 +#define IMX8MN_SYS_PLL2 26 +#define IMX8MN_SYS_PLL3 27 +#define IMX8MN_AUDIO_PLL1_BYPASS 28 +#define IMX8MN_AUDIO_PLL2_BYPASS 29 +#define IMX8MN_VIDEO_PLL1_BYPASS 30 +#define IMX8MN_DRAM_PLL_BYPASS 31 +#define IMX8MN_GPU_PLL_BYPASS 32 +#define IMX8MN_VPU_PLL_BYPASS 33 +#define IMX8MN_ARM_PLL_BYPASS 34 +#define IMX8MN_SYS_PLL1_BYPASS 35 +#define IMX8MN_SYS_PLL2_BYPASS 36 +#define IMX8MN_SYS_PLL3_BYPASS 37 +#define IMX8MN_AUDIO_PLL1_OUT 38 +#define IMX8MN_AUDIO_PLL2_OUT 39 +#define IMX8MN_VIDEO_PLL1_OUT 40 +#define IMX8MN_DRAM_PLL_OUT 41 +#define IMX8MN_GPU_PLL_OUT 42 +#define IMX8MN_VPU_PLL_OUT 43 +#define IMX8MN_ARM_PLL_OUT 44 +#define IMX8MN_SYS_PLL1_OUT 45 +#define IMX8MN_SYS_PLL2_OUT 46 +#define IMX8MN_SYS_PLL3_OUT 47 +#define IMX8MN_SYS_PLL1_40M 48 +#define IMX8MN_SYS_PLL1_80M 49 +#define IMX8MN_SYS_PLL1_100M 50 +#define 
IMX8MN_SYS_PLL1_133M 51 +#define IMX8MN_SYS_PLL1_160M 52 +#define IMX8MN_SYS_PLL1_200M 53 +#define IMX8MN_SYS_PLL1_266M 54 +#define IMX8MN_SYS_PLL1_400M 55 +#define IMX8MN_SYS_PLL1_800M 56 +#define IMX8MN_SYS_PLL2_50M 57 +#define IMX8MN_SYS_PLL2_100M 58 +#define IMX8MN_SYS_PLL2_125M 59 +#define IMX8MN_SYS_PLL2_166M 60 +#define IMX8MN_SYS_PLL2_200M 61 +#define IMX8MN_SYS_PLL2_250M 62 +#define IMX8MN_SYS_PLL2_333M 63 +#define IMX8MN_SYS_PLL2_500M 64 +#define IMX8MN_SYS_PLL2_1000M 65 + +/* CORE CLOCK ROOT */ +#define IMX8MN_CLK_A53_SRC 66 +#define IMX8MN_CLK_GPU_CORE_SRC 67 +#define IMX8MN_CLK_GPU_SHADER_SRC 68 +#define IMX8MN_CLK_A53_CG 69 +#define IMX8MN_CLK_GPU_CORE_CG 70 +#define IMX8MN_CLK_GPU_SHADER_CG 71 +#define IMX8MN_CLK_A53_DIV 72 +#define IMX8MN_CLK_GPU_CORE_DIV 73 +#define IMX8MN_CLK_GPU_SHADER_DIV 74 + +/* BUS CLOCK ROOT */ +#define IMX8MN_CLK_MAIN_AXI 75 +#define IMX8MN_CLK_ENET_AXI 76 +#define IMX8MN_CLK_NAND_USDHC_BUS 77 +#define IMX8MN_CLK_DISP_AXI 78 +#define IMX8MN_CLK_DISP_APB 79 +#define IMX8MN_CLK_USB_BUS 80 +#define IMX8MN_CLK_GPU_AXI 81 +#define IMX8MN_CLK_GPU_AHB 82 +#define IMX8MN_CLK_NOC 83 +#define IMX8MN_CLK_AHB 84 +#define IMX8MN_CLK_AUDIO_AHB 85 + +/* IPG CLOCK ROOT */ +#define IMX8MN_CLK_IPG_ROOT 86 +#define IMX8MN_CLK_IPG_AUDIO_ROOT 87 + +/* IP */ +#define IMX8MN_CLK_DRAM_CORE 88 +#define IMX8MN_CLK_DRAM_ALT 89 +#define IMX8MN_CLK_DRAM_APB 90 +#define IMX8MN_CLK_DRAM_ALT_ROOT 91 +#define IMX8MN_CLK_DISP_PIXEL 92 +#define IMX8MN_CLK_SAI2 93 +#define IMX8MN_CLK_SAI3 94 +#define IMX8MN_CLK_SAI5 95 +#define IMX8MN_CLK_SAI6 96 +#define IMX8MN_CLK_SPDIF1 97 +#define IMX8MN_CLK_ENET_REF 98 +#define IMX8MN_CLK_ENET_TIMER 99 +#define IMX8MN_CLK_ENET_PHY_REF 100 +#define IMX8MN_CLK_NAND 101 +#define IMX8MN_CLK_QSPI 102 +#define IMX8MN_CLK_USDHC1 103 +#define IMX8MN_CLK_USDHC2 104 +#define IMX8MN_CLK_I2C1 105 +#define IMX8MN_CLK_I2C2 106 +#define IMX8MN_CLK_I2C3 107 +#define IMX8MN_CLK_I2C4 118 +#define IMX8MN_CLK_UART1 119 +#define IMX8MN_CLK_UART2 110 +#define IMX8MN_CLK_UART3 111 +#define IMX8MN_CLK_UART4 112 +#define IMX8MN_CLK_USB_CORE_REF 113 +#define IMX8MN_CLK_USB_PHY_REF 114 +#define IMX8MN_CLK_ECSPI1 115 +#define IMX8MN_CLK_ECSPI2 116 +#define IMX8MN_CLK_PWM1 117 +#define IMX8MN_CLK_PWM2 118 +#define IMX8MN_CLK_PWM3 119 +#define IMX8MN_CLK_PWM4 120 +#define IMX8MN_CLK_WDOG 121 +#define IMX8MN_CLK_WRCLK 122 +#define IMX8MN_CLK_CLKO1 123 +#define IMX8MN_CLK_CLKO2 124 +#define IMX8MN_CLK_DSI_CORE 125 +#define IMX8MN_CLK_DSI_PHY_REF 126 +#define IMX8MN_CLK_DSI_DBI 127 +#define IMX8MN_CLK_USDHC3 128 +#define IMX8MN_CLK_CAMERA_PIXEL 129 +#define IMX8MN_CLK_CSI1_PHY_REF 130 +#define IMX8MN_CLK_CSI2_PHY_REF 131 +#define IMX8MN_CLK_CSI2_ESC 132 +#define IMX8MN_CLK_ECSPI3 133 +#define IMX8MN_CLK_PDM 134 +#define IMX8MN_CLK_SAI7 135 + +#define IMX8MN_CLK_ECSPI1_ROOT 136 +#define IMX8MN_CLK_ECSPI2_ROOT 137 +#define IMX8MN_CLK_ECSPI3_ROOT 138 +#define IMX8MN_CLK_ENET1_ROOT 139 +#define IMX8MN_CLK_GPIO1_ROOT 140 +#define IMX8MN_CLK_GPIO2_ROOT 141 +#define IMX8MN_CLK_GPIO3_ROOT 142 +#define IMX8MN_CLK_GPIO4_ROOT 143 +#define IMX8MN_CLK_GPIO5_ROOT 144 +#define IMX8MN_CLK_I2C1_ROOT 145 +#define IMX8MN_CLK_I2C2_ROOT 146 +#define IMX8MN_CLK_I2C3_ROOT 147 +#define IMX8MN_CLK_I2C4_ROOT 148 +#define IMX8MN_CLK_MU_ROOT 149 +#define IMX8MN_CLK_OCOTP_ROOT 150 +#define IMX8MN_CLK_PWM1_ROOT 151 +#define IMX8MN_CLK_PWM2_ROOT 152 +#define IMX8MN_CLK_PWM3_ROOT 153 +#define IMX8MN_CLK_PWM4_ROOT 154 +#define IMX8MN_CLK_QSPI_ROOT 155 +#define IMX8MN_CLK_NAND_ROOT 156 +#define 
IMX8MN_CLK_SAI2_ROOT 157 +#define IMX8MN_CLK_SAI2_IPG 158 +#define IMX8MN_CLK_SAI3_ROOT 159 +#define IMX8MN_CLK_SAI3_IPG 160 +#define IMX8MN_CLK_SAI5_ROOT 161 +#define IMX8MN_CLK_SAI5_IPG 162 +#define IMX8MN_CLK_SAI6_ROOT 163 +#define IMX8MN_CLK_SAI6_IPG 164 +#define IMX8MN_CLK_SAI7_ROOT 165 +#define IMX8MN_CLK_SAI7_IPG 166 +#define IMX8MN_CLK_SDMA1_ROOT 167 +#define IMX8MN_CLK_SDMA2_ROOT 168 +#define IMX8MN_CLK_UART1_ROOT 169 +#define IMX8MN_CLK_UART2_ROOT 170 +#define IMX8MN_CLK_UART3_ROOT 171 +#define IMX8MN_CLK_UART4_ROOT 172 +#define IMX8MN_CLK_USB1_CTRL_ROOT 173 +#define IMX8MN_CLK_USDHC1_ROOT 174 +#define IMX8MN_CLK_USDHC2_ROOT 175 +#define IMX8MN_CLK_WDOG1_ROOT 176 +#define IMX8MN_CLK_WDOG2_ROOT 177 +#define IMX8MN_CLK_WDOG3_ROOT 178 +#define IMX8MN_CLK_GPU_BUS_ROOT 179 +#define IMX8MN_CLK_ASRC_ROOT 180 +#define IMX8MN_CLK_GPU3D_ROOT 181 +#define IMX8MN_CLK_PDM_ROOT 182 +#define IMX8MN_CLK_PDM_IPG 183 +#define IMX8MN_CLK_DISP_AXI_ROOT 184 +#define IMX8MN_CLK_DISP_APB_ROOT 185 +#define IMX8MN_CLK_DISP_PIXEL_ROOT 186 +#define IMX8MN_CLK_CAMERA_PIXEL_ROOT 187 +#define IMX8MN_CLK_USDHC3_ROOT 188 +#define IMX8MN_CLK_SDMA3_ROOT 189 +#define IMX8MN_CLK_TMU_ROOT 190 +#define IMX8MN_CLK_ARM 191 +#define IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK 192 +#define IMX8MN_CLK_GPU_CORE_ROOT 193 +#define IMX8MN_CLK_GIC 194 + +#define IMX8MN_CLK_END 195 + +#endif diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h index 6677e920dc2d..65463673d25e 100644 --- a/include/dt-bindings/clock/imx8mq-clock.h +++ b/include/dt-bindings/clock/imx8mq-clock.h @@ -400,5 +400,8 @@ #define IMX8MQ_CLK_GPIO4_ROOT 262 #define IMX8MQ_CLK_GPIO5_ROOT 263 -#define IMX8MQ_CLK_END 264 +#define IMX8MQ_CLK_SNVS_ROOT 264 +#define IMX8MQ_CLK_GIC 265 + +#define IMX8MQ_CLK_END 266 #endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ diff --git a/include/dt-bindings/clock/ingenic,tcu.h b/include/dt-bindings/clock/ingenic,tcu.h new file mode 100644 index 000000000000..d569650a7945 --- /dev/null +++ b/include/dt-bindings/clock/ingenic,tcu.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,tcu DT binding. 
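As an aside on how the new dt-bindings clock headers above (imx8mn-clock.h, the imx8mq additions, and the ones that follow) are consumed: the indices are shared between the device tree and the clock provider, which typically sizes its lookup table with the *_CLK_END value. A hedged, generic sketch, not code taken from any of these drivers:

#include <dt-bindings/clock/imx8mn-clock.h>
#include <linux/clk-provider.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct clk_hw_onecell_data *example_clk_table(void)
{
	struct clk_hw_onecell_data *data;

	data = kzalloc(struct_size(data, hws, IMX8MN_CLK_END), GFP_KERNEL);
	if (!data)
		return NULL;
	data->num = IMX8MN_CLK_END;
	/*
	 * The driver then fills data->hws[IMX8MN_CLK_UART1_ROOT] etc. and
	 * registers with of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
	 * data), so that "clocks = <&clk IMX8MN_CLK_UART1_ROOT>" resolves
	 * from the device tree.
	 */
	return data;
}
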
+ */ + +#ifndef __DT_BINDINGS_CLOCK_INGENIC_TCU_H__ +#define __DT_BINDINGS_CLOCK_INGENIC_TCU_H__ + +#define TCU_CLK_TIMER0 0 +#define TCU_CLK_TIMER1 1 +#define TCU_CLK_TIMER2 2 +#define TCU_CLK_TIMER3 3 +#define TCU_CLK_TIMER4 4 +#define TCU_CLK_TIMER5 5 +#define TCU_CLK_TIMER6 6 +#define TCU_CLK_TIMER7 7 +#define TCU_CLK_WDT 8 +#define TCU_CLK_OST 9 + +#endif /* __DT_BINDINGS_CLOCK_INGENIC_TCU_H__ */ diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/jz4740-cgu.h index 6ed83f926ae7..e82d77028581 100644 --- a/include/dt-bindings/clock/jz4740-cgu.h +++ b/include/dt-bindings/clock/jz4740-cgu.h @@ -34,5 +34,6 @@ #define JZ4740_CLK_ADC 19 #define JZ4740_CLK_I2C 20 #define JZ4740_CLK_AIC 21 +#define JZ4740_CLK_TCU 22 #endif /* __DT_BINDINGS_CLOCK_JZ4740_CGU_H__ */ diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h index 47556539f0ee..68862aaf977e 100644 --- a/include/dt-bindings/clock/meson8b-clkc.h +++ b/include/dt-bindings/clock/meson8b-clkc.h @@ -112,5 +112,8 @@ #define CLKID_VDEC_HCODEC 199 #define CLKID_VDEC_2 202 #define CLKID_VDEC_HEVC 206 +#define CLKID_CTS_AMCLK 209 +#define CLKID_CTS_MCLK_I958 212 +#define CLKID_CTS_I958 213 #endif /* __MESON8B_CLKC_H */ diff --git a/include/dt-bindings/clock/mt6779-clk.h b/include/dt-bindings/clock/mt6779-clk.h new file mode 100644 index 000000000000..b083139afbd2 --- /dev/null +++ b/include/dt-bindings/clock/mt6779-clk.h @@ -0,0 +1,436 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: Wendell Lin <wendell.lin@mediatek.com> + */ + +#ifndef _DT_BINDINGS_CLK_MT6779_H +#define _DT_BINDINGS_CLK_MT6779_H + +/* TOPCKGEN */ +#define CLK_TOP_AXI 1 +#define CLK_TOP_MM 2 +#define CLK_TOP_CAM 3 +#define CLK_TOP_MFG 4 +#define CLK_TOP_CAMTG 5 +#define CLK_TOP_UART 6 +#define CLK_TOP_SPI 7 +#define CLK_TOP_MSDC50_0_HCLK 8 +#define CLK_TOP_MSDC50_0 9 +#define CLK_TOP_MSDC30_1 10 +#define CLK_TOP_MSDC30_2 11 +#define CLK_TOP_AUD 12 +#define CLK_TOP_AUD_INTBUS 13 +#define CLK_TOP_FPWRAP_ULPOSC 14 +#define CLK_TOP_SCP 15 +#define CLK_TOP_ATB 16 +#define CLK_TOP_SSPM 17 +#define CLK_TOP_DPI0 18 +#define CLK_TOP_SCAM 19 +#define CLK_TOP_AUD_1 20 +#define CLK_TOP_AUD_2 21 +#define CLK_TOP_DISP_PWM 22 +#define CLK_TOP_SSUSB_TOP_XHCI 23 +#define CLK_TOP_USB_TOP 24 +#define CLK_TOP_SPM 25 +#define CLK_TOP_I2C 26 +#define CLK_TOP_F52M_MFG 27 +#define CLK_TOP_SENINF 28 +#define CLK_TOP_DXCC 29 +#define CLK_TOP_CAMTG2 30 +#define CLK_TOP_AUD_ENG1 31 +#define CLK_TOP_AUD_ENG2 32 +#define CLK_TOP_FAES_UFSFDE 33 +#define CLK_TOP_FUFS 34 +#define CLK_TOP_IMG 35 +#define CLK_TOP_DSP 36 +#define CLK_TOP_DSP1 37 +#define CLK_TOP_DSP2 38 +#define CLK_TOP_IPU_IF 39 +#define CLK_TOP_CAMTG3 40 +#define CLK_TOP_CAMTG4 41 +#define CLK_TOP_PMICSPI 42 +#define CLK_TOP_MAINPLL_CK 43 +#define CLK_TOP_MAINPLL_D2 44 +#define CLK_TOP_MAINPLL_D3 45 +#define CLK_TOP_MAINPLL_D5 46 +#define CLK_TOP_MAINPLL_D7 47 +#define CLK_TOP_MAINPLL_D2_D2 48 +#define CLK_TOP_MAINPLL_D2_D4 49 +#define CLK_TOP_MAINPLL_D2_D8 50 +#define CLK_TOP_MAINPLL_D2_D16 51 +#define CLK_TOP_MAINPLL_D3_D2 52 +#define CLK_TOP_MAINPLL_D3_D4 53 +#define CLK_TOP_MAINPLL_D3_D8 54 +#define CLK_TOP_MAINPLL_D5_D2 55 +#define CLK_TOP_MAINPLL_D5_D4 56 +#define CLK_TOP_MAINPLL_D7_D2 57 +#define CLK_TOP_MAINPLL_D7_D4 58 +#define CLK_TOP_UNIVPLL_CK 59 +#define CLK_TOP_UNIVPLL_D2 60 +#define CLK_TOP_UNIVPLL_D3 61 +#define CLK_TOP_UNIVPLL_D5 62 +#define CLK_TOP_UNIVPLL_D7 63 +#define CLK_TOP_UNIVPLL_D2_D2 64 +#define 
CLK_TOP_UNIVPLL_D2_D4 65 +#define CLK_TOP_UNIVPLL_D2_D8 66 +#define CLK_TOP_UNIVPLL_D3_D2 67 +#define CLK_TOP_UNIVPLL_D3_D4 68 +#define CLK_TOP_UNIVPLL_D3_D8 69 +#define CLK_TOP_UNIVPLL_D5_D2 70 +#define CLK_TOP_UNIVPLL_D5_D4 71 +#define CLK_TOP_UNIVPLL_D5_D8 72 +#define CLK_TOP_APLL1_CK 73 +#define CLK_TOP_APLL1_D2 74 +#define CLK_TOP_APLL1_D4 75 +#define CLK_TOP_APLL1_D8 76 +#define CLK_TOP_APLL2_CK 77 +#define CLK_TOP_APLL2_D2 78 +#define CLK_TOP_APLL2_D4 79 +#define CLK_TOP_APLL2_D8 80 +#define CLK_TOP_TVDPLL_CK 81 +#define CLK_TOP_TVDPLL_D2 82 +#define CLK_TOP_TVDPLL_D4 83 +#define CLK_TOP_TVDPLL_D8 84 +#define CLK_TOP_TVDPLL_D16 85 +#define CLK_TOP_MSDCPLL_CK 86 +#define CLK_TOP_MSDCPLL_D2 87 +#define CLK_TOP_MSDCPLL_D4 88 +#define CLK_TOP_MSDCPLL_D8 89 +#define CLK_TOP_MSDCPLL_D16 90 +#define CLK_TOP_AD_OSC_CK 91 +#define CLK_TOP_OSC_D2 92 +#define CLK_TOP_OSC_D4 93 +#define CLK_TOP_OSC_D8 94 +#define CLK_TOP_OSC_D16 95 +#define CLK_TOP_F26M_CK_D2 96 +#define CLK_TOP_MFGPLL_CK 97 +#define CLK_TOP_UNIVP_192M_CK 98 +#define CLK_TOP_UNIVP_192M_D2 99 +#define CLK_TOP_UNIVP_192M_D4 100 +#define CLK_TOP_UNIVP_192M_D8 101 +#define CLK_TOP_UNIVP_192M_D16 102 +#define CLK_TOP_UNIVP_192M_D32 103 +#define CLK_TOP_MMPLL_CK 104 +#define CLK_TOP_MMPLL_D4 105 +#define CLK_TOP_MMPLL_D4_D2 106 +#define CLK_TOP_MMPLL_D4_D4 107 +#define CLK_TOP_MMPLL_D5 108 +#define CLK_TOP_MMPLL_D5_D2 109 +#define CLK_TOP_MMPLL_D5_D4 110 +#define CLK_TOP_MMPLL_D6 111 +#define CLK_TOP_MMPLL_D7 112 +#define CLK_TOP_CLK26M 113 +#define CLK_TOP_CLK13M 114 +#define CLK_TOP_ADSP 115 +#define CLK_TOP_DPMAIF 116 +#define CLK_TOP_VENC 117 +#define CLK_TOP_VDEC 118 +#define CLK_TOP_CAMTM 119 +#define CLK_TOP_PWM 120 +#define CLK_TOP_ADSPPLL_CK 121 +#define CLK_TOP_I2S0_M_SEL 122 +#define CLK_TOP_I2S1_M_SEL 123 +#define CLK_TOP_I2S2_M_SEL 124 +#define CLK_TOP_I2S3_M_SEL 125 +#define CLK_TOP_I2S4_M_SEL 126 +#define CLK_TOP_I2S5_M_SEL 127 +#define CLK_TOP_APLL12_DIV0 128 +#define CLK_TOP_APLL12_DIV1 129 +#define CLK_TOP_APLL12_DIV2 130 +#define CLK_TOP_APLL12_DIV3 131 +#define CLK_TOP_APLL12_DIV4 132 +#define CLK_TOP_APLL12_DIVB 133 +#define CLK_TOP_APLL12_DIV5 134 +#define CLK_TOP_IPE 135 +#define CLK_TOP_DPE 136 +#define CLK_TOP_CCU 137 +#define CLK_TOP_DSP3 138 +#define CLK_TOP_SENINF1 139 +#define CLK_TOP_SENINF2 140 +#define CLK_TOP_AUD_H 141 +#define CLK_TOP_CAMTG5 142 +#define CLK_TOP_TVDPLL_MAINPLL_D2_CK 143 +#define CLK_TOP_AD_OSC2_CK 144 +#define CLK_TOP_OSC2_D2 145 +#define CLK_TOP_OSC2_D3 146 +#define CLK_TOP_FMEM_466M_CK 147 +#define CLK_TOP_ADSPPLL_D4 148 +#define CLK_TOP_ADSPPLL_D5 149 +#define CLK_TOP_ADSPPLL_D6 150 +#define CLK_TOP_OSC_D10 151 +#define CLK_TOP_UNIVPLL_D3_D16 152 +#define CLK_TOP_NR_CLK 153 + +/* APMIXED */ +#define CLK_APMIXED_ARMPLL_LL 1 +#define CLK_APMIXED_ARMPLL_BL 2 +#define CLK_APMIXED_ARMPLL_BB 3 +#define CLK_APMIXED_CCIPLL 4 +#define CLK_APMIXED_MAINPLL 5 +#define CLK_APMIXED_UNIV2PLL 6 +#define CLK_APMIXED_MSDCPLL 7 +#define CLK_APMIXED_ADSPPLL 8 +#define CLK_APMIXED_MMPLL 9 +#define CLK_APMIXED_MFGPLL 10 +#define CLK_APMIXED_TVDPLL 11 +#define CLK_APMIXED_APLL1 12 +#define CLK_APMIXED_APLL2 13 +#define CLK_APMIXED_SSUSB26M 14 +#define CLK_APMIXED_APPLL26M 15 +#define CLK_APMIXED_MIPIC0_26M 16 +#define CLK_APMIXED_MDPLLGP26M 17 +#define CLK_APMIXED_MM_F26M 18 +#define CLK_APMIXED_UFS26M 19 +#define CLK_APMIXED_MIPIC1_26M 20 +#define CLK_APMIXED_MEMPLL26M 21 +#define CLK_APMIXED_CLKSQ_LVPLL_26M 22 +#define CLK_APMIXED_MIPID0_26M 23 +#define CLK_APMIXED_MIPID1_26M 24 +#define 
CLK_APMIXED_NR_CLK 25 + +/* CAMSYS */ +#define CLK_CAM_LARB10 1 +#define CLK_CAM_DFP_VAD 2 +#define CLK_CAM_LARB11 3 +#define CLK_CAM_LARB9 4 +#define CLK_CAM_CAM 5 +#define CLK_CAM_CAMTG 6 +#define CLK_CAM_SENINF 7 +#define CLK_CAM_CAMSV0 8 +#define CLK_CAM_CAMSV1 9 +#define CLK_CAM_CAMSV2 10 +#define CLK_CAM_CAMSV3 11 +#define CLK_CAM_CCU 12 +#define CLK_CAM_FAKE_ENG 13 +#define CLK_CAM_NR_CLK 14 + +/* INFRA */ +#define CLK_INFRA_PMIC_TMR 1 +#define CLK_INFRA_PMIC_AP 2 +#define CLK_INFRA_PMIC_MD 3 +#define CLK_INFRA_PMIC_CONN 4 +#define CLK_INFRA_SCPSYS 5 +#define CLK_INFRA_SEJ 6 +#define CLK_INFRA_APXGPT 7 +#define CLK_INFRA_ICUSB 8 +#define CLK_INFRA_GCE 9 +#define CLK_INFRA_THERM 10 +#define CLK_INFRA_I2C0 11 +#define CLK_INFRA_I2C1 12 +#define CLK_INFRA_I2C2 13 +#define CLK_INFRA_I2C3 14 +#define CLK_INFRA_PWM_HCLK 15 +#define CLK_INFRA_PWM1 16 +#define CLK_INFRA_PWM2 17 +#define CLK_INFRA_PWM3 18 +#define CLK_INFRA_PWM4 19 +#define CLK_INFRA_PWM 20 +#define CLK_INFRA_UART0 21 +#define CLK_INFRA_UART1 22 +#define CLK_INFRA_UART2 23 +#define CLK_INFRA_UART3 24 +#define CLK_INFRA_GCE_26M 25 +#define CLK_INFRA_CQ_DMA_FPC 26 +#define CLK_INFRA_BTIF 27 +#define CLK_INFRA_SPI0 28 +#define CLK_INFRA_MSDC0 29 +#define CLK_INFRA_MSDC1 30 +#define CLK_INFRA_MSDC2 31 +#define CLK_INFRA_MSDC0_SCK 32 +#define CLK_INFRA_DVFSRC 33 +#define CLK_INFRA_GCPU 34 +#define CLK_INFRA_TRNG 35 +#define CLK_INFRA_AUXADC 36 +#define CLK_INFRA_CPUM 37 +#define CLK_INFRA_CCIF1_AP 38 +#define CLK_INFRA_CCIF1_MD 39 +#define CLK_INFRA_AUXADC_MD 40 +#define CLK_INFRA_MSDC1_SCK 41 +#define CLK_INFRA_MSDC2_SCK 42 +#define CLK_INFRA_AP_DMA 43 +#define CLK_INFRA_XIU 44 +#define CLK_INFRA_DEVICE_APC 45 +#define CLK_INFRA_CCIF_AP 46 +#define CLK_INFRA_DEBUGSYS 47 +#define CLK_INFRA_AUD 48 +#define CLK_INFRA_CCIF_MD 49 +#define CLK_INFRA_DXCC_SEC_CORE 50 +#define CLK_INFRA_DXCC_AO 51 +#define CLK_INFRA_DRAMC_F26M 52 +#define CLK_INFRA_IRTX 53 +#define CLK_INFRA_DISP_PWM 54 +#define CLK_INFRA_DPMAIF_CK 55 +#define CLK_INFRA_AUD_26M_BCLK 56 +#define CLK_INFRA_SPI1 57 +#define CLK_INFRA_I2C4 58 +#define CLK_INFRA_MODEM_TEMP_SHARE 59 +#define CLK_INFRA_SPI2 60 +#define CLK_INFRA_SPI3 61 +#define CLK_INFRA_UNIPRO_SCK 62 +#define CLK_INFRA_UNIPRO_TICK 63 +#define CLK_INFRA_UFS_MP_SAP_BCLK 64 +#define CLK_INFRA_MD32_BCLK 65 +#define CLK_INFRA_SSPM 66 +#define CLK_INFRA_UNIPRO_MBIST 67 +#define CLK_INFRA_SSPM_BUS_HCLK 68 +#define CLK_INFRA_I2C5 69 +#define CLK_INFRA_I2C5_ARBITER 70 +#define CLK_INFRA_I2C5_IMM 71 +#define CLK_INFRA_I2C1_ARBITER 72 +#define CLK_INFRA_I2C1_IMM 73 +#define CLK_INFRA_I2C2_ARBITER 74 +#define CLK_INFRA_I2C2_IMM 75 +#define CLK_INFRA_SPI4 76 +#define CLK_INFRA_SPI5 77 +#define CLK_INFRA_CQ_DMA 78 +#define CLK_INFRA_UFS 79 +#define CLK_INFRA_AES_UFSFDE 80 +#define CLK_INFRA_UFS_TICK 81 +#define CLK_INFRA_MSDC0_SELF 82 +#define CLK_INFRA_MSDC1_SELF 83 +#define CLK_INFRA_MSDC2_SELF 84 +#define CLK_INFRA_SSPM_26M_SELF 85 +#define CLK_INFRA_SSPM_32K_SELF 86 +#define CLK_INFRA_UFS_AXI 87 +#define CLK_INFRA_I2C6 88 +#define CLK_INFRA_AP_MSDC0 89 +#define CLK_INFRA_MD_MSDC0 90 +#define CLK_INFRA_USB 91 +#define CLK_INFRA_DEVMPU_BCLK 92 +#define CLK_INFRA_CCIF2_AP 93 +#define CLK_INFRA_CCIF2_MD 94 +#define CLK_INFRA_CCIF3_AP 95 +#define CLK_INFRA_CCIF3_MD 96 +#define CLK_INFRA_SEJ_F13M 97 +#define CLK_INFRA_AES_BCLK 98 +#define CLK_INFRA_I2C7 99 +#define CLK_INFRA_I2C8 100 +#define CLK_INFRA_FBIST2FPC 101 +#define CLK_INFRA_CCIF4_AP 102 +#define CLK_INFRA_CCIF4_MD 103 +#define CLK_INFRA_FADSP 104 +#define 
CLK_INFRA_SSUSB_XHCI 105 +#define CLK_INFRA_SPI6 106 +#define CLK_INFRA_SPI7 107 +#define CLK_INFRA_NR_CLK 108 + +/* MFGCFG */ +#define CLK_MFGCFG_BG3D 1 +#define CLK_MFGCFG_NR_CLK 2 + +/* IMG */ +#define CLK_IMG_WPE_A 1 +#define CLK_IMG_MFB 2 +#define CLK_IMG_DIP 3 +#define CLK_IMG_LARB6 4 +#define CLK_IMG_LARB5 5 +#define CLK_IMG_NR_CLK 6 + +/* IPE */ +#define CLK_IPE_LARB7 1 +#define CLK_IPE_LARB8 2 +#define CLK_IPE_SMI_SUBCOM 3 +#define CLK_IPE_FD 4 +#define CLK_IPE_FE 5 +#define CLK_IPE_RSC 6 +#define CLK_IPE_DPE 7 +#define CLK_IPE_NR_CLK 8 + +/* MM_CONFIG */ +#define CLK_MM_SMI_COMMON 1 +#define CLK_MM_SMI_LARB0 2 +#define CLK_MM_SMI_LARB1 3 +#define CLK_MM_GALS_COMM0 4 +#define CLK_MM_GALS_COMM1 5 +#define CLK_MM_GALS_CCU2MM 6 +#define CLK_MM_GALS_IPU12MM 7 +#define CLK_MM_GALS_IMG2MM 8 +#define CLK_MM_GALS_CAM2MM 9 +#define CLK_MM_GALS_IPU2MM 10 +#define CLK_MM_MDP_DL_TXCK 11 +#define CLK_MM_IPU_DL_TXCK 12 +#define CLK_MM_MDP_RDMA0 13 +#define CLK_MM_MDP_RDMA1 14 +#define CLK_MM_MDP_RSZ0 15 +#define CLK_MM_MDP_RSZ1 16 +#define CLK_MM_MDP_TDSHP 17 +#define CLK_MM_MDP_WROT0 18 +#define CLK_MM_FAKE_ENG 19 +#define CLK_MM_DISP_OVL0 20 +#define CLK_MM_DISP_OVL0_2L 21 +#define CLK_MM_DISP_OVL1_2L 22 +#define CLK_MM_DISP_RDMA0 23 +#define CLK_MM_DISP_RDMA1 24 +#define CLK_MM_DISP_WDMA0 25 +#define CLK_MM_DISP_COLOR0 26 +#define CLK_MM_DISP_CCORR0 27 +#define CLK_MM_DISP_AAL0 28 +#define CLK_MM_DISP_GAMMA0 29 +#define CLK_MM_DISP_DITHER0 30 +#define CLK_MM_DISP_SPLIT 31 +#define CLK_MM_DSI0_MM_CK 32 +#define CLK_MM_DSI0_IF_CK 33 +#define CLK_MM_DPI_MM_CK 34 +#define CLK_MM_DPI_IF_CK 35 +#define CLK_MM_FAKE_ENG2 36 +#define CLK_MM_MDP_DL_RX_CK 37 +#define CLK_MM_IPU_DL_RX_CK 38 +#define CLK_MM_26M 39 +#define CLK_MM_MM_R2Y 40 +#define CLK_MM_DISP_RSZ 41 +#define CLK_MM_MDP_WDMA0 42 +#define CLK_MM_MDP_AAL 43 +#define CLK_MM_MDP_HDR 44 +#define CLK_MM_DBI_MM_CK 45 +#define CLK_MM_DBI_IF_CK 46 +#define CLK_MM_MDP_WROT1 47 +#define CLK_MM_DISP_POSTMASK0 48 +#define CLK_MM_DISP_HRT_BW 49 +#define CLK_MM_DISP_OVL_FBDC 50 +#define CLK_MM_NR_CLK 51 + +/* VDEC_GCON */ +#define CLK_VDEC_VDEC 1 +#define CLK_VDEC_LARB1 2 +#define CLK_VDEC_GCON_NR_CLK 3 + +/* VENC_GCON */ +#define CLK_VENC_GCON_LARB 1 +#define CLK_VENC_GCON_VENC 2 +#define CLK_VENC_GCON_JPGENC 3 +#define CLK_VENC_GCON_GALS 4 +#define CLK_VENC_GCON_NR_CLK 5 + +/* AUD */ +#define CLK_AUD_AFE 1 +#define CLK_AUD_22M 2 +#define CLK_AUD_24M 3 +#define CLK_AUD_APLL2_TUNER 4 +#define CLK_AUD_APLL_TUNER 5 +#define CLK_AUD_TDM 6 +#define CLK_AUD_ADC 7 +#define CLK_AUD_DAC 8 +#define CLK_AUD_DAC_PREDIS 9 +#define CLK_AUD_TML 10 +#define CLK_AUD_NLE 11 +#define CLK_AUD_I2S1_BCLK_SW 12 +#define CLK_AUD_I2S2_BCLK_SW 13 +#define CLK_AUD_I2S3_BCLK_SW 14 +#define CLK_AUD_I2S4_BCLK_SW 15 +#define CLK_AUD_I2S5_BCLK_SW 16 +#define CLK_AUD_CONN_I2S_ASRC 17 +#define CLK_AUD_GENERAL1_ASRC 18 +#define CLK_AUD_GENERAL2_ASRC 19 +#define CLK_AUD_DAC_HIRES 20 +#define CLK_AUD_PDN_ADDA6_ADC 21 +#define CLK_AUD_ADC_HIRES 22 +#define CLK_AUD_ADC_HIRES_TML 23 +#define CLK_AUD_ADDA6_ADC_HIRES 24 +#define CLK_AUD_3RD_DAC 25 +#define CLK_AUD_3RD_DAC_PREDIS 26 +#define CLK_AUD_3RD_DAC_TML 27 +#define CLK_AUD_3RD_DAC_HIRES 28 +#define CLK_AUD_NR_CLK 29 + +#endif /* _DT_BINDINGS_CLK_MT6779_H */ diff --git a/include/dt-bindings/clock/mt8183-clk.h b/include/dt-bindings/clock/mt8183-clk.h index 0046506eb24c..a7b470b0ec8a 100644 --- a/include/dt-bindings/clock/mt8183-clk.h +++ b/include/dt-bindings/clock/mt8183-clk.h @@ -284,6 +284,10 @@ #define CLK_INFRA_FBIST2FPC 100 #define 
CLK_INFRA_NR_CLK 101 +/* PERICFG */ +#define CLK_PERI_AXI 0 +#define CLK_PERI_NR_CLK 1 + /* MFGCFG */ #define CLK_MFG_BG3D 0 #define CLK_MFG_NR_CLK 1 diff --git a/include/dt-bindings/clock/mt8516-clk.h b/include/dt-bindings/clock/mt8516-clk.h index 9cfca53cd78d..816447b98edd 100644 --- a/include/dt-bindings/clock/mt8516-clk.h +++ b/include/dt-bindings/clock/mt8516-clk.h @@ -208,4 +208,21 @@ #define CLK_TOP_MSDC2_INFRA 176 #define CLK_TOP_NR_CLK 177 +/* AUDSYS */ + +#define CLK_AUD_AFE 0 +#define CLK_AUD_I2S 1 +#define CLK_AUD_22M 2 +#define CLK_AUD_24M 3 +#define CLK_AUD_INTDIR 4 +#define CLK_AUD_APLL2_TUNER 5 +#define CLK_AUD_APLL_TUNER 6 +#define CLK_AUD_HDMI 7 +#define CLK_AUD_SPDF 8 +#define CLK_AUD_ADC 9 +#define CLK_AUD_DAC 10 +#define CLK_AUD_DAC_PREDIS 11 +#define CLK_AUD_TML 12 +#define CLK_AUD_NR_CLK 13 + #endif /* _DT_BINDINGS_CLK_MT8516_H */ diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h index f3283957f48d..e5411938983c 100644 --- a/include/dt-bindings/clock/omap5.h +++ b/include/dt-bindings/clock/omap5.h @@ -89,6 +89,9 @@ /* dss clocks */ #define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +/* gpu clocks */ +#define OMAP5_GPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + /* l3init clocks */ #define OMAP5_MMC1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) #define OMAP5_MMC2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h index 454b3f43f538..bc3051543347 100644 --- a/include/dt-bindings/clock/qcom,gcc-qcs404.h +++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h @@ -146,6 +146,8 @@ #define GCC_MDP_TBU_CLK 138 #define GCC_QDSS_DAP_CLK 139 #define GCC_DCC_XO_CLK 140 +#define GCC_WCSS_Q6_AHB_CLK 141 +#define GCC_WCSS_Q6_AXIM_CLK 142 #define GCC_CDSP_CFG_AHB_CLK 143 #define GCC_BIMC_CDSP_CLK 144 #define GCC_CDSP_TBU_CLK 145 @@ -166,5 +168,13 @@ #define GCC_PCIEPHY_0_PHY_BCR 12 #define GCC_EMAC_BCR 13 #define GCC_CDSP_RESTART 14 +#define GCC_PCIE_0_AXI_MASTER_STICKY_ARES 15 +#define GCC_PCIE_0_AHB_ARES 16 +#define GCC_PCIE_0_AXI_SLAVE_ARES 17 +#define GCC_PCIE_0_AXI_MASTER_ARES 18 +#define GCC_PCIE_0_CORE_STICKY_ARES 19 +#define GCC_PCIE_0_SLEEP_ARES 20 +#define GCC_PCIE_0_PIPE_ARES 21 +#define GCC_WDSP_RESTART 22 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm8150.h b/include/dt-bindings/clock/qcom,gcc-sm8150.h new file mode 100644 index 000000000000..90d60ef94c64 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sm8150.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8150_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM8150_H + +/* GCC clocks */ +#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0 +#define GCC_AGGRE_UFS_CARD_AXI_CLK 1 +#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 2 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 3 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 4 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 5 +#define GCC_AGGRE_USB3_SEC_AXI_CLK 6 +#define GCC_BOOT_ROM_AHB_CLK 7 +#define GCC_CAMERA_AHB_CLK 8 +#define GCC_CAMERA_HF_AXI_CLK 9 +#define GCC_CAMERA_SF_AXI_CLK 10 +#define GCC_CAMERA_XO_CLK 11 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12 +#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 13 +#define GCC_CPUSS_AHB_CLK 14 +#define GCC_CPUSS_AHB_CLK_SRC 15 +#define GCC_CPUSS_DVM_BUS_CLK 16 +#define GCC_CPUSS_GNOC_CLK 17 +#define GCC_CPUSS_RBCPR_CLK 18 +#define GCC_DDRSS_GPU_AXI_CLK 19 +#define GCC_DISP_AHB_CLK 20 +#define GCC_DISP_HF_AXI_CLK 21 +#define GCC_DISP_SF_AXI_CLK 22 +#define GCC_DISP_XO_CLK 23 +#define GCC_EMAC_AXI_CLK 24 +#define GCC_EMAC_PTP_CLK 25 +#define GCC_EMAC_PTP_CLK_SRC 26 +#define GCC_EMAC_RGMII_CLK 27 +#define GCC_EMAC_RGMII_CLK_SRC 28 +#define GCC_EMAC_SLV_AHB_CLK 29 +#define GCC_GP1_CLK 30 +#define GCC_GP1_CLK_SRC 31 +#define GCC_GP2_CLK 32 +#define GCC_GP2_CLK_SRC 33 +#define GCC_GP3_CLK 34 +#define GCC_GP3_CLK_SRC 35 +#define GCC_GPU_CFG_AHB_CLK 36 +#define GCC_GPU_GPLL0_CLK_SRC 37 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 38 +#define GCC_GPU_IREF_CLK 39 +#define GCC_GPU_MEMNOC_GFX_CLK 40 +#define GCC_GPU_SNOC_DVM_GFX_CLK 41 +#define GCC_NPU_AT_CLK 42 +#define GCC_NPU_AXI_CLK 43 +#define GCC_NPU_CFG_AHB_CLK 44 +#define GCC_NPU_GPLL0_CLK_SRC 45 +#define GCC_NPU_GPLL0_DIV_CLK_SRC 46 +#define GCC_NPU_TRIG_CLK 47 +#define GCC_PCIE0_PHY_REFGEN_CLK 48 +#define GCC_PCIE1_PHY_REFGEN_CLK 49 +#define GCC_PCIE_0_AUX_CLK 50 +#define GCC_PCIE_0_AUX_CLK_SRC 51 +#define GCC_PCIE_0_CFG_AHB_CLK 52 +#define GCC_PCIE_0_CLKREF_CLK 53 +#define GCC_PCIE_0_MSTR_AXI_CLK 54 +#define GCC_PCIE_0_PIPE_CLK 55 +#define GCC_PCIE_0_SLV_AXI_CLK 56 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57 +#define GCC_PCIE_1_AUX_CLK 58 +#define GCC_PCIE_1_AUX_CLK_SRC 59 +#define GCC_PCIE_1_CFG_AHB_CLK 60 +#define GCC_PCIE_1_CLKREF_CLK 61 +#define GCC_PCIE_1_MSTR_AXI_CLK 62 +#define GCC_PCIE_1_PIPE_CLK 63 +#define GCC_PCIE_1_SLV_AXI_CLK 64 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 65 +#define GCC_PCIE_PHY_AUX_CLK 66 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 67 +#define GCC_PDM2_CLK 68 +#define GCC_PDM2_CLK_SRC 69 +#define GCC_PDM_AHB_CLK 70 +#define GCC_PDM_XO4_CLK 71 +#define GCC_PRNG_AHB_CLK 72 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 73 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 74 +#define GCC_QMIP_DISP_AHB_CLK 75 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 76 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 77 +#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 78 +#define GCC_QSPI_CORE_CLK 79 +#define GCC_QSPI_CORE_CLK_SRC 80 +#define GCC_QUPV3_WRAP0_S0_CLK 81 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 82 +#define GCC_QUPV3_WRAP0_S1_CLK 83 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 84 +#define GCC_QUPV3_WRAP0_S2_CLK 85 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 86 +#define GCC_QUPV3_WRAP0_S3_CLK 87 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 88 +#define GCC_QUPV3_WRAP0_S4_CLK 89 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 90 +#define GCC_QUPV3_WRAP0_S5_CLK 91 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 92 +#define GCC_QUPV3_WRAP0_S6_CLK 93 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 94 +#define GCC_QUPV3_WRAP0_S7_CLK 95 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 96 +#define GCC_QUPV3_WRAP1_S0_CLK 97 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 98 +#define GCC_QUPV3_WRAP1_S1_CLK 99 +#define 
GCC_QUPV3_WRAP1_S1_CLK_SRC 100 +#define GCC_QUPV3_WRAP1_S2_CLK 101 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 102 +#define GCC_QUPV3_WRAP1_S3_CLK 103 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 104 +#define GCC_QUPV3_WRAP1_S4_CLK 105 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 106 +#define GCC_QUPV3_WRAP1_S5_CLK 107 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 108 +#define GCC_QUPV3_WRAP2_S0_CLK 109 +#define GCC_QUPV3_WRAP2_S0_CLK_SRC 110 +#define GCC_QUPV3_WRAP2_S1_CLK 111 +#define GCC_QUPV3_WRAP2_S1_CLK_SRC 112 +#define GCC_QUPV3_WRAP2_S2_CLK 113 +#define GCC_QUPV3_WRAP2_S2_CLK_SRC 114 +#define GCC_QUPV3_WRAP2_S3_CLK 115 +#define GCC_QUPV3_WRAP2_S3_CLK_SRC 116 +#define GCC_QUPV3_WRAP2_S4_CLK 117 +#define GCC_QUPV3_WRAP2_S4_CLK_SRC 118 +#define GCC_QUPV3_WRAP2_S5_CLK 119 +#define GCC_QUPV3_WRAP2_S5_CLK_SRC 120 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 121 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 122 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 123 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 124 +#define GCC_QUPV3_WRAP_2_M_AHB_CLK 125 +#define GCC_QUPV3_WRAP_2_S_AHB_CLK 126 +#define GCC_SDCC2_AHB_CLK 127 +#define GCC_SDCC2_APPS_CLK 128 +#define GCC_SDCC2_APPS_CLK_SRC 129 +#define GCC_SDCC4_AHB_CLK 130 +#define GCC_SDCC4_APPS_CLK 131 +#define GCC_SDCC4_APPS_CLK_SRC 132 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 133 +#define GCC_TSIF_AHB_CLK 134 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 135 +#define GCC_TSIF_REF_CLK 136 +#define GCC_TSIF_REF_CLK_SRC 137 +#define GCC_UFS_CARD_AHB_CLK 138 +#define GCC_UFS_CARD_AXI_CLK 139 +#define GCC_UFS_CARD_AXI_CLK_SRC 140 +#define GCC_UFS_CARD_AXI_HW_CTL_CLK 141 +#define GCC_UFS_CARD_CLKREF_CLK 142 +#define GCC_UFS_CARD_ICE_CORE_CLK 143 +#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 144 +#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 145 +#define GCC_UFS_CARD_PHY_AUX_CLK 146 +#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 147 +#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 148 +#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 149 +#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 150 +#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 151 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK 152 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 153 +#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 154 +#define GCC_UFS_MEM_CLKREF_CLK 155 +#define GCC_UFS_PHY_AHB_CLK 156 +#define GCC_UFS_PHY_AXI_CLK 157 +#define GCC_UFS_PHY_AXI_CLK_SRC 158 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 159 +#define GCC_UFS_PHY_ICE_CORE_CLK 160 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 161 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 162 +#define GCC_UFS_PHY_PHY_AUX_CLK 163 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 164 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 165 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 166 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 167 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 169 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 170 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 171 +#define GCC_USB30_PRIM_MASTER_CLK 172 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 173 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 174 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 175 +#define GCC_USB30_PRIM_SLEEP_CLK 176 +#define GCC_USB30_SEC_MASTER_CLK 177 +#define GCC_USB30_SEC_MASTER_CLK_SRC 178 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 179 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 180 +#define GCC_USB30_SEC_SLEEP_CLK 181 +#define GCC_USB3_PRIM_CLKREF_CLK 182 +#define GCC_USB3_PRIM_PHY_AUX_CLK 183 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 184 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 185 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 186 +#define GCC_USB3_SEC_CLKREF_CLK 187 +#define GCC_USB3_SEC_PHY_AUX_CLK 188 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 189 +#define 
GCC_USB3_SEC_PHY_COM_AUX_CLK 190 +#define GCC_USB3_SEC_PHY_PIPE_CLK 191 +#define GCC_VIDEO_AHB_CLK 192 +#define GCC_VIDEO_AXI0_CLK 193 +#define GCC_VIDEO_AXI1_CLK 194 +#define GCC_VIDEO_AXIC_CLK 195 +#define GCC_VIDEO_XO_CLK 196 +#define GPLL0 197 +#define GPLL0_OUT_EVEN 198 +#define GPLL7 199 +#define GPLL9 200 + +/* Reset clocks */ +#define GCC_EMAC_BCR 0 +#define GCC_GPU_BCR 1 +#define GCC_MMSS_BCR 2 +#define GCC_NPU_BCR 3 +#define GCC_PCIE_0_BCR 4 +#define GCC_PCIE_0_PHY_BCR 5 +#define GCC_PCIE_1_BCR 6 +#define GCC_PCIE_1_PHY_BCR 7 +#define GCC_PCIE_PHY_BCR 8 +#define GCC_PDM_BCR 9 +#define GCC_PRNG_BCR 10 +#define GCC_QSPI_BCR 11 +#define GCC_QUPV3_WRAPPER_0_BCR 12 +#define GCC_QUPV3_WRAPPER_1_BCR 13 +#define GCC_QUPV3_WRAPPER_2_BCR 14 +#define GCC_QUSB2PHY_PRIM_BCR 15 +#define GCC_QUSB2PHY_SEC_BCR 16 +#define GCC_USB3_PHY_PRIM_BCR 17 +#define GCC_USB3_DP_PHY_PRIM_BCR 18 +#define GCC_USB3_PHY_SEC_BCR 19 +#define GCC_USB3PHY_PHY_SEC_BCR 20 +#define GCC_SDCC2_BCR 21 +#define GCC_SDCC4_BCR 22 +#define GCC_TSIF_BCR 23 +#define GCC_UFS_CARD_BCR 24 +#define GCC_UFS_PHY_BCR 25 +#define GCC_USB30_PRIM_BCR 26 +#define GCC_USB30_SEC_BCR 27 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 28 + +#endif diff --git a/include/dt-bindings/clock/qcom,gpucc-msm8998.h b/include/dt-bindings/clock/qcom,gpucc-msm8998.h new file mode 100644 index 000000000000..2623570ee974 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-msm8998.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019, Jeffrey Hugo + */ + +#ifndef _DT_BINDINGS_CLK_MSM_GPUCC_8998_H +#define _DT_BINDINGS_CLK_MSM_GPUCC_8998_H + +#define GPUPLL0 0 +#define GPUPLL0_OUT_EVEN 1 +#define RBCPR_CLK_SRC 2 +#define GFX3D_CLK_SRC 3 +#define RBBMTIMER_CLK_SRC 4 +#define GFX3D_ISENSE_CLK_SRC 5 +#define RBCPR_CLK 6 +#define GFX3D_CLK 7 +#define RBBMTIMER_CLK 8 +#define GFX3D_ISENSE_CLK 9 +#define GPUCC_CXO_CLK 10 + +#define GPU_CX_BCR 0 +#define RBCPR_BCR 1 +#define GPU_GX_BCR 2 +#define GPU_ISENSE_BCR 3 + +#define GPU_CX_GDSC 1 +#define GPU_GX_GDSC 2 + +#endif diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h index 3b245e3df8da..de550ea56eeb 100644 --- a/include/dt-bindings/clock/rk3228-cru.h +++ b/include/dt-bindings/clock/rk3228-cru.h @@ -64,6 +64,7 @@ #define SCLK_WIFI 141 #define SCLK_OTGPHY0 142 #define SCLK_OTGPHY1 143 +#define SCLK_HDMI_PHY 144 /* dclk gates */ #define DCLK_VOP 190 diff --git a/include/dt-bindings/clock/rk3308-cru.h b/include/dt-bindings/clock/rk3308-cru.h new file mode 100644 index 000000000000..d97840f9ee2e --- /dev/null +++ b/include/dt-bindings/clock/rk3308-cru.h @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 Rockchip Electronics Co. Ltd. 
+ * Author: Finley Xiao <finley.xiao@rock-chips.com> + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3308_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3308_H + +/* core clocks */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_VPLL0 3 +#define PLL_VPLL1 4 +#define ARMCLK 5 + +/* sclk (special clocks) */ +#define USB480M 14 +#define SCLK_RTC32K 15 +#define SCLK_PVTM_CORE 16 +#define SCLK_UART0 17 +#define SCLK_UART1 18 +#define SCLK_UART2 19 +#define SCLK_UART3 20 +#define SCLK_UART4 21 +#define SCLK_I2C0 22 +#define SCLK_I2C1 23 +#define SCLK_I2C2 24 +#define SCLK_I2C3 25 +#define SCLK_PWM0 26 +#define SCLK_SPI0 27 +#define SCLK_SPI1 28 +#define SCLK_SPI2 29 +#define SCLK_TIMER0 30 +#define SCLK_TIMER1 31 +#define SCLK_TIMER2 32 +#define SCLK_TIMER3 33 +#define SCLK_TIMER4 34 +#define SCLK_TIMER5 35 +#define SCLK_TSADC 36 +#define SCLK_SARADC 37 +#define SCLK_OTP 38 +#define SCLK_OTP_USR 39 +#define SCLK_CPU_BOOST 40 +#define SCLK_CRYPTO 41 +#define SCLK_CRYPTO_APK 42 +#define SCLK_NANDC_DIV 43 +#define SCLK_NANDC_DIV50 44 +#define SCLK_NANDC 45 +#define SCLK_SDMMC_DIV 46 +#define SCLK_SDMMC_DIV50 47 +#define SCLK_SDMMC 48 +#define SCLK_SDMMC_DRV 49 +#define SCLK_SDMMC_SAMPLE 50 +#define SCLK_SDIO_DIV 51 +#define SCLK_SDIO_DIV50 52 +#define SCLK_SDIO 53 +#define SCLK_SDIO_DRV 54 +#define SCLK_SDIO_SAMPLE 55 +#define SCLK_EMMC_DIV 56 +#define SCLK_EMMC_DIV50 57 +#define SCLK_EMMC 58 +#define SCLK_EMMC_DRV 59 +#define SCLK_EMMC_SAMPLE 60 +#define SCLK_SFC 61 +#define SCLK_OTG_ADP 62 +#define SCLK_MAC_SRC 63 +#define SCLK_MAC 64 +#define SCLK_MAC_REF 65 +#define SCLK_MAC_RX_TX 66 +#define SCLK_MAC_RMII 67 +#define SCLK_DDR_MON_TIMER 68 +#define SCLK_DDR_MON 69 +#define SCLK_DDRCLK 70 +#define SCLK_PMU 71 +#define SCLK_USBPHY_REF 72 +#define SCLK_WIFI 73 +#define SCLK_PVTM_PMU 74 +#define SCLK_PDM 75 +#define SCLK_I2S0_8CH_TX 76 +#define SCLK_I2S0_8CH_TX_OUT 77 +#define SCLK_I2S0_8CH_RX 78 +#define SCLK_I2S0_8CH_RX_OUT 79 +#define SCLK_I2S1_8CH_TX 80 +#define SCLK_I2S1_8CH_TX_OUT 81 +#define SCLK_I2S1_8CH_RX 82 +#define SCLK_I2S1_8CH_RX_OUT 83 +#define SCLK_I2S2_8CH_TX 84 +#define SCLK_I2S2_8CH_TX_OUT 85 +#define SCLK_I2S2_8CH_RX 86 +#define SCLK_I2S2_8CH_RX_OUT 87 +#define SCLK_I2S3_8CH_TX 88 +#define SCLK_I2S3_8CH_TX_OUT 89 +#define SCLK_I2S3_8CH_RX 90 +#define SCLK_I2S3_8CH_RX_OUT 91 +#define SCLK_I2S0_2CH 92 +#define SCLK_I2S0_2CH_OUT 93 +#define SCLK_I2S1_2CH 94 +#define SCLK_I2S1_2CH_OUT 95 +#define SCLK_SPDIF_TX_DIV 96 +#define SCLK_SPDIF_TX_DIV50 97 +#define SCLK_SPDIF_TX 98 +#define SCLK_SPDIF_RX_DIV 99 +#define SCLK_SPDIF_RX_DIV50 100 +#define SCLK_SPDIF_RX 101 +#define SCLK_I2S0_8CH_TX_MUX 102 +#define SCLK_I2S0_8CH_RX_MUX 103 +#define SCLK_I2S1_8CH_TX_MUX 104 +#define SCLK_I2S1_8CH_RX_MUX 105 +#define SCLK_I2S2_8CH_TX_MUX 106 +#define SCLK_I2S2_8CH_RX_MUX 107 +#define SCLK_I2S3_8CH_TX_MUX 108 +#define SCLK_I2S3_8CH_RX_MUX 109 +#define SCLK_I2S0_8CH_TX_SRC 110 +#define SCLK_I2S0_8CH_RX_SRC 111 +#define SCLK_I2S1_8CH_TX_SRC 112 +#define SCLK_I2S1_8CH_RX_SRC 113 +#define SCLK_I2S2_8CH_TX_SRC 114 +#define SCLK_I2S2_8CH_RX_SRC 115 +#define SCLK_I2S3_8CH_TX_SRC 116 +#define SCLK_I2S3_8CH_RX_SRC 117 +#define SCLK_I2S0_2CH_SRC 118 +#define SCLK_I2S1_2CH_SRC 119 +#define SCLK_PWM1 120 +#define SCLK_PWM2 121 +#define SCLK_OWIRE 122 + +/* dclk */ +#define DCLK_VOP 125 + +/* aclk */ +#define ACLK_BUS_SRC 130 +#define ACLK_BUS 131 +#define ACLK_PERI_SRC 132 +#define ACLK_PERI 133 +#define ACLK_MAC 134 +#define ACLK_CRYPTO 135 +#define ACLK_VOP 136 +#define ACLK_GIC 137 +#define ACLK_DMAC0 138 
+#define ACLK_DMAC1 139 + +/* hclk */ +#define HCLK_BUS 150 +#define HCLK_PERI 151 +#define HCLK_AUDIO 152 +#define HCLK_NANDC 153 +#define HCLK_SDMMC 154 +#define HCLK_SDIO 155 +#define HCLK_EMMC 156 +#define HCLK_SFC 157 +#define HCLK_OTG 158 +#define HCLK_HOST 159 +#define HCLK_HOST_ARB 160 +#define HCLK_PDM 161 +#define HCLK_SPDIFTX 162 +#define HCLK_SPDIFRX 163 +#define HCLK_I2S0_8CH 164 +#define HCLK_I2S1_8CH 165 +#define HCLK_I2S2_8CH 166 +#define HCLK_I2S3_8CH 167 +#define HCLK_I2S0_2CH 168 +#define HCLK_I2S1_2CH 169 +#define HCLK_VAD 170 +#define HCLK_CRYPTO 171 +#define HCLK_VOP 172 + +/* pclk */ +#define PCLK_BUS 190 +#define PCLK_DDR 191 +#define PCLK_PERI 192 +#define PCLK_PMU 193 +#define PCLK_AUDIO 194 +#define PCLK_MAC 195 +#define PCLK_ACODEC 196 +#define PCLK_UART0 197 +#define PCLK_UART1 198 +#define PCLK_UART2 199 +#define PCLK_UART3 200 +#define PCLK_UART4 201 +#define PCLK_I2C0 202 +#define PCLK_I2C1 203 +#define PCLK_I2C2 204 +#define PCLK_I2C3 205 +#define PCLK_PWM0 206 +#define PCLK_SPI0 207 +#define PCLK_SPI1 208 +#define PCLK_SPI2 209 +#define PCLK_SARADC 210 +#define PCLK_TSADC 211 +#define PCLK_TIMER 212 +#define PCLK_OTP_NS 213 +#define PCLK_WDT 214 +#define PCLK_GPIO0 215 +#define PCLK_GPIO1 216 +#define PCLK_GPIO2 217 +#define PCLK_GPIO3 218 +#define PCLK_GPIO4 219 +#define PCLK_SGRF 220 +#define PCLK_GRF 221 +#define PCLK_USBSD_DET 222 +#define PCLK_DDR_UPCTL 223 +#define PCLK_DDR_MON 224 +#define PCLK_DDRPHY 225 +#define PCLK_DDR_STDBY 226 +#define PCLK_USB_GRF 227 +#define PCLK_CRU 228 +#define PCLK_OTP_PHY 229 +#define PCLK_CPU_BOOST 230 +#define PCLK_PWM1 231 +#define PCLK_PWM2 232 +#define PCLK_CAN 233 +#define PCLK_OWIRE 234 + +#define CLK_NR_CLKS (PCLK_OWIRE + 1) + +/* soft-reset indices */ + +/* cru_softrst_con0 */ +#define SRST_CORE0_PO 0 +#define SRST_CORE1_PO 1 +#define SRST_CORE2_PO 2 +#define SRST_CORE3_PO 3 +#define SRST_CORE0 4 +#define SRST_CORE1 5 +#define SRST_CORE2 6 +#define SRST_CORE3 7 +#define SRST_CORE0_DBG 8 +#define SRST_CORE1_DBG 9 +#define SRST_CORE2_DBG 10 +#define SRST_CORE3_DBG 11 +#define SRST_TOPDBG 12 +#define SRST_CORE_NOC 13 +#define SRST_STRC_A 14 +#define SRST_L2C 15 + +/* cru_softrst_con1 */ +#define SRST_DAP 16 +#define SRST_CORE_PVTM 17 +#define SRST_CORE_PRF 18 +#define SRST_CORE_GRF 19 +#define SRST_DDRUPCTL 20 +#define SRST_DDRUPCTL_P 22 +#define SRST_MSCH 23 +#define SRST_DDRMON_P 25 +#define SRST_DDRSTDBY_P 26 +#define SRST_DDRSTDBY 27 +#define SRST_DDRPHY 28 +#define SRST_DDRPHY_DIV 29 +#define SRST_DDRPHY_P 30 + +/* cru_softrst_con2 */ +#define SRST_BUS_NIU_H 32 +#define SRST_USB_NIU_P 33 +#define SRST_CRYPTO_A 34 +#define SRST_CRYPTO_H 35 +#define SRST_CRYPTO 36 +#define SRST_CRYPTO_APK 37 +#define SRST_VOP_A 38 +#define SRST_VOP_H 39 +#define SRST_VOP_D 40 +#define SRST_INTMEM_A 41 +#define SRST_ROM_H 42 +#define SRST_GIC_A 43 +#define SRST_UART0_P 44 +#define SRST_UART0 45 +#define SRST_UART1_P 46 +#define SRST_UART1 47 + +/* cru_softrst_con3 */ +#define SRST_UART2_P 48 +#define SRST_UART2 49 +#define SRST_UART3_P 50 +#define SRST_UART3 51 +#define SRST_UART4_P 52 +#define SRST_UART4 53 +#define SRST_I2C0_P 54 +#define SRST_I2C0 55 +#define SRST_I2C1_P 56 +#define SRST_I2C1 57 +#define SRST_I2C2_P 58 +#define SRST_I2C2 59 +#define SRST_I2C3_P 60 +#define SRST_I2C3 61 +#define SRST_PWM0_P 62 +#define SRST_PWM0 63 + +/* cru_softrst_con4 */ +#define SRST_SPI0_P 64 +#define SRST_SPI0 65 +#define SRST_SPI1_P 66 +#define SRST_SPI1 67 +#define SRST_SPI2_P 68 +#define SRST_SPI2 69 +#define SRST_SARADC_P 70 +#define 
SRST_TSADC_P 71 +#define SRST_TSADC 72 +#define SRST_TIMER0_P 73 +#define SRST_TIMER0 74 +#define SRST_TIMER1 75 +#define SRST_TIMER2 76 +#define SRST_TIMER3 77 +#define SRST_TIMER4 78 +#define SRST_TIMER5 79 + +/* cru_softrst_con5 */ +#define SRST_OTP_NS_P 80 +#define SRST_OTP_NS_SBPI 81 +#define SRST_OTP_NS_USR 82 +#define SRST_OTP_PHY_P 83 +#define SRST_OTP_PHY 84 +#define SRST_GPIO0_P 86 +#define SRST_GPIO1_P 87 +#define SRST_GPIO2_P 88 +#define SRST_GPIO3_P 89 +#define SRST_GPIO4_P 90 +#define SRST_GRF_P 91 +#define SRST_USBSD_DET_P 92 +#define SRST_PMU 93 +#define SRST_PMU_PVTM 94 +#define SRST_USB_GRF_P 95 + +/* cru_softrst_con6 */ +#define SRST_CPU_BOOST 96 +#define SRST_CPU_BOOST_P 97 +#define SRST_PWM1_P 98 +#define SRST_PWM1 99 +#define SRST_PWM2_P 100 +#define SRST_PWM2 101 +#define SRST_PERI_NIU_A 104 +#define SRST_PERI_NIU_H 105 +#define SRST_PERI_NIU_p 106 +#define SRST_USB2OTG_H 107 +#define SRST_USB2OTG 108 +#define SRST_USB2OTG_ADP 109 +#define SRST_USB2HOST_H 110 +#define SRST_USB2HOST_ARB_H 111 + +/* cru_softrst_con7 */ +#define SRST_USB2HOST_AUX_H 112 +#define SRST_USB2HOST_EHCI 113 +#define SRST_USB2HOST 114 +#define SRST_USBPHYPOR 115 +#define SRST_UTMI0 116 +#define SRST_UTMI1 117 +#define SRST_SDIO_H 118 +#define SRST_EMMC_H 119 +#define SRST_SFC_H 120 +#define SRST_SFC 121 +#define SRST_SD_H 122 +#define SRST_NANDC_H 123 +#define SRST_NANDC_N 124 +#define SRST_MAC_A 125 +#define SRST_CAN_P 126 +#define SRST_OWIRE_P 127 + +/* cru_softrst_con8 */ +#define SRST_AUDIO_NIU_H 128 +#define SRST_AUDIO_NIU_P 129 +#define SRST_PDM_H 130 +#define SRST_PDM_M 131 +#define SRST_SPDIFTX_H 132 +#define SRST_SPDIFTX_M 133 +#define SRST_SPDIFRX_H 134 +#define SRST_SPDIFRX_M 135 +#define SRST_I2S0_8CH_H 136 +#define SRST_I2S0_8CH_TX_M 137 +#define SRST_I2S0_8CH_RX_M 138 +#define SRST_I2S1_8CH_H 139 +#define SRST_I2S1_8CH_TX_M 140 +#define SRST_I2S1_8CH_RX_M 141 +#define SRST_I2S2_8CH_H 142 +#define SRST_I2S2_8CH_TX_M 143 + +/* cru_softrst_con9 */ +#define SRST_I2S2_8CH_RX_M 144 +#define SRST_I2S3_8CH_H 145 +#define SRST_I2S3_8CH_TX_M 146 +#define SRST_I2S3_8CH_RX_M 147 +#define SRST_I2S0_2CH_H 148 +#define SRST_I2S0_2CH_M 149 +#define SRST_I2S1_2CH_H 150 +#define SRST_I2S1_2CH_M 151 +#define SRST_VAD_H 152 +#define SRST_ACODEC_P 153 + +#endif diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h index afb811340382..555b4ff660ae 100644 --- a/include/dt-bindings/clock/rk3328-cru.h +++ b/include/dt-bindings/clock/rk3328-cru.h @@ -164,6 +164,7 @@ #define PCLK_DCF 233 #define PCLK_SARADC 234 #define PCLK_ACODECPHY 235 +#define PCLK_WDT 236 /* hclk gates */ #define HCLK_PERI 308 diff --git a/include/dt-bindings/clock/stratix10-clock.h b/include/dt-bindings/clock/stratix10-clock.h index 0ac1c90a18bf..08b98e20b7cc 100644 --- a/include/dt-bindings/clock/stratix10-clock.h +++ b/include/dt-bindings/clock/stratix10-clock.h @@ -79,6 +79,8 @@ #define STRATIX10_USB_CLK 59 #define STRATIX10_SPI_M_CLK 60 #define STRATIX10_NAND_CLK 61 -#define STRATIX10_NUM_CLKS 62 +#define STRATIX10_NAND_X_CLK 62 +#define STRATIX10_NAND_ECC_CLK 63 +#define STRATIX10_NUM_CLKS 64 #endif /* __STRATIX10_CLOCK_H */ diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h index c0d5d5599c87..014ac6123d17 100644 --- a/include/dt-bindings/clock/sun8i-v3s-ccu.h +++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h @@ -104,4 +104,8 @@ #define CLK_MIPI_CSI 73 +/* Clocks not available on V3s */ +#define CLK_BUS_I2S0 75 +#define CLK_I2S0 76 + #endif /* 
_DT_BINDINGS_CLK_SUN8I_V3S_H_ */ diff --git a/include/dt-bindings/gce/mt8183-gce.h b/include/dt-bindings/gce/mt8183-gce.h new file mode 100644 index 000000000000..29c967476f73 --- /dev/null +++ b/include/dt-bindings/gce/mt8183-gce.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: Bibby Hsieh <bibby.hsieh@mediatek.com> + * + */ + +#ifndef _DT_BINDINGS_GCE_MT8183_H +#define _DT_BINDINGS_GCE_MT8183_H + +#define CMDQ_NO_TIMEOUT 0xffffffff + +/* GCE HW thread priority */ +#define CMDQ_THR_PRIO_LOWEST 0 +#define CMDQ_THR_PRIO_HIGHEST 1 + +/* GCE SUBSYS */ +#define SUBSYS_1300XXXX 0 +#define SUBSYS_1400XXXX 1 +#define SUBSYS_1401XXXX 2 +#define SUBSYS_1402XXXX 3 +#define SUBSYS_1502XXXX 4 +#define SUBSYS_1880XXXX 5 +#define SUBSYS_1881XXXX 6 +#define SUBSYS_1882XXXX 7 +#define SUBSYS_1883XXXX 8 +#define SUBSYS_1884XXXX 9 +#define SUBSYS_1000XXXX 10 +#define SUBSYS_1001XXXX 11 +#define SUBSYS_1002XXXX 12 +#define SUBSYS_1003XXXX 13 +#define SUBSYS_1004XXXX 14 +#define SUBSYS_1005XXXX 15 +#define SUBSYS_1020XXXX 16 +#define SUBSYS_1028XXXX 17 +#define SUBSYS_1700XXXX 18 +#define SUBSYS_1701XXXX 19 +#define SUBSYS_1702XXXX 20 +#define SUBSYS_1703XXXX 21 +#define SUBSYS_1800XXXX 22 +#define SUBSYS_1801XXXX 23 +#define SUBSYS_1802XXXX 24 +#define SUBSYS_1804XXXX 25 +#define SUBSYS_1805XXXX 26 +#define SUBSYS_1808XXXX 27 +#define SUBSYS_180aXXXX 28 +#define SUBSYS_180bXXXX 29 + +#define CMDQ_EVENT_DISP_RDMA0_SOF 0 +#define CMDQ_EVENT_DISP_RDMA1_SOF 1 +#define CMDQ_EVENT_MDP_RDMA0_SOF 2 +#define CMDQ_EVENT_MDP_RSZ0_SOF 4 +#define CMDQ_EVENT_MDP_RSZ1_SOF 5 +#define CMDQ_EVENT_MDP_TDSHP_SOF 6 +#define CMDQ_EVENT_MDP_WROT0_SOF 7 +#define CMDQ_EVENT_MDP_WDMA0_SOF 8 +#define CMDQ_EVENT_DISP_OVL0_SOF 9 +#define CMDQ_EVENT_DISP_OVL0_2L_SOF 10 +#define CMDQ_EVENT_DISP_OVL1_2L_SOF 11 +#define CMDQ_EVENT_DISP_WDMA0_SOF 12 +#define CMDQ_EVENT_DISP_COLOR0_SOF 13 +#define CMDQ_EVENT_DISP_CCORR0_SOF 14 +#define CMDQ_EVENT_DISP_AAL0_SOF 15 +#define CMDQ_EVENT_DISP_GAMMA0_SOF 16 +#define CMDQ_EVENT_DISP_DITHER0_SOF 17 +#define CMDQ_EVENT_DISP_PWM0_SOF 18 +#define CMDQ_EVENT_DISP_DSI0_SOF 19 +#define CMDQ_EVENT_DISP_DPI0_SOF 20 +#define CMDQ_EVENT_DISP_RSZ_SOF 22 +#define CMDQ_EVENT_MDP_AAL_SOF 23 +#define CMDQ_EVENT_MDP_CCORR_SOF 24 +#define CMDQ_EVENT_DISP_DBI_SOF 25 +#define CMDQ_EVENT_DISP_RDMA0_EOF 26 +#define CMDQ_EVENT_DISP_RDMA1_EOF 27 +#define CMDQ_EVENT_MDP_RDMA0_EOF 28 +#define CMDQ_EVENT_MDP_RSZ0_EOF 30 +#define CMDQ_EVENT_MDP_RSZ1_EOF 31 +#define CMDQ_EVENT_MDP_TDSHP_EOF 32 +#define CMDQ_EVENT_MDP_WROT0_EOF 33 +#define CMDQ_EVENT_MDP_WDMA0_EOF 34 +#define CMDQ_EVENT_DISP_OVL0_EOF 35 +#define CMDQ_EVENT_DISP_OVL0_2L_EOF 36 +#define CMDQ_EVENT_DISP_OVL1_2L_EOF 37 +#define CMDQ_EVENT_DISP_WDMA0_EOF 38 +#define CMDQ_EVENT_DISP_COLOR0_EOF 39 +#define CMDQ_EVENT_DISP_CCORR0_EOF 40 +#define CMDQ_EVENT_DISP_AAL0_EOF 41 +#define CMDQ_EVENT_DISP_GAMMA0_EOF 42 +#define CMDQ_EVENT_DISP_DITHER0_EOF 43 +#define CMDQ_EVENT_DSI0_EOF 44 +#define CMDQ_EVENT_DPI0_EOF 45 +#define CMDQ_EVENT_DISP_RSZ_EOF 47 +#define CMDQ_EVENT_MDP_AAL_EOF 48 +#define CMDQ_EVENT_MDP_CCORR_EOF 49 +#define CMDQ_EVENT_DBI_EOF 50 +#define CMDQ_EVENT_MUTEX_STREAM_DONE0 130 +#define CMDQ_EVENT_MUTEX_STREAM_DONE1 131 +#define CMDQ_EVENT_MUTEX_STREAM_DONE2 132 +#define CMDQ_EVENT_MUTEX_STREAM_DONE3 133 +#define CMDQ_EVENT_MUTEX_STREAM_DONE4 134 +#define CMDQ_EVENT_MUTEX_STREAM_DONE5 135 +#define CMDQ_EVENT_MUTEX_STREAM_DONE6 136 +#define CMDQ_EVENT_MUTEX_STREAM_DONE7 137 +#define 
CMDQ_EVENT_MUTEX_STREAM_DONE8 138 +#define CMDQ_EVENT_MUTEX_STREAM_DONE9 139 +#define CMDQ_EVENT_MUTEX_STREAM_DONE10 140 +#define CMDQ_EVENT_MUTEX_STREAM_DONE11 141 +#define CMDQ_EVENT_DISP_RDMA0_BUF_UNDERRUN_EVEN 142 +#define CMDQ_EVENT_DISP_RDMA1_BUF_UNDERRUN_EVEN 143 +#define CMDQ_EVENT_DSI0_TE_EVENT 144 +#define CMDQ_EVENT_DSI0_IRQ_EVENT 145 +#define CMDQ_EVENT_DSI0_DONE_EVENT 146 +#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE 150 +#define CMDQ_EVENT_MDP_WDMA_SW_RST_DONE 151 +#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE 152 +#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE 154 +#define CMDQ_EVENT_DISP_OVL0_FRAME_RST_DONE_PULE 155 +#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_RST_DONE_ULSE 156 +#define CMDQ_EVENT_DISP_OVL1_2L_FRAME_RST_DONE_ULSE 157 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_0 257 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_1 258 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_2 259 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_3 260 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_4 261 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_5 262 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_6 263 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_7 264 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_8 265 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_9 266 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_10 267 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_11 268 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_12 269 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_13 270 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_14 271 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_15 272 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_16 273 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_17 274 +#define CMDQ_EVENT_ISP_FRAME_DONE_P2_18 275 +#define CMDQ_EVENT_AMD_FRAME_DONE 276 +#define CMDQ_EVENT_DVE_DONE 277 +#define CMDQ_EVENT_WMFE_DONE 278 +#define CMDQ_EVENT_RSC_DONE 279 +#define CMDQ_EVENT_MFB_DONE 280 +#define CMDQ_EVENT_WPE_A_DONE 281 +#define CMDQ_EVENT_SPE_B_DONE 282 +#define CMDQ_EVENT_OCC_DONE 283 +#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 289 +#define CMDQ_EVENT_JPG_ENC_CMDQ_DONE 290 +#define CMDQ_EVENT_JPG_DEC_CMDQ_DONE 291 +#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 292 +#define CMDQ_EVENT_VENC_CMDQ_128BYTE_DONE 293 +#define CMDQ_EVENT_ISP_FRAME_DONE_A 321 +#define CMDQ_EVENT_ISP_FRAME_DONE_B 322 +#define CMDQ_EVENT_CAMSV0_PASS1_DONE 323 +#define CMDQ_EVENT_CAMSV1_PASS1_DONE 324 +#define CMDQ_EVENT_CAMSV2_PASS1_DONE 325 +#define CMDQ_EVENT_TSF_DONE 326 +#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 327 +#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 328 +#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 329 +#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 330 +#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 331 +#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 332 +#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 333 +#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 334 +#define CMDQ_EVENT_IPU_CORE0_DONE0 353 +#define CMDQ_EVENT_IPU_CORE0_DONE1 354 +#define CMDQ_EVENT_IPU_CORE0_DONE2 355 +#define CMDQ_EVENT_IPU_CORE0_DONE3 356 +#define CMDQ_EVENT_IPU_CORE1_DONE0 385 +#define CMDQ_EVENT_IPU_CORE1_DONE1 386 +#define CMDQ_EVENT_IPU_CORE1_DONE2 387 +#define CMDQ_EVENT_IPU_CORE1_DONE3 388 + +#endif diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h index cabc5712e745..0782b05e2775 100644 --- a/include/dt-bindings/gpio/tegra186-gpio.h +++ b/include/dt-bindings/gpio/tegra186-gpio.h @@ -41,34 +41,6 @@ #define TEGRA186_MAIN_GPIO(port, offset) \ ((TEGRA186_MAIN_GPIO_PORT_##port * 8) + offset) -/* need to keep these for backwards-compatibility */ -#define TEGRA_MAIN_GPIO_PORT_A 0 -#define TEGRA_MAIN_GPIO_PORT_B 1 -#define TEGRA_MAIN_GPIO_PORT_C 2 -#define TEGRA_MAIN_GPIO_PORT_D 3 
-#define TEGRA_MAIN_GPIO_PORT_E 4 -#define TEGRA_MAIN_GPIO_PORT_F 5 -#define TEGRA_MAIN_GPIO_PORT_G 6 -#define TEGRA_MAIN_GPIO_PORT_H 7 -#define TEGRA_MAIN_GPIO_PORT_I 8 -#define TEGRA_MAIN_GPIO_PORT_J 9 -#define TEGRA_MAIN_GPIO_PORT_K 10 -#define TEGRA_MAIN_GPIO_PORT_L 11 -#define TEGRA_MAIN_GPIO_PORT_M 12 -#define TEGRA_MAIN_GPIO_PORT_N 13 -#define TEGRA_MAIN_GPIO_PORT_O 14 -#define TEGRA_MAIN_GPIO_PORT_P 15 -#define TEGRA_MAIN_GPIO_PORT_Q 16 -#define TEGRA_MAIN_GPIO_PORT_R 17 -#define TEGRA_MAIN_GPIO_PORT_T 18 -#define TEGRA_MAIN_GPIO_PORT_X 19 -#define TEGRA_MAIN_GPIO_PORT_Y 20 -#define TEGRA_MAIN_GPIO_PORT_BB 21 -#define TEGRA_MAIN_GPIO_PORT_CC 22 - -#define TEGRA_MAIN_GPIO(port, offset) \ - ((TEGRA_MAIN_GPIO_PORT_##port * 8) + offset) - /* GPIOs implemented by AON GPIO controller */ #define TEGRA186_AON_GPIO_PORT_S 0 #define TEGRA186_AON_GPIO_PORT_U 1 @@ -82,17 +54,4 @@ #define TEGRA186_AON_GPIO(port, offset) \ ((TEGRA186_AON_GPIO_PORT_##port * 8) + offset) -/* need to keep these for backwards-compatibility */ -#define TEGRA_AON_GPIO_PORT_S 0 -#define TEGRA_AON_GPIO_PORT_U 1 -#define TEGRA_AON_GPIO_PORT_V 2 -#define TEGRA_AON_GPIO_PORT_W 3 -#define TEGRA_AON_GPIO_PORT_Z 4 -#define TEGRA_AON_GPIO_PORT_AA 5 -#define TEGRA_AON_GPIO_PORT_EE 6 -#define TEGRA_AON_GPIO_PORT_FF 7 - -#define TEGRA_AON_GPIO(port, offset) \ - ((TEGRA_AON_GPIO_PORT_##port * 8) + offset) - #endif diff --git a/include/dt-bindings/interconnect/qcom,qcs404.h b/include/dt-bindings/interconnect/qcom,qcs404.h new file mode 100644 index 000000000000..960f6e39c5f2 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,qcs404.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Qualcomm interconnect IDs + * + * Copyright (c) 2019, Linaro Ltd. + * Author: Georgi Djakov <georgi.djakov@linaro.org> + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCS404_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_QCS404_H + +#define MASTER_AMPSS_M0 0 +#define MASTER_OXILI 1 +#define MASTER_MDP_PORT0 2 +#define MASTER_SNOC_BIMC_1 3 +#define MASTER_TCU_0 4 +#define SLAVE_EBI_CH0 5 +#define SLAVE_BIMC_SNOC 6 + +#define MASTER_SPDM 0 +#define MASTER_BLSP_1 1 +#define MASTER_BLSP_2 2 +#define MASTER_XI_USB_HS1 3 +#define MASTER_CRYPT0 4 +#define MASTER_SDCC_1 5 +#define MASTER_SDCC_2 6 +#define MASTER_SNOC_PCNOC 7 +#define MASTER_QPIC 8 +#define PCNOC_INT_0 9 +#define PCNOC_INT_2 10 +#define PCNOC_INT_3 11 +#define PCNOC_S_0 12 +#define PCNOC_S_1 13 +#define PCNOC_S_2 14 +#define PCNOC_S_3 15 +#define PCNOC_S_4 16 +#define PCNOC_S_6 17 +#define PCNOC_S_7 18 +#define PCNOC_S_8 19 +#define PCNOC_S_9 20 +#define PCNOC_S_10 21 +#define PCNOC_S_11 22 +#define SLAVE_SPDM 23 +#define SLAVE_PDM 24 +#define SLAVE_PRNG 25 +#define SLAVE_TCSR 26 +#define SLAVE_SNOC_CFG 27 +#define SLAVE_MESSAGE_RAM 28 +#define SLAVE_DISP_SS_CFG 29 +#define SLAVE_GPU_CFG 30 +#define SLAVE_BLSP_1 31 +#define SLAVE_BLSP_2 32 +#define SLAVE_TLMM_NORTH 33 +#define SLAVE_PCIE 34 +#define SLAVE_ETHERNET 35 +#define SLAVE_TLMM_EAST 36 +#define SLAVE_TCU 37 +#define SLAVE_PMIC_ARB 38 +#define SLAVE_SDCC_1 39 +#define SLAVE_SDCC_2 40 +#define SLAVE_TLMM_SOUTH 41 +#define SLAVE_USB_HS 42 +#define SLAVE_USB3 43 +#define SLAVE_CRYPTO_0_CFG 44 +#define SLAVE_PCNOC_SNOC 45 + +#define MASTER_QDSS_BAM 0 +#define MASTER_BIMC_SNOC 1 +#define MASTER_PCNOC_SNOC 2 +#define MASTER_QDSS_ETR 3 +#define MASTER_EMAC 4 +#define MASTER_PCIE 5 +#define MASTER_USB3 6 +#define QDSS_INT 7 +#define SNOC_INT_0 8 +#define SNOC_INT_1 9 +#define SNOC_INT_2 10 +#define SLAVE_KPSS_AHB 11 +#define 
SLAVE_WCSS 12 +#define SLAVE_SNOC_BIMC_1 13 +#define SLAVE_IMEM 14 +#define SLAVE_SNOC_PCNOC 15 +#define SLAVE_QDSS_STM 16 +#define SLAVE_CATS_0 17 +#define SLAVE_CATS_1 18 +#define SLAVE_LPASS 19 + +#endif diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h index e171d0a6beb2..9e1256a7c1bf 100644 --- a/include/dt-bindings/leds/common.h +++ b/include/dt-bindings/leds/common.h @@ -3,8 +3,9 @@ * This header provides macros for the common LEDs device tree bindings. * * Copyright (C) 2015, Samsung Electronics Co., Ltd. - * * Author: Jacek Anaszewski <j.anaszewski@samsung.com> + * + * Copyright (C) 2019 Jacek Anaszewski <jacek.anaszewski@gmail.com> */ #ifndef __DT_BINDINGS_LEDS_H @@ -19,4 +20,56 @@ #define LEDS_BOOST_ADAPTIVE 1 #define LEDS_BOOST_FIXED 2 +/* Standard LED colors */ +#define LED_COLOR_ID_WHITE 0 +#define LED_COLOR_ID_RED 1 +#define LED_COLOR_ID_GREEN 2 +#define LED_COLOR_ID_BLUE 3 +#define LED_COLOR_ID_AMBER 4 +#define LED_COLOR_ID_VIOLET 5 +#define LED_COLOR_ID_YELLOW 6 +#define LED_COLOR_ID_IR 7 +#define LED_COLOR_ID_MAX 8 + +/* Standard LED functions */ +#define LED_FUNCTION_ACTIVITY "activity" +#define LED_FUNCTION_ALARM "alarm" +#define LED_FUNCTION_BACKLIGHT "backlight" +#define LED_FUNCTION_BLUETOOTH "bluetooth" +#define LED_FUNCTION_BOOT "boot" +#define LED_FUNCTION_CPU "cpu" +#define LED_FUNCTION_CAPSLOCK "capslock" +#define LED_FUNCTION_CHARGING "charging" +#define LED_FUNCTION_DEBUG "debug" +#define LED_FUNCTION_DISK "disk" +#define LED_FUNCTION_DISK_ACTIVITY "disk-activity" +#define LED_FUNCTION_DISK_ERR "disk-err" +#define LED_FUNCTION_DISK_READ "disk-read" +#define LED_FUNCTION_DISK_WRITE "disk-write" +#define LED_FUNCTION_FAULT "fault" +#define LED_FUNCTION_FLASH "flash" +#define LED_FUNCTION_HEARTBEAT "heartbeat" +#define LED_FUNCTION_INDICATOR "indicator" +#define LED_FUNCTION_KBD_BACKLIGHT "kbd_backlight" +#define LED_FUNCTION_LAN "lan" +#define LED_FUNCTION_MAIL "mail" +#define LED_FUNCTION_MTD "mtd" +#define LED_FUNCTION_MICMUTE "micmute" +#define LED_FUNCTION_MUTE "mute" +#define LED_FUNCTION_NUMLOCK "numlock" +#define LED_FUNCTION_PANIC "panic" +#define LED_FUNCTION_PROGRAMMING "programming" +#define LED_FUNCTION_POWER "power" +#define LED_FUNCTION_RX "rx" +#define LED_FUNCTION_SD "sd" +#define LED_FUNCTION_SCROLLLOCK "scrolllock" +#define LED_FUNCTION_STANDBY "standby" +#define LED_FUNCTION_STATUS "status" +#define LED_FUNCTION_TORCH "torch" +#define LED_FUNCTION_TX "tx" +#define LED_FUNCTION_USB "usb" +#define LED_FUNCTION_WAN "wan" +#define LED_FUNCTION_WLAN "wlan" +#define LED_FUNCTION_WPS "wps" + #endif /* __DT_BINDINGS_LEDS_H */ diff --git a/include/dt-bindings/memory/mt8183-larb-port.h b/include/dt-bindings/memory/mt8183-larb-port.h new file mode 100644 index 000000000000..2c579f305162 --- /dev/null +++ b/include/dt-bindings/memory/mt8183-larb-port.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 MediaTek Inc. 
+ * Author: Yong Wu <yong.wu@mediatek.com> + */ +#ifndef __DTS_IOMMU_PORT_MT8183_H +#define __DTS_IOMMU_PORT_MT8183_H + +#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port)) + +#define M4U_LARB0_ID 0 +#define M4U_LARB1_ID 1 +#define M4U_LARB2_ID 2 +#define M4U_LARB3_ID 3 +#define M4U_LARB4_ID 4 +#define M4U_LARB5_ID 5 +#define M4U_LARB6_ID 6 +#define M4U_LARB7_ID 7 + +/* larb0 */ +#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0) +#define M4U_PORT_DISP_2L_OVL0_LARB0 MTK_M4U_ID(M4U_LARB0_ID, 1) +#define M4U_PORT_DISP_2L_OVL1_LARB0 MTK_M4U_ID(M4U_LARB0_ID, 2) +#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3) +#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 4) +#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5) +#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 6) +#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 7) +#define M4U_PORT_MDP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 8) +#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 9) + +/* larb1 */ +#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB1_ID, 0) +#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB1_ID, 1) +#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB1_ID, 2) +#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB1_ID, 3) +#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB1_ID, 4) +#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB1_ID, 5) +#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB1_ID, 6) + +/* larb2 VPU0 */ +#define M4U_PORT_IMG_IPUO MTK_M4U_ID(M4U_LARB2_ID, 0) +#define M4U_PORT_IMG_IPU3O MTK_M4U_ID(M4U_LARB2_ID, 1) +#define M4U_PORT_IMG_IPUI MTK_M4U_ID(M4U_LARB2_ID, 2) + +/* larb3 VPU1 */ +#define M4U_PORT_CAM_IPUO MTK_M4U_ID(M4U_LARB3_ID, 0) +#define M4U_PORT_CAM_IPU2O MTK_M4U_ID(M4U_LARB3_ID, 1) +#define M4U_PORT_CAM_IPU3O MTK_M4U_ID(M4U_LARB3_ID, 2) +#define M4U_PORT_CAM_IPUI MTK_M4U_ID(M4U_LARB3_ID, 3) +#define M4U_PORT_CAM_IPU2I MTK_M4U_ID(M4U_LARB3_ID, 4) + +/* larb4 */ +#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB4_ID, 0) +#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB4_ID, 1) +#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB4_ID, 2) +#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB4_ID, 3) +#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB4_ID, 4) +#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB4_ID, 5) +#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB4_ID, 6) +#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB4_ID, 7) +#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB4_ID, 8) +#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB4_ID, 9) +#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB4_ID, 10) + +/* larb5 */ +#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB5_ID, 0) +#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB5_ID, 1) +#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB5_ID, 2) +#define M4U_PORT_CAM_VIPI MTK_M4U_ID(M4U_LARB5_ID, 3) +#define M4U_PORT_CAM_LCEI MTK_M4U_ID(M4U_LARB5_ID, 4) +#define M4U_PORT_CAM_SMXI MTK_M4U_ID(M4U_LARB5_ID, 5) +#define M4U_PORT_CAM_SMXO MTK_M4U_ID(M4U_LARB5_ID, 6) +#define M4U_PORT_CAM_WPE0_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 7) +#define M4U_PORT_CAM_WPE0_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 8) +#define M4U_PORT_CAM_WPE0_WDMA MTK_M4U_ID(M4U_LARB5_ID, 9) +#define M4U_PORT_CAM_FDVT_RP MTK_M4U_ID(M4U_LARB5_ID, 10) +#define M4U_PORT_CAM_FDVT_WR MTK_M4U_ID(M4U_LARB5_ID, 11) +#define M4U_PORT_CAM_FDVT_RB MTK_M4U_ID(M4U_LARB5_ID, 12) +#define M4U_PORT_CAM_WPE1_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 13) +#define M4U_PORT_CAM_WPE1_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 14) +#define M4U_PORT_CAM_WPE1_WDMA MTK_M4U_ID(M4U_LARB5_ID, 15) +#define M4U_PORT_CAM_DPE_RDMA 
MTK_M4U_ID(M4U_LARB5_ID, 16) +#define M4U_PORT_CAM_DPE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 17) +#define M4U_PORT_CAM_MFB_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 18) +#define M4U_PORT_CAM_MFB_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 19) +#define M4U_PORT_CAM_MFB_WDMA MTK_M4U_ID(M4U_LARB5_ID, 20) +#define M4U_PORT_CAM_RSC_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 21) +#define M4U_PORT_CAM_RSC_WDMA MTK_M4U_ID(M4U_LARB5_ID, 22) +#define M4U_PORT_CAM_OWE_RDMA MTK_M4U_ID(M4U_LARB5_ID, 23) +#define M4U_PORT_CAM_OWE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 24) + +/* larb6 */ +#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB6_ID, 0) +#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB6_ID, 1) +#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB6_ID, 2) +#define M4U_PORT_CAM_AFO MTK_M4U_ID(M4U_LARB6_ID, 3) +#define M4U_PORT_CAM_LSCI0 MTK_M4U_ID(M4U_LARB6_ID, 4) +#define M4U_PORT_CAM_LSCI1 MTK_M4U_ID(M4U_LARB6_ID, 5) +#define M4U_PORT_CAM_PDO MTK_M4U_ID(M4U_LARB6_ID, 6) +#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB6_ID, 7) +#define M4U_PORT_CAM_LCSO MTK_M4U_ID(M4U_LARB6_ID, 8) +#define M4U_PORT_CAM_CAM_RSSO_A MTK_M4U_ID(M4U_LARB6_ID, 9) +#define M4U_PORT_CAM_UFEO MTK_M4U_ID(M4U_LARB6_ID, 10) +#define M4U_PORT_CAM_SOCO MTK_M4U_ID(M4U_LARB6_ID, 11) +#define M4U_PORT_CAM_SOC1 MTK_M4U_ID(M4U_LARB6_ID, 12) +#define M4U_PORT_CAM_SOC2 MTK_M4U_ID(M4U_LARB6_ID, 13) +#define M4U_PORT_CAM_CCUI MTK_M4U_ID(M4U_LARB6_ID, 14) +#define M4U_PORT_CAM_CCUO MTK_M4U_ID(M4U_LARB6_ID, 15) +#define M4U_PORT_CAM_RAWI_A MTK_M4U_ID(M4U_LARB6_ID, 16) +#define M4U_PORT_CAM_CCUG MTK_M4U_ID(M4U_LARB6_ID, 17) +#define M4U_PORT_CAM_PSO MTK_M4U_ID(M4U_LARB6_ID, 18) +#define M4U_PORT_CAM_AFO_1 MTK_M4U_ID(M4U_LARB6_ID, 19) +#define M4U_PORT_CAM_LSCI_2 MTK_M4U_ID(M4U_LARB6_ID, 20) +#define M4U_PORT_CAM_PDI MTK_M4U_ID(M4U_LARB6_ID, 21) +#define M4U_PORT_CAM_FLKO MTK_M4U_ID(M4U_LARB6_ID, 22) +#define M4U_PORT_CAM_LMVO MTK_M4U_ID(M4U_LARB6_ID, 23) +#define M4U_PORT_CAM_UFGO MTK_M4U_ID(M4U_LARB6_ID, 24) +#define M4U_PORT_CAM_SPARE MTK_M4U_ID(M4U_LARB6_ID, 25) +#define M4U_PORT_CAM_SPARE_2 MTK_M4U_ID(M4U_LARB6_ID, 26) +#define M4U_PORT_CAM_SPARE_3 MTK_M4U_ID(M4U_LARB6_ID, 27) +#define M4U_PORT_CAM_SPARE_4 MTK_M4U_ID(M4U_LARB6_ID, 28) +#define M4U_PORT_CAM_SPARE_5 MTK_M4U_ID(M4U_LARB6_ID, 29) +#define M4U_PORT_CAM_SPARE_6 MTK_M4U_ID(M4U_LARB6_ID, 30) + +/* CCU */ +#define M4U_PORT_CCU0 MTK_M4U_ID(M4U_LARB7_ID, 0) +#define M4U_PORT_CCU1 MTK_M4U_ID(M4U_LARB7_ID, 1) + +#endif diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h index 3b48847cd83b..6fc4b445d3a1 100644 --- a/include/dt-bindings/net/ti-dp83867.h +++ b/include/dt-bindings/net/ti-dp83867.h @@ -48,4 +48,6 @@ #define DP83867_CLK_O_SEL_CHN_C_TCLK 0xA #define DP83867_CLK_O_SEL_CHN_D_TCLK 0xB #define DP83867_CLK_O_SEL_REF_CLK 0xC +/* Special flag to indicate clock should be off */ +#define DP83867_CLK_O_SEL_OFF 0xFFFFFFFF #endif diff --git a/include/dt-bindings/phy/phy-lantiq-vrx200-pcie.h b/include/dt-bindings/phy/phy-lantiq-vrx200-pcie.h new file mode 100644 index 000000000000..95a7896356d6 --- /dev/null +++ b/include/dt-bindings/phy/phy-lantiq-vrx200-pcie.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + */ + +#define LANTIQ_PCIE_PHY_MODE_25MHZ 0 +#define LANTIQ_PCIE_PHY_MODE_25MHZ_SSC 1 +#define LANTIQ_PCIE_PHY_MODE_36MHZ 2 +#define LANTIQ_PCIE_PHY_MODE_36MHZ_SSC 3 +#define LANTIQ_PCIE_PHY_MODE_100MHZ 4 +#define LANTIQ_PCIE_PHY_MODE_100MHZ_SSC 5 diff --git a/include/dt-bindings/pinctrl/k3.h 
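A minimal sketch of the MTK_M4U_ID() packing introduced in mt8183-larb-port.h above: the LARB index sits above a 5-bit port field, so both halves of an IOMMU master ID can be recovered with the inverse shift and mask. The decode helpers and the sample value below are illustrative assumptions, not part of the header.

/* Sketch only: invert the MTK_M4U_ID() encoding shown above. */
#include <stdio.h>

#define MTK_M4U_ID(larb, port)	(((larb) << 5) | (port))

static unsigned int m4u_id_larb(unsigned int id) { return id >> 5; }   /* bits above the port field */
static unsigned int m4u_id_port(unsigned int id) { return id & 0x1f; } /* low 5 bits */

int main(void)
{
	/* M4U_PORT_VENC_REC above is MTK_M4U_ID(4, 1), i.e. (4 << 5) | 1 == 129 */
	unsigned int id = MTK_M4U_ID(4, 1);

	printf("id=%u larb=%u port=%u\n", id, m4u_id_larb(id), m4u_id_port(id));
	return 0;
}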
b/include/dt-bindings/pinctrl/k3.h index 45e11b6170ca..499de6216581 100644 --- a/include/dt-bindings/pinctrl/k3.h +++ b/include/dt-bindings/pinctrl/k3.h @@ -32,4 +32,7 @@ #define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) #define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define J721E_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define J721E_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) + #endif diff --git a/include/dt-bindings/power/meson-g12a-power.h b/include/dt-bindings/power/meson-g12a-power.h new file mode 100644 index 000000000000..bb5e67a842de --- /dev/null +++ b/include/dt-bindings/power/meson-g12a-power.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (c) 2019 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + */ + +#ifndef _DT_BINDINGS_MESON_G12A_POWER_H +#define _DT_BINDINGS_MESON_G12A_POWER_H + +#define PWRC_G12A_VPU_ID 0 +#define PWRC_G12A_ETH_ID 1 + +#endif diff --git a/include/dt-bindings/power/meson-sm1-power.h b/include/dt-bindings/power/meson-sm1-power.h new file mode 100644 index 000000000000..a020ab00c134 --- /dev/null +++ b/include/dt-bindings/power/meson-sm1-power.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (c) 2019 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + */ + +#ifndef _DT_BINDINGS_MESON_SM1_POWER_H +#define _DT_BINDINGS_MESON_SM1_POWER_H + +#define PWRC_SM1_VPU_ID 0 +#define PWRC_SM1_NNA_ID 1 +#define PWRC_SM1_USB_ID 2 +#define PWRC_SM1_PCIE_ID 3 +#define PWRC_SM1_GE2D_ID 4 +#define PWRC_SM1_AUDIO_ID 5 +#define PWRC_SM1_ETH_ID 6 + +#endif diff --git a/include/dt-bindings/power/qcom-aoss-qmp.h b/include/dt-bindings/power/qcom-aoss-qmp.h new file mode 100644 index 000000000000..ec336d31dee4 --- /dev/null +++ b/include/dt-bindings/power/qcom-aoss-qmp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Linaro Ltd. 
*/ + +#ifndef __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H +#define __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H + +#define AOSS_QMP_LS_CDSP 0 +#define AOSS_QMP_LS_LPASS 1 +#define AOSS_QMP_LS_MODEM 2 +#define AOSS_QMP_LS_SLPI 3 +#define AOSS_QMP_LS_SPSS 4 +#define AOSS_QMP_LS_VENUS 5 + +#endif diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index 87d9c6611682..93e36d011527 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -36,4 +36,38 @@ #define MSM8996_VDDSSCX 5 #define MSM8996_VDDSSCX_VFC 6 +/* MSM8998 Power Domain Indexes */ +#define MSM8998_VDDCX 0 +#define MSM8998_VDDCX_AO 1 +#define MSM8998_VDDCX_VFL 2 +#define MSM8998_VDDMX 3 +#define MSM8998_VDDMX_AO 4 +#define MSM8998_VDDMX_VFL 5 +#define MSM8998_SSCCX 6 +#define MSM8998_SSCCX_VFL 7 +#define MSM8998_SSCMX 8 +#define MSM8998_SSCMX_VFL 9 + +/* QCS404 Power Domains */ +#define QCS404_VDDMX 0 +#define QCS404_VDDMX_AO 1 +#define QCS404_VDDMX_VFL 2 +#define QCS404_LPICX 3 +#define QCS404_LPICX_VFL 4 +#define QCS404_LPIMX 5 +#define QCS404_LPIMX_VFL 6 + +/* RPM SMD Power Domain performance levels */ +#define RPM_SMD_LEVEL_RETENTION 16 +#define RPM_SMD_LEVEL_RETENTION_PLUS 32 +#define RPM_SMD_LEVEL_MIN_SVS 48 +#define RPM_SMD_LEVEL_LOW_SVS 64 +#define RPM_SMD_LEVEL_SVS 128 +#define RPM_SMD_LEVEL_SVS_PLUS 192 +#define RPM_SMD_LEVEL_NOM 256 +#define RPM_SMD_LEVEL_NOM_PLUS 320 +#define RPM_SMD_LEVEL_TURBO 384 +#define RPM_SMD_LEVEL_TURBO_NO_CPR 416 +#define RPM_SMD_LEVEL_BINNING 512 + #endif diff --git a/include/dt-bindings/regulator/active-semi,8865-regulator.h b/include/dt-bindings/regulator/active-semi,8865-regulator.h new file mode 100644 index 000000000000..15473dbeaf38 --- /dev/null +++ b/include/dt-bindings/regulator/active-semi,8865-regulator.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Device Tree binding constants for the ACT8865 PMIC regulators + */ + +#ifndef _DT_BINDINGS_REGULATOR_ACT8865_H +#define _DT_BINDINGS_REGULATOR_ACT8865_H + +/* + * These constants should be used to specify regulator modes in device tree for + * ACT8865 regulators as follows: + * ACT8865_REGULATOR_MODE_FIXED: It is specific to DCDC regulators and it + * specifies the usage of fixed-frequency + * PWM. + * + * ACT8865_REGULATOR_MODE_NORMAL: It is specific to LDO regulators and it + * specifies the usage of normal mode. + * + * ACT8865_REGULATOR_MODE_LOWPOWER: For DCDC and LDO regulators; it specify + * the usage of proprietary power-saving + * mode. + */ + +#define ACT8865_REGULATOR_MODE_FIXED 1 +#define ACT8865_REGULATOR_MODE_NORMAL 2 +#define ACT8865_REGULATOR_MODE_LOWPOWER 3 + +#endif diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset-controller/mt8183-resets.h new file mode 100644 index 000000000000..8804e34ebdd4 --- /dev/null +++ b/include/dt-bindings/reset-controller/mt8183-resets.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Yong Liang <yong.liang@mediatek.com> + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8183 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8183 + +/* INFRACFG AO resets */ +#define MT8183_INFRACFG_AO_THERM_SW_RST 0 +#define MT8183_INFRACFG_AO_USB_TOP_SW_RST 1 +#define MT8183_INFRACFG_AO_MM_IOMMU_SW_RST 3 +#define MT8183_INFRACFG_AO_MSDC3_SW_RST 4 +#define MT8183_INFRACFG_AO_MSDC2_SW_RST 5 +#define MT8183_INFRACFG_AO_MSDC1_SW_RST 6 +#define MT8183_INFRACFG_AO_MSDC0_SW_RST 7 +#define MT8183_INFRACFG_AO_APDMA_SW_RST 9 +#define MT8183_INFRACFG_AO_MIMP_D_SW_RST 10 +#define MT8183_INFRACFG_AO_BTIF_SW_RST 12 +#define MT8183_INFRACFG_AO_DISP_PWM_SW_RST 14 +#define MT8183_INFRACFG_AO_AUXADC_SW_RST 15 + +#define MT8183_INFRACFG_AO_IRTX_SW_RST 32 +#define MT8183_INFRACFG_AO_SPI0_SW_RST 33 +#define MT8183_INFRACFG_AO_I2C0_SW_RST 34 +#define MT8183_INFRACFG_AO_I2C1_SW_RST 35 +#define MT8183_INFRACFG_AO_I2C2_SW_RST 36 +#define MT8183_INFRACFG_AO_I2C3_SW_RST 37 +#define MT8183_INFRACFG_AO_UART0_SW_RST 38 +#define MT8183_INFRACFG_AO_UART1_SW_RST 39 +#define MT8183_INFRACFG_AO_UART2_SW_RST 40 +#define MT8183_INFRACFG_AO_PWM_SW_RST 41 +#define MT8183_INFRACFG_AO_SPI1_SW_RST 42 +#define MT8183_INFRACFG_AO_I2C4_SW_RST 43 +#define MT8183_INFRACFG_AO_DVFSP_SW_RST 44 +#define MT8183_INFRACFG_AO_SPI2_SW_RST 45 +#define MT8183_INFRACFG_AO_SPI3_SW_RST 46 +#define MT8183_INFRACFG_AO_UFSHCI_SW_RST 47 + +#define MT8183_INFRACFG_AO_PMIC_WRAP_SW_RST 64 +#define MT8183_INFRACFG_AO_SPM_SW_RST 65 +#define MT8183_INFRACFG_AO_USBSIF_SW_RST 66 +#define MT8183_INFRACFG_AO_KP_SW_RST 68 +#define MT8183_INFRACFG_AO_APXGPT_SW_RST 69 +#define MT8183_INFRACFG_AO_CLDMA_AO_SW_RST 70 +#define MT8183_INFRACFG_AO_UNIPRO_UFS_SW_RST 71 +#define MT8183_INFRACFG_AO_DX_CC_SW_RST 72 +#define MT8183_INFRACFG_AO_UFSPHY_SW_RST 73 + +#define MT8183_INFRACFG_AO_DX_CC_SEC_SW_RST 96 +#define MT8183_INFRACFG_AO_GCE_SW_RST 97 +#define MT8183_INFRACFG_AO_CLDMA_SW_RST 98 +#define MT8183_INFRACFG_AO_TRNG_SW_RST 99 +#define MT8183_INFRACFG_AO_AP_MD_CCIF_1_SW_RST 103 +#define MT8183_INFRACFG_AO_AP_MD_CCIF_SW_RST 104 +#define MT8183_INFRACFG_AO_I2C1_IMM_SW_RST 105 +#define MT8183_INFRACFG_AO_I2C1_ARB_SW_RST 106 +#define MT8183_INFRACFG_AO_I2C2_IMM_SW_RST 107 +#define MT8183_INFRACFG_AO_I2C2_ARB_SW_RST 108 +#define MT8183_INFRACFG_AO_I2C5_SW_RST 109 +#define MT8183_INFRACFG_AO_I2C5_IMM_SW_RST 110 +#define MT8183_INFRACFG_AO_I2C5_ARB_SW_RST 111 +#define MT8183_INFRACFG_AO_SPI4_SW_RST 112 +#define MT8183_INFRACFG_AO_SPI5_SW_RST 113 +#define MT8183_INFRACFG_AO_INFRA2MFGAXI_CBIP_CLAS_SW_RST 114 +#define MT8183_INFRACFG_AO_MFGAXI2INFRA_M0_CBIP_GLAS_OUT_SW_RST 115 +#define MT8183_INFRACFG_AO_MFGAXI2INFRA_M1_CBIP_GLAS_OUT_SW_RST 116 +#define MT8183_INFRACFG_AO_UFS_AES_SW_RST 117 +#define MT8183_INFRACFG_AO_CCU_I2C_IRQ_SW_RST 118 +#define MT8183_INFRACFG_AO_CCU_I2C_DMA_SW_RST 119 +#define MT8183_INFRACFG_AO_I2C6_SW_RST 120 +#define MT8183_INFRACFG_AO_CCU_GALS_SW_RST 121 +#define MT8183_INFRACFG_AO_IPU_GALS_SW_RST 122 +#define MT8183_INFRACFG_AO_CONN2AP_GALS_SW_RST 123 +#define MT8183_INFRACFG_AO_AP_MD_CCIF2_SW_RST 124 +#define MT8183_INFRACFG_AO_AP_MD_CCIF3_SW_RST 125 +#define MT8183_INFRACFG_AO_I2C7_SW_RST 126 +#define MT8183_INFRACFG_AO_I2C8_SW_RST 127 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8183 */ diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h new file mode 100644 index 000000000000..14b78dabed0e --- /dev/null +++ 
b/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 BayLibre, SAS. + * Author: Jerome Brunet <jbrunet@baylibre.com> + * + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_G12A_AUDIO_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_G12A_AUDIO_RESET_H + +#define AUD_RESET_PDM 0 +#define AUD_RESET_TDMIN_A 1 +#define AUD_RESET_TDMIN_B 2 +#define AUD_RESET_TDMIN_C 3 +#define AUD_RESET_TDMIN_LB 4 +#define AUD_RESET_LOOPBACK 5 +#define AUD_RESET_TODDR_A 6 +#define AUD_RESET_TODDR_B 7 +#define AUD_RESET_TODDR_C 8 +#define AUD_RESET_FRDDR_A 9 +#define AUD_RESET_FRDDR_B 10 +#define AUD_RESET_FRDDR_C 11 +#define AUD_RESET_TDMOUT_A 12 +#define AUD_RESET_TDMOUT_B 13 +#define AUD_RESET_TDMOUT_C 14 +#define AUD_RESET_SPDIFOUT 15 +#define AUD_RESET_SPDIFOUT_B 16 +#define AUD_RESET_SPDIFIN 17 +#define AUD_RESET_EQDRC 18 +#define AUD_RESET_RESAMPLE 19 +#define AUD_RESET_DDRARB 20 +#define AUD_RESET_POWDET 21 +#define AUD_RESET_TORAM 22 +#define AUD_RESET_TOACODEC 23 +#define AUD_RESET_TOHDMITX 24 +#define AUD_RESET_CLKTREE 25 + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h index 524d6077ac1b..ea5058618863 100644 --- a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h +++ b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h @@ -1,56 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * * Copyright (c) 2016 BayLibre, SAS. * Author: Neil Armstrong <narmstrong@baylibre.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * BSD LICENSE - * - * Copyright (c) 2016 BayLibre, SAS. - * Author: Neil Armstrong <narmstrong@baylibre.com> - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H #define _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h index 614aff2c7aff..c614438bcbdb 100644 --- a/include/dt-bindings/reset/amlogic,meson8b-reset.h +++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h @@ -1,56 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * * Copyright (c) 2016 BayLibre, SAS. * Author: Neil Armstrong <narmstrong@baylibre.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * BSD LICENSE - * - * Copyright (c) 2016 BayLibre, SAS. - * Author: Neil Armstrong <narmstrong@baylibre.com> - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H #define _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H diff --git a/include/dt-bindings/reset/bitmain,bm1880-reset.h b/include/dt-bindings/reset/bitmain,bm1880-reset.h new file mode 100644 index 000000000000..4c0de5223773 --- /dev/null +++ b/include/dt-bindings/reset/bitmain,bm1880-reset.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2018 Bitmain Ltd. + * Copyright (c) 2019 Linaro Ltd. + */ + +#ifndef _DT_BINDINGS_BM1880_RESET_H +#define _DT_BINDINGS_BM1880_RESET_H + +#define BM1880_RST_MAIN_AP 0 +#define BM1880_RST_SECOND_AP 1 +#define BM1880_RST_DDR 2 +#define BM1880_RST_VIDEO 3 +#define BM1880_RST_JPEG 4 +#define BM1880_RST_VPP 5 +#define BM1880_RST_GDMA 6 +#define BM1880_RST_AXI_SRAM 7 +#define BM1880_RST_TPU 8 +#define BM1880_RST_USB 9 +#define BM1880_RST_ETH0 10 +#define BM1880_RST_ETH1 11 +#define BM1880_RST_NAND 12 +#define BM1880_RST_EMMC 13 +#define BM1880_RST_SD 14 +#define BM1880_RST_SDMA 15 +#define BM1880_RST_I2S0 16 +#define BM1880_RST_I2S1 17 +#define BM1880_RST_UART0_1_CLK 18 +#define BM1880_RST_UART0_1_ACLK 19 +#define BM1880_RST_UART2_3_CLK 20 +#define BM1880_RST_UART2_3_ACLK 21 +#define BM1880_RST_MINER 22 +#define BM1880_RST_I2C0 23 +#define BM1880_RST_I2C1 24 +#define BM1880_RST_I2C2 25 +#define BM1880_RST_I2C3 26 +#define BM1880_RST_I2C4 27 +#define BM1880_RST_PWM0 28 +#define BM1880_RST_PWM1 29 +#define BM1880_RST_PWM2 30 +#define BM1880_RST_PWM3 31 +#define BM1880_RST_SPI 32 +#define BM1880_RST_GPIO0 33 +#define BM1880_RST_GPIO1 34 +#define BM1880_RST_GPIO2 35 +#define BM1880_RST_EFUSE 36 +#define BM1880_RST_WDT 37 +#define BM1880_RST_AHB_ROM 38 +#define BM1880_RST_SPIC 39 + +#endif /* _DT_BINDINGS_BM1880_RESET_H */ diff --git a/include/dt-bindings/reset/hisi,hi6220-resets.h b/include/dt-bindings/reset/hisi,hi6220-resets.h index e7c362a81a97..63aff7d8aa45 100644 --- a/include/dt-bindings/reset/hisi,hi6220-resets.h +++ b/include/dt-bindings/reset/hisi,hi6220-resets.h @@ -73,4 +73,11 @@ #define MEDIA_MMU 6 #define MEDIA_XG2RAM1 7 +#define AO_G3D 1 +#define AO_CODECISP 2 +#define AO_MCPU 4 +#define AO_BBPHARQMEM 5 +#define AO_HIFI 8 +#define AO_ACPUSCUL2C 12 + #endif /*_DT_BINDINGS_RESET_CONTROLLER_HI6220*/ diff --git a/include/dt-bindings/reset/imx8mq-reset.h b/include/dt-bindings/reset/imx8mq-reset.h index 57c592498aa0..9a301082d361 100644 --- a/include/dt-bindings/reset/imx8mq-reset.h +++ b/include/dt-bindings/reset/imx8mq-reset.h @@ -31,33 +31,33 @@ #define IMX8MQ_RESET_OTG2_PHY_RESET 20 #define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21 #define IMX8MQ_RESET_MIPI_DSI_RESET_N 22 -#define IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N 23 -#define IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N 24 -#define IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N 25 +#define IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N 23 +#define IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N 24 +#define IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N 25 #define IMX8MQ_RESET_PCIEPHY 26 #define IMX8MQ_RESET_PCIEPHY_PERST 27 #define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28 
#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29 -#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 +#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 /* i.MX8MM does NOT support */ #define IMX8MQ_RESET_DISP_RESET 31 #define IMX8MQ_RESET_GPU_RESET 32 #define IMX8MQ_RESET_VPU_RESET 33 -#define IMX8MQ_RESET_PCIEPHY2 34 -#define IMX8MQ_RESET_PCIEPHY2_PERST 35 -#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 -#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 -#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 -#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 -#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 -#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 -#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 -#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 +#define IMX8MQ_RESET_PCIEPHY2 34 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_PCIEPHY2_PERST 35 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 /* i.MX8MM does NOT support */ #define IMX8MQ_RESET_DDRC1_PRST 44 #define IMX8MQ_RESET_DDRC1_CORE_RESET 45 #define IMX8MQ_RESET_DDRC1_PHY_RESET 46 -#define IMX8MQ_RESET_DDRC2_PRST 47 -#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 -#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 +#define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM does NOT support */ #define IMX8MQ_RESET_NUM 50 diff --git a/include/dt-bindings/reset/mt7629-resets.h b/include/dt-bindings/reset/mt7629-resets.h new file mode 100644 index 000000000000..6bb85734f68d --- /dev/null +++ b/include/dt-bindings/reset/mt7629-resets.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 MediaTek Inc. 
+ */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7629 +#define _DT_BINDINGS_RESET_CONTROLLER_MT7629 + +/* INFRACFG resets */ +#define MT7629_INFRA_EMI_MPU_RST 0 +#define MT7629_INFRA_UART5_RST 2 +#define MT7629_INFRA_CIRQ_EINT_RST 3 +#define MT7629_INFRA_APXGPT_RST 4 +#define MT7629_INFRA_SCPSYS_RST 5 +#define MT7629_INFRA_KP_RST 6 +#define MT7629_INFRA_SPI1_RST 7 +#define MT7629_INFRA_SPI4_RST 8 +#define MT7629_INFRA_SYSTIMER_RST 9 +#define MT7629_INFRA_IRRX_RST 10 +#define MT7629_INFRA_AO_BUS_RST 16 +#define MT7629_INFRA_EMI_RST 32 +#define MT7629_INFRA_APMIXED_RST 35 +#define MT7629_INFRA_MIPI_RST 36 +#define MT7629_INFRA_TRNG_RST 37 +#define MT7629_INFRA_SYSCIRQ_RST 38 +#define MT7629_INFRA_MIPI_CSI_RST 39 +#define MT7629_INFRA_GCE_FAXI_RST 40 +#define MT7629_INFRA_I2C_SRAM_RST 41 +#define MT7629_INFRA_IOMMU_RST 47 + +/* PERICFG resets */ +#define MT7629_PERI_UART0_SW_RST 0 +#define MT7629_PERI_UART1_SW_RST 1 +#define MT7629_PERI_UART2_SW_RST 2 +#define MT7629_PERI_BTIF_SW_RST 6 +#define MT7629_PERI_PWN_SW_RST 8 +#define MT7629_PERI_DMA_SW_RST 11 +#define MT7629_PERI_NFI_SW_RST 14 +#define MT7629_PERI_I2C0_SW_RST 22 +#define MT7629_PERI_SPI0_SW_RST 33 +#define MT7629_PERI_SPI1_SW_RST 34 +#define MT7629_PERI_FLASHIF_SW_RST 36 + +/* PCIe Subsystem resets */ +#define MT7629_PCIE1_CORE_RST 19 +#define MT7629_PCIE1_MMIO_RST 20 +#define MT7629_PCIE1_HRST 21 +#define MT7629_PCIE1_USER_RST 22 +#define MT7629_PCIE1_PIPE_RST 23 +#define MT7629_PCIE0_CORE_RST 27 +#define MT7629_PCIE0_MMIO_RST 28 +#define MT7629_PCIE0_HRST 29 +#define MT7629_PCIE0_USER_RST 30 +#define MT7629_PCIE0_PIPE_RST 31 + +/* SSUSB Subsystem resets */ +#define MT7629_SSUSB_PHY_PWR_RST 3 +#define MT7629_SSUSB_MAC_PWR_RST 4 + +/* ETH Subsystem resets */ +#define MT7629_ETHSYS_SYS_RST 0 +#define MT7629_ETHSYS_MCM_RST 2 +#define MT7629_ETHSYS_HSDMA_RST 5 +#define MT7629_ETHSYS_FE_RST 6 +#define MT7629_ETHSYS_ESW_RST 16 +#define MT7629_ETHSYS_GMAC_RST 23 +#define MT7629_ETHSYS_EPHY_RST 24 +#define MT7629_ETHSYS_CRYPTO_RST 29 +#define MT7629_ETHSYS_PPE_RST 31 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7629 */ diff --git a/include/dt-bindings/reset/sun8i-v3s-ccu.h b/include/dt-bindings/reset/sun8i-v3s-ccu.h index b58ef21a2e18..b6790173afd6 100644 --- a/include/dt-bindings/reset/sun8i-v3s-ccu.h +++ b/include/dt-bindings/reset/sun8i-v3s-ccu.h @@ -75,4 +75,7 @@ #define RST_BUS_UART1 50 #define RST_BUS_UART2 51 +/* Reset lines not available on V3s */ +#define RST_BUS_I2S0 52 + #endif /* _DT_BINDINGS_RST_SUN8I_H3_H_ */ diff --git a/include/dt-bindings/soc/ti,sci_pm_domain.h b/include/dt-bindings/soc/ti,sci_pm_domain.h new file mode 100644 index 000000000000..8f2a7360b65e --- /dev/null +++ b/include/dt-bindings/soc/ti,sci_pm_domain.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __DT_BINDINGS_TI_SCI_PM_DOMAIN_H +#define __DT_BINDINGS_TI_SCI_PM_DOMAIN_H + +#define TI_SCI_PD_EXCLUSIVE 1 +#define TI_SCI_PD_SHARED 0 + +#endif /* __DT_BINDINGS_TI_SCI_PM_DOMAIN_H */ diff --git a/include/dt-bindings/sound/madera.h b/include/dt-bindings/sound/madera.h new file mode 100644 index 000000000000..d0096d5eb0da --- /dev/null +++ b/include/dt-bindings/sound/madera.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Device Tree defines for Madera codecs + * + * Copyright (C) 2016-2017 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. 
+ */ + +#ifndef DT_BINDINGS_SOUND_MADERA_H +#define DT_BINDINGS_SOUND_MADERA_H + +#define MADERA_INMODE_DIFF 0 +#define MADERA_INMODE_SE 1 +#define MADERA_INMODE_DMIC 2 + +#define MADERA_DMIC_REF_MICVDD 0 +#define MADERA_DMIC_REF_MICBIAS1 1 +#define MADERA_DMIC_REF_MICBIAS2 2 +#define MADERA_DMIC_REF_MICBIAS3 3 + +#define CS47L35_DMIC_REF_MICBIAS1B 1 +#define CS47L35_DMIC_REF_MICBIAS2A 2 +#define CS47L35_DMIC_REF_MICBIAS2B 3 + +#endif diff --git a/include/dt-bindings/sound/meson-g12a-tohdmitx.h b/include/dt-bindings/sound/meson-g12a-tohdmitx.h new file mode 100644 index 000000000000..c5e1f48d30d0 --- /dev/null +++ b/include/dt-bindings/sound/meson-g12a-tohdmitx.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_MESON_G12A_TOHDMITX_H +#define __DT_MESON_G12A_TOHDMITX_H + +#define TOHDMITX_I2S_IN_A 0 +#define TOHDMITX_I2S_IN_B 1 +#define TOHDMITX_I2S_IN_C 2 +#define TOHDMITX_I2S_OUT 3 +#define TOHDMITX_SPDIF_IN_A 4 +#define TOHDMITX_SPDIF_IN_B 5 +#define TOHDMITX_SPDIF_OUT 6 + +#endif /* __DT_MESON_G12A_TOHDMITX_H */ diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h index 20485ca481f4..36b89a933310 100644 --- a/include/keys/request_key_auth-type.h +++ b/include/keys/request_key_auth-type.h @@ -14,6 +14,7 @@ * Authorisation record for request_key(). */ struct request_key_auth { + struct rcu_head rcu; struct key *target_key; struct key *dest_keyring; const struct cred *cred; diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 84a9db156be7..6db030439e29 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -11,18 +11,19 @@ #include <asm/perf_event.h> #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) +#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1) #ifdef CONFIG_KVM_ARM_PMU struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ struct perf_event *perf_event; - u64 bitmask; }; struct kvm_pmu { int irq_num; struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; + DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS); bool ready; bool created; bool irq_level; @@ -33,10 +34,11 @@ struct kvm_pmu { u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); -void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val); -void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val); +void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu); void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu); bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu); @@ -70,10 +72,11 @@ static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { return 0; } +static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {} -static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {} -static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} +static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} static inline void 
kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {} static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {} static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu) diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 46bbc949c20a..af4f09c02bf1 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -249,6 +249,9 @@ struct vgic_dist { struct list_head lpi_list_head; int lpi_list_count; + /* LPI translation cache */ + struct list_head lpi_translation_cache; + /* used by vgic-debug */ struct vgic_state_iter *iter; @@ -311,7 +314,6 @@ struct vgic_cpu { * parts of the redistributor. */ struct vgic_io_device rd_iodev; - struct vgic_io_device sgi_iodev; struct vgic_redist_region *rdreg; /* Contains the attributes and gpa of the LPI pending tables. */ @@ -350,6 +352,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); void kvm_vgic_load(struct kvm_vcpu *vcpu); void kvm_vgic_put(struct kvm_vcpu *vcpu); +void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu); #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) #define vgic_initialized(k) ((k)->arch.vgic.initialized) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index d315d86844e4..8b4e516bac00 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -10,6 +10,7 @@ #include <linux/errno.h> #include <linux/ioport.h> /* for struct resource */ +#include <linux/irqdomain.h> #include <linux/resource_ext.h> #include <linux/device.h> #include <linux/property.h> @@ -314,10 +315,19 @@ int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); void acpi_set_irq_model(enum acpi_irq_model_id model, struct fwnode_handle *fwnode); +struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, + unsigned int size, + struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data); + #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else -#define acpi_get_override_irq(gsi, trigger, polarity) (-1) +static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) +{ + return -1; +} #endif /* * This function undoes the effect of one call to acpi_register_gsi(). 
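The acpi.h hunk above replaces the !CONFIG_X86_IO_APIC acpi_get_override_irq() macro stub with a static inline returning the same -1. A minimal standalone sketch of the difference, using hypothetical stand-in names rather than the real kernel symbols, is:

/* Sketch only: contrast a macro stub with a static inline stub. */
#include <stdio.h>

/* Old form: the arguments are discarded unevaluated, so callers get no
 * type checking when the stub variant is the one being compiled. */
#define old_get_override_irq(gsi, trigger, polarity) (-1)

/* New form: the signature mirrors the real function, so argument types
 * are still checked in stub builds and behaviour stays uniform. */
static inline int new_get_override_irq(unsigned int gsi, int *trigger,
				       int *polarity)
{
	(void)gsi;
	(void)trigger;
	(void)polarity;
	return -1;
}

int main(void)
{
	int trigger = 0, polarity = 0;

	/* Both stubs report "no override" (-1). */
	printf("%d %d\n",
	       old_get_override_irq(9, &trigger, &polarity),
	       new_get_override_irq(9, &trigger, &polarity));
	return 0;
}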
@@ -367,6 +377,7 @@ extern acpi_status wmi_install_notify_handler(const char *guid, extern acpi_status wmi_remove_notify_handler(const char *guid); extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); extern bool wmi_has_guid(const char *guid); +extern char *wmi_get_acpi_device_uid(const char *guid); #endif /* CONFIG_ACPI_WMI */ @@ -632,6 +643,12 @@ bool acpi_gtdt_c3stop(int type); int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count); #endif +#ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER +static inline void acpi_arch_set_root_pointer(u64 addr) +{ +} +#endif + #ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER static inline u64 acpi_arch_get_root_pointer(void) { @@ -913,31 +930,25 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) -int acpi_dev_suspend_late(struct device *dev); int acpi_subsys_prepare(struct device *dev); void acpi_subsys_complete(struct device *dev); int acpi_subsys_suspend_late(struct device *dev); int acpi_subsys_suspend_noirq(struct device *dev); -int acpi_subsys_resume_noirq(struct device *dev); -int acpi_subsys_resume_early(struct device *dev); int acpi_subsys_suspend(struct device *dev); int acpi_subsys_freeze(struct device *dev); -int acpi_subsys_freeze_late(struct device *dev); -int acpi_subsys_freeze_noirq(struct device *dev); -int acpi_subsys_thaw_noirq(struct device *dev); +int acpi_subsys_poweroff(struct device *dev); +void acpi_ec_mark_gpe_for_wake(void); +void acpi_ec_set_gpe_wake_mask(u8 action); #else -static inline int acpi_dev_resume_early(struct device *dev) { return 0; } static inline int acpi_subsys_prepare(struct device *dev) { return 0; } static inline void acpi_subsys_complete(struct device *dev) {} static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; } -static inline int acpi_subsys_resume_noirq(struct device *dev) { return 0; } -static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } -static inline int acpi_subsys_freeze_late(struct device *dev) { return 0; } -static inline int acpi_subsys_freeze_noirq(struct device *dev) { return 0; } -static inline int acpi_subsys_thaw_noirq(struct device *dev) { return 0; } +static inline int acpi_subsys_poweroff(struct device *dev) { return 0; } +static inline void acpi_ec_mark_gpe_for_wake(void) {} +static inline void acpi_ec_set_gpe_wake_mask(u8 action) {} #endif #ifdef CONFIG_ACPI @@ -993,62 +1004,11 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c #endif #endif -struct acpi_gpio_params { - unsigned int crs_entry_index; - unsigned int line_index; - bool active_low; -}; - -struct acpi_gpio_mapping { - const char *name; - const struct acpi_gpio_params *data; - unsigned int size; - -/* Ignore IoRestriction field */ -#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) -/* - * When ACPI GPIO mapping table is in use the index parameter inside it - * refers to the GPIO resource in _CRS method. That index has no - * distinction of actual type of the resource. When consumer wants to - * get GpioIo type explicitly, this quirk may be used. 
- */ -#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1) - - unsigned int quirks; -}; - #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) -int acpi_dev_add_driver_gpios(struct acpi_device *adev, - const struct acpi_gpio_mapping *gpios); - -static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) -{ - if (adev) - adev->driver_gpios = NULL; -} - -int devm_acpi_dev_add_driver_gpios(struct device *dev, - const struct acpi_gpio_mapping *gpios); -void devm_acpi_dev_remove_driver_gpios(struct device *dev); - bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index); #else -static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, - const struct acpi_gpio_mapping *gpios) -{ - return -ENXIO; -} -static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} - -static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, - const struct acpi_gpio_mapping *gpios) -{ - return -ENXIO; -} -static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {} - static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio) { @@ -1301,10 +1261,16 @@ static inline int lpit_read_residency_count_address(u64 *address) #endif #ifdef CONFIG_ACPI_PPTT +int acpi_pptt_cpu_is_thread(unsigned int cpu); int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_package(unsigned int cpu); +int find_acpi_cpu_topology_hetero_id(unsigned int cpu); int find_acpi_cpu_cache_topology(unsigned int cpu, int level); #else +static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) +{ + return -EINVAL; +} static inline int find_acpi_cpu_topology(unsigned int cpu, int level) { return -EINVAL; @@ -1313,6 +1279,10 @@ static inline int find_acpi_cpu_topology_package(unsigned int cpu) { return -EINVAL; } +static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) +{ + return -EINVAL; +} static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level) { return -EINVAL; diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 0760ca1cb009..74748e306f4b 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -5,7 +5,8 @@ #include <linux/time.h> #include <linux/hrtimer.h> #include <linux/timerqueue.h> -#include <linux/rtc.h> + +struct rtc_device; enum alarmtimer_type { ALARM_REALTIME, diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h index 516a6fda83c5..421b0fa90d6a 100644 --- a/include/linux/amba/clcd-regs.h +++ b/include/linux/amba/clcd-regs.h @@ -42,6 +42,7 @@ #define TIM2_PCD_LO_MASK GENMASK(4, 0) #define TIM2_PCD_LO_BITS 5 #define TIM2_CLKSEL (1 << 5) +#define TIM2_ACB_MASK GENMASK(10, 6) #define TIM2_IVS (1 << 11) #define TIM2_IHS (1 << 12) #define TIM2_IPC (1 << 13) diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 4a4d00646040..21e950e4ab62 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -184,6 +184,9 @@ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)); extern int amd_iommu_update_ga(int cpu, bool is_run, void *data); +extern int amd_iommu_activate_guest_mode(void *data); +extern int amd_iommu_deactivate_guest_mode(void *data); + #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ static inline int @@ -198,6 +201,15 @@ amd_iommu_update_ga(int cpu, bool is_run, void *data) return 0; } +static inline int amd_iommu_activate_guest_mode(void *data) 
+{ + return 0; +} + +static inline int amd_iommu_deactivate_guest_mode(void *data) +{ + return 0; +} #endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ #endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index d9bdc1a7f4e7..42f2b5126094 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -18,7 +18,7 @@ DECLARE_PER_CPU(unsigned long, cpu_scale); struct sched_domain; static inline -unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu) +unsigned long topology_get_cpu_scale(int cpu) { return per_cpu(cpu_scale, cpu); } @@ -33,4 +33,30 @@ unsigned long topology_get_freq_scale(int cpu) return per_cpu(freq_scale, cpu); } +struct cpu_topology { + int thread_id; + int core_id; + int package_id; + int llc_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; + cpumask_t llc_sibling; +}; + +#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY +extern struct cpu_topology cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) +#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) +void init_cpu_topology(void); +void store_cpu_topology(unsigned int cpuid); +const struct cpumask *cpu_coregroup_mask(int cpu); +void update_siblings_masks(unsigned int cpu); +void remove_cpu_topology(unsigned int cpuid); +void reset_cpu_topology(void); +#endif + #endif /* _LINUX_ARCH_TOPOLOGY_H_ */ diff --git a/include/linux/audit.h b/include/linux/audit.h index 3a4f2415bb7c..aee3dc9eb378 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -11,7 +11,6 @@ #include <linux/sched.h> #include <linux/ptrace.h> -#include <linux/namei.h> /* LOOKUP_* */ #include <uapi/linux/audit.h> #define AUDIT_INO_UNSET ((unsigned long)-1) @@ -182,6 +181,9 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) } extern u32 audit_enabled; + +extern int audit_signal_info(int sig, struct task_struct *t); + #else /* CONFIG_AUDIT */ static inline __printf(4, 5) void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, @@ -235,6 +237,12 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) } #define audit_enabled AUDIT_OFF + +static inline int audit_signal_info(int sig, struct task_struct *t) +{ + return 0; +} + #endif /* CONFIG_AUDIT */ #ifdef CONFIG_AUDIT_COMPAT_GENERIC @@ -243,6 +251,10 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) #define audit_is_compat(arch) false #endif +#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ +#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ +#define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */ + #ifdef CONFIG_AUDITSYSCALL #include <asm/syscall.h> /* for syscall_get_arch() */ @@ -256,9 +268,6 @@ extern void __audit_syscall_exit(int ret_success, long ret_value); extern struct filename *__audit_reusename(const __user char *uptr); extern void __audit_getname(struct filename *name); -#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ -#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ -#define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */ extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); extern void __audit_file(const struct file *); @@ -319,16 +328,9 @@ 
static inline void audit_getname(struct filename *name) } static inline void audit_inode(struct filename *name, const struct dentry *dentry, - unsigned int flags) { - if (unlikely(!audit_dummy_context())) { - unsigned int aflags = 0; - - if (flags & LOOKUP_PARENT) - aflags |= AUDIT_INODE_PARENT; - if (flags & LOOKUP_NO_EVAL) - aflags |= AUDIT_INODE_NOEVAL; + unsigned int aflags) { + if (unlikely(!audit_dummy_context())) __audit_inode(name, dentry, aflags); - } } static inline void audit_file(struct file *file) { @@ -552,7 +554,7 @@ static inline void __audit_inode_child(struct inode *parent, { } static inline void audit_inode(struct filename *name, const struct dentry *dentry, - unsigned int parent) + unsigned int aflags) { } static inline void audit_file(struct file *file) { diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 191621ff7594..ca956b672ac0 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -61,12 +61,14 @@ enum virtchnl_status_code { #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED +#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 +#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7 enum virtchnl_link_speed { VIRTCHNL_LINK_SPEED_UNKNOWN = 0, @@ -76,6 +78,8 @@ enum virtchnl_link_speed { VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), + VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT), + VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT), }; /* for hsplit_0 field of Rx HMC context */ diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 07e02d6df5ad..4fc87dee005a 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -63,10 +63,31 @@ enum wb_reason { * so it has a mismatch name. */ WB_REASON_FORKER_THREAD, + WB_REASON_FOREIGN_FLUSH, WB_REASON_MAX, }; +struct wb_completion { + atomic_t cnt; + wait_queue_head_t *waitq; +}; + +#define __WB_COMPLETION_INIT(_waitq) \ + (struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) } + +/* + * If one wants to wait for one or more wb_writeback_works, each work's + * ->done should be set to a wb_completion defined using the following + * macro. Once all work items are issued with wb_queue_work(), the caller + * can wait for the completion of all using wb_wait_for_completion(). Work + * items which are waited upon aren't freed automatically on completion. + */ +#define WB_COMPLETION_INIT(bdi) __WB_COMPLETION_INIT(&(bdi)->wb_waitq) + +#define DEFINE_WB_COMPLETION(cmpl, bdi) \ + struct wb_completion cmpl = WB_COMPLETION_INIT(bdi) + /* * For cgroup writeback, multiple wb's may map to the same blkcg. 
Those * wb's can operate mostly independently but should share the congested @@ -165,6 +186,8 @@ struct bdi_writeback { }; struct backing_dev_info { + u64 id; + struct rb_node rb_node; /* keyed by ->id */ struct list_head bdi_list; unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ unsigned long io_pages; /* max allowed IO size */ @@ -203,7 +226,6 @@ struct backing_dev_info { #ifdef CONFIG_DEBUG_FS struct dentry *debug_dir; - struct dentry *debug_stats; #endif }; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index f9b029180241..97967ce06de3 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -24,6 +24,7 @@ static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi) return bdi; } +struct backing_dev_info *bdi_get_by_id(u64 id); void bdi_put(struct backing_dev_info *bdi); __printf(2, 3) @@ -44,10 +45,13 @@ void wb_start_background_writeback(struct bdi_writeback *wb); void wb_workfn(struct work_struct *work); void wb_wakeup_delayed(struct bdi_writeback *wb); +void wb_wait_for_completion(struct wb_completion *done); + extern spinlock_t bdi_lock; extern struct list_head bdi_list; extern struct workqueue_struct *bdi_wq; +extern struct workqueue_struct *bdi_async_bio_wq; static inline bool wb_has_dirty_io(struct bdi_writeback *wb) { @@ -226,6 +230,8 @@ static inline int bdi_sched_wait(void *word) struct bdi_writeback_congested * wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp); void wb_congested_put(struct bdi_writeback_congested *congested); +struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, + struct cgroup_subsys_state *memcg_css); struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css, gfp_t gfp); diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 0b5897446dca..c7d6b2e8c3b5 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -46,6 +46,12 @@ enum backlight_notification { BACKLIGHT_UNREGISTERED, }; +enum backlight_scale { + BACKLIGHT_SCALE_UNKNOWN = 0, + BACKLIGHT_SCALE_LINEAR, + BACKLIGHT_SCALE_NON_LINEAR, +}; + struct backlight_device; struct fb_info; @@ -80,6 +86,8 @@ struct backlight_properties { enum backlight_type type; /* Flags used to signal drivers of state changes */ unsigned int state; + /* Type of the brightness scale (linear, non-linear, ...) 
*/ + enum backlight_scale scale; #define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ #define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index f31521dcb09a..338aa27e4773 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -64,6 +64,10 @@ extern struct page *balloon_page_alloc(void); extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info, struct page *page); extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); +extern size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info, + struct list_head *pages); +extern size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info, + struct list_head *pages, size_t n_req_pages); static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) { diff --git a/include/linux/bio.h b/include/linux/bio.h index f87abaa898f0..3cdb84cdc488 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -102,9 +102,23 @@ static inline void *bio_data(struct bio *bio) return NULL; } -static inline bool bio_full(struct bio *bio) +/** + * bio_full - check if the bio is full + * @bio: bio to check + * @len: length of one segment to be added + * + * Return true if @bio is full and one segment with @len bytes can't be + * added to the bio, otherwise return false + */ +static inline bool bio_full(struct bio *bio, unsigned len) { - return bio->bi_vcnt >= bio->bi_max_vecs; + if (bio->bi_vcnt >= bio->bi_max_vecs) + return true; + + if (bio->bi_iter.bi_size > UINT_MAX - len) + return true; + + return false; } static inline bool bio_next_segment(const struct bio *bio, @@ -408,7 +422,6 @@ static inline void bio_wouldblock_error(struct bio *bio) } struct request_queue; -extern int bio_phys_segments(struct request_queue *, struct bio *); extern int submit_bio_wait(struct bio *bio); extern void bio_advance(struct bio *, unsigned); @@ -427,6 +440,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page, void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); +void bio_release_pages(struct bio *bio, bool mark_dirty); struct rq_map_data; extern struct bio *bio_map_user_iov(struct request_queue *, struct iov_iter *, gfp_t); @@ -444,17 +458,6 @@ void generic_end_io_acct(struct request_queue *q, int op, struct hd_struct *part, unsigned long start_time); -#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" -#endif -#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -extern void bio_flush_dcache_pages(struct bio *bi); -#else -static inline void bio_flush_dcache_pages(struct bio *bi) -{ -} -#endif - extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter); extern void bio_copy_data(struct bio *dst, struct bio *src); diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index f58e97446abc..29fc933df3bf 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -120,6 +120,10 @@ extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits); extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits); extern int __bitmap_equal(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); +extern bool __pure __bitmap_or_equal(const unsigned long *src1, + const unsigned long 
*src2, + const unsigned long *src3, + unsigned int nbits); extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits); extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, @@ -321,6 +325,26 @@ static inline int bitmap_equal(const unsigned long *src1, return __bitmap_equal(src1, src2, nbits); } +/** + * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third + * @src1: Pointer to bitmap 1 + * @src2: Pointer to bitmap 2 will be or'ed with bitmap 1 + * @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2 + * @nbits: number of bits in each of these bitmaps + * + * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise + */ +static inline bool bitmap_or_equal(const unsigned long *src1, + const unsigned long *src2, + const unsigned long *src3, + unsigned int nbits) +{ + if (!small_const_nbits(nbits)) + return __bitmap_or_equal(src1, src2, src3, nbits); + + return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits)); +} + static inline int bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { diff --git a/include/linux/bitops.h b/include/linux/bitops.h index cf074bce3eb3..c94a9ff9f082 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -4,6 +4,13 @@ #include <asm/types.h> #include <linux/bits.h> +/* Set bits in the first 'n' bytes when loaded from memory */ +#ifdef __LITTLE_ENDIAN +# define aligned_byte_mask(n) ((1UL << 8*(n))-1) +#else +# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n))) +#endif + #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) diff --git a/include/linux/bits.h b/include/linux/bits.h index 2b7b532c1d51..669d69441a62 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -1,13 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_BITS_H #define __LINUX_BITS_H + +#include <linux/const.h> #include <asm/bitsperlong.h> -#define BIT(nr) (1UL << (nr)) -#define BIT_ULL(nr) (1ULL << (nr)) -#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT(nr) (UL(1) << (nr)) +#define BIT_ULL(nr) (ULL(1) << (nr)) +#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) -#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) +#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG)) #define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) #define BITS_PER_BYTE 8 @@ -17,10 +19,11 @@ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. */ #define GENMASK(h, l) \ - (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + (((~UL(0)) - (UL(1) << (l)) + 1) & \ + (~UL(0) >> (BITS_PER_LONG - 1 - (h)))) #define GENMASK_ULL(h, l) \ - (((~0ULL) - (1ULL << (l)) + 1) & \ - (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + (((~ULL(0)) - (ULL(1) << (l)) + 1) & \ + (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) #endif /* __LINUX_BITS_H */ diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 76c61318fda5..bed9e43f9426 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -63,19 +63,17 @@ struct blkcg { /* * blkg_[rw]stat->aux_cnt is excluded for local stats but included for - * recursive. Used to carry stats of dead children, and, for blkg_rwstat, - * to carry result values from read and sum operations. + * recursive. Used to carry stats of dead children. 
*/ -struct blkg_stat { - struct percpu_counter cpu_cnt; - atomic64_t aux_cnt; -}; - struct blkg_rwstat { struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR]; atomic64_t aux_cnt[BLKG_RWSTAT_NR]; }; +struct blkg_rwstat_sample { + u64 cnt[BLKG_RWSTAT_NR]; +}; + /* * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a * request_queue (q). This is used by blkcg policies which need to track @@ -134,20 +132,25 @@ struct blkcg_gq { struct blkg_policy_data *pd[BLKCG_MAX_POLS]; - struct rcu_head rcu_head; + spinlock_t async_bio_lock; + struct bio_list async_bios; + struct work_struct async_bio_work; atomic_t use_delay; atomic64_t delay_nsec; atomic64_t delay_start; u64 last_delay; int last_use; + + struct rcu_head rcu_head; }; typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd); typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd); typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd); -typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node); +typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, + struct request_queue *q, struct blkcg *blkcg); typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); @@ -179,6 +182,7 @@ struct blkcg_policy { extern struct blkcg blkcg_root; extern struct cgroup_subsys_state * const blkcg_root_css; +extern bool blkcg_debug_stats; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); @@ -198,6 +202,13 @@ int blkcg_activate_policy(struct request_queue *q, void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol); +static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat, + unsigned int idx) +{ + return atomic64_read(&rwstat->aux_cnt[idx]) + + percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]); +} + const char *blkg_dev_name(struct blkcg_gq *blkg); void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, u64 (*prfill)(struct seq_file *, @@ -206,8 +217,7 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, bool show_total); u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, - const struct blkg_rwstat *rwstat); -u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off); + const struct blkg_rwstat_sample *rwstat); u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, int off); int blkg_print_stat_bytes(struct seq_file *sf, void *v); @@ -215,10 +225,8 @@ int blkg_print_stat_ios(struct seq_file *sf, void *v); int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v); int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v); -u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg, - struct blkcg_policy *pol, int off); -struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, - struct blkcg_policy *pol, int off); +void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol, + int off, struct blkg_rwstat_sample *sum); struct blkg_conf_ctx { struct gendisk *disk; @@ -226,6 +234,7 @@ struct blkg_conf_ctx { char *body; }; +struct gendisk *blkcg_conf_get_disk(char **inputp); int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, char *input, struct blkg_conf_ctx *ctx); 
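A minimal sketch (hypothetical helper, not part of the patch) of how a cgroup policy could total one rwstat using the blkg_rwstat_read_counter() helper added above:

#include <linux/blk-cgroup.h>

/* Hypothetical: total the read and write legs of one blkg_rwstat, combining
 * the percpu counters with the aux counts exactly as the new helper does. */
static u64 example_rwstat_total(struct blkg_rwstat *rwstat)
{
	return blkg_rwstat_read_counter(rwstat, BLKG_RWSTAT_READ) +
	       blkg_rwstat_read_counter(rwstat, BLKG_RWSTAT_WRITE);
}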
void blkg_conf_finish(struct blkg_conf_ctx *ctx); @@ -368,7 +377,7 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, * @q: request_queue of interest * * Lookup blkg for the @blkcg - @q pair. This function should be called - * under RCU read loc. + * under RCU read lock. */ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) @@ -569,69 +578,6 @@ static inline void blkg_put(struct blkcg_gq *blkg) if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ (p_blkg)->q, false))) -static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp) -{ - int ret; - - ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp); - if (ret) - return ret; - - atomic64_set(&stat->aux_cnt, 0); - return 0; -} - -static inline void blkg_stat_exit(struct blkg_stat *stat) -{ - percpu_counter_destroy(&stat->cpu_cnt); -} - -/** - * blkg_stat_add - add a value to a blkg_stat - * @stat: target blkg_stat - * @val: value to add - * - * Add @val to @stat. The caller must ensure that IRQ on the same CPU - * don't re-enter this function for the same counter. - */ -static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) -{ - percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH); -} - -/** - * blkg_stat_read - read the current value of a blkg_stat - * @stat: blkg_stat to read - */ -static inline uint64_t blkg_stat_read(struct blkg_stat *stat) -{ - return percpu_counter_sum_positive(&stat->cpu_cnt); -} - -/** - * blkg_stat_reset - reset a blkg_stat - * @stat: blkg_stat to reset - */ -static inline void blkg_stat_reset(struct blkg_stat *stat) -{ - percpu_counter_set(&stat->cpu_cnt, 0); - atomic64_set(&stat->aux_cnt, 0); -} - -/** - * blkg_stat_add_aux - add a blkg_stat into another's aux count - * @to: the destination blkg_stat - * @from: the source - * - * Add @from's count including the aux one to @to's aux count. - */ -static inline void blkg_stat_add_aux(struct blkg_stat *to, - struct blkg_stat *from) -{ - atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt), - &to->aux_cnt); -} - static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp) { int i, ret; @@ -693,15 +639,14 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, * * Read the current snapshot of @rwstat and return it in the aux counts. 
*/ -static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) +static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat, + struct blkg_rwstat_sample *result) { - struct blkg_rwstat result; int i; for (i = 0; i < BLKG_RWSTAT_NR; i++) - atomic64_set(&result.aux_cnt[i], - percpu_counter_sum_positive(&rwstat->cpu_cnt[i])); - return result; + result->cnt[i] = + percpu_counter_sum_positive(&rwstat->cpu_cnt[i]); } /** @@ -714,10 +659,10 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) */ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat) { - struct blkg_rwstat tmp = blkg_rwstat_read(rwstat); + struct blkg_rwstat_sample tmp = { }; - return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) + - atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]); + blkg_rwstat_read(rwstat, &tmp); + return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]; } /** @@ -763,6 +708,15 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg struct bio *bio) { return false; } #endif +bool __blkcg_punt_bio_submit(struct bio *bio); + +static inline bool blkcg_punt_bio_submit(struct bio *bio) +{ + if (bio->bi_opf & REQ_CGROUP_PUNT) + return __blkcg_punt_bio_submit(bio); + else + return false; +} static inline void blkcg_bio_issue_init(struct bio *bio) { @@ -910,6 +864,7 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } static inline void blkg_get(struct blkcg_gq *blkg) { } static inline void blkg_put(struct blkcg_gq *blkg) { } +static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; } static inline void blkcg_bio_issue_init(struct bio *bio) { } static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { return true; } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 15d1aa53d96c..0bf056de5cc3 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -140,6 +140,7 @@ typedef int (poll_fn)(struct blk_mq_hw_ctx *); typedef int (map_queues_fn)(struct blk_mq_tag_set *set); typedef bool (busy_fn)(struct request_queue *); typedef void (complete_fn)(struct request *); +typedef void (cleanup_rq_fn)(struct request *); struct blk_mq_ops { @@ -201,6 +202,12 @@ struct blk_mq_ops { void (*initialize_rq_fn)(struct request *rq); /* + * Called before freeing one request which isn't completed yet, + * and usually for freeing the driver private data + */ + cleanup_rq_fn *cleanup_rq; + + /* * If set, returns whether or not this queue currently is busy */ busy_fn *busy; @@ -241,12 +248,12 @@ enum { struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, - struct request_queue *q); + struct request_queue *q, + bool elevator_init); struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set, const struct blk_mq_ops *ops, unsigned int queue_depth, unsigned int set_flags); -int blk_mq_register_dev(struct device *, struct request_queue *); void blk_mq_unregister_dev(struct device *, struct request_queue *); int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); @@ -296,6 +303,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) int blk_mq_request_started(struct request *rq); +int blk_mq_request_completed(struct request *rq); void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, blk_status_t error); void __blk_mq_end_request(struct request *rq, blk_status_t error); @@ -304,9 +312,8 @@ void blk_mq_requeue_request(struct 
request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); bool blk_mq_complete_request(struct request *rq); -void blk_mq_complete_request_sync(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, - struct bio *bio); + struct bio *bio, unsigned int nr_segs); bool blk_mq_queue_stopped(struct request_queue *q); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); @@ -321,6 +328,7 @@ bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, busy_tag_iter_fn *fn, void *priv); +void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset); void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_unfreeze_queue(struct request_queue *q); void blk_freeze_queue_start(struct request_queue *q); @@ -366,4 +374,10 @@ static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, BLK_QC_T_INTERNAL; } +static inline void blk_mq_cleanup_rq(struct request *rq) +{ + if (rq->q->mq_ops->cleanup_rq) + rq->q->mq_ops->cleanup_rq(rq); +} + #endif diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 95202f80676c..d688b96d1d63 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -154,11 +154,6 @@ struct bio { blk_status_t bi_status; u8 bi_partno; - /* Number of segments in this BIO after - * physical address coalescing is performed. - */ - unsigned int bi_phys_segments; - struct bvec_iter bi_iter; atomic_t __bi_remaining; @@ -174,6 +169,9 @@ struct bio { */ struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; +#ifdef CONFIG_BLK_CGROUP_IOCOST + u64 bi_iocost_cost; +#endif #endif union { #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -210,11 +208,11 @@ struct bio { */ enum { BIO_NO_PAGE_REF, /* don't put release vec pages */ - BIO_SEG_VALID, /* bi_phys_segments valid */ BIO_CLONED, /* doesn't own data */ BIO_BOUNCED, /* bio is a bounce bio */ BIO_USER_MAPPED, /* contains user pages */ BIO_NULL_MAPPED, /* contains invalid user pages */ + BIO_WORKINGSET, /* contains userspace workingset pages */ BIO_QUIET, /* Make BIO Quiet */ BIO_CHAIN, /* chained bio, ->bi_remaining in effect */ BIO_REFFED, /* bio has elevated ->bi_cnt */ @@ -288,6 +286,8 @@ enum req_opf { REQ_OP_ZONE_RESET = 6, /* write the same sector many times */ REQ_OP_WRITE_SAME = 7, + /* reset all the zone present on the device */ + REQ_OP_ZONE_RESET_ALL = 8, /* write the zero filled sector many times */ REQ_OP_WRITE_ZEROES = 9, @@ -317,6 +317,15 @@ enum req_flag_bits { __REQ_RAHEAD, /* read ahead, can fail anytime */ __REQ_BACKGROUND, /* background IO */ __REQ_NOWAIT, /* Don't wait if request will block */ + __REQ_NOWAIT_INLINE, /* Return would-block error inline */ + /* + * When a shared kthread needs to issue a bio for a cgroup, doing + * so synchronously can lead to priority inversions as the kthread + * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes + * submit_bio() punt the actual issuing to a dedicated per-blkcg + * work item to avoid such priority inversions. 
+ */ + __REQ_CGROUP_PUNT, /* command specific flags for REQ_OP_WRITE_ZEROES: */ __REQ_NOUNMAP, /* do not free blocks when zeroing */ @@ -343,6 +352,9 @@ enum req_flag_bits { #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) +#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE) +#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) + #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) #define REQ_HIPRI (1ULL << __REQ_HIPRI) @@ -414,12 +426,13 @@ static inline int op_stat_group(unsigned int op) typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U +#define BLK_QC_T_EAGAIN -2U #define BLK_QC_T_SHIFT 16 #define BLK_QC_T_INTERNAL (1U << 31) static inline bool blk_qc_t_valid(blk_qc_t cookie) { - return cookie != BLK_QC_T_NONE; + return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN; } static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 592669bcc536..f3ea78b0c91c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -137,11 +137,11 @@ struct request { unsigned int cmd_flags; /* op and common flags */ req_flags_t rq_flags; + int tag; int internal_tag; /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ - int tag; sector_t __sector; /* sector cursor */ struct bio *bio; @@ -194,7 +194,11 @@ struct request { struct gendisk *rq_disk; struct hd_struct *part; - /* Time that I/O was submitted to the kernel. */ +#ifdef CONFIG_BLK_RQ_ALLOC_TIME + /* Time that the first bio started allocating this request. */ + u64 alloc_time_ns; +#endif + /* Time that this request was allocated for this IO. */ u64 start_time_ns; /* Time that I/O was submitted to the device. */ u64 io_start_time_ns; @@ -202,9 +206,12 @@ struct request { #ifdef CONFIG_BLK_WBT unsigned short wbt_flags; #endif -#ifdef CONFIG_BLK_DEV_THROTTLING_LOW - unsigned short throtl_size; -#endif + /* + * rq sectors used for blk stats. It has the same value + * with blk_rq_sectors(rq), except that it never be zeroed + * by completion. + */ + unsigned short stats_sectors; /* * Number of scatter-gather DMA addr+len pairs after @@ -344,10 +351,15 @@ struct queue_limits { #ifdef CONFIG_BLK_DEV_ZONED +/* + * Maximum number of zones to report with a single report zones command. + */ +#define BLK_ZONED_REPORT_MAX_ZONES 8192U + extern unsigned int blkdev_nr_zones(struct block_device *bdev); extern int blkdev_report_zones(struct block_device *bdev, sector_t sector, struct blk_zone *zones, - unsigned int *nr_zones, gfp_t gfp_mask); + unsigned int *nr_zones); extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask); extern int blk_revalidate_disk_zones(struct gendisk *disk); @@ -386,10 +398,6 @@ static inline int blkdev_reset_zones_ioctl(struct block_device *bdev, #endif /* CONFIG_BLK_DEV_ZONED */ struct request_queue { - /* - * Together with queue_head for cacheline sharing - */ - struct list_head queue_head; struct request *last_merge; struct elevator_queue *elevator; @@ -491,6 +499,8 @@ struct request_queue { struct queue_limits limits; + unsigned int required_elevator_features; + #ifdef CONFIG_BLK_DEV_ZONED /* * Zoned block device information for request dispatch control. 
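The report_zones changes above drop the gfp_mask argument, leaving an in/out zone count plus a caller-supplied array; a hedged caller sketch (hypothetical function and zone count, not part of the patch):

#include <linux/blkdev.h>
#include <linux/slab.h>

/* Hypothetical: fetch up to 64 zone descriptors starting at 'sector'. */
static int example_report_zones(struct block_device *bdev, sector_t sector)
{
	unsigned int nr_zones = 64;
	struct blk_zone *zones;
	int ret;

	zones = kcalloc(nr_zones, sizeof(*zones), GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, sector, zones, &nr_zones);
	/* on success, nr_zones now holds the number of zones reported */

	kfree(zones);
	return ret;
}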
@@ -534,6 +544,7 @@ struct request_queue { struct delayed_work requeue_work; struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; /* * for reusing dead hctx instance in case of updating @@ -606,6 +617,8 @@ struct request_queue { #define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */ #define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ #define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */ +#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */ +#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_SAME_COMP)) @@ -625,6 +638,8 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) +#define blk_queue_zone_resetall(q) \ + test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags) #define blk_queue_secure_erase(q) \ (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags)) #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) @@ -632,6 +647,12 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags) #define blk_queue_pci_p2pdma(q) \ test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags) +#ifdef CONFIG_BLK_RQ_ALLOC_TIME +#define blk_queue_rq_alloc_time(q) \ + test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags) +#else +#define blk_queue_rq_alloc_time(q) false +#endif #define blk_noretry_request(rq) \ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ @@ -639,6 +660,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only) #define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) +#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags) extern void blk_set_pm_only(struct request_queue *q); extern void blk_clear_pm_only(struct request_queue *q); @@ -681,7 +703,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q) } } -static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) +static inline sector_t blk_queue_zone_sectors(struct request_queue *q) { return blk_queue_is_zoned(q) ? 
q->limits.chunk_sectors : 0; } @@ -828,7 +850,6 @@ extern void blk_unregister_queue(struct gendisk *disk); extern blk_qc_t generic_make_request(struct bio *bio); extern blk_qc_t direct_make_request(struct bio *bio); extern void blk_rq_init(struct request_queue *q, struct request *rq); -extern void blk_init_request_from_bio(struct request *req, struct bio *bio); extern void blk_put_request(struct request *); extern struct request *blk_get_request(struct request_queue *, unsigned int op, blk_mq_req_flags_t flags); @@ -842,7 +863,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq); extern int blk_rq_append_bio(struct request *rq, struct bio **bio); extern void blk_queue_split(struct request_queue *, struct bio **); -extern void blk_recount_segments(struct request_queue *, struct bio *); extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, unsigned int, void __user *); @@ -867,6 +887,9 @@ extern void blk_execute_rq(struct request_queue *, struct gendisk *, extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, struct request *, int, rq_end_io_fn *); +/* Helper to convert REQ_OP_XXX to its string format XXX */ +extern const char *blk_op_str(unsigned int op); + int blk_status_to_errno(blk_status_t status); blk_status_t errno_to_blk_status(int errno); @@ -897,6 +920,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) * blk_rq_err_bytes() : bytes left till the next error boundary * blk_rq_sectors() : sectors left in the entire request * blk_rq_cur_sectors() : sectors left in the current segment + * blk_rq_stats_sectors() : sectors of the entire request used for stats */ static inline sector_t blk_rq_pos(const struct request *rq) { @@ -925,6 +949,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; } +static inline unsigned int blk_rq_stats_sectors(const struct request *rq) +{ + return rq->stats_sectors; +} + #ifdef CONFIG_BLK_DEV_ZONED static inline unsigned int blk_rq_zone_no(struct request *rq) { @@ -1026,21 +1055,9 @@ void blk_steal_bios(struct bio_list *list, struct request *rq); * * blk_update_request() completes given number of bytes and updates * the request without completing it. - * - * blk_end_request() and friends. __blk_end_request() must be called - * with the request queue spinlock acquired. - * - * Several drivers define their own end_request and call - * blk_end_request() for parts of the original function. - * This prevents code duplication in drivers. 
*/ extern bool blk_update_request(struct request *rq, blk_status_t error, unsigned int nr_bytes); -extern void blk_end_request_all(struct request *rq, blk_status_t error); -extern bool __blk_end_request(struct request *rq, blk_status_t error, - unsigned int nr_bytes); -extern void __blk_end_request_all(struct request *rq, blk_status_t error); -extern bool __blk_end_request_cur(struct request *rq, blk_status_t error); extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); @@ -1091,6 +1108,10 @@ extern void blk_queue_dma_alignment(struct request_queue *, int); extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); +extern void blk_queue_required_elevator_features(struct request_queue *q, + unsigned int features); +extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, + struct device *dev); /* * Number of physical segments as sent to the device. @@ -1238,42 +1259,42 @@ enum blk_default_limits { BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, }; -static inline unsigned long queue_segment_boundary(struct request_queue *q) +static inline unsigned long queue_segment_boundary(const struct request_queue *q) { return q->limits.seg_boundary_mask; } -static inline unsigned long queue_virt_boundary(struct request_queue *q) +static inline unsigned long queue_virt_boundary(const struct request_queue *q) { return q->limits.virt_boundary_mask; } -static inline unsigned int queue_max_sectors(struct request_queue *q) +static inline unsigned int queue_max_sectors(const struct request_queue *q) { return q->limits.max_sectors; } -static inline unsigned int queue_max_hw_sectors(struct request_queue *q) +static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) { return q->limits.max_hw_sectors; } -static inline unsigned short queue_max_segments(struct request_queue *q) +static inline unsigned short queue_max_segments(const struct request_queue *q) { return q->limits.max_segments; } -static inline unsigned short queue_max_discard_segments(struct request_queue *q) +static inline unsigned short queue_max_discard_segments(const struct request_queue *q) { return q->limits.max_discard_segments; } -static inline unsigned int queue_max_segment_size(struct request_queue *q) +static inline unsigned int queue_max_segment_size(const struct request_queue *q) { return q->limits.max_segment_size; } -static inline unsigned short queue_logical_block_size(struct request_queue *q) +static inline unsigned short queue_logical_block_size(const struct request_queue *q) { int retval = 512; @@ -1288,7 +1309,7 @@ static inline unsigned short bdev_logical_block_size(struct block_device *bdev) return queue_logical_block_size(bdev_get_queue(bdev)); } -static inline unsigned int queue_physical_block_size(struct request_queue *q) +static inline unsigned int queue_physical_block_size(const struct request_queue *q) { return q->limits.physical_block_size; } @@ -1298,7 +1319,7 @@ static inline unsigned int bdev_physical_block_size(struct block_device *bdev) return queue_physical_block_size(bdev_get_queue(bdev)); } -static inline unsigned int queue_io_min(struct request_queue *q) +static inline unsigned int queue_io_min(const struct request_queue *q) { return q->limits.io_min; } @@ -1308,7 +1329,7 @@ static inline int bdev_io_min(struct block_device *bdev) return queue_io_min(bdev_get_queue(bdev)); } 
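Several queue-limit accessors above are now const-qualified; a small sketch (hypothetical helper, not from the patch) of the kind of read-only user this permits:

#include <linux/blkdev.h>

/* Hypothetical: derive a byte limit from a queue that is only read,
 * which now compiles cleanly against a const request_queue pointer. */
static unsigned int example_max_io_bytes(const struct request_queue *q)
{
	return queue_max_sectors(q) << SECTOR_SHIFT;
}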
-static inline unsigned int queue_io_opt(struct request_queue *q) +static inline unsigned int queue_io_opt(const struct request_queue *q) { return q->limits.io_opt; } @@ -1318,7 +1339,7 @@ static inline int bdev_io_opt(struct block_device *bdev) return queue_io_opt(bdev_get_queue(bdev)); } -static inline int queue_alignment_offset(struct request_queue *q) +static inline int queue_alignment_offset(const struct request_queue *q) { if (q->limits.misaligned) return -1; @@ -1348,7 +1369,7 @@ static inline int bdev_alignment_offset(struct block_device *bdev) return q->limits.alignment_offset; } -static inline int queue_discard_alignment(struct request_queue *q) +static inline int queue_discard_alignment(const struct request_queue *q) { if (q->limits.discard_misaligned) return -1; @@ -1429,7 +1450,7 @@ static inline bool bdev_is_zoned(struct block_device *bdev) return false; } -static inline unsigned int bdev_zone_sectors(struct block_device *bdev) +static inline sector_t bdev_zone_sectors(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); @@ -1438,7 +1459,7 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev) return 0; } -static inline int queue_dma_alignment(struct request_queue *q) +static inline int queue_dma_alignment(const struct request_queue *q) { return q ? q->dma_alignment : 511; } @@ -1503,10 +1524,14 @@ struct blk_integrity_iter { }; typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); +typedef void (integrity_prepare_fn) (struct request *); +typedef void (integrity_complete_fn) (struct request *, unsigned int); struct blk_integrity_profile { integrity_processing_fn *generate_fn; integrity_processing_fn *verify_fn; + integrity_prepare_fn *prepare_fn; + integrity_complete_fn *complete_fn; const char *name; }; @@ -1549,7 +1574,7 @@ static inline void blk_queue_max_integrity_segments(struct request_queue *q, } static inline unsigned short -queue_max_integrity_segments(struct request_queue *q) +queue_max_integrity_segments(const struct request_queue *q) { return q->limits.max_integrity_segments; } @@ -1632,7 +1657,7 @@ static inline void blk_queue_max_integrity_segments(struct request_queue *q, unsigned int segs) { } -static inline unsigned short queue_max_integrity_segments(struct request_queue *q) +static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) { return 0; } @@ -1684,8 +1709,7 @@ struct block_device_operations { /* this callback is with swap_lock and sometimes page table lock held */ void (*swap_slot_free_notify) (struct block_device *, unsigned long); int (*report_zones)(struct gendisk *, sector_t sector, - struct blk_zone *zones, unsigned int *nr_zones, - gfp_t gfp_mask); + struct blk_zone *zones, unsigned int *nr_zones); struct module *owner; const struct pr_ops *pr_ops; }; diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index a7f7a98ec39d..169fd25f6bc2 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -6,6 +6,7 @@ #include <linux/errno.h> #include <linux/jump_label.h> #include <linux/percpu.h> +#include <linux/percpu-refcount.h> #include <linux/rbtree.h> #include <uapi/linux/bpf.h> @@ -71,11 +72,17 @@ struct cgroup_bpf { u32 flags[MAX_BPF_ATTACH_TYPE]; /* temp storage for effective prog array used by prog_attach/detach */ - struct bpf_prog_array __rcu *inactive; + struct bpf_prog_array *inactive; + + /* reference counter used to detach bpf programs after cgroup removal */ + struct percpu_ref refcnt; + + /* cgroup_bpf is 
released using a work queue */ + struct work_struct release_work; }; -void cgroup_bpf_put(struct cgroup *cgrp); int cgroup_bpf_inherit(struct cgroup *cgrp); +void cgroup_bpf_offline(struct cgroup *cgrp); int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type, u32 flags); @@ -117,6 +124,14 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, loff_t *ppos, void **new_buf, enum bpf_attach_type type); +int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, + int *optname, char __user *optval, + int *optlen, char **kernel_optval); +int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + int optname, char __user *optval, + int __user *optlen, int max_optlen, + int retval); + static inline enum bpf_cgroup_storage_type cgroup_storage_type( struct bpf_map *map) { @@ -279,6 +294,38 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, __ret; \ }) +#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ + kernel_optval) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ + optname, optval, \ + optlen, \ + kernel_optval); \ + __ret; \ +}) + +#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ +({ \ + int __ret = 0; \ + if (cgroup_bpf_enabled) \ + get_user(__ret, optlen); \ + __ret; \ +}) + +#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \ + max_optlen, retval) \ +({ \ + int __ret = retval; \ + if (cgroup_bpf_enabled) \ + __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \ + optname, optval, \ + optlen, max_optlen, \ + retval); \ + __ret; \ +}) + int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, struct bpf_prog *prog); int cgroup_bpf_prog_detach(const union bpf_attr *attr, @@ -289,8 +336,8 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr, struct bpf_prog; struct cgroup_bpf {}; -static inline void cgroup_bpf_put(struct cgroup *cgrp) {} static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } +static inline void cgroup_bpf_offline(struct cgroup *cgrp) {} static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, @@ -350,6 +397,11 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) +#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ + optlen, max_optlen, retval) ({ retval; }) +#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ + kernel_optval) ({ 0; }) #define for_each_cgroup_storage_type(stype) for (; false; ) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index b92ef9f73e42..5b9d22338606 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -24,6 +24,9 @@ struct seq_file; struct btf; struct btf_type; +extern struct idr btf_idr; +extern spinlock_t btf_idr_lock; + /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ @@ -63,6 +66,11 @@ struct bpf_map_ops { u64 imm, u32 *off); }; +struct bpf_map_memory { + u32 pages; + struct user_struct *user; +}; + struct bpf_map { /* The first two cachelines with read-mostly members of which some * are also accessed in 
fast-path (e.g. ops, max_entries). @@ -83,7 +91,7 @@ struct bpf_map { u32 btf_key_type_id; u32 btf_value_type_id; struct btf *btf; - u32 pages; + struct bpf_map_memory memory; bool unpriv_array; bool frozen; /* write-once */ /* 48 bytes hole */ @@ -91,8 +99,7 @@ struct bpf_map { /* The 3rd and 4th cacheline with misc members to avoid false sharing * particularly with refcounting. */ - struct user_struct *user ____cacheline_aligned; - atomic_t refcnt; + atomic_t refcnt ____cacheline_aligned; atomic_t usercnt; struct work_struct work; char name[BPF_OBJ_NAME_LEN]; @@ -273,6 +280,7 @@ enum bpf_reg_type { PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ + PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ }; /* The information passed from prog-specific *_is_valid_access @@ -367,6 +375,7 @@ struct bpf_prog_aux { u32 id; u32 func_cnt; /* used by non-func prog as the number of func progs */ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ + bool verifier_zext; /* Zero extensions has been inserted by verifier. */ bool offload_requested; struct bpf_prog **func; void *jit_data; /* JIT specific data. arch dependent */ @@ -510,17 +519,18 @@ struct bpf_prog_array { }; struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); -void bpf_prog_array_free(struct bpf_prog_array __rcu *progs); -int bpf_prog_array_length(struct bpf_prog_array __rcu *progs); -int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, +void bpf_prog_array_free(struct bpf_prog_array *progs); +int bpf_prog_array_length(struct bpf_prog_array *progs); +bool bpf_prog_array_is_empty(struct bpf_prog_array *array); +int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, __u32 __user *prog_ids, u32 cnt); -void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, +void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, struct bpf_prog *old_prog); -int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, +int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt); -int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, +int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, struct bpf_prog_array **new_array); @@ -548,6 +558,56 @@ _out: \ _ret; \ }) +/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs + * so BPF programs can request cwr for TCP packets. + * + * Current cgroup skb programs can only return 0 or 1 (0 to drop the + * packet. This macro changes the behavior so the low order bit + * indicates whether the packet should be dropped (0) or not (1) + * and the next bit is a congestion notification bit. 
This could be + * used by TCP to call tcp_enter_cwr() + * + * Hence, new allowed return values of CGROUP EGRESS BPF programs are: + * 0: drop packet + * 1: keep packet + * 2: drop packet and cn + * 3: keep packet and cn + * + * This macro then converts it to one of the NET_XMIT or an error + * code that is then interpreted as drop packet (and no cn): + * 0: NET_XMIT_SUCCESS skb should be transmitted + * 1: NET_XMIT_DROP skb should be dropped and cn + * 2: NET_XMIT_CN skb should be transmitted and cn + * 3: -EPERM skb should be dropped + */ +#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ + ({ \ + struct bpf_prog_array_item *_item; \ + struct bpf_prog *_prog; \ + struct bpf_prog_array *_array; \ + u32 ret; \ + u32 _ret = 1; \ + u32 _cn = 0; \ + preempt_disable(); \ + rcu_read_lock(); \ + _array = rcu_dereference(array); \ + _item = &_array->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + bpf_cgroup_storage_set(_item->cgroup_storage); \ + ret = func(_prog, ctx); \ + _ret &= (ret & 1); \ + _cn |= (ret & 2); \ + _item++; \ + } \ + rcu_read_unlock(); \ + preempt_enable(); \ + if (_ret) \ + _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ + else \ + _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ + _ret; \ + }) + #define BPF_PROG_RUN_ARRAY(array, ctx, func) \ __BPF_PROG_RUN_ARRAY(array, ctx, func, false) @@ -590,11 +650,16 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); +struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map, + bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); -int bpf_map_precharge_memlock(u32 pages); int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); +int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size); +void bpf_map_charge_finish(struct bpf_map_memory *mem); +void bpf_map_charge_move(struct bpf_map_memory *dst, + struct bpf_map_memory *src); void *bpf_map_area_alloc(size_t size, int numa_node); void bpf_map_area_free(void *base); void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); @@ -653,7 +718,7 @@ struct xdp_buff; struct sk_buff; struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); -void __dev_map_insert_ctx(struct bpf_map *map, u32 index); +struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); void __dev_map_flush(struct bpf_map *map); int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx); @@ -661,7 +726,6 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, struct bpf_prog *xdp_prog); struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); -void __cpu_map_insert_ctx(struct bpf_map *map, u32 index); void __cpu_map_flush(struct bpf_map *map); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); @@ -741,8 +805,10 @@ static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, return NULL; } -static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index) +static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, + u32 key) { + return NULL; } static inline void __dev_map_flush(struct bpf_map *map) @@ -774,10 +840,6 @@ struct bpf_cpu_map_entry 
*__cpu_map_lookup_elem(struct bpf_map *map, u32 key) return NULL; } -static inline void __cpu_map_insert_ctx(struct bpf_map *map, u32 index) -{ -} - static inline void __cpu_map_flush(struct bpf_map *map) { } @@ -992,6 +1054,7 @@ extern const struct bpf_func_proto bpf_spin_unlock_proto; extern const struct bpf_func_proto bpf_get_local_storage_proto; extern const struct bpf_func_proto bpf_strtol_proto; extern const struct bpf_func_proto bpf_strtoul_proto; +extern const struct bpf_func_proto bpf_tcp_sock_proto; /* Shared helpers among cBPF and eBPF. */ void bpf_user_rnd_init_once(void); @@ -1040,6 +1103,15 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size); + +bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info); + +u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size); #else static inline bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, @@ -1056,6 +1128,21 @@ static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, { return 0; } +static inline bool bpf_xdp_sock_is_valid_access(int off, int size, + enum bpf_access_type type, + struct bpf_insn_access_aux *info) +{ + return false; +} + +static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size) +{ + return 0; +} #endif /* CONFIG_INET */ #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 5a9975678d6f..36a9c2325176 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -30,6 +30,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable) #ifdef CONFIG_CGROUP_BPF BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt) #endif #ifdef CONFIG_BPF_LIRC_MODE2 BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2) @@ -61,6 +62,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops) #if defined(CONFIG_BPF_STREAM_PARSER) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 519aafabc40c..26a6d58ca78c 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -33,9 +33,11 @@ */ enum bpf_reg_liveness { REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ - REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */ - REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */ - REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */ + REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */ + REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */ + REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64, + REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */ + REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ }; struct bpf_reg_state { @@ -128,7 +130,14 @@ struct 
bpf_reg_state { * pointing to bpf_func_state. */ u32 frameno; + /* Tracks subreg definition. The stored value is the insn_idx of the + * writing insn. This is safe because subreg_def is used before any insn + * patching which only happens after main verification finished. + */ + s32 subreg_def; enum bpf_reg_liveness live; + /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */ + bool precise; }; enum bpf_stack_slot_type { @@ -180,13 +189,77 @@ struct bpf_func_state { struct bpf_stack_state *stack; }; +struct bpf_idx_pair { + u32 prev_idx; + u32 idx; +}; + #define MAX_CALL_FRAMES 8 struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; + struct bpf_verifier_state *parent; + /* + * 'branches' field is the number of branches left to explore: + * 0 - all possible paths from this state reached bpf_exit or + * were safely pruned + * 1 - at least one path is being explored. + * This state hasn't reached bpf_exit + * 2 - at least two paths are being explored. + * This state is an immediate parent of two children. + * One is fallthrough branch with branches==1 and another + * state is pushed into stack (to be explored later) also with + * branches==1. The parent of this state has branches==1. + * The verifier state tree connected via 'parent' pointer looks like: + * 1 + * 1 + * 2 -> 1 (first 'if' pushed into stack) + * 1 + * 2 -> 1 (second 'if' pushed into stack) + * 1 + * 1 + * 1 bpf_exit. + * + * Once do_check() reaches bpf_exit, it calls update_branch_counts() + * and the verifier state tree will look: + * 1 + * 1 + * 2 -> 1 (first 'if' pushed into stack) + * 1 + * 1 -> 1 (second 'if' pushed into stack) + * 0 + * 0 + * 0 bpf_exit. + * After pop_stack() the do_check() will resume at second 'if'. + * + * If is_state_visited() sees a state with branches > 0 it means + * there is a loop. If such state is exactly equal to the current state + * it's an infinite loop. Note states_equal() checks for states + * equvalency, so two states being 'states_equal' does not mean + * infinite loop. The exact comparison is provided by + * states_maybe_looping() function. It's a stronger pre-check and + * much faster than states_equal(). + * + * This algorithm may not find all possible infinite loops or + * loop iteration count may be too high. + * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in. + */ + u32 branches; + u32 insn_idx; u32 curframe; u32 active_spin_lock; bool speculative; + + /* first and last insn idx of this verifier state */ + u32 first_insn_idx; + u32 last_insn_idx; + /* jmp history recorded from first to last. + * backtracking is using it to go from last to first. + * For most states jmp_history_cnt is [0-3]. + * For loops can go up to ~40. 
+ */ + struct bpf_idx_pair *jmp_history; + u32 jmp_history_cnt; }; #define bpf_get_spilled_reg(slot, frame) \ @@ -229,7 +302,9 @@ struct bpf_insn_aux_data { int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ int sanitize_stack_off; /* stack slot to be cleared */ bool seen; /* this insn was processed by the verifier */ + bool zext_dst; /* this insn zero extends dst reg */ u8 alu_state; /* used in combination with alu_limit */ + bool prune_point; unsigned int orig_idx; /* original instruction index */ }; @@ -280,6 +355,7 @@ struct bpf_verifier_env { struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ int stack_size; /* number of states to be processed */ bool strict_alignment; /* perform strict pointer alignment checks */ + bool test_state_freq; /* test verifier with different pruning frequency */ struct bpf_verifier_state *cur_state; /* current verifier state */ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ struct bpf_verifier_state_list *free_list; @@ -299,7 +375,9 @@ struct bpf_verifier_env { } cfg; u32 subprog_cnt; /* number of instructions analyzed by the verifier */ - u32 insn_processed; + u32 prev_insn_processed, insn_processed; + /* number of jmps, calls, exits analyzed so far */ + u32 prev_jmps_processed, jmps_processed; /* total verification time */ u64 verification_time; /* maximum number of verifier states kept in 'branching' instructions */ diff --git a/include/linux/bug.h b/include/linux/bug.h index fe5916550da8..f639bd0122f3 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -47,6 +47,11 @@ void generic_bug_clear_once(void); #else /* !CONFIG_GENERIC_BUG */ +static inline void *find_bug(unsigned long bugaddr) +{ + return NULL; +} + static inline enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs) { diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 70e19bc6cc9f..46b92cd61d0c 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -17,6 +17,8 @@ enum cache_type { CACHE_TYPE_UNIFIED = BIT(2), }; +extern unsigned int coherency_max_size; + /** * struct cacheinfo - represent a cache leaf node * @id: This cache's id. It is unique among caches with the same (type, level). diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h new file mode 100644 index 000000000000..2f5d731ae251 --- /dev/null +++ b/include/linux/can/can-ml.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* Copyright (c) 2002-2007 Volkswagen Group Electronic Research + * Copyright (c) 2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Volkswagen nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
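Going back to the bpf_verifier.h hunk above: REG_LIVE_READ is now the union of the two read widths, so liveness checks reduce to bit tests. The standalone snippet below mirrors those enum values to show the idea; the enum and helper names are local to this example, not kernel identifiers.

/* Standalone illustration of the bit-flag liveness encoding added above. */
#include <stdio.h>

enum demo_liveness {
	DEMO_LIVE_NONE    = 0,
	DEMO_LIVE_READ32  = 0x1,	/* only the low 32 bits were read */
	DEMO_LIVE_READ64  = 0x2,	/* the full 64-bit value was read */
	DEMO_LIVE_READ    = DEMO_LIVE_READ32 | DEMO_LIVE_READ64,
	DEMO_LIVE_WRITTEN = 0x4,
	DEMO_LIVE_DONE    = 0x8,
};

int main(void)
{
	unsigned int live = DEMO_LIVE_NONE;

	live |= DEMO_LIVE_READ32;	/* a 32-bit read is observed */

	/* "Was it read at all?" is now a mask test covering both widths. */
	printf("read? %d, needs full 64 bits? %d\n",
	       (live & DEMO_LIVE_READ) != 0, (live & DEMO_LIVE_READ64) != 0);
	return 0;
}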
+ * + * Alternatively, provided that this notice is retained in full, this + * software may be distributed under the terms of the GNU General + * Public License ("GPL") version 2, in which case the provisions of the + * GPL apply INSTEAD OF those given above. + * + * The provided data structures and external interfaces from this code + * are not restricted to be used by modules with a GPL compatible license. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + +#ifndef CAN_ML_H +#define CAN_ML_H + +#include <linux/can.h> +#include <linux/list.h> + +#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) +#define CAN_EFF_RCV_HASH_BITS 10 +#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS) + +enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX }; + +struct can_dev_rcv_lists { + struct hlist_head rx[RX_MAX]; + struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ]; + struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ]; + int entries; +}; + +struct can_ml_priv { + struct can_dev_rcv_lists dev_rcv_lists; +#ifdef CAN_J1939 + struct j1939_priv *j1939_priv; +#endif +}; + +#endif /* CAN_ML_H */ diff --git a/include/linux/can/core.h b/include/linux/can/core.h index 6099bc18bd0c..8339071ab08b 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * linux/can/core.h * @@ -41,6 +41,14 @@ struct can_proto { struct proto *prot; }; +/* required_size + * macro to find the minimum size of a struct + * that includes a requested member + */ +#define CAN_REQUIRED_SIZE(struct_type, member) \ + (offsetof(typeof(struct_type), member) + \ + sizeof(((typeof(struct_type) *)(NULL))->member)) + /* function prototypes for the CAN networklayer core (af_can.c) */ extern int can_proto_register(const struct can_proto *cp); @@ -57,6 +65,5 @@ extern void can_rx_unregister(struct net *net, struct net_device *dev, void *data); extern int can_send(struct sk_buff *skb, int loop); -extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); #endif /* !_CAN_CORE_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index f01623aef2f7..9b3c720a31b1 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -169,7 +169,8 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int idx); -struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, + u8 *len_ptr); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); void can_free_echo_skb(struct net_device *dev, unsigned int idx); diff --git 
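As an aside on the CAN_REQUIRED_SIZE() macro added to linux/can/core.h above: it is the usual offsetof-plus-sizeof construction for the smallest buffer that still contains a given member. A standalone sketch of the same idea (GNU C, because of typeof; the struct here is invented for the example):

/* Standalone sketch of the offsetof+sizeof pattern behind CAN_REQUIRED_SIZE.
 * struct demo_frame is made up for illustration; requires GNU C (typeof).
 */
#include <stddef.h>
#include <stdio.h>

#define DEMO_REQUIRED_SIZE(struct_type, member) \
	(offsetof(typeof(struct_type), member) + \
	 sizeof(((typeof(struct_type) *)(NULL))->member))

struct demo_frame {
	unsigned int id;
	unsigned char len;
	unsigned char data[8];
};

int main(void)
{
	printf("bytes needed up to .len:  %zu\n",
	       DEMO_REQUIRED_SIZE(struct demo_frame, len));
	printf("bytes needed up to .data: %zu\n",
	       DEMO_REQUIRED_SIZE(struct demo_frame, data));
	return 0;
}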
a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h deleted file mode 100644 index a43dcd0cf79e..000000000000 --- a/include/linux/can/platform/rcar_can.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _CAN_PLATFORM_RCAR_CAN_H_ -#define _CAN_PLATFORM_RCAR_CAN_H_ - -#include <linux/types.h> - -/* Clock Select Register settings */ -enum CLKR { - CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */ - CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */ - CLKR_CLKEXT = 3 /* Externally input clock */ -}; - -struct rcar_can_platform_data { - enum CLKR clock_select; /* Clock source select */ -}; - -#endif /* !_CAN_PLATFORM_RCAR_CAN_H_ */ diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index 9daa1119ea42..01219f2902bf 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -15,7 +15,8 @@ struct can_rx_offload { struct net_device *dev; - unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf, + unsigned int (*mailbox_read)(struct can_rx_offload *offload, + struct can_frame *cf, u32 *timestamp, unsigned int mb); struct sk_buff_head skb_queue; @@ -29,9 +30,13 @@ struct can_rx_offload { bool inc; }; -int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload); -int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); -int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); +int can_rx_offload_add_timestamp(struct net_device *dev, + struct can_rx_offload *offload); +int can_rx_offload_add_fifo(struct net_device *dev, + struct can_rx_offload *offload, + unsigned int weight); +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, + u64 reg); int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); int can_rx_offload_queue_sorted(struct can_rx_offload *offload, struct sk_buff *skb, u32 timestamp); diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index b3379a97245c..a954def26c0d 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * linux/can/skb.h * diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 55cb455cfcb0..a5dfbaf2470d 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -170,6 +170,8 @@ struct ccp_aes_engine { enum ccp_aes_mode mode; enum ccp_aes_action action; + u32 authsize; + struct scatterlist *key; u32 key_len; /* In bytes */ diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h index 5e58bb29b1a3..11cdc7c60480 100644 --- a/include/linux/ceph/buffer.h +++ b/include/linux/ceph/buffer.h @@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) static inline void ceph_buffer_put(struct ceph_buffer *b) { - kref_put(&b->kref, ceph_buffer_release); + if (b) + kref_put(&b->kref, ceph_buffer_release); } extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 65a38c4a02a1..39e6f4c57580 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -211,6 +211,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin CEPH_FEATURE_MON_STATEFUL_SUB | \ CEPH_FEATURE_CRUSH_TUNABLES5 | \ CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \ + CEPH_FEATURE_MSG_ADDR2 | \ 
CEPH_FEATURE_CEPHX_V2) #define CEPH_FEATURES_REQUIRED_DEFAULT 0 diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 3ac0feaf2b5e..cb21c5cf12c3 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -682,7 +682,7 @@ extern const char *ceph_cap_op_name(int op); /* flags field in client cap messages (version >= 10) */ #define CEPH_CLIENT_CAPS_SYNC (1<<0) #define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1) -#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2); +#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2) /* * caps message, used for capability callbacks, acks, requests, etc. diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h index bea6c77d2093..17bc7584d1fe 100644 --- a/include/linux/ceph/cls_lock_client.h +++ b/include/linux/ceph/cls_lock_client.h @@ -52,4 +52,7 @@ int ceph_cls_lock_info(struct ceph_osd_client *osdc, char *lock_name, u8 *type, char **tag, struct ceph_locker **lockers, u32 *num_lockers); +int ceph_cls_assert_locked(struct ceph_osd_request *req, int which, + char *lock_name, u8 type, char *cookie, char *tag); + #endif diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h index fa5f9b7f5dbb..cf5e840eec71 100644 --- a/include/linux/ceph/debugfs.h +++ b/include/linux/ceph/debugfs.h @@ -19,9 +19,9 @@ static const struct file_operations name##_fops = { \ }; /* debugfs.c */ -extern int ceph_debugfs_init(void); +extern void ceph_debugfs_init(void); extern void ceph_debugfs_cleanup(void); -extern int ceph_debugfs_client_init(struct ceph_client *client); +extern void ceph_debugfs_client_init(struct ceph_client *client); extern void ceph_debugfs_client_cleanup(struct ceph_client *client); #endif diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index a6c2a48d42e0..450384fe487c 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -218,18 +218,27 @@ static inline void ceph_encode_timespec64(struct ceph_timespec *tv, /* * sockaddr_storage <-> ceph_sockaddr */ -static inline void ceph_encode_addr(struct ceph_entity_addr *a) +#define CEPH_ENTITY_ADDR_TYPE_NONE 0 +#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1) + +static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a) { __be16 ss_family = htons(a->in_addr.ss_family); a->in_addr.ss_family = *(__u16 *)&ss_family; + + /* Banner addresses require TYPE_NONE */ + a->type = CEPH_ENTITY_ADDR_TYPE_NONE; } -static inline void ceph_decode_addr(struct ceph_entity_addr *a) +static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a) { __be16 ss_family = *(__be16 *)&a->in_addr.ss_family; a->in_addr.ss_family = ntohs(ss_family); WARN_ON(a->in_addr.ss_family == 512); + a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY; } +extern int ceph_decode_entity_addr(void **p, void *end, + struct ceph_entity_addr *addr); /* * encoders */ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 337d5049ff93..b9dbda1c26aa 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -84,11 +84,13 @@ struct ceph_options { #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) /* - * Handle the largest possible rbd object in one message. + * The largest possible rbd data object is 32M. + * The largest possible rbd object map object is 64M. + * * There is no limit on the size of cephfs objects, but it has to obey * rsize and wsize mount options anyway. 
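A note on the banner address helpers added to ceph/decode.h above: the only transformation is that the address family travels big-endian on the banner and the address type is pinned (TYPE_NONE on encode, TYPE_LEGACY on decode). The family swap itself is plain htons()/ntohs(), as in this standalone userspace sketch where sockaddr_storage stands in for the kernel's embedded in_addr field:

/* Standalone illustration of the family byte-swap used for banner addresses. */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_storage ss;
	unsigned short wire;

	memset(&ss, 0, sizeof(ss));
	ss.ss_family = AF_INET;			/* host order, e.g. 2 */

	wire = htons(ss.ss_family);		/* encode: big-endian on the wire */
	printf("host family 0x%04x -> wire 0x%04x\n",
	       (unsigned int)ss.ss_family, (unsigned int)wire);

	printf("wire 0x%04x -> host family 0x%04x\n",
	       (unsigned int)wire, (unsigned int)ntohs(wire));
	return 0;
}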
*/ -#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024) +#define CEPH_MSG_MAX_DATA_LEN (64*1024*1024) #define CEPH_AUTH_NAME_DEFAULT "guest" @@ -291,6 +293,7 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private); struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client); u64 ceph_client_gid(struct ceph_client *client); extern void ceph_destroy_client(struct ceph_client *client); +extern void ceph_reset_client_addr(struct ceph_client *client); extern int __ceph_open_session(struct ceph_client *client, unsigned long started); extern int ceph_open_session(struct ceph_client *client); @@ -299,10 +302,6 @@ int ceph_wait_for_latest_osdmap(struct ceph_client *client, /* pagevec.c */ extern void ceph_release_page_vector(struct page **pages, int num_pages); - -extern struct page **ceph_get_direct_page_vector(const void __user *data, - int num_pages, - bool write_page); extern void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty); extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 23895d178149..c4458dc6a757 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -337,6 +337,7 @@ extern void ceph_msgr_flush(void); extern void ceph_messenger_init(struct ceph_messenger *msgr, struct ceph_entity_addr *myaddr); extern void ceph_messenger_fini(struct ceph_messenger *msgr); +extern void ceph_messenger_reset_nonce(struct ceph_messenger *msgr); extern void ceph_con_init(struct ceph_connection *con, void *private, const struct ceph_connection_operations *ops, diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index 3a4688af7455..dbb8a6959a73 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -104,12 +104,12 @@ struct ceph_mon_client { #endif }; -extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end); extern int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr); extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); extern void ceph_monc_stop(struct ceph_mon_client *monc); +extern void ceph_monc_reopen_session(struct ceph_mon_client *monc); enum { CEPH_SUB_MONMAP = 0, diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 2294f963dab7..eaffbdddf89a 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -198,9 +198,9 @@ struct ceph_osd_request { bool r_mempool; struct completion r_completion; /* private to osd_client.c */ ceph_osdc_callback_t r_callback; - struct list_head r_unsafe_item; struct inode *r_inode; /* for use by callbacks */ + struct list_head r_private_item; /* ditto */ void *r_priv; /* ditto */ /* set by submitter */ @@ -381,6 +381,7 @@ extern void ceph_osdc_cleanup(void); extern int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client); extern void ceph_osdc_stop(struct ceph_osd_client *osdc); +extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc); extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg); @@ -388,6 +389,15 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg); void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb); void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err); +void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc); + +#define osd_req_op_data(oreq, whch, typ, fld) \ +({ \ + 
struct ceph_osd_request *__oreq = (oreq); \ + unsigned int __whch = (whch); \ + BUG_ON(__whch >= __oreq->r_num_ops); \ + &__oreq->r_ops[__whch].typ.fld; \ +}) extern void osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u32 flags); @@ -497,7 +507,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc, const char *class, const char *method, unsigned int flags, struct page *req_page, size_t req_len, - struct page *resp_page, size_t *resp_len); + struct page **resp_pages, size_t *resp_len); extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, struct ceph_vino vino, diff --git a/include/linux/ceph/striper.h b/include/linux/ceph/striper.h index cbd0d24b7148..3486636c0e6e 100644 --- a/include/linux/ceph/striper.h +++ b/include/linux/ceph/striper.h @@ -66,4 +66,6 @@ int ceph_extent_to_file(struct ceph_file_layout *l, struct ceph_file_extent **file_extents, u32 *num_file_extents); +u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size); + #endif diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index b4e766e93f6e..430e219e3aba 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -624,7 +624,7 @@ struct cftype { /* * Control Group subsystem type. - * See Documentation/cgroup-v1/cgroups.txt for details + * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details */ struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 0297f930a56e..3ba3e6da13a6 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -131,6 +131,8 @@ void cgroup_free(struct task_struct *p); int cgroup_init_early(void); int cgroup_init(void); +int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v); + /* * Iteration helpers and macros. */ @@ -148,6 +150,7 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset, struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, struct cgroup_subsys_state **dst_cssp); +void cgroup_enable_task_cg_lists(void); void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, struct css_task_iter *it); struct task_struct *css_task_iter_next(struct css_task_iter *it); @@ -697,6 +700,7 @@ void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, struct cgroup_subsys_state; struct cgroup; +static inline void css_get(struct cgroup_subsys_state *css) {} static inline void css_put(struct cgroup_subsys_state *css) {} static inline int cgroup_attach_task_all(struct task_struct *from, struct task_struct *t) { return 0; } @@ -934,4 +938,22 @@ static inline bool cgroup_task_frozen(struct task_struct *task) #endif /* !CONFIG_CGROUPS */ +#ifdef CONFIG_CGROUP_BPF +static inline void cgroup_bpf_get(struct cgroup *cgrp) +{ + percpu_ref_get(&cgrp->bpf.refcnt); +} + +static inline void cgroup_bpf_put(struct cgroup *cgrp) +{ + percpu_ref_put(&cgrp->bpf.refcnt); +} + +#else /* CONFIG_CGROUP_BPF */ + +static inline void cgroup_bpf_get(struct cgroup *cgrp) {} +static inline void cgroup_bpf_put(struct cgroup *cgrp) {} + +#endif /* CONFIG_CGROUP_BPF */ + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index bb6118f79784..2fdfe8061363 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -9,8 +9,6 @@ #include <linux/of.h> #include <linux/of_clk.h> -#ifdef CONFIG_COMMON_CLK - /* * flags used across common struct clk. 
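Stepping back to the osd_req_op_data() helper introduced in ceph/osd_client.h above: it is a statement-expression accessor that bounds-checks the op index before returning a pointer to the requested field. The userspace analog below shows the same pattern with invented structures and assert() standing in for BUG_ON():

/* Userspace analog of the bounds-checked accessor pattern in osd_req_op_data().
 * The request/op structures are invented for illustration; GNU C statement
 * expressions are required.
 */
#include <assert.h>
#include <stdio.h>

struct demo_op {
	struct {
		unsigned long length;
	} extent;
};

struct demo_req {
	unsigned int r_num_ops;
	struct demo_op r_ops[4];
};

#define demo_op_data(oreq, whch, typ, fld)				\
({									\
	struct demo_req *__oreq = (oreq);				\
	unsigned int __whch = (whch);					\
	assert(__whch < __oreq->r_num_ops);  /* BUG_ON() stand-in */	\
	&__oreq->r_ops[__whch].typ.fld;					\
})

int main(void)
{
	struct demo_req req = { .r_num_ops = 2 };

	*demo_op_data(&req, 1, extent, length) = 4096;
	printf("op 1 extent length: %lu\n", req.r_ops[1].extent.length);
	return 0;
}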
these flags should only affect the * top-level framework. custom flags for dealing with hardware specifics @@ -301,7 +299,8 @@ struct clk_init_data { * into the clk API * * @init: pointer to struct clk_init_data that contains the init data shared - * with the common clock framework. + * with the common clock framework. This pointer will be set to NULL once + * a clk_register() variant is called on this clk_hw pointer. */ struct clk_hw { struct clk_core *core; @@ -807,11 +806,19 @@ void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw); /* helper functions */ const char *__clk_get_name(const struct clk *clk); const char *clk_hw_get_name(const struct clk_hw *hw); +#ifdef CONFIG_COMMON_CLK struct clk_hw *__clk_get_hw(struct clk *clk); +#else +static inline struct clk_hw *__clk_get_hw(struct clk *clk) +{ + return (struct clk_hw *)clk; +} +#endif unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw); struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index); +int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent); unsigned int __clk_get_enable_count(struct clk *clk); unsigned long clk_hw_get_rate(const struct clk_hw *hw); unsigned long __clk_get_flags(struct clk *clk); @@ -867,8 +874,6 @@ static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate, */ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate); -struct of_device_id; - struct clk_onecell_data { struct clk **clks; unsigned int clk_num; @@ -879,8 +884,6 @@ struct clk_hw_onecell_data { struct clk_hw *hws[]; }; -extern struct of_device_id __clk_of_table; - #define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn) /* @@ -904,6 +907,40 @@ extern struct of_device_id __clk_of_table; .ops = _ops, \ }) +#define CLK_HW_INIT_HW(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_hws = (const struct clk_hw*[]) { _parent }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +/* + * This macro is intended for drivers to be able to share the otherwise + * individual struct clk_hw[] compound literals created by the compiler + * when using CLK_HW_INIT_HW. It does NOT support multiple parents. 
+ */ +#define CLK_HW_INIT_HWS(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_hws = _parent, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_FW_NAME(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_data = (const struct clk_parent_data[]) { \ + { .fw_name = _parent }, \ + }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + #define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \ (&(struct clk_init_data) { \ .flags = _flags, \ @@ -913,6 +950,24 @@ extern struct of_device_id __clk_of_table; .ops = _ops, \ }) +#define CLK_HW_INIT_PARENTS_HW(_name, _parents, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_hws = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_PARENTS_DATA(_name, _parents, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_data = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + }) + #define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \ (&(struct clk_init_data) { \ .flags = _flags, \ @@ -933,6 +988,43 @@ extern struct of_device_id __clk_of_table; _flags), \ } +#define CLK_FIXED_FACTOR_HW(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT_HW(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + +/* + * This macro allows the driver to reuse the _parent array for multiple + * fixed factor clk declarations. + */ +#define CLK_FIXED_FACTOR_HWS(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT_HWS(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + +#define CLK_FIXED_FACTOR_FW_NAME(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT_FW_NAME(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + #ifdef CONFIG_OF int of_clk_add_provider(struct device_node *np, struct clk *(*clk_src_get)(struct of_phandle_args *args, @@ -1019,5 +1111,4 @@ static inline int of_clk_detect_critical(struct device_node *np, int index, void clk_gate_restore_context(struct clk_hw *hw); -#endif /* CONFIG_COMMON_CLK */ #endif /* CLK_PROVIDER_H */ diff --git a/include/linux/clk.h b/include/linux/clk.h index c8e3325868bd..18b7b95a8253 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -239,7 +239,8 @@ static inline int clk_prepare(struct clk *clk) return 0; } -static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks) +static inline int __must_check +clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks) { might_sleep(); return 0; @@ -263,7 +264,8 @@ static inline void clk_unprepare(struct clk *clk) { might_sleep(); } -static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks) +static inline void clk_bulk_unprepare(int num_clks, + const struct clk_bulk_data *clks) { might_sleep(); } @@ -329,6 +331,19 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks, */ int __must_check clk_bulk_get_all(struct device *dev, struct clk_bulk_data **clks); + +/** + * clk_bulk_get_optional - lookup and obtain a number of references to clock producer + * @dev: device for clock "consumer" + * 
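The CLK_HW_INIT_HW()/CLK_FIXED_FACTOR_HW() family added above lets a clock provider reference its parent by clk_hw pointer rather than by global string name. A minimal sketch of the intended use, assuming a kernel build context; the foo_pll parent, its rate and the probe function are invented for this example, while clk_fixed_rate and devm_clk_hw_register() are existing kernel helpers used as a convenient parent and registration path.

/* Sketch only: assumes kernel build context; 'foo_pll' and the probe
 * function are hypothetical.
 */
#include <linux/clk-provider.h>
#include <linux/platform_device.h>

static struct clk_fixed_rate foo_pll = {
	.fixed_rate = 1200000000,
	.hw.init = CLK_HW_INIT_NO_PARENT("foo_pll", &clk_fixed_rate_ops, 0),
};

/* 1200 MHz parent divided by 2; the parent is referenced by clk_hw pointer. */
static CLK_FIXED_FACTOR_HW(foo_pll_div2, "foo_pll_div2", &foo_pll.hw, 2, 1, 0);

static int foo_clk_probe(struct platform_device *pdev)
{
	int ret;

	ret = devm_clk_hw_register(&pdev->dev, &foo_pll.hw);
	if (ret)
		return ret;

	return devm_clk_hw_register(&pdev->dev, &foo_pll_div2.hw);
}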
@num_clks: the number of clk_bulk_data + * @clks: the clk_bulk_data table of consumer + * + * Behaves the same as clk_bulk_get() except where there is no clock producer. + * In this case, instead of returning -ENOENT, the function returns 0 and + * NULL for a clk for which a clock producer could not be determined. + */ +int __must_check clk_bulk_get_optional(struct device *dev, int num_clks, + struct clk_bulk_data *clks); /** * devm_clk_bulk_get - managed get multiple clk consumers * @dev: device for clock "consumer" @@ -344,6 +359,29 @@ int __must_check clk_bulk_get_all(struct device *dev, int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks); /** + * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks + * @dev: device for clock "consumer" + * @num_clks: the number of clk_bulk_data + * @clks: pointer to the clk_bulk_data table of consumer + * + * Behaves the same as devm_clk_bulk_get() except where there is no clock + * producer. In this case, instead of returning -ENOENT, the function returns + * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional. + * + * Returns 0 if all clocks specified in clk_bulk_data table are obtained + * successfully or for any clk there was no clk provider available, otherwise + * returns valid IS_ERR() condition containing errno. + * The implementation uses @dev and @clk_bulk_data.id to determine the + * clock consumer, and thereby the clock producer. + * The clock returned is stored in each @clk_bulk_data.clk field. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_bulk_get should not be called from within interrupt context. + */ +int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, + struct clk_bulk_data *clks); +/** * devm_clk_bulk_get_all - managed get multiple clk consumers * @dev: device for clock "consumer" * @clks: pointer to the clk_bulk_data table of consumer @@ -715,6 +753,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks, return 0; } +static inline int __must_check clk_bulk_get_optional(struct device *dev, + int num_clks, struct clk_bulk_data *clks) +{ + return 0; +} + static inline int __must_check clk_bulk_get_all(struct device *dev, struct clk_bulk_data **clks) { @@ -738,6 +782,12 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk return 0; } +static inline int __must_check devm_clk_bulk_get_optional(struct device *dev, + int num_clks, struct clk_bulk_data *clks) +{ + return 0; +} + static inline int __must_check devm_clk_bulk_get_all(struct device *dev, struct clk_bulk_data **clks) { @@ -772,7 +822,8 @@ static inline int clk_enable(struct clk *clk) return 0; } -static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks) +static inline int __must_check clk_bulk_enable(int num_clks, + const struct clk_bulk_data *clks) { return 0; } @@ -781,7 +832,7 @@ static inline void clk_disable(struct clk *clk) {} static inline void clk_bulk_disable(int num_clks, - struct clk_bulk_data *clks) {} + const struct clk_bulk_data *clks) {} static inline unsigned long clk_get_rate(struct clk *clk) { @@ -870,8 +921,8 @@ static inline void clk_disable_unprepare(struct clk *clk) clk_unprepare(clk); } -static inline int __must_check clk_bulk_prepare_enable(int num_clks, - struct clk_bulk_data *clks) +static inline int __must_check +clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks) { int ret; @@ -886,7 +937,7 @@ 
static inline int __must_check clk_bulk_prepare_enable(int num_clks, } static inline void clk_bulk_disable_unprepare(int num_clks, - struct clk_bulk_data *clks) + const struct clk_bulk_data *clks) { clk_bulk_disable(num_clks, clks); clk_bulk_unprepare(num_clks, clks); diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h index 85f8cf9d1226..eae9652c70cd 100644 --- a/include/linux/clk/clk-conf.h +++ b/include/linux/clk/clk-conf.h @@ -4,6 +4,9 @@ * Sylwester Nawrocki <s.nawrocki@samsung.com> */ +#ifndef __CLK_CONF_H +#define __CLK_CONF_H + #include <linux/types.h> struct device_node; @@ -17,3 +20,5 @@ static inline int of_clk_set_defaults(struct device_node *node, return 0; } #endif + +#endif /* __CLK_CONF_H */ diff --git a/include/linux/coda.h b/include/linux/coda.h index d30209b9cef8..0ca0c83fdb1c 100644 --- a/include/linux/coda.h +++ b/include/linux/coda.h @@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance. #ifndef _CODA_HEADER_ #define _CODA_HEADER_ -#if defined(__linux__) typedef unsigned long long u_quad_t; -#endif + #include <uapi/linux/coda.h> #endif diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h deleted file mode 100644 index 15170954aa2b..000000000000 --- a/include/linux/coda_psdev.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __CODA_PSDEV_H -#define __CODA_PSDEV_H - -#include <linux/backing-dev.h> -#include <linux/mutex.h> -#include <uapi/linux/coda_psdev.h> - -struct kstatfs; - -/* communication pending/processing queues */ -struct venus_comm { - u_long vc_seq; - wait_queue_head_t vc_waitq; /* Venus wait queue */ - struct list_head vc_pending; - struct list_head vc_processing; - int vc_inuse; - struct super_block *vc_sb; - struct mutex vc_mutex; -}; - - -static inline struct venus_comm *coda_vcp(struct super_block *sb) -{ - return (struct venus_comm *)((sb)->s_fs_info); -} - -/* upcalls */ -int venus_rootfid(struct super_block *sb, struct CodaFid *fidp); -int venus_getattr(struct super_block *sb, struct CodaFid *fid, - struct coda_vattr *attr); -int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *); -int venus_lookup(struct super_block *sb, struct CodaFid *fid, - const char *name, int length, int *type, - struct CodaFid *resfid); -int venus_close(struct super_block *sb, struct CodaFid *fid, int flags, - kuid_t uid); -int venus_open(struct super_block *sb, struct CodaFid *fid, int flags, - struct file **f); -int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid, - const char *name, int length, - struct CodaFid *newfid, struct coda_vattr *attrs); -int venus_create(struct super_block *sb, struct CodaFid *dirfid, - const char *name, int length, int excl, int mode, - struct CodaFid *newfid, struct coda_vattr *attrs) ; -int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid, - const char *name, int length); -int venus_remove(struct super_block *sb, struct CodaFid *dirfid, - const char *name, int length); -int venus_readlink(struct super_block *sb, struct CodaFid *fid, - char *buffer, int *length); -int venus_rename(struct super_block *, struct CodaFid *new_fid, - struct CodaFid *old_fid, size_t old_length, - size_t new_length, const char *old_name, - const char *new_name); -int venus_link(struct super_block *sb, struct CodaFid *fid, - struct CodaFid *dirfid, const char *name, int len ); -int venus_symlink(struct super_block *sb, struct CodaFid *fid, - const char *name, int len, const char *symname, int symlen); -int 
venus_access(struct super_block *sb, struct CodaFid *fid, int mask); -int venus_pioctl(struct super_block *sb, struct CodaFid *fid, - unsigned int cmd, struct PioctlData *data); -int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out); -int venus_fsync(struct super_block *sb, struct CodaFid *fid); -int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); - -/* - * Statistics - */ - -extern struct venus_comm coda_comms[]; -#endif diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 9569e7c786d3..4b898cdbdf05 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -129,11 +129,8 @@ static inline bool compaction_failed(enum compact_result result) return false; } -/* - * Compaction has backed off for some reason. It might be throttling or - * lock contention. Retrying is still worthwhile. - */ -static inline bool compaction_withdrawn(enum compact_result result) +/* Compaction needs reclaim to be performed first, so it can continue. */ +static inline bool compaction_needs_reclaim(enum compact_result result) { /* * Compaction backed off due to watermark checks for order-0 @@ -142,6 +139,16 @@ static inline bool compaction_withdrawn(enum compact_result result) if (result == COMPACT_SKIPPED) return true; + return false; +} + +/* + * Compaction has backed off for some reason after doing some work or none + * at all. It might be throttling or lock contention. Retrying might be still + * worthwhile, but with a higher priority if allowed. + */ +static inline bool compaction_withdrawn(enum compact_result result) +{ /* * If compaction is deferred for high-order allocations, it is * because sync compaction recently failed. If this is the case @@ -207,6 +214,11 @@ static inline bool compaction_failed(enum compact_result result) return false; } +static inline bool compaction_needs_reclaim(enum compact_result result) +{ + return false; +} + static inline bool compaction_withdrawn(enum compact_result result) { return true; diff --git a/include/linux/compat.h b/include/linux/compat.h index ebddcb6cfcf8..16dafd9f4b86 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -138,8 +138,7 @@ typedef struct { compat_sigset_word sig[_COMPAT_NSIG_WORDS]; } compat_sigset_t; -int set_compat_user_sigmask(const compat_sigset_t __user *usigmask, - sigset_t *set, sigset_t *oldset, +int set_compat_user_sigmask(const compat_sigset_t __user *umask, size_t sigsetsize); struct compat_sigaction { diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index e8579412ad21..d7ee4c6bad48 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -170,3 +170,5 @@ #else #define __diag_GCC_8(s) #endif + +#define __no_fgcse __attribute__((optimize("-fno-gcse"))) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 8aaf7cd026b0..5e88e7e33abe 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -24,7 +24,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, long ______r; \ static struct ftrace_likely_data \ __aligned(4) \ - __section("_ftrace_annotated_branch") \ + __section(_ftrace_annotated_branch) \ ______f = { \ .data.func = __func__, \ .data.file = __FILE__, \ @@ -60,7 +60,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #define __trace_if_value(cond) ({ \ static struct ftrace_branch_data \ __aligned(4) \ - __section("_ftrace_branch") \ + __section(_ftrace_branch) \ __if_trace = { \ .func = __func__, \ .file = __FILE__, \ @@ -116,9 +116,14 @@ 
void ftrace_likely_update(struct ftrace_likely_data *f, int val, ".pushsection .discard.unreachable\n\t" \ ".long 999b - .\n\t" \ ".popsection\n\t" + +/* Annotate a C jump table to allow objtool to follow the code flow */ +#define __annotate_jump_table __section(.rodata..c_jump_table) + #else #define annotate_reachable() #define annotate_unreachable() +#define __annotate_jump_table #endif #ifndef ASM_UNREACHABLE @@ -293,7 +298,7 @@ unsigned long read_word_at_a_time(const void *addr) * visible to the compiler. */ #define __ADDRESSABLE(sym) \ - static void * __section(".discard.addressable") __used \ + static void * __section(.discard.addressable) __used \ __PASTE(__addressable_##sym, __LINE__) = (void *)&sym; /** diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 6b318efd8a74..cdf016596659 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -40,6 +40,7 @@ # define __GCC4_has_attribute___noclone__ 1 # define __GCC4_has_attribute___nonstring__ 0 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) +# define __GCC4_has_attribute___fallthrough__ 0 #endif /* @@ -186,6 +187,22 @@ #endif /* + * Add the pseudo keyword 'fallthrough' so case statement blocks + * must end with any of these keywords: + * break; + * fallthrough; + * goto <label>; + * return [expression]; + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#Statement-Attributes + */ +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif + +/* * Note the missing underscores. * * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 19e58b9138a0..72393a8c1a6c 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -112,6 +112,8 @@ struct ftrace_likely_data { #if defined(CC_USING_HOTPATCH) #define notrace __attribute__((hotpatch(0, 0))) +#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY) +#define notrace __attribute__((patchable_function_entry(0, 0))) #else #define notrace __attribute__((__no_instrument_function__)) #endif @@ -128,10 +130,6 @@ struct ftrace_likely_data { /* * Force always-inline if the user requests it so via the .config. - * GCC does not warn about unused static inline functions for - * -Wunused-function. This turns out to avoid the need for complex #ifdef - * directives. Suppress the warning in clang as well by using "unused" - * function attribute, which is redundant but not harmful for gcc. * Prefer gnu_inline, so that extern inline functions do not emit an * externally visible function. This makes extern inline behave as per gnu89 * semantics rather than c99. This prevents multiple symbol definition errors @@ -142,14 +140,35 @@ struct ftrace_likely_data { */ #if !defined(CONFIG_OPTIMIZE_INLINING) #define inline inline __attribute__((__always_inline__)) __gnu_inline \ - __maybe_unused notrace + __inline_maybe_unused notrace #else #define inline inline __gnu_inline \ - __maybe_unused notrace + __inline_maybe_unused notrace #endif +/* + * gcc provides both __inline__ and __inline as alternate spellings of + * the inline keyword, though the latter is undocumented. New kernel + * code should only use the inline spelling, but some existing code + * uses __inline__. 
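The fallthrough pseudo-keyword defined above gives deliberate case fall-through a form that both -Wimplicit-fallthrough and human readers can see. A standalone sketch of its use; the __has_attribute guard mirrors the compiler_attributes.h convention so the example also builds where the attribute is unavailable:

/* Standalone sketch of the 'fallthrough' pseudo-keyword defined above. */
#include <stdio.h>

#ifndef __has_attribute
# define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
# define fallthrough __attribute__((__fallthrough__))
#else
# define fallthrough do {} while (0) /* fallthrough */
#endif

static const char *classify(int n)
{
	switch (n) {
	case 0:
	case 1:
		return "small";
	case 2:
		printf("two is even as well\n");
		fallthrough;	/* deliberate: share the return below */
	case 4:
		return "even";
	default:
		return "other";
	}
}

int main(void)
{
	printf("%s\n", classify(2));
	return 0;
}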
Since we #define inline above, to ensure + * __inline__ has the same semantics, we need this #define. + * + * However, the spelling __inline is strictly reserved for referring + * to the bare keyword. + */ #define __inline__ inline -#define __inline inline + +/* + * GCC does not warn about unused static inline functions for -Wunused-function. + * Suppress the warning in clang as well by using __maybe_unused, but enable it + * for W=1 build. This will allow clang to find unused functions. Remove the + * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings. + */ +#ifdef KBUILD_EXTRA_WARN1 +#define __inline_maybe_unused +#else +#define __inline_maybe_unused __maybe_unused +#endif /* * Rather then using noinline to prevent stack consumption, use @@ -187,6 +206,16 @@ struct ftrace_likely_data { #define asm_volatile_goto(x...) asm goto(x) #endif +#ifdef CONFIG_CC_HAS_ASM_INLINE +#define asm_inline asm __inline +#else +#define asm_inline asm +#endif + +#ifndef __no_fgcse +# define __no_fgcse +#endif + /* Are two types/vars the same type (ignoring qualifiers)? */ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) diff --git a/include/linux/concap.h b/include/linux/concap.h deleted file mode 100644 index 977acb3d1fb2..000000000000 --- a/include/linux/concap.h +++ /dev/null @@ -1,112 +0,0 @@ -/* $Id: concap.h,v 1.3.2.2 2004/01/12 23:08:35 keil Exp $ - * - * Copyright 1997 by Henner Eisen <eis@baty.hanse.de> - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - */ - -#ifndef _LINUX_CONCAP_H -#define _LINUX_CONCAP_H - -#include <linux/skbuff.h> -#include <linux/netdevice.h> - -/* Stuff to support encapsulation protocols genericly. The encapsulation - protocol is processed at the uppermost layer of the network interface. - - Based on a ideas developed in a 'synchronous device' thread in the - linux-x25 mailing list contributed by Alan Cox, Thomasz Motylewski - and Jonathan Naylor. - - For more documetation on this refer to Documentation/isdn/README.concap -*/ - -struct concap_proto_ops; -struct concap_device_ops; - -/* this manages all data needed by the encapsulation protocol - */ -struct concap_proto{ - struct net_device *net_dev; /* net device using our service */ - struct concap_device_ops *dops; /* callbacks provided by device */ - struct concap_proto_ops *pops; /* callbacks provided by us */ - spinlock_t lock; - int flags; - void *proto_data; /* protocol specific private data, to - be accessed via *pops methods only*/ - /* - : - whatever - : - */ -}; - -/* Operations to be supported by the net device. Called by the encapsulation - * protocol entity. No receive method is offered because the encapsulation - * protocol directly calls netif_rx(). - */ -struct concap_device_ops{ - - /* to request data is submitted by device*/ - int (*data_req)(struct concap_proto *, struct sk_buff *); - - /* Control methods must be set to NULL by devices which do not - support connection control.*/ - /* to request a connection is set up */ - int (*connect_req)(struct concap_proto *); - - /* to request a connection is released */ - int (*disconn_req)(struct concap_proto *); -}; - -/* Operations to be supported by the encapsulation protocol. Called by - * device driver. 
- */ -struct concap_proto_ops{ - - /* create a new encapsulation protocol instance of same type */ - struct concap_proto * (*proto_new) (void); - - /* delete encapsulation protocol instance and free all its resources. - cprot may no loger be referenced after calling this */ - void (*proto_del)(struct concap_proto *cprot); - - /* initialize the protocol's data. To be called at interface startup - or when the device driver resets the interface. All services of the - encapsulation protocol may be used after this*/ - int (*restart)(struct concap_proto *cprot, - struct net_device *ndev, - struct concap_device_ops *dops); - - /* inactivate an encapsulation protocol instance. The encapsulation - protocol may not call any *dops methods after this. */ - int (*close)(struct concap_proto *cprot); - - /* process a frame handed down to us by upper layer */ - int (*encap_and_xmit)(struct concap_proto *cprot, struct sk_buff *skb); - - /* to be called for each data entity received from lower layer*/ - int (*data_ind)(struct concap_proto *cprot, struct sk_buff *skb); - - /* to be called when a connection was set up/down. - Protocols that don't process these primitives might fill in - dummy methods here */ - int (*connect_ind)(struct concap_proto *cprot); - int (*disconn_ind)(struct concap_proto *cprot); - /* - Some network device support functions, like net_header(), rebuild_header(), - and others, that depend solely on the encapsulation protocol, might - be provided here, too. The net device would just fill them in its - corresponding fields when it is opened. - */ -}; - -/* dummy restart/close/connect/reset/disconn methods - */ -extern int concap_nop(struct concap_proto *cprot); - -/* dummy submit method - */ -extern int concap_drop_skb(struct concap_proto *cprot, struct sk_buff *skb); -#endif diff --git a/include/linux/connector.h b/include/linux/connector.h index 1d72ef76f24f..cb732643471b 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -50,15 +50,75 @@ struct cn_dev { u32 seq, groups; struct sock *nls; - void (*input) (struct sk_buff *skb); struct cn_queue_dev *cbdev; }; +/** + * cn_add_callback() - Registers new callback with connector core. + * + * @id: unique connector's user identifier. + * It must be registered in connector.h for legal + * in-kernel users. + * @name: connector's callback symbolic name. + * @callback: connector's callback. + * parameters are %cn_msg and the sender's credentials + */ int cn_add_callback(struct cb_id *id, const char *name, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)); -void cn_del_callback(struct cb_id *); +/** + * cn_del_callback() - Unregisters new callback with connector core. + * + * @id: unique connector's user identifier. + */ +void cn_del_callback(struct cb_id *id); + + +/** + * cn_netlink_send_mult - Sends message to the specified groups. + * + * @msg: message header(with attached data). + * @len: Number of @msg to be sent. + * @portid: destination port. + * If non-zero the message will be sent to the given port, + * which should be set to the original sender. + * @group: destination group. + * If @portid and @group is zero, then appropriate group will + * be searched through all registered connector users, and + * message will be delivered to the group which was created + * for user with the same ID as in @msg. + * If @group is not zero, then message will be delivered + * to the specified group. + * @gfp_mask: GFP mask. 
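The connector kernel-doc added above spells out the registration side of the API; a minimal sketch of a module pairing cn_add_callback() with cn_del_callback() follows. Kernel build context assumed; the idx/val pair and all names here are hypothetical, and a real user would reserve its ID in connector.h:

/* Sketch only: kernel build context assumed; the ID below is hypothetical. */
#include <linux/connector.h>
#include <linux/module.h>

static struct cb_id demo_cn_id = { .idx = 0x123, .val = 0x456 };

static void demo_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	pr_info("connector msg: seq %u ack %u len %u from port %u\n",
		msg->seq, msg->ack, msg->len, nsp->portid);
}

static int __init demo_cn_init(void)
{
	return cn_add_callback(&demo_cn_id, "demo_cn", demo_cn_callback);
}

static void __exit demo_cn_exit(void)
{
	cn_del_callback(&demo_cn_id);
}

module_init(demo_cn_init);
module_exit(demo_cn_exit);
MODULE_LICENSE("GPL");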
+ * + * It can be safely called from softirq context, but may silently + * fail under strong memory pressure. + * + * If there are no listeners for given group %-ESRCH can be returned. + */ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); + +/** + * cn_netlink_send_mult - Sends message to the specified groups. + * + * @msg: message header(with attached data). + * @portid: destination port. + * If non-zero the message will be sent to the given port, + * which should be set to the original sender. + * @group: destination group. + * If @portid and @group is zero, then appropriate group will + * be searched through all registered connector users, and + * message will be delivered to the group which was created + * for user with the same ID as in @msg. + * If @group is not zero, then message will be delivered + * to the specified group. + * @gfp_mask: GFP mask. + * + * It can be safely called from softirq context, but may silently + * fail under strong memory pressure. + * + * If there are no listeners for given group %-ESRCH can be returned. + */ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask); int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name, diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index ed798e114663..24d4c16e3ae0 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -168,9 +168,6 @@ extern void vc_SAK(struct work_struct *work); #define CUR_DEFAULT CUR_UNDERLINE -static inline bool con_is_visible(const struct vc_data *vc) -{ - return *vc->vc_display_fg == vc; -} +bool con_is_visible(const struct vc_data *vc); #endif /* _LINUX_CONSOLE_STRUCT_H */ diff --git a/include/linux/container.h b/include/linux/container.h index 0cc2ee91905c..2566a1baa736 100644 --- a/include/linux/container.h +++ b/include/linux/container.h @@ -6,6 +6,9 @@ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> */ +#ifndef _LINUX_CONTAINER_H +#define _LINUX_CONTAINER_H + #include <linux/device.h> /* drivers/base/power/container.c */ @@ -20,3 +23,5 @@ static inline struct container_dev *to_container_dev(struct device *dev) { return container_of(dev, struct container_dev, dev); } + +#endif /* _LINUX_CONTAINER_H */ diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 62a520df8add..a2b68823717b 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -91,15 +91,11 @@ union coresight_dev_subtype { /** * struct coresight_platform_data - data harvested from the DT specification - * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. - * @name: name of the component as shown under sysfs. * @nr_inport: number of input ports for this component. * @nr_outport: number of output ports for this component. * @conns: Array of nr_outport connections from this component */ struct coresight_platform_data { - int cpu; - const char *name; int nr_inport; int nr_outport; struct coresight_connection *conns; @@ -110,11 +106,12 @@ struct coresight_platform_data { * @type: as defined by @coresight_dev_type. * @subtype: as defined by @coresight_dev_subtype. * @ops: generic operations for this component, as defined - by @coresight_ops. + * by @coresight_ops. * @pdata: platform data collected from DT. * @dev: The device entity associated to this component. * @groups: operations specific to this component. These will end up - in the component's sysfs sub-directory. + * in the component's sysfs sub-directory. 
+ * @name: name for the coresight device, also shown under sysfs. */ struct coresight_desc { enum coresight_dev_type type; @@ -123,28 +120,27 @@ struct coresight_desc { struct coresight_platform_data *pdata; struct device *dev; const struct attribute_group **groups; + const char *name; }; /** * struct coresight_connection - representation of a single connection * @outport: a connection's output port number. - * @chid_name: remote component's name. * @child_port: remote component's port number @output is connected to. + * @chid_fwnode: remote component's fwnode handle. * @child_dev: a @coresight_device representation of the component connected to @outport. */ struct coresight_connection { int outport; - const char *child_name; int child_port; + struct fwnode_handle *child_fwnode; struct coresight_device *child_dev; }; /** * struct coresight_device - representation of a device as used by the framework - * @conns: array of coresight_connections associated to this component. - * @nr_inport: number of input port associated to this component. - * @nr_outport: number of output port associated to this component. + * @pdata: Platform data with device connections associated to this device. * @type: as defined by @coresight_dev_type. * @subtype: as defined by @coresight_dev_subtype. * @ops: generic operations for this component, as defined @@ -159,9 +155,7 @@ struct coresight_connection { * @ea: Device attribute for sink representation under PMU directory. */ struct coresight_device { - struct coresight_connection *conns; - int nr_inport; - int nr_outport; + struct coresight_platform_data *pdata; enum coresight_dev_type type; union coresight_dev_subtype subtype; const struct coresight_ops *ops; @@ -174,6 +168,28 @@ struct coresight_device { struct dev_ext_attribute *ea; }; +/* + * coresight_dev_list - Mapping for devices to "name" index for device + * names. + * + * @nr_idx: Number of entries already allocated. + * @pfx: Prefix pattern for device name. + * @fwnode_list: Array of fwnode_handles associated with each allocated + * index, upto nr_idx entries. 
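The new name field of coresight_desc pairs with the coresight_dev_list bookkeeping documented here and with the DEFINE_CORESIGHT_DEVLIST() / coresight_alloc_device_name() helpers declared further down in this header. A hedged sketch of how a component driver might pick up a stable sysfs name at probe time (driver, list and prefix names are hypothetical):

#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/err.h>

/* One list per component type, e.g. "funnel" -> funnel0, funnel1, ... */
DEFINE_CORESIGHT_DEVLIST(example_devs, "funnel");

static int example_probe(struct device *dev)
{
        struct coresight_desc desc = { 0 };

        desc.name = coresight_alloc_device_name(&example_devs, dev);
        if (!desc.name)
                return -ENOMEM;

        desc.type = CORESIGHT_DEV_TYPE_LINK;
        desc.pdata = coresight_get_platform_data(dev);
        desc.dev = dev;
        /* desc.subtype, desc.ops, desc.groups filled in as before ... */

        /* A real driver keeps the returned csdev; this sketch discards it. */
        return PTR_ERR_OR_ZERO(coresight_register(&desc));
}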
+ */ +struct coresight_dev_list { + int nr_idx; + const char *pfx; + struct fwnode_handle **fwnode_list; +}; + +#define DEFINE_CORESIGHT_DEVLIST(var, dev_pfx) \ +static struct coresight_dev_list (var) = { \ + .pfx = dev_pfx, \ + .nr_idx = 0, \ + .fwnode_list = NULL, \ +} + #define to_coresight_device(d) container_of(d, struct coresight_device, dev) #define source_ops(csdev) csdev->ops->source_ops @@ -267,7 +283,8 @@ extern int coresight_claim_device_unlocked(void __iomem *base); extern void coresight_disclaim_device(void __iomem *base); extern void coresight_disclaim_device_unlocked(void __iomem *base); - +extern char *coresight_alloc_device_name(struct coresight_dev_list *devs, + struct device *dev); #else static inline struct coresight_device * coresight_register(struct coresight_desc *desc) { return NULL; } @@ -292,16 +309,8 @@ static inline void coresight_disclaim_device_unlocked(void __iomem *base) {} #endif -#ifdef CONFIG_OF -extern int of_coresight_get_cpu(const struct device_node *node); -extern struct coresight_platform_data * -of_get_coresight_platform_data(struct device *dev, - const struct device_node *node); -#else -static inline int of_coresight_get_cpu(const struct device_node *node) -{ return 0; } -static inline struct coresight_platform_data *of_get_coresight_platform_data( - struct device *dev, const struct device_node *node) { return NULL; } -#endif +extern int coresight_get_cpu(struct device *dev); + +struct coresight_platform_data *coresight_get_platform_data(struct device *dev); #endif diff --git a/include/linux/cpu.h b/include/linux/cpu.h index fcb1386bb0d4..d0633ebdaa9c 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -179,7 +179,7 @@ void arch_cpu_idle_dead(void); int cpu_report_state(int cpu); int cpu_check_up_prepare(int cpu); void cpu_set_state_online(int cpu); -void play_idle(unsigned long duration_ms); +void play_idle(unsigned long duration_us); #ifdef CONFIG_HOTPLUG_CPU bool cpu_wait_death(unsigned int cpu, int seconds); @@ -201,12 +201,14 @@ enum cpuhp_smt_control { extern enum cpuhp_smt_control cpu_smt_control; extern void cpu_smt_disable(bool force); extern void cpu_smt_check_topology(void); +extern bool cpu_smt_possible(void); extern int cpuhp_smt_enable(void); extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval); #else # define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED) static inline void cpu_smt_disable(bool force) { } static inline void cpu_smt_check_topology(void) { } +static inline bool cpu_smt_possible(void) { return false; } static inline int cpuhp_smt_enable(void) { return 0; } static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; } #endif diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 32a1733014f5..92d5fdc8154e 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -13,6 +13,7 @@ #include <linux/completion.h> #include <linux/kobject.h> #include <linux/notifier.h> +#include <linux/pm_qos.h> #include <linux/spinlock.h> #include <linux/sysfs.h> @@ -47,11 +48,6 @@ struct cpufreq_cpuinfo { unsigned int transition_latency; }; -struct cpufreq_user_policy { - unsigned int min; /* in kHz */ - unsigned int max; /* in kHz */ -}; - struct cpufreq_policy { /* CPUs sharing clock, require sw coordination */ cpumask_var_t cpus; /* Online CPUs only */ @@ -81,7 +77,10 @@ struct cpufreq_policy { struct work_struct update; /* if update_policy() needs to be * called, but you're in IRQ context */ - struct cpufreq_user_policy user_policy; + struct freq_constraints constraints; + struct 
freq_qos_request *min_freq_req; + struct freq_qos_request *max_freq_req; + struct cpufreq_frequency_table *freq_table; enum cpufreq_table_sorting freq_table_sorted; @@ -144,6 +143,9 @@ struct cpufreq_policy { /* Pointer to the cooling device if used for thermal mitigation */ struct thermal_cooling_device *cdev; + + struct notifier_block nb_min; + struct notifier_block nb_max; }; struct cpufreq_freqs { @@ -201,6 +203,7 @@ void cpufreq_cpu_release(struct cpufreq_policy *policy); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_set_policy(struct cpufreq_policy *policy, struct cpufreq_policy *new_policy); +void refresh_frequency_limits(struct cpufreq_policy *policy); void cpufreq_update_policy(unsigned int cpu); void cpufreq_update_limits(unsigned int cpu); bool have_governor_per_policy(void); @@ -406,6 +409,12 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); const char *cpufreq_get_current_driver(void); void *cpufreq_get_driver_data(void); +static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv) +{ + return IS_ENABLED(CONFIG_CPU_THERMAL) && + (drv->flags & CPUFREQ_IS_COOLING_DEV); +} + static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) { @@ -450,8 +459,8 @@ static inline void cpufreq_resume(void) {} #define CPUFREQ_POSTCHANGE (1) /* Policy Notifiers */ -#define CPUFREQ_ADJUST (0) -#define CPUFREQ_NOTIFY (1) +#define CPUFREQ_CREATE_POLICY (0) +#define CPUFREQ_REMOVE_POLICY (1) #ifdef CONFIG_CPU_FREQ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); @@ -986,7 +995,7 @@ extern struct freq_attr *cpufreq_generic_attr[]; int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy); unsigned int cpufreq_generic_get(unsigned int cpu); -int cpufreq_generic_init(struct cpufreq_policy *policy, +void cpufreq_generic_init(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, unsigned int transition_latency); #endif /* _LINUX_CPUFREQ_H */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 5c6062206760..068793a619ca 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -116,10 +116,10 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_ACPI_STARTING, CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_L2X0_STARTING, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, CPUHP_AP_JCORE_TIMER_STARTING, - CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_TEGRA_TIMER_STARTING, @@ -176,6 +176,7 @@ enum cpuhp_state { CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, + CPUHP_AP_BASE_CACHEINFO_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, CPUHP_AP_X86_HPET_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index bb9a0db89f1a..4b6b5bea8f79 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -85,7 +85,9 @@ struct cpuidle_device { unsigned int cpu; ktime_t next_hrtimer; + int last_state_idx; int last_residency; + u64 poll_limit_ns; struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; struct cpuidle_driver_kobj *kobj_driver; @@ -119,6 +121,9 @@ struct cpuidle_driver { /* the driver handles the cpus in cpumask */ struct cpumask *cpumask; + + /* preferred governor to switch at register time */ + const char *governor; }; #ifdef CONFIG_CPU_IDLE @@ 
-132,6 +137,8 @@ extern int cpuidle_select(struct cpuidle_driver *drv, extern int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index); extern void cpuidle_reflect(struct cpuidle_device *dev, int index); +extern u64 cpuidle_poll_time(struct cpuidle_driver *drv, + struct cpuidle_device *dev); extern int cpuidle_register_driver(struct cpuidle_driver *drv); extern struct cpuidle_driver *cpuidle_get_driver(void); @@ -166,6 +173,9 @@ static inline int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) {return -ENODEV; } static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { } +static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{return 0; } static inline int cpuidle_register_driver(struct cpuidle_driver *drv) {return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; } @@ -256,7 +266,10 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) {return 0;} #endif -#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \ +#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \ + idx, \ + state, \ + is_retention) \ ({ \ int __ret = 0; \ \ @@ -268,7 +281,7 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) if (!is_retention) \ __ret = cpu_pm_enter(); \ if (!__ret) { \ - __ret = low_level_idle_enter(idx); \ + __ret = low_level_idle_enter(state); \ if (!is_retention) \ cpu_pm_exit(); \ } \ @@ -277,9 +290,15 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) }) #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0) + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0) #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1) + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1) + +#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0) + +#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1) #endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/cpuidle_haltpoll.h b/include/linux/cpuidle_haltpoll.h new file mode 100644 index 000000000000..d50c1e0411a2 --- /dev/null +++ b/include/linux/cpuidle_haltpoll.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _CPUIDLE_HALTPOLL_H +#define _CPUIDLE_HALTPOLL_H + +#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL +#include <asm/cpuidle_haltpoll.h> +#else +static inline void arch_haltpoll_enable(unsigned int cpu) +{ +} + +static inline void arch_haltpoll_disable(unsigned int cpu) +{ +} +#endif +#endif diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 21755471b1c3..78a73eba64dd 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/threads.h> #include <linux/bitmap.h> +#include <linux/atomic.h> #include <linux/bug.h> /* Don't assign or return these: may not be this big! 
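The new *_PARAM variants of the idle-enter macros let a platform pass a driver-specific state value to the low-level enter function while statistics stay indexed by idx. A rough sketch of an idle driver's enter callback built on CPU_PM_CPU_IDLE_ENTER_PARAM, with a hypothetical firmware call and parameter table not taken from any in-tree driver:

#include <linux/cpuidle.h>

/* Hypothetical per-state parameters, e.g. firmware power-state IDs. */
static const u32 example_state_param[] = { 0x0, 0x10001, 0x1010002 };

/* Hypothetical low-level firmware call; returns 0 or a negative errno. */
static int example_fw_enter_idle(u32 param)
{
        return 0;
}

static int example_enter_idle(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv, int idx)
{
        /*
         * Statistics are accounted against idx while the firmware call
         * receives the translated parameter; cpu_pm_enter()/exit() are
         * handled by the macro because this is not a retention state.
         */
        return CPU_PM_CPU_IDLE_ENTER_PARAM(example_fw_enter_idle, idx,
                                           example_state_param[idx]);
}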
*/ @@ -95,8 +96,21 @@ extern struct cpumask __cpu_active_mask; #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) +extern atomic_t __num_online_cpus; + #if NR_CPUS > 1 -#define num_online_cpus() cpumask_weight(cpu_online_mask) +/** + * num_online_cpus() - Read the number of online CPUs + * + * Despite the fact that __num_online_cpus is of type atomic_t, this + * interface gives only a momentary snapshot and is not protected against + * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held + * region. + */ +static inline unsigned int num_online_cpus(void) +{ + return atomic_read(&__num_online_cpus); +} #define num_possible_cpus() cpumask_weight(cpu_possible_mask) #define num_present_cpus() cpumask_weight(cpu_present_mask) #define num_active_cpus() cpumask_weight(cpu_active_mask) @@ -115,6 +129,8 @@ extern struct cpumask __cpu_active_mask; #define cpu_active(cpu) ((cpu) == 0) #endif +extern cpumask_t cpus_booted_once_mask; + static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) { #ifdef CONFIG_DEBUG_PER_CPU_MAPS @@ -184,8 +200,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node) for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) #define for_each_cpu_wrap(cpu, mask, start) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start)) -#define for_each_cpu_and(cpu, mask, and) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) +#define for_each_cpu_and(cpu, mask1, mask2) \ + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2) #else /** * cpumask_first - get the first cpu in a cpumask @@ -274,20 +290,20 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool /** * for_each_cpu_and - iterate over every cpu in both masks * @cpu: the (optionally unsigned) integer iterator - * @mask: the first cpumask pointer - * @and: the second cpumask pointer + * @mask1: the first cpumask pointer + * @mask2: the second cpumask pointer * * This saves a temporary CPU mask in many places. It is equivalent to: * struct cpumask tmp; - * cpumask_and(&tmp, &mask, &and); + * cpumask_and(&tmp, &mask1, &mask2); * for_each_cpu(cpu, &tmp) * ... * * After the loop, cpu is >= nr_cpu_ids. 
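As the new kernel-doc stresses, num_online_cpus() is only a momentary snapshot unless CPU hotplug is held off, and for_each_cpu_and() now names its two mask arguments explicitly. A small sketch combining both (the mask name and helper are hypothetical):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static unsigned int count_busy_online(const struct cpumask *busy_mask)
{
        unsigned int cpu, n = 0;

        cpus_read_lock();       /* keep the online mask stable */
        pr_debug("%u CPUs online\n", num_online_cpus());

        for_each_cpu_and(cpu, busy_mask, cpu_online_mask)
                n++;

        cpus_read_unlock();
        return n;
}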
*/ -#define for_each_cpu_and(cpu, mask, and) \ +#define for_each_cpu_and(cpu, mask1, mask2) \ for ((cpu) = -1; \ - (cpu) = cpumask_next_and((cpu), (mask), (and)), \ + (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \ (cpu) < nr_cpu_ids;) #endif /* SMP */ @@ -474,6 +490,20 @@ static inline bool cpumask_equal(const struct cpumask *src1p, } /** + * cpumask_or_equal - *src1p | *src2p == *src3p + * @src1p: the first input + * @src2p: the second input + * @src3p: the third input + */ +static inline bool cpumask_or_equal(const struct cpumask *src1p, + const struct cpumask *src2p, + const struct cpumask *src3p) +{ + return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p), + cpumask_bits(src3p), nr_cpumask_bits); +} + +/** * cpumask_intersects - (*src1p & *src2p) != 0 * @src1p: the first input * @src2p: the second input @@ -805,14 +835,7 @@ set_cpu_present(unsigned int cpu, bool present) cpumask_clear_cpu(cpu, &__cpu_present_mask); } -static inline void -set_cpu_online(unsigned int cpu, bool online) -{ - if (online) - cpumask_set_cpu(cpu, &__cpu_online_mask); - else - cpumask_clear_cpu(cpu, &__cpu_online_mask); -} +void set_cpu_online(unsigned int cpu, bool online); static inline void set_cpu_active(unsigned int cpu, bool active) diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 934633a05d20..04c20de66afc 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -40,14 +40,14 @@ static inline bool cpusets_enabled(void) static inline void cpuset_inc(void) { - static_branch_inc(&cpusets_pre_enable_key); - static_branch_inc(&cpusets_enabled_key); + static_branch_inc_cpuslocked(&cpusets_pre_enable_key); + static_branch_inc_cpuslocked(&cpusets_enabled_key); } static inline void cpuset_dec(void) { - static_branch_dec(&cpusets_enabled_key); - static_branch_dec(&cpusets_pre_enable_key); + static_branch_dec_cpuslocked(&cpusets_enabled_key); + static_branch_dec_cpuslocked(&cpusets_pre_enable_key); } extern int cpuset_init(void); @@ -55,6 +55,8 @@ extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); extern void cpuset_update_active_cpus(void); extern void cpuset_wait_for_hotplug(void); +extern void cpuset_read_lock(void); +extern void cpuset_read_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); extern void cpuset_cpus_allowed_fallback(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); @@ -176,6 +178,9 @@ static inline void cpuset_update_active_cpus(void) static inline void cpuset_wait_for_hotplug(void) { } +static inline void cpuset_read_lock(void) { } +static inline void cpuset_read_unlock(void) { } + static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) { diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index f774c5eb9e3c..4664fc1871de 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data) return -EOPNOTSUPP; } #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ + +#ifdef CONFIG_PROC_VMCORE +ssize_t read_from_oldmem(char *buf, size_t count, + u64 *ppos, int userbuf, + bool encrypted); +#else +static inline ssize_t read_from_oldmem(char *buf, size_t count, + u64 *ppos, int userbuf, + bool encrypted) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_PROC_VMCORE */ + #endif /* LINUX_CRASHDUMP_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h index 7eb43a038330..18639c069263 100644 --- a/include/linux/cred.h +++ 
b/include/linux/cred.h @@ -145,7 +145,11 @@ struct cred { struct user_struct *user; /* real user ID subscription */ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ struct group_info *group_info; /* supplementary groups for euid/fsgid */ - struct rcu_head rcu; /* RCU deletion hook */ + /* RCU deletion */ + union { + int non_rcu; /* Can we skip RCU deletion? */ + struct rcu_head rcu; /* RCU deletion hook */ + }; } __randomize_layout; extern void __put_cred(struct cred *); @@ -246,6 +250,7 @@ static inline const struct cred *get_cred(const struct cred *cred) if (!cred) return cred; validate_creds(cred); + nonconst_cred->non_rcu = 0; return get_new_cred(nonconst_cred); } @@ -257,6 +262,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred) if (!atomic_inc_not_zero(&nonconst_cred->usage)) return NULL; validate_creds(cred); + nonconst_cred->non_rcu = 0; return cred; } @@ -380,7 +386,6 @@ static inline void put_cred(const struct cred *_cred) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) -#define current_security() (current_cred_xxx(security)) extern struct user_namespace init_user_ns; #ifdef CONFIG_USER_NS diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 9cf8f3ce0e50..19ea3a371d7b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -49,7 +49,6 @@ #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b #define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d -#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e #define CRYPTO_ALG_TYPE_HASH 0x0000000e #define CRYPTO_ALG_TYPE_SHASH 0x0000000e #define CRYPTO_ALG_TYPE_AHASH 0x0000000f @@ -323,6 +322,17 @@ struct cipher_alg { void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); }; +/** + * struct compress_alg - compression/decompression algorithm + * @coa_compress: Compress a buffer of specified length, storing the resulting + * data in the specified buffer. Return the length of the + * compressed data in dlen. + * @coa_decompress: Decompress the source buffer, storing the uncompressed + * data in the specified buffer. The length of the data is + * returned in dlen. + * + * All fields are mandatory. 
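The kernel-doc added for struct compress_alg (the structure definition follows just below) spells out the two mandatory hooks and their length reporting. A hedged sketch of a trivial pass-through "compressor" purely to illustrate the calling convention; the names are made up and a real algorithm would of course transform the data:

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/string.h>

static int example_coa_compress(struct crypto_tfm *tfm, const u8 *src,
                                unsigned int slen, u8 *dst, unsigned int *dlen)
{
        if (*dlen < slen)
                return -ENOSPC;
        memcpy(dst, src, slen);
        *dlen = slen;           /* report the "compressed" length */
        return 0;
}

static int example_coa_decompress(struct crypto_tfm *tfm, const u8 *src,
                                  unsigned int slen, u8 *dst, unsigned int *dlen)
{
        if (*dlen < slen)
                return -ENOSPC;
        memcpy(dst, src, slen);
        *dlen = slen;           /* report the uncompressed length */
        return 0;
}

static const struct compress_alg example_compress_alg = {
        .coa_compress   = example_coa_compress,
        .coa_decompress = example_coa_decompress,
};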
+ */ struct compress_alg { int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen); diff --git a/include/linux/dax.h b/include/linux/dax.h index becaea5f4488..9bd8528bd305 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -7,6 +7,9 @@ #include <linux/radix-tree.h> #include <asm/pgtable.h> +/* Flag for synchronous flush */ +#define DAXDEV_F_SYNC (1UL << 0) + typedef unsigned long dax_entry_t; struct iomap_ops; @@ -38,18 +41,40 @@ extern struct attribute_group dax_attribute_group; #if IS_ENABLED(CONFIG_DAX) struct dax_device *dax_get_by_host(const char *host); struct dax_device *alloc_dax(void *private, const char *host, - const struct dax_operations *ops); + const struct dax_operations *ops, unsigned long flags); void put_dax(struct dax_device *dax_dev); void kill_dax(struct dax_device *dax_dev); void dax_write_cache(struct dax_device *dax_dev, bool wc); bool dax_write_cache_enabled(struct dax_device *dax_dev); +bool __dax_synchronous(struct dax_device *dax_dev); +static inline bool dax_synchronous(struct dax_device *dax_dev) +{ + return __dax_synchronous(dax_dev); +} +void __set_dax_synchronous(struct dax_device *dax_dev); +static inline void set_dax_synchronous(struct dax_device *dax_dev) +{ + __set_dax_synchronous(dax_dev); +} +/* + * Check if given mapping is supported by the file / underlying device. + */ +static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, + struct dax_device *dax_dev) +{ + if (!(vma->vm_flags & VM_SYNC)) + return true; + if (!IS_DAX(file_inode(vma->vm_file))) + return false; + return dax_synchronous(dax_dev); +} #else static inline struct dax_device *dax_get_by_host(const char *host) { return NULL; } static inline struct dax_device *alloc_dax(void *private, const char *host, - const struct dax_operations *ops) + const struct dax_operations *ops, unsigned long flags) { /* * Callers should check IS_ENABLED(CONFIG_DAX) to know if this @@ -70,6 +95,18 @@ static inline bool dax_write_cache_enabled(struct dax_device *dax_dev) { return false; } +static inline bool dax_synchronous(struct dax_device *dax_dev) +{ + return true; +} +static inline void set_dax_synchronous(struct dax_device *dax_dev) +{ +} +static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, + struct dax_device *dax_dev) +{ + return !(vma->vm_flags & VM_SYNC); +} #endif struct writeback_control; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index f14e587c5d5d..10090f11ab95 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -151,9 +151,9 @@ struct dentry_operations { /* * Locking rules for dentry_operations callbacks are to be found in - * Documentation/filesystems/Locking. Keep it updated! + * Documentation/filesystems/locking.rst. Keep it updated! * - * FUrther descriptions are found in Documentation/filesystems/vfs.txt. + * FUrther descriptions are found in Documentation/filesystems/vfs.rst. * Keep it updated too! */ @@ -291,7 +291,6 @@ static inline unsigned d_count(const struct dentry *dentry) */ extern __printf(4, 5) char *dynamic_dname(struct dentry *, char *, int, const char *, ...); -extern char *simple_dname(struct dentry *, char *, int); extern char *__d_path(const struct path *, const struct path *, char *, int); extern char *d_absolute_path(const struct path *, char *, int); @@ -568,7 +567,7 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) * If dentry is on a union/overlay, then return the underlying, real dentry. 
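daxdev_mapping_supported(), introduced above, is what lets a filesystem refuse MAP_SYNC mappings when the backing device cannot guarantee synchronous faults. A sketch of how an mmap handler might use it; the surrounding helper name and the way dax_dev is obtained are hypothetical:

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

static int example_fs_mmap(struct file *file, struct vm_area_struct *vma,
                           struct dax_device *dax_dev)
{
        /*
         * For VM_SYNC mappings the underlying DAX device must be
         * synchronous; otherwise reject the mmap() request.
         */
        if (!daxdev_mapping_supported(vma, dax_dev))
                return -EOPNOTSUPP;

        /* Set up vma->vm_ops and flags as the filesystem normally would. */
        return 0;
}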
* Otherwise return the dentry itself. * - * See also: Documentation/filesystems/vfs.txt + * See also: Documentation/filesystems/vfs.rst */ static inline struct dentry *d_real(struct dentry *dentry, const struct inode *inode) diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 3b0ba54cc4d5..58424eb3b329 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -133,9 +133,8 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode, void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix); -struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, - u32 *array, u32 elements); +void debugfs_create_u32_array(const char *name, umode_t mode, + struct dentry *parent, u32 *array, u32 elements); struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, struct dentry *parent, @@ -353,11 +352,10 @@ static inline bool debugfs_initialized(void) return false; } -static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, - u32 *array, u32 elements) +static inline void debugfs_create_u32_array(const char *name, umode_t mode, + struct dentry *parent, u32 *array, + u32 elements) { - return ERR_PTR(-ENODEV); } static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev, diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h index 29fc0dd735ae..f14f17f8cb7f 100644 --- a/include/linux/devfreq-event.h +++ b/include/linux/devfreq-event.h @@ -78,14 +78,20 @@ struct devfreq_event_ops { * struct devfreq_event_desc - the descriptor of devfreq-event device * * @name : the name of devfreq-event device. + * @event_type : the type of the event determined and used by driver * @driver_data : the private data for devfreq-event driver. * @ops : the operation to control devfreq-event device. * * Each devfreq-event device is described with a this structure. * This structure contains the various data for devfreq-event device. + * The event_type describes what is going to be counted in the register. + * It might choose to count e.g. read requests, write data in bytes, etc. + * The full supported list of types is present in specyfic header in: + * include/dt-bindings/pmu/. */ struct devfreq_event_desc { const char *name; + u32 event_type; void *driver_data; const struct devfreq_event_ops *ops; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index e1f51d607cc5..399ad8632356 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -95,8 +95,7 @@ typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device ** typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector, struct blk_zone *zones, - unsigned int *nr_zones, - gfp_t gfp_mask); + unsigned int *nr_zones); /* * These iteration functions are typically used to check (and combine) @@ -530,29 +529,20 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); *---------------------------------------------------------------*/ #define DM_NAME "device-mapper" -#define DM_RATELIMIT(pr_func, fmt, ...) \ -do { \ - static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \ - DEFAULT_RATELIMIT_BURST); \ - \ - if (__ratelimit(&rs)) \ - pr_func(DM_FMT(fmt), ##__VA_ARGS__); \ -} while (0) - #define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n" #define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__) #define DMERR(fmt, ...) 
pr_err(DM_FMT(fmt), ##__VA_ARGS__) -#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__) +#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) #define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__) -#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__) +#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) -#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__) +#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) #ifdef CONFIG_DM_DEBUG #define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__) -#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__) +#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) #else #define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__) #define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__) diff --git a/include/linux/device.h b/include/linux/device.h index 848fc71c6ba6..297239a08bb7 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -6,7 +6,7 @@ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2008-2009 Novell Inc. * - * See Documentation/driver-model/ for more information. + * See Documentation/driver-api/driver-model/ for more information. */ #ifndef _DEVICE_H_ @@ -42,6 +42,7 @@ struct iommu_ops; struct iommu_group; struct iommu_fwspec; struct dev_pin_info; +struct iommu_param; struct bus_attribute { struct attribute attr; @@ -163,14 +164,102 @@ void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); void subsys_dev_iter_exit(struct subsys_dev_iter *iter); +int device_match_name(struct device *dev, const void *name); +int device_match_of_node(struct device *dev, const void *np); +int device_match_fwnode(struct device *dev, const void *fwnode); +int device_match_devt(struct device *dev, const void *pdevt); +int device_match_acpi_dev(struct device *dev, const void *adev); +int device_match_any(struct device *dev, const void *unused); + int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, int (*fn)(struct device *dev, void *data)); struct device *bus_find_device(struct bus_type *bus, struct device *start, - void *data, - int (*match)(struct device *dev, void *data)); -struct device *bus_find_device_by_name(struct bus_type *bus, - struct device *start, - const char *name); + const void *data, + int (*match)(struct device *dev, const void *data)); +/** + * bus_find_device_by_name - device iterator for locating a particular device + * of a specific name. + * @bus: bus type + * @start: Device to begin with + * @name: name of the device to match + */ +static inline struct device *bus_find_device_by_name(struct bus_type *bus, + struct device *start, + const char *name) +{ + return bus_find_device(bus, start, name, device_match_name); +} + +/** + * bus_find_device_by_of_node : device iterator for locating a particular device + * matching the of_node. + * @bus: bus type + * @np: of_node of the device to match. + */ +static inline struct device * +bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np) +{ + return bus_find_device(bus, NULL, np, device_match_of_node); +} + +/** + * bus_find_device_by_fwnode : device iterator for locating a particular device + * matching the fwnode. + * @bus: bus type + * @fwnode: fwnode of the device to match. 
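These bus_find_device_by_*() wrappers are thin conveniences over bus_find_device() plus the new device_match_*() helpers, and like bus_find_device() they return the device with a reference held. A short usage sketch on the platform bus with a made-up device name:

#include <linux/device.h>
#include <linux/platform_device.h>

static void example_poke_device(void)
{
        struct device *dev;

        /* "example-ctrl" is a hypothetical platform device name. */
        dev = bus_find_device_by_name(&platform_bus_type, NULL, "example-ctrl");
        if (!dev)
                return;

        dev_info(dev, "found it\n");
        put_device(dev);        /* drop the reference taken by the lookup */
}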
+ */ +static inline struct device * +bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode) +{ + return bus_find_device(bus, NULL, fwnode, device_match_fwnode); +} + +/** + * bus_find_device_by_devt : device iterator for locating a particular device + * matching the device type. + * @bus: bus type + * @devt: device type of the device to match. + */ +static inline struct device *bus_find_device_by_devt(struct bus_type *bus, + dev_t devt) +{ + return bus_find_device(bus, NULL, &devt, device_match_devt); +} + +/** + * bus_find_next_device - Find the next device after a given device in a + * given bus. + * @bus: bus type + * @cur: device to begin the search with. + */ +static inline struct device * +bus_find_next_device(struct bus_type *bus,struct device *cur) +{ + return bus_find_device(bus, cur, NULL, device_match_any); +} + +#ifdef CONFIG_ACPI +struct acpi_device; + +/** + * bus_find_device_by_acpi_dev : device iterator for locating a particular device + * matching the ACPI COMPANION device. + * @bus: bus type + * @adev: ACPI COMPANION device to match. + */ +static inline struct device * +bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev) +{ + return bus_find_device(bus, NULL, adev, device_match_acpi_dev); +} +#else +static inline struct device * +bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev) +{ + return NULL; +} +#endif + struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, struct device *hint); int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, @@ -259,6 +348,8 @@ enum probe_type { * @resume: Called to bring a device from sleep mode. * @groups: Default attributes that get created by the driver core * automatically. + * @dev_groups: Additional attributes attached to device instance once the + * it is bound to the driver. * @pm: Power management operations of the device which matched * this driver. * @coredump: Called when sysfs entry is written to. The device driver @@ -293,6 +384,7 @@ struct device_driver { int (*suspend) (struct device *dev, pm_message_t state); int (*resume) (struct device *dev); const struct attribute_group **groups; + const struct attribute_group **dev_groups; const struct dev_pm_ops *pm; void (*coredump) (struct device *dev); @@ -336,11 +428,89 @@ extern int __must_check driver_for_each_device(struct device_driver *drv, int (*fn)(struct device *dev, void *)); struct device *driver_find_device(struct device_driver *drv, - struct device *start, void *data, - int (*match)(struct device *dev, void *data)); + struct device *start, const void *data, + int (*match)(struct device *dev, const void *data)); + +/** + * driver_find_device_by_name - device iterator for locating a particular device + * of a specific name. + * @drv: the driver we're iterating + * @name: name of the device to match + */ +static inline struct device *driver_find_device_by_name(struct device_driver *drv, + const char *name) +{ + return driver_find_device(drv, NULL, name, device_match_name); +} + +/** + * driver_find_device_by_of_node- device iterator for locating a particular device + * by of_node pointer. + * @drv: the driver we're iterating + * @np: of_node pointer to match. 
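The new dev_groups member of struct device_driver, documented earlier in this hunk, lets the driver core create and remove per-device sysfs attributes around bind/unbind instead of the driver open-coding sysfs_create_group() in probe. A hedged sketch for a platform driver, with hypothetical attribute and driver names:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t status_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        return sprintf(buf, "ok\n");
}
static DEVICE_ATTR_RO(status);

static struct attribute *example_dev_attrs[] = {
        &dev_attr_status.attr,
        NULL,
};
ATTRIBUTE_GROUPS(example_dev);

static struct platform_driver example_driver = {
        .driver = {
                .name           = "example",
                /* created when a device binds, removed on unbind */
                .dev_groups     = example_dev_groups,
        },
};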
+ */ +static inline struct device * +driver_find_device_by_of_node(struct device_driver *drv, + const struct device_node *np) +{ + return driver_find_device(drv, NULL, np, device_match_of_node); +} + +/** + * driver_find_device_by_fwnode- device iterator for locating a particular device + * by fwnode pointer. + * @drv: the driver we're iterating + * @fwnode: fwnode pointer to match. + */ +static inline struct device * +driver_find_device_by_fwnode(struct device_driver *drv, + const struct fwnode_handle *fwnode) +{ + return driver_find_device(drv, NULL, fwnode, device_match_fwnode); +} + +/** + * driver_find_device_by_devt- device iterator for locating a particular device + * by devt. + * @drv: the driver we're iterating + * @devt: devt pointer to match. + */ +static inline struct device *driver_find_device_by_devt(struct device_driver *drv, + dev_t devt) +{ + return driver_find_device(drv, NULL, &devt, device_match_devt); +} + +static inline struct device *driver_find_next_device(struct device_driver *drv, + struct device *start) +{ + return driver_find_device(drv, start, NULL, device_match_any); +} + +#ifdef CONFIG_ACPI +/** + * driver_find_device_by_acpi_dev : device iterator for locating a particular + * device matching the ACPI_COMPANION device. + * @drv: the driver we're iterating + * @adev: ACPI_COMPANION device to match. + */ +static inline struct device * +driver_find_device_by_acpi_dev(struct device_driver *drv, + const struct acpi_device *adev) +{ + return driver_find_device(drv, NULL, adev, device_match_acpi_dev); +} +#else +static inline struct device * +driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev) +{ + return NULL; +} +#endif void driver_deferred_probe_add(struct device *dev); int driver_deferred_probe_check_state(struct device *dev); +int driver_deferred_probe_check_state_continue(struct device *dev); /** * struct subsys_interface - interfaces to device functions @@ -467,6 +637,76 @@ extern struct device *class_find_device(struct class *class, struct device *start, const void *data, int (*match)(struct device *, const void *)); +/** + * class_find_device_by_name - device iterator for locating a particular device + * of a specific name. + * @class: class type + * @name: name of the device to match + */ +static inline struct device *class_find_device_by_name(struct class *class, + const char *name) +{ + return class_find_device(class, NULL, name, device_match_name); +} + +/** + * class_find_device_by_of_node : device iterator for locating a particular device + * matching the of_node. + * @class: class type + * @np: of_node of the device to match. + */ +static inline struct device * +class_find_device_by_of_node(struct class *class, const struct device_node *np) +{ + return class_find_device(class, NULL, np, device_match_of_node); +} + +/** + * class_find_device_by_fwnode : device iterator for locating a particular device + * matching the fwnode. + * @class: class type + * @fwnode: fwnode of the device to match. + */ +static inline struct device * +class_find_device_by_fwnode(struct class *class, + const struct fwnode_handle *fwnode) +{ + return class_find_device(class, NULL, fwnode, device_match_fwnode); +} + +/** + * class_find_device_by_devt : device iterator for locating a particular device + * matching the device type. + * @class: class type + * @devt: device type of the device to match. 
+ */ +static inline struct device *class_find_device_by_devt(struct class *class, + dev_t devt) +{ + return class_find_device(class, NULL, &devt, device_match_devt); +} + +#ifdef CONFIG_ACPI +struct acpi_device; +/** + * class_find_device_by_acpi_dev : device iterator for locating a particular + * device matching the ACPI_COMPANION device. + * @class: class type + * @adev: ACPI_COMPANION device to match. + */ +static inline struct device * +class_find_device_by_acpi_dev(struct class *class, const struct acpi_device *adev) +{ + return class_find_device(class, NULL, adev, device_match_acpi_dev); +} +#else +static inline struct device * +class_find_device_by_acpi_dev(struct class *class, const void *adev) +{ + return NULL; +} +#endif + struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *class, struct class_attribute *attr, @@ -704,7 +944,8 @@ extern unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order); extern void devm_free_pages(struct device *dev, unsigned long addr); -void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); +void __iomem *devm_ioremap_resource(struct device *dev, + const struct resource *res); void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index, @@ -773,10 +1014,14 @@ struct device_connection { struct list_head list; }; +typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep, + void *data); + +void *fwnode_connection_find_match(struct fwnode_handle *fwnode, + const char *con_id, void *data, + devcon_match_fn_t match); void *device_connection_find_match(struct device *dev, const char *con_id, - void *data, - void *(*match)(struct device_connection *con, - int ep, void *data)); + void *data, devcon_match_fn_t match); struct device *device_connection_find(struct device *dev, const char *con_id); @@ -828,12 +1073,13 @@ enum device_link_state { /* * Device link flags. * - * STATELESS: The core won't track the presence of supplier/consumer drivers. + * STATELESS: The core will not remove this link automatically. * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind. * PM_RUNTIME: If set, the runtime PM framework will use this link. * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind. * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds. + * MANAGED: The core tracks presence of supplier/consumer drivers (internal). */ #define DL_FLAG_STATELESS BIT(0) #define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) @@ -841,6 +1087,7 @@ enum device_link_state { #define DL_FLAG_RPM_ACTIVE BIT(3) #define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4) #define DL_FLAG_AUTOPROBE_CONSUMER BIT(5) +#define DL_FLAG_MANAGED BIT(6) /** * struct device_link - Device link representation. @@ -910,6 +1157,8 @@ struct dev_links_info { * This identifies the device type and carries type-specific * information. * @mutex: Mutex to synchronize calls to its driver. + * @lockdep_mutex: An optional debug lock that a subsystem can use as a + * peer lock to gain localized lockdep coverage of the device_lock. * @bus: Type of bus device is on. * @driver: Which driver has allocated this * @platform_data: Platform data specific to the device. @@ -960,6 +1209,7 @@ struct dev_links_info { * device (i.e. the bus driver that discovered the device). * @iommu_group: IOMMU group the device belongs to. * @iommu_fwspec: IOMMU-specific properties supplied by firmware. 
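With DL_FLAG_MANAGED now reserved for the core, consumers keep using the public flags documented earlier in this hunk when creating links. A brief sketch of a consumer driver tying itself to its supplier at probe time (function and variable names are hypothetical):

#include <linux/device.h>
#include <linux/errno.h>

static int example_consumer_bind(struct device *consumer,
                                 struct device *supplier)
{
        struct device_link *link;

        /*
         * Let the core drop the link on consumer unbind and piggy-back
         * runtime PM on it, so the supplier is resumed with the consumer.
         */
        link = device_link_add(consumer, supplier,
                               DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
        return link ? 0 : -EINVAL;
}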
+ * @iommu_param: Per device generic IOMMU runtime data * * @offline_disabled: If set, the device is permanently online. * @offline: Set after successful invocation of bus type's .offline(). @@ -992,6 +1242,9 @@ struct device { core doesn't touch it */ void *driver_data; /* Driver data, set and get with dev_set_drvdata/dev_get_drvdata */ +#ifdef CONFIG_PROVE_LOCKING + struct mutex lockdep_mutex; +#endif struct mutex mutex; /* mutex to synchronize calls to * its driver. */ @@ -1053,6 +1306,7 @@ struct device { void (*release)(struct device *dev); struct iommu_group *iommu_group; struct iommu_fwspec *iommu_fwspec; + struct iommu_param *iommu_param; bool offline_disabled:1; bool offline:1; @@ -1251,6 +1505,8 @@ extern int device_for_each_child_reverse(struct device *dev, void *data, int (*fn)(struct device *dev, void *data)); extern struct device *device_find_child(struct device *dev, void *data, int (*match)(struct device *dev, void *data)); +extern struct device *device_find_child_by_name(struct device *parent, + const char *name); extern int device_rename(struct device *dev, const char *new_name); extern int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); @@ -1374,6 +1630,7 @@ extern int (*platform_notify_remove)(struct device *dev); */ extern struct device *get_device(struct device *dev); extern void put_device(struct device *dev); +extern bool kill_device(struct device *dev); #ifdef CONFIG_DEVTMPFS extern int devtmpfs_create_node(struct device *dev); diff --git a/include/linux/dim.h b/include/linux/dim.h new file mode 100644 index 000000000000..9fa4b3f88c39 --- /dev/null +++ b/include/linux/dim.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef DIM_H +#define DIM_H + +#include <linux/module.h> + +/** + * Number of events between DIM iterations. + * Causes a moderation of the algorithm run. + */ +#define DIM_NEVENTS 64 + +/** + * Is a difference between values justifies taking an action. + * We consider 10% difference as significant. + */ +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100UL * abs((val) - (ref))) / (ref)) > 10) + +/** + * Calculate the gap between two values. + * Take wrap-around and variable size into consideration. + */ +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) \ + & (BIT_ULL(bits) - 1)) + +/** + * Structure for CQ moderation values. + * Used for communications between DIM and its consumer. + * + * @usec: CQ timer suggestion (by DIM) + * @pkts: CQ packet counter suggestion (by DIM) + * @cq_period_mode: CQ priod count mode (from CQE/EQE) + */ +struct dim_cq_moder { + u16 usec; + u16 pkts; + u16 comps; + u8 cq_period_mode; +}; + +/** + * Structure for DIM sample data. + * Used for communications between DIM and its consumer. + * + * @time: Sample timestamp + * @pkt_ctr: Number of packets + * @byte_ctr: Number of bytes + * @event_ctr: Number of events + */ +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; + u32 comp_ctr; +}; + +/** + * Structure for DIM stats. + * Used for holding current measured rates. + * + * @ppms: Packets per msec + * @bpms: Bytes per msec + * @epms: Events per msec + */ +struct dim_stats { + int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ + int epms; /* events per msec */ + int cpms; /* completions per msec */ + int cpe_ratio; /* ratio of completions to events */ +}; + +/** + * Main structure for dynamic interrupt moderation (DIM). 
+ * Used for holding all information about a specific DIM instance. + * + * @state: Algorithm state (see below) + * @prev_stats: Measured rates from previous iteration (for comparison) + * @start_sample: Sampled data at start of current iteration + * @work: Work to perform on action required + * @priv: A pointer to the struct that points to dim + * @profile_ix: Current moderation profile + * @mode: CQ period count mode + * @tune_state: Algorithm tuning state (see below) + * @steps_right: Number of steps taken towards higher moderation + * @steps_left: Number of steps taken towards lower moderation + * @tired: Parking depth counter + */ +struct dim { + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct dim_sample measuring_sample; + struct work_struct work; + void *priv; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +/** + * enum dim_cq_period_mode + * + * These are the modes for CQ period count. + * + * @DIM_CQ_PERIOD_MODE_START_FROM_EQE: Start counting from EQE + * @DIM_CQ_PERIOD_MODE_START_FROM_CQE: Start counting from CQE (implies timer reset) + * @DIM_CQ_PERIOD_NUM_MODES: Number of modes + */ +enum { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + DIM_CQ_PERIOD_NUM_MODES +}; + +/** + * enum dim_state + * + * These are the DIM algorithm states. + * These will determine if the algorithm is in a valid state to start an iteration. + * + * @DIM_START_MEASURE: This is the first iteration (also after applying a new profile) + * @DIM_MEASURE_IN_PROGRESS: Algorithm is already in progress - check if + * need to perform an action + * @DIM_APPLY_NEW_PROFILE: DIM consumer is currently applying a profile - no need to measure + */ +enum { + DIM_START_MEASURE, + DIM_MEASURE_IN_PROGRESS, + DIM_APPLY_NEW_PROFILE, +}; + +/** + * enum dim_tune_state + * + * These are the DIM algorithm tune states. + * These will determine which action the algorithm should perform. + * + * @DIM_PARKING_ON_TOP: Algorithm found a local top point - exit on significant difference + * @DIM_PARKING_TIRED: Algorithm found a deep top point - don't exit if tired > 0 + * @DIM_GOING_RIGHT: Algorithm is currently trying higher moderation levels + * @DIM_GOING_LEFT: Algorithm is currently trying lower moderation levels + */ +enum { + DIM_PARKING_ON_TOP, + DIM_PARKING_TIRED, + DIM_GOING_RIGHT, + DIM_GOING_LEFT, +}; + +/** + * enum dim_stats_state + * + * These are the DIM algorithm statistics states. + * These will determine the verdict of current iteration. + * + * @DIM_STATS_WORSE: Current iteration shows worse performance than before + * @DIM_STATS_WORSE: Current iteration shows same performance than before + * @DIM_STATS_WORSE: Current iteration shows better performance than before + */ +enum { + DIM_STATS_WORSE, + DIM_STATS_SAME, + DIM_STATS_BETTER, +}; + +/** + * enum dim_step_result + * + * These are the DIM algorithm step results. + * These describe the result of a step. + * + * @DIM_STEPPED: Performed a regular step + * @DIM_TOO_TIRED: Same kind of step was done multiple times - should go to + * tired parking + * @DIM_ON_EDGE: Stepped to the most left/right profile + */ +enum { + DIM_STEPPED, + DIM_TOO_TIRED, + DIM_ON_EDGE, +}; + +/** + * dim_on_top - check if current state is a good place to stop (top location) + * @dim: DIM context + * + * Check if current profile is a good place to park at. 
+ * This will result in reducing the DIM checks frequency as we assume we + * shouldn't probably change profiles, unless traffic pattern wasn't changed. + */ +bool dim_on_top(struct dim *dim); + +/** + * dim_turn - change profile alterning direction + * @dim: DIM context + * + * Go left if we were going right and vice-versa. + * Do nothing if currently parking. + */ +void dim_turn(struct dim *dim); + +/** + * dim_park_on_top - enter a parking state on a top location + * @dim: DIM context + * + * Enter parking state. + * Clear all movement history. + */ +void dim_park_on_top(struct dim *dim); + +/** + * dim_park_tired - enter a tired parking state + * @dim: DIM context + * + * Enter parking state. + * Clear all movement history and cause DIM checks frequency to reduce. + */ +void dim_park_tired(struct dim *dim); + +/** + * dim_calc_stats - calculate the difference between two samples + * @start: start sample + * @end: end sample + * @curr_stats: delta between samples + * + * Calculate the delta between two samples (in data rates). + * Takes into consideration counter wrap-around. + */ +void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, + struct dim_stats *curr_stats); + +/** + * dim_update_sample - set a sample's fields with give values + * @event_ctr: number of events to set + * @packets: number of packets to set + * @bytes: number of bytes to set + * @s: DIM sample + */ +static inline void +dim_update_sample(u16 event_ctr, u64 packets, u64 bytes, struct dim_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = packets; + s->byte_ctr = bytes; + s->event_ctr = event_ctr; +} + +/** + * dim_update_sample_with_comps - set a sample's fields with given + * values including the completion parameter + * @event_ctr: number of events to set + * @packets: number of packets to set + * @bytes: number of bytes to set + * @comps: number of completions to set + * @s: DIM sample + */ +static inline void +dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps, + struct dim_sample *s) +{ + dim_update_sample(event_ctr, packets, bytes, s); + s->comp_ctr = comps; +} + +/* Net DIM */ + +/** + * net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile + * @cq_period_mode: CQ period mode + * @ix: Profile index + */ +struct dim_cq_moder net_dim_get_rx_moderation(u8 cq_period_mode, int ix); + +/** + * net_dim_get_def_rx_moderation - provide the default RX moderation + * @cq_period_mode: CQ period mode + */ +struct dim_cq_moder net_dim_get_def_rx_moderation(u8 cq_period_mode); + +/** + * net_dim_get_tx_moderation - provide a CQ moderation object for the given TX profile + * @cq_period_mode: CQ period mode + * @ix: Profile index + */ +struct dim_cq_moder net_dim_get_tx_moderation(u8 cq_period_mode, int ix); + +/** + * net_dim_get_def_tx_moderation - provide the default TX moderation + * @cq_period_mode: CQ period mode + */ +struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode); + +/** + * net_dim - main DIM algorithm entry point + * @dim: DIM instance information + * @end_sample: Current data measurement + * + * Called by the consumer. + * This is the main logic of the algorithm, where data is processed in order to decide on next + * required action. + */ +void net_dim(struct dim *dim, struct dim_sample end_sample); + +/* RDMA DIM */ + +/* + * RDMA DIM profile: + * profile size must be of RDMA_DIM_PARAMS_NUM_PROFILES. 
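Taken together, dim_update_sample() and net_dim() above form the whole consumer-facing loop: record a sample on each completion event and hand it to the algorithm, which schedules @work when the moderation profile should change. A rough sketch of a NIC completion handler feeding DIM; the queue structure and counters are hypothetical, and the driver is assumed to have initialized dim.work elsewhere:

#include <linux/dim.h>

struct example_rx_queue {
        struct dim dim;         /* dim.work initialized at queue setup */
        u16 events;
        u64 packets;
        u64 bytes;
};

static void example_rx_poll_done(struct example_rx_queue *q)
{
        struct dim_sample sample;

        q->events++;
        /* Snapshot the per-queue counters for this iteration. */
        dim_update_sample(q->events, q->packets, q->bytes, &sample);

        /* May schedule q->dim.work to apply a new moderation profile. */
        net_dim(&q->dim, sample);
}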
+ */ +#define RDMA_DIM_PARAMS_NUM_PROFILES 9 +#define RDMA_DIM_START_PROFILE 0 + +/** + * rdma_dim - Runs the adaptive moderation. + * @dim: The moderation struct. + * @completions: The number of completions collected in this round. + * + * Each call to rdma_dim takes the latest amount of completions that + * have been collected and counts them as a new event. + * Once enough events have been collected the algorithm decides a new + * moderation level. + */ +void rdma_dim(struct dim *dim, u64 completions); + +#endif /* DIM_H */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 9b84114f74ce..ec212cb27fdc 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -28,19 +28,21 @@ struct dma_buf_attachment; /** * struct dma_buf_ops - operations possible on struct dma_buf - * @map_atomic: [optional] maps a page from the buffer into kernel address - * space, users may not block until the subsequent unmap call. - * This callback must not sleep. - * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer. - * This Callback must not sleep. - * @map: [optional] maps a page from the buffer into kernel address space. - * @unmap: [optional] unmaps a page from the buffer. * @vmap: [optional] creates a virtual mapping for the buffer into kernel * address space. Same restrictions as for vmap and friends apply. * @vunmap: [optional] unmaps a vmap from the buffer */ struct dma_buf_ops { /** + * @cache_sgt_mapping: + * + * If true the framework will cache the first mapping made for each + * attachment. This avoids creating mappings for attachments multiple + * times. + */ + bool cache_sgt_mapping; + + /** * @attach: * * This is called from dma_buf_attach() to make sure that a given @@ -194,8 +196,6 @@ struct dma_buf_ops { * to be restarted. */ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); - void *(*map)(struct dma_buf *, unsigned long); - void (*unmap)(struct dma_buf *, unsigned long, void *); /** * @mmap: @@ -234,6 +234,31 @@ struct dma_buf_ops { */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); + /** + * @map: + * + * Maps a page from the buffer into kernel address space. The page is + * specified by offset into the buffer in PAGE_SIZE units. + * + * This callback is optional. + * + * Returns: + * + * Virtual address pointer where requested page can be accessed. NULL + * on error or when this function is unimplemented by the exporter. + */ + void *(*map)(struct dma_buf *, unsigned long); + + /** + * @unmap: + * + * Unmaps a page from the buffer. Page offset and address pointer should + * be the same as the one passed to and returned by matching call to map. + * + * This callback is optional. + */ + void (*unmap)(struct dma_buf *, unsigned long, void *); + void *(*vmap)(struct dma_buf *); void (*vunmap)(struct dma_buf *, void *vaddr); }; @@ -244,10 +269,12 @@ struct dma_buf_ops { * @file: file pointer used for sharing buffers across, and for refcounting. * @attachments: list of dma_buf_attachment that denotes all devices attached. * @ops: dma_buf_ops associated with this buffer object. - * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap + * @lock: used internally to serialize list manipulation, attach/detach and + * vmap/unmap, and accesses to name * @vmapping_counter: used internally to refcnt the vmaps * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 * @exp_name: name of the exporter; useful for debugging. + * @name: userspace-provided name; useful for accounting and debugging. 
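The new cache_sgt_mapping flag lives in the exporter's dma_buf_ops; when set, the core keeps the first mapping per attachment in the new sgt/dir fields of dma_buf_attachment instead of remapping on every map call. A minimal, hypothetical exporter ops table opting in, with stubbed callbacks that a real exporter would back with its memory:

#include <linux/dma-buf.h>
#include <linux/err.h>

static struct sg_table *example_map_dma_buf(struct dma_buf_attachment *attach,
                                            enum dma_data_direction dir)
{
        return ERR_PTR(-ENODEV);        /* stub */
}

static void example_unmap_dma_buf(struct dma_buf_attachment *attach,
                                  struct sg_table *sgt,
                                  enum dma_data_direction dir)
{
}

static void example_release(struct dma_buf *dmabuf)
{
}

static const struct dma_buf_ops example_dmabuf_ops = {
        /* Let the core cache the first mapping made for each attachment. */
        .cache_sgt_mapping      = true,
        .map_dma_buf            = example_map_dma_buf,
        .unmap_dma_buf          = example_unmap_dma_buf,
        .release                = example_release,
};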
* @owner: pointer to exporter module; used for refcounting when exporter is a * kernel module. * @list_node: node for dma_buf accounting and debugging. @@ -275,10 +302,11 @@ struct dma_buf { unsigned vmapping_counter; void *vmap_ptr; const char *exp_name; + const char *name; struct module *owner; struct list_head list_node; void *priv; - struct reservation_object *resv; + struct dma_resv *resv; /* poll support */ wait_queue_head_t poll; @@ -296,6 +324,8 @@ struct dma_buf { * @dmabuf: buffer for this attachment. * @dev: device attached to the buffer. * @node: list of dma_buf_attachment. + * @sgt: cached mapping. + * @dir: direction of cached mapping. * @priv: exporter specific attachment data. * * This structure holds the attachment information between the dma_buf buffer @@ -311,6 +341,8 @@ struct dma_buf_attachment { struct dma_buf *dmabuf; struct device *dev; struct list_head node; + struct sg_table *sgt; + enum dma_data_direction dir; void *priv; }; @@ -333,7 +365,7 @@ struct dma_buf_export_info { const struct dma_buf_ops *ops; size_t size; int flags; - struct reservation_object *resv; + struct dma_resv *resv; void *priv; }; diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index 6665fa03c0d1..03f8e98e3bcc 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -50,6 +50,7 @@ #ifdef __KERNEL__ #include <linux/device.h> +#include <linux/mm.h> struct cma; struct page; @@ -111,6 +112,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order, bool no_warn); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); +struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp); +void dma_free_contiguous(struct device *dev, struct page *page, size_t size); #else @@ -153,6 +156,19 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages, return false; } +/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */ +static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size, + gfp_t gfp) +{ + return NULL; +} + +static inline void dma_free_contiguous(struct device *dev, struct page *page, + size_t size) +{ + __free_pages(page, get_order(size)); +} + #endif #endif diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index b7338702592a..adf993a3bd58 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -32,6 +32,15 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) } #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ +#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED +bool force_dma_unencrypted(struct device *dev); +#else +static inline bool force_dma_unencrypted(struct device *dev) +{ + return false; +} +#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */ + /* * If memory encryption is supported, phys_to_dma will set the memory encryption * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 05d29dbc7e62..3347c54f3a87 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -63,15 +63,35 @@ struct dma_fence_cb; * been completed, or never called at all. */ struct dma_fence { - struct kref refcount; - const struct dma_fence_ops *ops; - struct rcu_head rcu; - struct list_head cb_list; spinlock_t *lock; + const struct dma_fence_ops *ops; + /* + * We clear the callback list on kref_put so that by the time we + * release the fence it is unused. 
No one should be adding to the + * cb_list that they don't themselves hold a reference for. + * + * The lifetime of the timestamp is similarly tied to both the + * rcu freelist and the cb_list. The timestamp is only set upon + * signaling while simultaneously notifying the cb_list. Ergo, we + * only use either the cb_list of timestamp. Upon destruction, + * neither are accessible, and so we can use the rcu. This means + * that the cb_list is *only* valid until the signal bit is set, + * and to read either you *must* hold a reference to the fence, + * and not just the rcu_read_lock. + * + * Listed in chronological order. + */ + union { + struct list_head cb_list; + /* @cb_list replaced by @timestamp on dma_fence_signal() */ + ktime_t timestamp; + /* @timestamp replaced by @rcu on dma_fence_release() */ + struct rcu_head rcu; + }; u64 context; u64 seqno; unsigned long flags; - ktime_t timestamp; + struct kref refcount; int error; }; @@ -273,7 +293,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) } /** - * dma_fence_get_rcu - get a fence from a reservation_object_list with + * dma_fence_get_rcu - get a fence from a dma_resv_list with * rcu read lock * @fence: fence to increase refcount of * @@ -297,7 +317,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) * so long as the caller is using RCU on the pointer to the fence. * * An alternative mechanism is to employ a seqlock to protect a bunch of - * fences, such as used by struct reservation_object. When using a seqlock, + * fences, such as used by struct dma_resv. When using a seqlock, * the seqlock must be taken before and checked after a reference to the * fence is acquired (as shown here). * diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 37258c8b2063..2112f21f73d8 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -5,59 +5,21 @@ #ifndef __DMA_IOMMU_H #define __DMA_IOMMU_H -#ifdef __KERNEL__ +#include <linux/errno.h> #include <linux/types.h> -#include <asm/errno.h> #ifdef CONFIG_IOMMU_DMA #include <linux/dma-mapping.h> #include <linux/iommu.h> #include <linux/msi.h> -int iommu_dma_init(void); - /* Domain management interface for IOMMU drivers */ int iommu_get_dma_cookie(struct iommu_domain *domain); int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); void iommu_put_dma_cookie(struct iommu_domain *domain); /* Setup call for arch DMA mapping code */ -int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, - u64 size, struct device *dev); - -/* General helpers for DMA-API <-> IOMMU-API interaction */ -int dma_info_to_prot(enum dma_data_direction dir, bool coherent, - unsigned long attrs); - -/* - * These implement the bulk of the relevant DMA mapping callbacks, but require - * the arch code to take care of attributes and cache maintenance - */ -struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, - unsigned long attrs, int prot, dma_addr_t *handle, - void (*flush_page)(struct device *, const void *, phys_addr_t)); -void iommu_dma_free(struct device *dev, struct page **pages, size_t size, - dma_addr_t *handle); - -int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma); - -dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, int prot); -int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, int prot); - -/* - * Arch code with no special attribute handling may use these - * directly as DMA 
mapping callbacks for simplicity - */ -void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, - enum dma_data_direction dir, unsigned long attrs); -void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, unsigned long attrs); -dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, - size_t size, enum dma_data_direction dir, unsigned long attrs); -void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, unsigned long attrs); +void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size); /* The DMA API isn't _quite_ the whole story, though... */ /* @@ -75,16 +37,16 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc, void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); -#else +#else /* CONFIG_IOMMU_DMA */ struct iommu_domain; struct msi_desc; struct msi_msg; struct device; -static inline int iommu_dma_init(void) +static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, + u64 size) { - return 0; } static inline int iommu_get_dma_cookie(struct iommu_domain *domain) @@ -117,5 +79,4 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he } #endif /* CONFIG_IOMMU_DMA */ -#endif /* __KERNEL__ */ #endif /* __DMA_IOMMU_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 6309a721394b..4a1c4fca475a 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -131,6 +131,7 @@ struct dma_map_ops { int (*dma_supported)(struct device *dev, u64 mask); u64 (*get_required_mask)(struct device *dev); size_t (*max_mapping_size)(struct device *dev); + unsigned long (*get_merge_boundary)(struct device *dev); }; #define DMA_MAPPING_ERROR (~(dma_addr_t)0) @@ -149,11 +150,6 @@ static inline int valid_dma_direction(int dma_direction) (dma_direction == DMA_FROM_DEVICE)); } -static inline int is_device_dma_capable(struct device *dev) -{ - return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; -} - #ifdef CONFIG_DMA_DECLARE_COHERENT /* * These three functions are only for dma allocator. 
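With the dma-iommu.h conversion above, the per-operation iommu_dma_* entry points disappear from the header and an architecture only has to call iommu_setup_dma_ops() to install the generic IOMMU DMA ops. A sketch of the resulting arch glue, loosely modelled on the arm64 conversion (assumes the architecture provides the dev->dma_coherent flag; error paths and cache-line checks trimmed):

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;

	/* Everything else now lives in the common kernel/dma/iommu code. */
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);
}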
@@ -462,11 +458,13 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); +bool dma_can_mmap(struct device *dev); int dma_supported(struct device *dev, u64 mask); int dma_set_mask(struct device *dev, u64 mask); int dma_set_coherent_mask(struct device *dev, u64 mask); u64 dma_get_required_mask(struct device *dev); size_t dma_max_mapping_size(struct device *dev); +unsigned long dma_get_merge_boundary(struct device *dev); #else /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size, @@ -552,6 +550,10 @@ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, { return -ENXIO; } +static inline bool dma_can_mmap(struct device *dev) +{ + return false; +} static inline int dma_supported(struct device *dev, u64 mask) { return 0; @@ -572,6 +574,10 @@ static inline size_t dma_max_mapping_size(struct device *dev) { return 0; } +static inline unsigned long dma_get_merge_boundary(struct device *dev) +{ + return 0; +} #endif /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, @@ -615,16 +621,14 @@ extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); +struct page **dma_common_find_pages(void *cpu_addr); void *dma_common_contiguous_remap(struct page *page, size_t size, - unsigned long vm_flags, pgprot_t prot, const void *caller); void *dma_common_pages_remap(struct page **pages, size_t size, - unsigned long vm_flags, pgprot_t prot, - const void *caller); -void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); + pgprot_t prot, const void *caller); +void dma_common_free_remap(void *cpu_addr, size_t size); -int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot); bool dma_in_atomic_pool(void *start, size_t size); void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags); bool dma_free_from_pool(void *start, size_t size); @@ -679,6 +683,20 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) return dma_set_mask_and_coherent(dev, mask); } +/** + * dma_addressing_limited - return if the device is addressing limited + * @dev: device to check + * + * Return %true if the devices DMA mask is too small to address all memory in + * the system, else %false. Lack of addressing bits is the prime reason for + * bounce buffering, but might not be the only one. 
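The dma_addressing_limited() helper documented above gives drivers a cheap way to detect the situation that usually leads to bounce buffering. A hypothetical probe snippet (the mydrv_* name is made up) might use it purely for diagnostics:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int mydrv_dma_init(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Not fatal: the DMA layer will bounce, but it is worth a hint. */
	if (dma_addressing_limited(dev))
		dev_warn(dev, "32-bit DMA mask does not cover all memory, expect bounce buffering\n");

	return 0;
}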
+ */ +static inline bool dma_addressing_limited(struct device *dev) +{ + return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) < + dma_get_required_mask(dev); +} + #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent); @@ -729,13 +747,6 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) return -EIO; } -#ifndef dma_max_pfn -static inline unsigned long dma_max_pfn(struct device *dev) -{ - return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset; -} -#endif - static inline int dma_get_cache_alignment(void) { #ifdef ARCH_DMA_MINALIGN @@ -747,7 +758,6 @@ static inline int dma_get_cache_alignment(void) #ifdef CONFIG_DMA_DECLARE_COHERENT int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size); -void dma_release_declared_memory(struct device *dev); #else static inline int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, @@ -755,11 +765,6 @@ dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, { return -ENOSYS; } - -static inline void -dma_release_declared_memory(struct device *dev) -{ -} #endif /* CONFIG_DMA_DECLARE_COHERENT */ static inline void *dmam_alloc_coherent(struct device *dev, size_t size, @@ -779,9 +784,6 @@ static inline void *dma_alloc_wc(struct device *dev, size_t size, return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs); } -#ifndef dma_alloc_writecombine -#define dma_alloc_writecombine dma_alloc_wc -#endif static inline void dma_free_wc(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr) @@ -789,9 +791,6 @@ static inline void dma_free_wc(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_addr, DMA_ATTR_WRITE_COMBINE); } -#ifndef dma_free_writecombine -#define dma_free_writecombine dma_free_wc -#endif static inline int dma_mmap_wc(struct device *dev, struct vm_area_struct *vma, @@ -801,9 +800,6 @@ static inline int dma_mmap_wc(struct device *dev, return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, DMA_ATTR_WRITE_COMBINE); } -#ifndef dma_mmap_writecombine -#define dma_mmap_writecombine dma_mmap_wc -#endif #ifdef CONFIG_NEED_DMA_MAP_STATE #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index 9741767e400f..dd3de6d88fc0 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -3,6 +3,7 @@ #define _LINUX_DMA_NONCOHERENT_H 1 #include <linux/dma-mapping.h> +#include <asm/pgtable.h> #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H #include <asm/dma-coherence.h> @@ -20,6 +21,22 @@ static inline bool dev_is_dma_coherent(struct device *dev) } #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */ +/* + * Check if an allocation needs to be marked uncached to be coherent. 
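The dma-mapping.h hunks above also remove the dma_{alloc,free,mmap}_writecombine compatibility aliases, leaving the dma_*_wc() names as the only spelling. An illustrative pair of wrappers using the surviving interfaces (frame-buffer flavoured, names invented):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *fb_alloc_wc_buffer(struct device *dev, size_t size,
				dma_addr_t *dma_handle)
{
	/* Write-combined mapping: fine for buffers the CPU mostly writes. */
	return dma_alloc_wc(dev, size, dma_handle, GFP_KERNEL);
}

static void fb_free_wc_buffer(struct device *dev, size_t size, void *cpu_addr,
			      dma_addr_t dma_handle)
{
	dma_free_wc(dev, size, cpu_addr, dma_handle);
}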
+ */ +static __always_inline bool dma_alloc_need_uncached(struct device *dev, + unsigned long attrs) +{ + if (dev_is_dma_coherent(dev)) + return false; + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) + return false; + if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) && + (attrs & DMA_ATTR_NON_CONSISTENT)) + return false; + return true; +} + void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, @@ -27,13 +44,26 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, dma_addr_t dma_addr); -#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT -pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, - unsigned long attrs); -#else -# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot) +#ifdef CONFIG_MMU +/* + * Page protection so that devices that can't snoop CPU caches can use the + * memory coherently. We default to pgprot_noncached which is usually used + * for ioremap as a safe bet, but architectures can override this with less + * strict semantics if possible. + */ +#ifndef pgprot_dmacoherent +#define pgprot_dmacoherent(prot) pgprot_noncached(prot) #endif +pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); +#else +static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, + unsigned long attrs) +{ + return prot; /* no protection bits supported without page tables */ +} +#endif /* CONFIG_MMU */ + #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); @@ -80,4 +110,7 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size) } #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */ +void *uncached_kernel_address(void *addr); +void *cached_kernel_address(void *addr); + #endif /* _LINUX_DMA_NONCOHERENT_H */ diff --git a/include/linux/reservation.h b/include/linux/dma-resv.h index ee750765cc94..ee50d10f052b 100644 --- a/include/linux/reservation.h +++ b/include/linux/dma-resv.h @@ -50,98 +50,52 @@ extern struct lock_class_key reservation_seqcount_class; extern const char reservation_seqcount_string[]; /** - * struct reservation_object_list - a list of shared fences + * struct dma_resv_list - a list of shared fences * @rcu: for internal use * @shared_count: table of shared fences * @shared_max: for growing shared fence table * @shared: shared fence table */ -struct reservation_object_list { +struct dma_resv_list { struct rcu_head rcu; u32 shared_count, shared_max; struct dma_fence __rcu *shared[]; }; /** - * struct reservation_object - a reservation object manages fences for a buffer + * struct dma_resv - a reservation object manages fences for a buffer * @lock: update side lock * @seq: sequence count for managing RCU read-side synchronization * @fence_excl: the exclusive fence, if there is one currently * @fence: list of current shared fences */ -struct reservation_object { +struct dma_resv { struct ww_mutex lock; seqcount_t seq; struct dma_fence __rcu *fence_excl; - struct reservation_object_list __rcu *fence; + struct dma_resv_list __rcu *fence; }; -#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) -#define reservation_object_assert_held(obj) \ - lockdep_assert_held(&(obj)->lock.base) +#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) +#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) /** - * 
reservation_object_init - initialize a reservation object - * @obj: the reservation object - */ -static inline void -reservation_object_init(struct reservation_object *obj) -{ - ww_mutex_init(&obj->lock, &reservation_ww_class); - - __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); - RCU_INIT_POINTER(obj->fence, NULL); - RCU_INIT_POINTER(obj->fence_excl, NULL); -} - -/** - * reservation_object_fini - destroys a reservation object - * @obj: the reservation object - */ -static inline void -reservation_object_fini(struct reservation_object *obj) -{ - int i; - struct reservation_object_list *fobj; - struct dma_fence *excl; - - /* - * This object should be dead and all references must have - * been released to it, so no need to be protected with rcu. - */ - excl = rcu_dereference_protected(obj->fence_excl, 1); - if (excl) - dma_fence_put(excl); - - fobj = rcu_dereference_protected(obj->fence, 1); - if (fobj) { - for (i = 0; i < fobj->shared_count; ++i) - dma_fence_put(rcu_dereference_protected(fobj->shared[i], 1)); - - kfree(fobj); - } - - ww_mutex_destroy(&obj->lock); -} - -/** - * reservation_object_get_list - get the reservation object's + * dma_resv_get_list - get the reservation object's * shared fence list, with update-side lock held * @obj: the reservation object * * Returns the shared fence list. Does NOT take references to * the fence. The obj->lock must be held. */ -static inline struct reservation_object_list * -reservation_object_get_list(struct reservation_object *obj) +static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj) { return rcu_dereference_protected(obj->fence, - reservation_object_held(obj)); + dma_resv_held(obj)); } /** - * reservation_object_lock - lock the reservation object + * dma_resv_lock - lock the reservation object * @obj: the reservation object * @ctx: the locking context * @@ -155,15 +109,14 @@ reservation_object_get_list(struct reservation_object *obj) * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. */ -static inline int -reservation_object_lock(struct reservation_object *obj, - struct ww_acquire_ctx *ctx) +static inline int dma_resv_lock(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) { return ww_mutex_lock(&obj->lock, ctx); } /** - * reservation_object_lock_interruptible - lock the reservation object + * dma_resv_lock_interruptible - lock the reservation object * @obj: the reservation object * @ctx: the locking context * @@ -177,16 +130,45 @@ reservation_object_lock(struct reservation_object *obj, * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. */ -static inline int -reservation_object_lock_interruptible(struct reservation_object *obj, - struct ww_acquire_ctx *ctx) +static inline int dma_resv_lock_interruptible(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) { return ww_mutex_lock_interruptible(&obj->lock, ctx); } +/** + * dma_resv_lock_slow - slowpath lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Acquires the reservation object after a die case. This function + * will sleep until the lock becomes available. See dma_resv_lock() as + * well. 
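dma_resv_lock_slow(), documented above, exists for the usual wound/wait backoff dance. A sketch of locking two reservation objects with the renamed API (the two-object loop is illustrative; the caller still owns the matching unlocks and the final ww_acquire_fini()):

#include <linux/dma-resv.h>
#include <linux/kernel.h>	/* swap() */

static int lock_two_resvs(struct dma_resv *a, struct dma_resv *b,
			  struct ww_acquire_ctx *ctx)
{
	int ret;

	ww_acquire_init(ctx, &reservation_ww_class);

	ret = dma_resv_lock(a, ctx);
	if (ret)
		goto err_fini;

	while ((ret = dma_resv_lock(b, ctx)) == -EDEADLK) {
		/* Backed off: drop what we hold, sleep on the contended lock. */
		dma_resv_unlock(a);
		dma_resv_lock_slow(b, ctx);
		swap(a, b);		/* now holding 'a', retry the other one */
	}
	if (ret) {
		dma_resv_unlock(a);
		goto err_fini;
	}

	ww_acquire_done(ctx);
	return 0;	/* caller unlocks both, then calls ww_acquire_fini() */

err_fini:
	ww_acquire_fini(ctx);
	return ret;
}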
+ */ +static inline void dma_resv_lock_slow(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) +{ + ww_mutex_lock_slow(&obj->lock, ctx); +} /** - * reservation_object_trylock - trylock the reservation object + * dma_resv_lock_slow_interruptible - slowpath lock the reservation + * object, interruptible + * @obj: the reservation object + * @ctx: the locking context + * + * Acquires the reservation object interruptible after a die case. This function + * will sleep until the lock becomes available. See + * dma_resv_lock_interruptible() as well. + */ +static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); +} + +/** + * dma_resv_trylock - trylock the reservation object * @obj: the reservation object * * Tries to lock the reservation object for exclusive access and modification. @@ -199,31 +181,55 @@ reservation_object_lock_interruptible(struct reservation_object *obj, * * Returns true if the lock was acquired, false otherwise. */ -static inline bool __must_check -reservation_object_trylock(struct reservation_object *obj) +static inline bool __must_check dma_resv_trylock(struct dma_resv *obj) { return ww_mutex_trylock(&obj->lock); } /** - * reservation_object_unlock - unlock the reservation object + * dma_resv_is_locked - is the reservation object locked + * @obj: the reservation object + * + * Returns true if the mutex is locked, false if unlocked. + */ +static inline bool dma_resv_is_locked(struct dma_resv *obj) +{ + return ww_mutex_is_locked(&obj->lock); +} + +/** + * dma_resv_locking_ctx - returns the context used to lock the object + * @obj: the reservation object + * + * Returns the context used to lock a reservation object or NULL if no context + * was used or the object is not locked at all. + */ +static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) +{ + return READ_ONCE(obj->lock.ctx); +} + +/** + * dma_resv_unlock - unlock the reservation object * @obj: the reservation object * * Unlocks the reservation object following exclusive access. */ -static inline void -reservation_object_unlock(struct reservation_object *obj) +static inline void dma_resv_unlock(struct dma_resv *obj) { #ifdef CONFIG_DEBUG_MUTEXES /* Test shared fence slot reservation */ - if (obj->fence) - obj->fence->shared_max = obj->fence->shared_count; + if (rcu_access_pointer(obj->fence)) { + struct dma_resv_list *fence = dma_resv_get_list(obj); + + fence->shared_max = fence->shared_count; + } #endif ww_mutex_unlock(&obj->lock); } /** - * reservation_object_get_excl - get the reservation object's + * dma_resv_get_excl - get the reservation object's * exclusive fence, with update-side lock held * @obj: the reservation object * @@ -235,14 +241,14 @@ reservation_object_unlock(struct reservation_object *obj) * The exclusive fence or NULL */ static inline struct dma_fence * -reservation_object_get_excl(struct reservation_object *obj) +dma_resv_get_excl(struct dma_resv *obj) { return rcu_dereference_protected(obj->fence_excl, - reservation_object_held(obj)); + dma_resv_held(obj)); } /** - * reservation_object_get_excl_rcu - get the reservation object's + * dma_resv_get_excl_rcu - get the reservation object's * exclusive fence, without lock held. 
* @obj: the reservation object * @@ -253,7 +259,7 @@ reservation_object_get_excl(struct reservation_object *obj) * The exclusive fence or NULL if none */ static inline struct dma_fence * -reservation_object_get_excl_rcu(struct reservation_object *obj) +dma_resv_get_excl_rcu(struct dma_resv *obj) { struct dma_fence *fence; @@ -267,27 +273,23 @@ reservation_object_get_excl_rcu(struct reservation_object *obj) return fence; } -int reservation_object_reserve_shared(struct reservation_object *obj, - unsigned int num_fences); -void reservation_object_add_shared_fence(struct reservation_object *obj, - struct dma_fence *fence); +void dma_resv_init(struct dma_resv *obj); +void dma_resv_fini(struct dma_resv *obj); +int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); +void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); -void reservation_object_add_excl_fence(struct reservation_object *obj, - struct dma_fence *fence); +void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); -int reservation_object_get_fences_rcu(struct reservation_object *obj, - struct dma_fence **pfence_excl, - unsigned *pshared_count, - struct dma_fence ***pshared); +int dma_resv_get_fences_rcu(struct dma_resv *obj, + struct dma_fence **pfence_excl, + unsigned *pshared_count, + struct dma_fence ***pshared); -int reservation_object_copy_fences(struct reservation_object *dst, - struct reservation_object *src); +int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); -long reservation_object_wait_timeout_rcu(struct reservation_object *obj, - bool wait_all, bool intr, - unsigned long timeout); +long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr, + unsigned long timeout); -bool reservation_object_test_signaled_rcu(struct reservation_object *obj, - bool test_all); +bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all); #endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h new file mode 100644 index 000000000000..cab6e18773da --- /dev/null +++ b/include/linux/dma/edma.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
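Consumers that only need to wait until a buffer goes idle can use the renamed dma_resv_wait_timeout_rcu() declared above. A small illustrative wrapper (the 5 second timeout is arbitrary):

#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_for_buffer_idle(struct dma_resv *resv)
{
	long ret;

	/* true, true: wait for shared as well as exclusive fences, interruptibly. */
	ret = dma_resv_wait_timeout_rcu(resv, true, true,
					msecs_to_jiffies(5000));
	if (ret == 0)
		return -ETIMEDOUT;

	return ret < 0 ? ret : 0;
}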
+ * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_H +#define _DW_EDMA_H + +#include <linux/device.h> +#include <linux/dmaengine.h> + +struct dw_edma; + +/** + * struct dw_edma_chip - representation of DesignWare eDMA controller hardware + * @dev: struct device of the eDMA controller + * @id: instance ID + * @irq: irq line + * @dw: struct dw_edma that is filed by dw_edma_probe() + */ +struct dw_edma_chip { + struct device *dev; + int id; + int irq; + struct dw_edma *dw; +}; + +/* Export to the platform drivers */ +#if IS_ENABLED(CONFIG_DW_EDMA) +int dw_edma_probe(struct dw_edma_chip *chip); +int dw_edma_remove(struct dw_edma_chip *chip); +#else +static inline int dw_edma_probe(struct dw_edma_chip *chip) +{ + return -ENODEV; +} + +static inline int dw_edma_remove(struct dw_edma_chip *chip) +{ + return 0; +} +#endif /* CONFIG_DW_EDMA */ + +#endif /* _DW_EDMA_H */ diff --git a/include/linux/dma/mxs-dma.h b/include/linux/dma/mxs-dma.h new file mode 100644 index 000000000000..069d9f5a609e --- /dev/null +++ b/include/linux/dma/mxs-dma.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _MXS_DMA_H_ +#define _MXS_DMA_H_ + +#include <linux/dmaengine.h> + +#define MXS_DMA_CTRL_WAIT4END BIT(31) +#define MXS_DMA_CTRL_WAIT4RDY BIT(30) + +/* + * The mxs dmaengine can do PIO transfers. We pass a pointer to the PIO words + * in the second argument to dmaengine_prep_slave_sg when the direction is + * set to DMA_TRANS_NONE. To make this clear and to prevent users from doing + * the error prone casting we have this wrapper function + */ +static inline struct dma_async_tx_descriptor *mxs_dmaengine_prep_pio( + struct dma_chan *chan, u32 *pio, unsigned int npio, + enum dma_transfer_direction dir, unsigned long flags) +{ + return dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio, npio, + dir, flags); +} + +#endif /* _MXS_DMA_H_ */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c952f987ee57..8fcdee1c0cf9 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -1302,7 +1302,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); void dma_issue_pending_all(void); struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param); + dma_filter_fn fn, void *fn_param, + struct device_node *np); struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); struct dma_chan *dma_request_chan(struct device *dev, const char *name); @@ -1327,7 +1328,9 @@ static inline void dma_issue_pending_all(void) { } static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param) + dma_filter_fn fn, + void *fn_param, + struct device_node *np) { return NULL; } @@ -1399,7 +1402,8 @@ void dma_async_device_unregister(struct dma_device *device); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); -#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) +#define dma_request_channel(mask, x, y) \ + __dma_request_channel(&(mask), x, y, NULL) #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ __dma_request_slave_channel_compat(&(mask), x, y, dev, name) @@ -1417,6 +1421,6 @@ static inline struct dma_chan if (!fn || 
!fn_param) return NULL; - return __dma_request_channel(mask, fn, fn_param); + return __dma_request_channel(mask, fn, fn_param, NULL); } #endif /* DMAENGINE_H */ diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 28813c6f44b6..a7cf3599d9a1 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -92,12 +92,14 @@ static inline bool dmar_rcu_check(void) #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check()) -#define for_each_dev_scope(a, c, p, d) \ - for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \ - NULL, (p) < (c)); (p)++) - -#define for_each_active_dev_scope(a, c, p, d) \ - for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else +#define for_each_dev_scope(devs, cnt, i, tmp) \ + for ((i) = 0; ((tmp) = (i) < (cnt) ? \ + dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \ + (i)++) + +#define for_each_active_dev_scope(devs, cnt, i, tmp) \ + for_each_dev_scope((devs), (cnt), (i), (tmp)) \ + if (!(tmp)) { continue; } else extern int dmar_table_init(void); extern int dmar_dev_scope_init(void); diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h index f2b3ae22e6b7..976cbbdb2832 100644 --- a/include/linux/dns_resolver.h +++ b/include/linux/dns_resolver.h @@ -26,7 +26,8 @@ #include <uapi/linux/dns_resolver.h> -extern int dns_query(const char *type, const char *name, size_t namelen, +struct net; +extern int dns_query(struct net *net, const char *type, const char *name, size_t namelen, const char *options, char **_result, time64_t *_expiry, bool invalidate); diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 3911e0586478..0aa803c451a3 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -20,9 +20,6 @@ int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci); -struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb, struct net_device *netdev, - struct packet_type *pt, u16 *tpid, u16 *tci); - u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port); u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port); @@ -31,6 +28,8 @@ int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); +struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb); + #else int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, @@ -45,12 +44,6 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, return NULL; } -struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb, struct net_device *netdev, - struct packet_type *pt, u16 *tpid, u16 *tci) -{ - return NULL; -} - u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port) { return 0; @@ -71,6 +64,11 @@ int dsa_8021q_rx_source_port(u16 vid) return 0; } +struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb) +{ + return NULL; +} + #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */ #endif /* _NET_DSA_8021Q_H */ diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index e46e18c47d41..897e799dbcb9 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -12,6 +12,7 @@ #include <net/dsa.h> #define ETH_P_SJA1105 ETH_P_DSA_8021Q +#define ETH_P_SJA1105_META 0x0008 /* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */ #define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull @@ -20,8 +21,43 @@ #define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull #define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull +/* Source and Destination MAC of follow-up 
meta frames. + * Whereas the choice of SMAC only affects the unique identification of the + * switch as sender of meta frames, the DMAC must be an address that is present + * in the DSA master port's multicast MAC filter. + * 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588 + * over L2 use this address for some purpose already. + */ +#define SJA1105_META_SMAC 0x222222222222ull +#define SJA1105_META_DMAC 0x0180C200000Eull + +#define SJA1105_HWTS_RX_EN 0 + +/* Global tagger data: each struct sja1105_port has a reference to + * the structure defined in struct sja1105_private. + */ +struct sja1105_tagger_data { + struct sk_buff_head skb_rxtstamp_queue; + struct work_struct rxtstamp_work; + struct sk_buff *stampable_skb; + /* Protects concurrent access to the meta state machine + * from taggers running on multiple ports on SMP systems + */ + spinlock_t meta_lock; + unsigned long state; +}; + +struct sja1105_skb_cb { + u32 meta_tstamp; +}; + +#define SJA1105_SKB_CB(skb) \ + ((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb)) + struct sja1105_port { + struct sja1105_tagger_data *data; struct dsa_port *dp; + bool hwts_tx_en; int mgmt_slot; }; diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 6c809440f319..4cf02ecd67de 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -204,6 +204,12 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) #define dynamic_dev_dbg(dev, fmt, ...) \ do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) +#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) \ + do { if (0) \ + print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \ + rowsize, groupsize, buf, len, ascii); \ + } while (0) #endif #endif diff --git a/include/linux/edac.h b/include/linux/edac.h index 342dabda9c7e..c19483b90079 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -440,7 +440,7 @@ struct dimm_info { char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ /* Memory location data */ - unsigned location[EDAC_MAX_LAYERS]; + unsigned int location[EDAC_MAX_LAYERS]; struct mem_ctl_info *mci; /* the parent */ @@ -451,7 +451,7 @@ struct dimm_info { u32 nr_pages; /* number of pages on this dimm */ - unsigned csrow, cschannel; /* Points to the old API data */ + unsigned int csrow, cschannel; /* Points to the old API data */ u16 smbios_handle; /* Handle for SMBIOS type 17 */ }; @@ -597,7 +597,7 @@ struct mem_ctl_info { unsigned long page); int mc_idx; struct csrow_info **csrows; - unsigned nr_csrows, num_cschannel; + unsigned int nr_csrows, num_cschannel; /* * Memory Controller hierarchy @@ -608,14 +608,14 @@ struct mem_ctl_info { * of the recent drivers enumerate memories per DIMM, instead. * When the memory controller is per rank, csbased is true. */ - unsigned n_layers; + unsigned int n_layers; struct edac_mc_layer *layers; bool csbased; /* * DIMM info. 
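The dynamic_hex_dump() stub added above compiles to nothing when CONFIG_DYNAMIC_DEBUG is disabled instead of breaking the build, so a single call site now works in either configuration. An illustrative helper (names invented):

#include <linux/dynamic_debug.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Dump up to the first 64 bytes of a receive buffer at debug level. */
static void dump_rx(const void *buf, size_t len)
{
	dynamic_hex_dump("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
			 buf, min_t(size_t, len, 64), true);
}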
Will eventually remove the entire csrows_info some day */ - unsigned tot_dimms; + unsigned int tot_dimms; struct dimm_info **dimms; /* diff --git a/include/linux/edma.h b/include/linux/edma.h deleted file mode 100644 index a1307e7827e8..000000000000 --- a/include/linux/edma.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * TI EDMA DMA engine driver - * - * Copyright 2012 Texas Instruments - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#ifndef __LINUX_EDMA_H -#define __LINUX_EDMA_H - -struct dma_chan; - -#if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE) -bool edma_filter_fn(struct dma_chan *, void *); -#else -static inline bool edma_filter_fn(struct dma_chan *chan, void *param) -{ - return false; -} -#endif - -#endif diff --git a/include/linux/efi.h b/include/linux/efi.h index 6ebc2098cfe1..d87acf62958e 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -689,8 +689,12 @@ void efi_native_runtime_setup(void); #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) +#define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25) #define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2) +/* OEM GUIDs */ +#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55) + typedef struct { efi_guid_t guid; u64 table; @@ -983,11 +987,9 @@ extern struct efi { unsigned long acpi20; /* ACPI table (ACPI 2.0) */ unsigned long smbios; /* SMBIOS table (32 bit entry point) */ unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ - unsigned long sal_systab; /* SAL system table */ unsigned long boot_info; /* boot info table */ unsigned long hcdp; /* HCDP table */ unsigned long uga; /* UGA table */ - unsigned long uv_systab; /* UV system table */ unsigned long fw_vendor; /* fw_vendor */ unsigned long runtime; /* runtime table */ unsigned long config_table; /* config tables */ @@ -996,6 +998,7 @@ extern struct efi { unsigned long mem_attr_table; /* memory attributes table */ unsigned long rng_seed; /* UEFI firmware random seed */ unsigned long tpm_log; /* TPM2 Event Log table */ + unsigned long tpm_final_log; /* TPM2 Final Events Log table */ unsigned long mem_reserve; /* Linux EFI memreserve table */ efi_get_time_t *get_time; efi_set_time_t *set_time; @@ -1209,8 +1212,6 @@ static inline bool efi_enabled(int feature) return test_bit(feature, &efi.flags) != 0; } extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); - -extern bool efi_is_table_address(unsigned long phys_addr); #else static inline bool efi_enabled(int feature) { @@ -1224,11 +1225,6 @@ efi_capsule_pending(int *reset_type) { return false; } - -static inline bool efi_is_table_address(unsigned long phys_addr) -{ - 
return false; -} #endif extern int efi_status_to_err(efi_status_t status); @@ -1583,9 +1579,22 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, struct efi_boot_memmap *map); +efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, + unsigned long size, unsigned long align, + unsigned long *addr, unsigned long min); + +static inline efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long align, - unsigned long *addr); + unsigned long *addr) +{ + /* + * Don't allocate at 0x0. It will confuse code that + * checks pointers against NULL. Skip the first 8 + * bytes so we start at a nice even number. + */ + return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8); +} efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long align, @@ -1596,7 +1605,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, unsigned long image_size, unsigned long alloc_size, unsigned long preferred_addr, - unsigned long alignment); + unsigned long alignment, + unsigned long min_addr); efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, efi_loaded_image_t *image, @@ -1706,12 +1716,22 @@ struct linux_efi_random_seed { struct linux_efi_tpm_eventlog { u32 size; + u32 final_events_preboot_size; u8 version; u8 log[]; }; extern int efi_tpm_eventlog_init(void); +struct efi_tcg2_final_events_table { + u64 version; + u64 nr_events; + u8 events[]; +}; +extern int efi_tpm_final_log_size; + +extern unsigned long rci2_table_phys; + /* * efi_runtime_service() function identifiers. * "NONE" is used by efi_recover_from_page_fault() to check if the page diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 6e8bc53740f0..901bda352dcb 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -34,7 +34,7 @@ struct elevator_mq_ops { void (*depth_updated)(struct blk_mq_hw_ctx *); bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); - bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); + bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int); int (*request_merge)(struct request_queue *q, struct request **, struct bio *); void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); void (*requests_merged)(struct request_queue *, struct request *, struct request *); @@ -45,7 +45,6 @@ struct elevator_mq_ops { struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); bool (*has_work)(struct blk_mq_hw_ctx *); void (*completed_request)(struct request *, u64); - void (*started_request)(struct request *); void (*requeue_request)(struct request *); struct request *(*former_request)(struct request_queue *, struct request *); struct request *(*next_request)(struct request_queue *, struct request *); @@ -75,8 +74,9 @@ struct elevator_type size_t icq_size; /* see iocontext.h */ size_t icq_align; /* ditto */ struct elv_fs_entry *elevator_attrs; - char elevator_name[ELV_NAME_MAX]; + const char *elevator_name; const char *elevator_alias; + const unsigned int elevator_features; struct module *elevator_owner; #ifdef CONFIG_BLK_DEBUG_FS const struct blk_mq_debugfs_attr *queue_debugfs_attrs; @@ -160,20 +160,18 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); #define ELEVATOR_INSERT_FLUSH 5 #define ELEVATOR_INSERT_SORT_MERGE 6 -/* - * return values from elevator_may_queue_fn - */ -enum { - ELV_MQUEUE_MAY, - ELV_MQUEUE_NO, - 
ELV_MQUEUE_MUST, -}; - #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) #define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) #define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist) +/* + * Elevator features. + */ + +/* Supports zoned block devices sequential write constraint */ +#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) + #endif /* CONFIG_BLOCK */ #endif diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index aa027f7bcb3e..73f8c3cb9588 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -89,7 +89,7 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd, * like schedutil. */ cpu = cpumask_first(to_cpumask(pd->cpus)); - scale_cpu = arch_scale_cpu_capacity(NULL, cpu); + scale_cpu = arch_scale_cpu_capacity(cpu); cs = &pd->table[pd->nr_cap_states - 1]; freq = map_util_freq(max_util, cs->frequency, scale_cpu); diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h index 280c61ecbf20..635a95caf29f 100644 --- a/include/linux/error-injection.h +++ b/include/linux/error-injection.h @@ -2,16 +2,16 @@ #ifndef _LINUX_ERROR_INJECTION_H #define _LINUX_ERROR_INJECTION_H -#ifdef CONFIG_FUNCTION_ERROR_INJECTION +#include <linux/compiler.h> +#include <asm-generic/error-injection.h> -#include <asm/error-injection.h> +#ifdef CONFIG_FUNCTION_ERROR_INJECTION extern bool within_error_injection_list(unsigned long addr); extern int get_injectable_error_type(unsigned long addr); #else /* !CONFIG_FUNCTION_ERROR_INJECTION */ -#include <asm-generic/error-injection.h> static inline bool within_error_injection_list(unsigned long addr) { return false; diff --git a/include/linux/export.h b/include/linux/export.h index fd8711ed9ac4..941d075f03d6 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -18,9 +18,6 @@ extern struct module __this_module; #define THIS_MODULE ((struct module *)0) #endif -#ifdef CONFIG_MODULES - -#if defined(__KERNEL__) && !defined(__GENKSYMS__) #ifdef CONFIG_MODVERSIONS /* Mark the CRC weak since genksyms apparently decides not to * generate a checksums for some symbols */ @@ -29,13 +26,13 @@ extern struct module __this_module; asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ " .weak __crc_" #sym " \n" \ " .long __crc_" #sym " - . \n" \ - " .previous \n"); + " .previous \n") #else #define __CRC_SYMBOL(sym, sec) \ asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \ " .weak __crc_" #sym " \n" \ " .long __crc_" #sym " \n" \ - " .previous \n"); + " .previous \n") #endif #else #define __CRC_SYMBOL(sym, sec) @@ -49,47 +46,87 @@ extern struct module __this_module; * absolute relocations that require runtime processing on relocatable * kernels. */ +#define __KSYMTAB_ENTRY_NS(sym, sec) \ + __ADDRESSABLE(sym) \ + asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \ + " .balign 4 \n" \ + "__ksymtab_" #sym ": \n" \ + " .long " #sym "- . \n" \ + " .long __kstrtab_" #sym "- . \n" \ + " .long __kstrtabns_" #sym "- . \n" \ + " .previous \n") + #define __KSYMTAB_ENTRY(sym, sec) \ __ADDRESSABLE(sym) \ asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \ - " .balign 8 \n" \ + " .balign 4 \n" \ "__ksymtab_" #sym ": \n" \ " .long " #sym "- . \n" \ " .long __kstrtab_" #sym "- . 
\n" \ + " .long 0 \n" \ " .previous \n") struct kernel_symbol { int value_offset; int name_offset; + int namespace_offset; }; #else +#define __KSYMTAB_ENTRY_NS(sym, sec) \ + static const struct kernel_symbol __ksymtab_##sym \ + __attribute__((section("___ksymtab" sec "+" #sym), used)) \ + __aligned(sizeof(void *)) \ + = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym } + #define __KSYMTAB_ENTRY(sym, sec) \ static const struct kernel_symbol __ksymtab_##sym \ __attribute__((section("___ksymtab" sec "+" #sym), used)) \ - = { (unsigned long)&sym, __kstrtab_##sym } + __aligned(sizeof(void *)) \ + = { (unsigned long)&sym, __kstrtab_##sym, NULL } struct kernel_symbol { unsigned long value; const char *name; + const char *namespace; }; #endif -/* For every exported symbol, place a struct in the __ksymtab section */ -#define ___EXPORT_SYMBOL(sym, sec) \ +#ifdef __GENKSYMS__ + +#define ___EXPORT_SYMBOL(sym,sec) __GENKSYMS_EXPORT_SYMBOL(sym) +#define ___EXPORT_SYMBOL_NS(sym,sec,ns) __GENKSYMS_EXPORT_SYMBOL(sym) + +#else + +#define ___export_symbol_common(sym, sec) \ extern typeof(sym) sym; \ - __CRC_SYMBOL(sym, sec) \ + __CRC_SYMBOL(sym, sec); \ static const char __kstrtab_##sym[] \ __attribute__((section("__ksymtab_strings"), used, aligned(1))) \ - = #sym; \ + = #sym \ + +/* For every exported symbol, place a struct in the __ksymtab section */ +#define ___EXPORT_SYMBOL_NS(sym, sec, ns) \ + ___export_symbol_common(sym, sec); \ + static const char __kstrtabns_##sym[] \ + __attribute__((section("__ksymtab_strings"), used, aligned(1))) \ + = #ns; \ + __KSYMTAB_ENTRY_NS(sym, sec) + +#define ___EXPORT_SYMBOL(sym, sec) \ + ___export_symbol_common(sym, sec); \ __KSYMTAB_ENTRY(sym, sec) -#if defined(__DISABLE_EXPORTS) +#endif + +#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS) /* * Allow symbol exports to be disabled completely so that C code may * be reused in other execution contexts such as the UEFI stub or the * decompressor. 
*/ +#define __EXPORT_SYMBOL_NS(sym, sec, ns) #define __EXPORT_SYMBOL(sym, sec) #elif defined(CONFIG_TRIM_UNUSED_KSYMS) @@ -116,38 +153,43 @@ struct kernel_symbol { #define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec) #define __cond_export_sym_0(sym, sec) /* nothing */ +#define __EXPORT_SYMBOL_NS(sym, sec, ns) \ + __ksym_marker(sym); \ + __cond_export_ns_sym(sym, sec, ns, __is_defined(__KSYM_##sym)) +#define __cond_export_ns_sym(sym, sec, ns, conf) \ + ___cond_export_ns_sym(sym, sec, ns, conf) +#define ___cond_export_ns_sym(sym, sec, ns, enabled) \ + __cond_export_ns_sym_##enabled(sym, sec, ns) +#define __cond_export_ns_sym_1(sym, sec, ns) ___EXPORT_SYMBOL_NS(sym, sec, ns) +#define __cond_export_ns_sym_0(sym, sec, ns) /* nothing */ + #else -#define __EXPORT_SYMBOL ___EXPORT_SYMBOL -#endif -#define EXPORT_SYMBOL(sym) \ - __EXPORT_SYMBOL(sym, "") +#define __EXPORT_SYMBOL_NS(sym,sec,ns) ___EXPORT_SYMBOL_NS(sym,sec,ns) +#define __EXPORT_SYMBOL(sym,sec) ___EXPORT_SYMBOL(sym,sec) -#define EXPORT_SYMBOL_GPL(sym) \ - __EXPORT_SYMBOL(sym, "_gpl") +#endif /* CONFIG_MODULES */ -#define EXPORT_SYMBOL_GPL_FUTURE(sym) \ - __EXPORT_SYMBOL(sym, "_gpl_future") +#ifdef DEFAULT_SYMBOL_NAMESPACE +#undef __EXPORT_SYMBOL +#define __EXPORT_SYMBOL(sym, sec) \ + __EXPORT_SYMBOL_NS(sym, sec, DEFAULT_SYMBOL_NAMESPACE) +#endif + +#define EXPORT_SYMBOL(sym) __EXPORT_SYMBOL(sym, "") +#define EXPORT_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_gpl") +#define EXPORT_SYMBOL_GPL_FUTURE(sym) __EXPORT_SYMBOL(sym, "_gpl_future") +#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL_NS(sym, "", ns) +#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL_NS(sym, "_gpl", ns) #ifdef CONFIG_UNUSED_SYMBOLS -#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") -#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") +#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused") +#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl") #else #define EXPORT_UNUSED_SYMBOL(sym) #define EXPORT_UNUSED_SYMBOL_GPL(sym) #endif -#endif /* __GENKSYMS__ */ - -#else /* !CONFIG_MODULES... */ - -#define EXPORT_SYMBOL(sym) -#define EXPORT_SYMBOL_GPL(sym) -#define EXPORT_SYMBOL_GPL_FUTURE(sym) -#define EXPORT_UNUSED_SYMBOL(sym) -#define EXPORT_UNUSED_SYMBOL_GPL(sym) - -#endif /* CONFIG_MODULES */ #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_EXPORT_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 0d3037419bc7..cf6571fc9c01 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -139,7 +139,7 @@ struct fid { * @get_parent: find the parent of a given directory * @commit_metadata: commit metadata changes to stable storage * - * See Documentation/filesystems/nfs/Exporting for details on how to use + * See Documentation/filesystems/nfs/exporting.rst for details on how to use * this interface correctly. * * encode_fh: diff --git a/include/linux/extable.h b/include/linux/extable.h index 41c5b3a25f67..81ecfaa83ad3 100644 --- a/include/linux/extable.h +++ b/include/linux/extable.h @@ -19,6 +19,8 @@ void trim_init_extable(struct module *m); /* Given an address, look for it in the exception tables */ const struct exception_table_entry *search_exception_tables(unsigned long add); +const struct exception_table_entry * +search_kernel_exception_table(unsigned long addr); #ifdef CONFIG_MODULES /* For extable.c to search modules' exception tables. 
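The visible result of the export.h rework is the pair of namespace-aware export macros defined in this hunk. A hypothetical library module exporting into a namespace, plus the consumer side, would look roughly like this (MY_NAMESPACE and the function are invented; MODULE_IMPORT_NS() comes from linux/module.h in the same series and is not part of this hunk):

/* --- exporting module --- */
#include <linux/export.h>
#include <linux/module.h>

int mylib_do_thing(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(mylib_do_thing, MY_NAMESPACE);

/* --- consuming module --- */
extern int mylib_do_thing(void);	/* normally from the library's header */

MODULE_IMPORT_NS(MY_NAMESPACE);		/* modpost warns about users that skip this */

static int __init consumer_init(void)
{
	return mylib_do_thing();
}
module_init(consumer_init);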
*/ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 65559900d4d7..284738996028 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -36,11 +36,17 @@ #define F2FS_MAX_QUOTAS 3 +#define F2FS_ENC_UTF8_12_1 1 +#define F2FS_ENC_STRICT_MODE_FL (1 << 0) +#define f2fs_has_strict_mode(sbi) \ + (sbi->s_encoding_flags & F2FS_ENC_STRICT_MODE_FL) + #define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */ #define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */ #define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */ #define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */ #define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) +#define F2FS_IO_ALIGNED(sbi) (F2FS_IO_SIZE(sbi) > 1) /* This flag is used by node and meta inodes, and by recovery */ #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) @@ -109,7 +115,9 @@ struct f2fs_super_block { struct f2fs_device devs[MAX_DEVICES]; /* device list */ __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ __u8 hot_ext_count; /* # of hot file extension */ - __u8 reserved[310]; /* valid reserved region */ + __le16 s_encoding; /* Filename charset encoding */ + __le16 s_encoding_flags; /* Filename charset encoding flags */ + __u8 reserved[306]; /* valid reserved region */ __le32 crc; /* checksum of superblock */ } __packed; diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 7e6c77740413..e525f6957c49 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -11,7 +11,7 @@ /* * For explanation of the elements of this struct, see - * Documentation/fault-injection/fault-injection.txt + * Documentation/fault-injection/fault-injection.rst */ struct fault_attr { unsigned long probability; diff --git a/include/linux/fb.h b/include/linux/fb.h index f52ef0ad6781..756706b666a1 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -126,43 +126,15 @@ struct fb_cursor_user { /* The resolution of the passed in fb_info about to change */ #define FB_EVENT_MODE_CHANGE 0x01 -/* The display on this fb_info is being suspended, no access to the - * framebuffer is allowed any more after that call returns - */ -#define FB_EVENT_SUSPEND 0x02 -/* The display on this fb_info was resumed, you can restore the display - * if you own it - */ -#define FB_EVENT_RESUME 0x03 -/* An entry from the modelist was removed */ -#define FB_EVENT_MODE_DELETE 0x04 -/* A driver registered itself */ + +#ifdef CONFIG_GUMSTIX_AM200EPD +/* only used by mach-pxa/am200epd.c */ #define FB_EVENT_FB_REGISTERED 0x05 -/* A driver unregistered itself */ #define FB_EVENT_FB_UNREGISTERED 0x06 -/* CONSOLE-SPECIFIC: get console to framebuffer mapping */ -#define FB_EVENT_GET_CONSOLE_MAP 0x07 -/* CONSOLE-SPECIFIC: set console to framebuffer mapping */ -#define FB_EVENT_SET_CONSOLE_MAP 0x08 -/* A hardware display blank change occurred */ +#endif + +/* A display blank is requested */ #define FB_EVENT_BLANK 0x09 -/* Private modelist is to be replaced */ -#define FB_EVENT_NEW_MODELIST 0x0A -/* The resolution of the passed in fb_info about to change and - all vc's should be changed */ -#define FB_EVENT_MODE_CHANGE_ALL 0x0B -/* A software display blank change occurred */ -#define FB_EVENT_CONBLANK 0x0C -/* Get drawing requirements */ -#define FB_EVENT_GET_REQ 0x0D -/* Unbind from the console if possible */ -#define FB_EVENT_FB_UNBIND 0x0E -/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */ -#define 
FB_EVENT_REMAP_ALL_CONSOLE 0x0F -/* A hardware display blank early change occurred */ -#define FB_EARLY_EVENT_BLANK 0x10 -/* A hardware display blank revert early change occurred */ -#define FB_R_EARLY_EVENT_BLANK 0x11 struct fb_event { struct fb_info *info; @@ -633,8 +605,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, /* drivers/video/fbmem.c */ extern int register_framebuffer(struct fb_info *fb_info); -extern int unregister_framebuffer(struct fb_info *fb_info); -extern int unlink_framebuffer(struct fb_info *fb_info); +extern void unregister_framebuffer(struct fb_info *fb_info); +extern void unlink_framebuffer(struct fb_info *fb_info); extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const char *name); extern int remove_conflicting_framebuffers(struct apertures_struct *a, @@ -660,7 +632,10 @@ extern struct class *fb_class; for (i = 0; i < FB_MAX; i++) \ if (!registered_fb[i]) {} else -extern int lock_fb_info(struct fb_info *info); +static inline void lock_fb_info(struct fb_info *info) +{ + mutex_lock(&info->lock); +} static inline void unlock_fb_info(struct fb_info *info) { @@ -742,8 +717,6 @@ extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var); extern const unsigned char *fb_firmware_edid(struct device *device); extern void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs); -extern void fb_edid_add_monspecs(unsigned char *edid, - struct fb_monspecs *specs); extern void fb_destroy_modedb(struct fb_videomode *modedb); extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb); extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter); @@ -817,7 +790,6 @@ struct dmt_videomode { extern const char *fb_mode_option; extern const struct fb_videomode vesa_modes[]; -extern const struct fb_videomode cea_modes[65]; extern const struct dmt_videomode dmt_modes[]; struct fb_modelist { diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h index f68a7db14165..ff5596dd30f8 100644 --- a/include/linux/fbcon.h +++ b/include/linux/fbcon.h @@ -4,9 +4,39 @@ #ifdef CONFIG_FRAMEBUFFER_CONSOLE void __init fb_console_init(void); void __exit fb_console_exit(void); +int fbcon_fb_registered(struct fb_info *info); +void fbcon_fb_unregistered(struct fb_info *info); +void fbcon_fb_unbind(struct fb_info *info); +void fbcon_suspended(struct fb_info *info); +void fbcon_resumed(struct fb_info *info); +int fbcon_mode_deleted(struct fb_info *info, + struct fb_videomode *mode); +void fbcon_new_modelist(struct fb_info *info); +void fbcon_get_requirement(struct fb_info *info, + struct fb_blit_caps *caps); +void fbcon_fb_blanked(struct fb_info *info, int blank); +void fbcon_update_vcs(struct fb_info *info, bool all); +void fbcon_remap_all(struct fb_info *info); +int fbcon_set_con2fb_map_ioctl(void __user *argp); +int fbcon_get_con2fb_map_ioctl(void __user *argp); #else static inline void fb_console_init(void) {} static inline void fb_console_exit(void) {} +static inline int fbcon_fb_registered(struct fb_info *info) { return 0; } +static inline void fbcon_fb_unregistered(struct fb_info *info) {} +static inline void fbcon_fb_unbind(struct fb_info *info) {} +static inline void fbcon_suspended(struct fb_info *info) {} +static inline void fbcon_resumed(struct fb_info *info) {} +static inline int fbcon_mode_deleted(struct fb_info *info, + struct fb_videomode *mode) { return 0; } +static inline void fbcon_new_modelist(struct fb_info *info) {} +static inline void fbcon_get_requirement(struct fb_info *info, 
+ struct fb_blit_caps *caps) {} +static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {} +static inline void fbcon_update_vcs(struct fb_info *info, bool all) {} +static inline void fbcon_remap_all(struct fb_info *info) {} +static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; } +static inline int fbcon_get_con2fb_map_ioctl(void __user *argp) { return 0; } #endif #endif /* _LINUX_FBCON_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 7148bab96943..0367a75f873b 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -24,6 +24,7 @@ #include <net/sch_generic.h> +#include <asm/byteorder.h> #include <uapi/linux/filter.h> #include <uapi/linux/bpf.h> @@ -160,6 +161,20 @@ struct ctl_table_header; .off = 0, \ .imm = IMM }) +/* Special form of mov32, used for doing explicit zero extension on dst. */ +#define BPF_ZEXT_REG(DST) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = DST, \ + .off = 0, \ + .imm = 1 }) + +static inline bool insn_is_zext(const struct bpf_insn *insn) +{ + return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1; +} + /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ #define BPF_LD_IMM64(DST, IMM) \ BPF_LD_IMM64_RAW(DST, 0, IMM) @@ -512,7 +527,8 @@ struct bpf_prog { blinded:1, /* Was blinded */ is_func:1, /* program is a bpf function */ kprobe_override:1, /* Do we override a kprobe? */ - has_callchain_buf:1; /* callchain buffer allocated? */ + has_callchain_buf:1, /* callchain buffer allocated? */ + enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ @@ -563,8 +579,9 @@ struct bpf_skb_data_end { }; struct bpf_redirect_info { - u32 ifindex; u32 flags; + u32 tgt_index; + void *tgt_value; struct bpf_map *map; struct bpf_map *map_to_flush; u32 kern_flags; @@ -731,6 +748,24 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) return size <= size_default && (size & (size - 1)) == 0; } +static inline u8 +bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default) +{ + u8 access_off = off & (size_default - 1); + +#ifdef __LITTLE_ENDIAN + return access_off; +#else + return size_default - (access_off + size); +#endif +} + +#define bpf_ctx_wide_access_ok(off, size, type, field) \ + (size == sizeof(__u64) && \ + off >= offsetof(type, field) && \ + off + sizeof(__u64) <= offsetofend(type, field) && \ + off % sizeof(__u64) == 0) + #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) static inline void bpf_prog_lock_ro(struct bpf_prog *fp) @@ -811,6 +846,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); +bool bpf_jit_needs_zext(void); bool bpf_helper_changes_pkt_data(void *func); static inline bool bpf_dump_raw_ok(void) @@ -1063,7 +1099,6 @@ static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym) #endif /* CONFIG_BPF_JIT */ -void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp); void bpf_prog_kallsyms_del_all(struct bpf_prog *fp); #define BPF_ANC BIT(15) @@ -1183,4 +1218,14 @@ struct bpf_sysctl_kern { u64 tmp_reg; }; +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + s32 retval; +}; + #endif /* 
__LINUX_FILTER_H__ */ diff --git a/include/linux/fips.h b/include/linux/fips.h index afeeece92302..c6961e932fef 100644 --- a/include/linux/fips.h +++ b/include/linux/fips.h @@ -4,8 +4,15 @@ #ifdef CONFIG_CRYPTO_FIPS extern int fips_enabled; +extern struct atomic_notifier_head fips_fail_notif_chain; + +void fips_fail_notify(void); + #else #define fips_enabled 0 + +static inline void fips_fail_notify(void) {} + #endif #endif diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h new file mode 100644 index 000000000000..7562099c9e46 --- /dev/null +++ b/include/linux/firmware/imx/dsp.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2019 NXP + * + * Header file for the DSP IPC implementation + */ + +#ifndef _IMX_DSP_IPC_H +#define _IMX_DSP_IPC_H + +#include <linux/device.h> +#include <linux/types.h> +#include <linux/mailbox_client.h> + +#define DSP_MU_CHAN_NUM 4 + +struct imx_dsp_chan { + struct imx_dsp_ipc *ipc; + struct mbox_client cl; + struct mbox_chan *ch; + char *name; + int idx; +}; + +struct imx_dsp_ops { + void (*handle_reply)(struct imx_dsp_ipc *ipc); + void (*handle_request)(struct imx_dsp_ipc *ipc); +}; + +struct imx_dsp_ipc { + /* Host <-> DSP communication uses 2 txdb and 2 rxdb channels */ + struct imx_dsp_chan chans[DSP_MU_CHAN_NUM]; + struct device *dev; + struct imx_dsp_ops *ops; + void *private_data; +}; + +static inline void imx_dsp_set_data(struct imx_dsp_ipc *ipc, void *data) +{ + if (!ipc) + return; + + ipc->private_data = data; +} + +static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc) +{ + if (!ipc) + return NULL; + + return ipc->private_data; +} + +#if IS_ENABLED(CONFIG_IMX_DSP) + +int imx_dsp_ring_doorbell(struct imx_dsp_ipc *dsp, unsigned int chan_idx); + +#else + +static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, + unsigned int chan_idx) +{ + return -ENOTSUPP; +} + +#endif +#endif /* _IMX_DSP_IPC_H */ diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index 01684d935580..013ae4819deb 100644 --- a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -210,7 +210,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK) -/* +/** * Request INTEL_SIP_SMC_REG_READ * * Read a protected register at EL3 @@ -229,7 +229,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_REG_READ \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ) -/* +/** * Request INTEL_SIP_SMC_REG_WRITE * * Write a protected register at EL3 @@ -248,7 +248,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_REG_WRITE \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE) -/* +/** * Request INTEL_SIP_SMC_FUNCID_REG_UPDATE * * Update one or more bits in a protected register at EL3 using a @@ -269,7 +269,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_REG_UPDATE \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_UPDATE) -/* +/** * Request INTEL_SIP_SMC_RSU_STATUS * * Request remote status update boot log, call is synchronous. 
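[Editorial aside, not part of the patch.] The new include/linux/firmware/imx/dsp.h header shown a little earlier declares the imx_dsp_ipc interface (ops callbacks, private data accessors, and imx_dsp_ring_doorbell()). A minimal usage sketch follows; everything prefixed my_* is a hypothetical consumer, and how a driver obtains the struct imx_dsp_ipc pointer is an assumption, since the header itself does not show it.

#include <linux/completion.h>
#include <linux/firmware/imx/dsp.h>

/* Hypothetical per-driver state; only the imx_dsp_* names come from the header. */
struct my_dsp_priv {
	struct imx_dsp_ipc *ipc;	/* assumed to be handed over by platform code */
	struct completion reply_done;
};

static void my_dsp_handle_reply(struct imx_dsp_ipc *ipc)
{
	struct my_dsp_priv *priv = imx_dsp_get_data(ipc);

	complete(&priv->reply_done);	/* DSP acknowledged our last doorbell */
}

static void my_dsp_handle_request(struct imx_dsp_ipc *ipc)
{
	/* DSP-initiated traffic arrives here (one of the rxdb channels). */
}

static struct imx_dsp_ops my_dsp_ops = {
	.handle_reply	= my_dsp_handle_reply,
	.handle_request	= my_dsp_handle_request,
};

static int my_dsp_start(struct my_dsp_priv *priv)
{
	init_completion(&priv->reply_done);
	priv->ipc->ops = &my_dsp_ops;
	imx_dsp_set_data(priv->ipc, priv);

	/* Kick txdb channel 0; the stub returns -ENOTSUPP without CONFIG_IMX_DSP. */
	return imx_dsp_ring_doorbell(priv->ipc, 0);
}

The sketch only exercises what the header guarantees: callbacks are reached through ipc->ops, driver context travels via imx_dsp_set_data()/imx_dsp_get_data(), and the doorbell helper degrades to -ENOTSUPP when the IPC driver is not built in.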
@@ -292,7 +292,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_RSU_STATUS \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_STATUS) -/* +/** * Request INTEL_SIP_SMC_RSU_UPDATE * * Request to set the offset of the bitstream to boot after reboot, call @@ -310,7 +310,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_RSU_UPDATE \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE) -/* +/** * Request INTEL_SIP_SMC_ECC_DBE * * Sync call used by service driver at EL1 to alert EL3 that a Double @@ -329,3 +329,42 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE) #endif + +/** + * Request INTEL_SIP_SMC_RSU_NOTIFY + * + * Sync call used by service driver at EL1 to report hard processor + * system execution stage to firmware + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_NOTIFY + * a1 32bit value representing hard processor system execution stage + * a2-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + */ +#define INTEL_SIP_SMC_FUNCID_RSU_NOTIFY 14 +#define INTEL_SIP_SMC_RSU_NOTIFY \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_NOTIFY) + +/** + * Request INTEL_SIP_SMC_RSU_RETRY_COUNTER + * + * Sync call used by service driver at EL1 to query RSU retry counter + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_RETRY_COUNTER + * a1-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 the retry counter + * + * Or + * + * a0 INTEL_SIP_SMC_RSU_ERROR + */ +#define INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER 15 +#define INTEL_SIP_SMC_RSU_RETRY_COUNTER \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER) diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index e521f172a47a..b6c4302a39e0 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -95,6 +95,13 @@ struct stratix10_svc_chan; * * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot, * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR + * + * @COMMAND_RSU_NOTIFY: report the status of hard processor system + * software to firmware, return status is SVC_STATUS_RSU_OK or + * SVC_STATUS_RSU_ERROR + * + * @COMMAND_RSU_RETRY: query firmware for the current image's retry counter, + * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR */ enum stratix10_svc_command_code { COMMAND_NOOP = 0, @@ -103,7 +110,9 @@ enum stratix10_svc_command_code { COMMAND_RECONFIG_DATA_CLAIM, COMMAND_RECONFIG_STATUS, COMMAND_RSU_STATUS, - COMMAND_RSU_UPDATE + COMMAND_RSU_UPDATE, + COMMAND_RSU_NOTIFY, + COMMAND_RSU_RETRY, }; /** diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 1262ea6a1f4b..778abbbc7d94 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -46,7 +46,6 @@ #define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U #define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U #define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U -#define ZYNQMP_PM_CAPABILITY_POWER 0x8U /* * Firmware FPGA Manager flags diff --git a/include/linux/flat.h b/include/linux/flat.h index 569b67d64d5c..83977c0ce3de 100644 --- a/include/linux/flat.h +++ b/include/linux/flat.h @@ -10,8 +10,41 @@ #ifndef _LINUX_FLAT_H #define _LINUX_FLAT_H -#include <uapi/linux/flat.h> -#include <asm/flat.h> +#define 
FLAT_VERSION 0x00000004L + +/* + * To make everything easier to port and manage cross platform + * development, all fields are in network byte order. + */ + +struct flat_hdr { + char magic[4]; + __be32 rev; /* version (as above) */ + __be32 entry; /* Offset of first executable instruction + with text segment from beginning of file */ + __be32 data_start; /* Offset of data segment from beginning of + file */ + __be32 data_end; /* Offset of end of data segment from beginning + of file */ + __be32 bss_end; /* Offset of end of bss segment from beginning + of file */ + + /* (It is assumed that data_end through bss_end forms the bss segment.) */ + + __be32 stack_size; /* Size of stack, in bytes */ + __be32 reloc_start; /* Offset of relocation records from beginning of + file */ + __be32 reloc_count; /* Number of relocation records */ + __be32 flags; + __be32 build_date; /* When the program/library was built */ + __u32 filler[5]; /* Reservered, set to zero */ +}; + +#define FLAT_FLAG_RAM 0x0001 /* load program entirely into RAM */ +#define FLAT_FLAG_GOTPIC 0x0002 /* program is PIC with GOT */ +#define FLAT_FLAG_GZIP 0x0004 /* all but the header is compressed */ +#define FLAT_FLAG_GZDATA 0x0008 /* only data/relocs are compressed (for XIP) */ +#define FLAT_FLAG_KTRACE 0x0010 /* output useful kernel trace for debugging */ /* * While it would be nice to keep this header clean, users of older @@ -22,28 +55,21 @@ * with the format above, except to fix bugs with old format support. */ -#include <asm/byteorder.h> - #define OLD_FLAT_VERSION 0x00000002L #define OLD_FLAT_RELOC_TYPE_TEXT 0 #define OLD_FLAT_RELOC_TYPE_DATA 1 #define OLD_FLAT_RELOC_TYPE_BSS 2 typedef union { - unsigned long value; + u32 value; struct { -# if defined(mc68000) && !defined(CONFIG_COLDFIRE) - signed long offset : 30; - unsigned long type : 2; -# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ +#if defined(__LITTLE_ENDIAN_BITFIELD) || \ + (defined(mc68000) && !defined(CONFIG_COLDFIRE)) + s32 offset : 30; + u32 type : 2; # elif defined(__BIG_ENDIAN_BITFIELD) - unsigned long type : 2; - signed long offset : 30; -# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ -# elif defined(__LITTLE_ENDIAN_BITFIELD) - signed long offset : 30; - unsigned long type : 2; -# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */ + u32 type : 2; + s32 offset : 30; # else # error "Unknown bitfield order for flat files." # endif diff --git a/include/linux/fmc-sdb.h b/include/linux/fmc-sdb.h deleted file mode 100644 index bec899f0867c..000000000000 --- a/include/linux/fmc-sdb.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This file is separate from sdb.h, because I want that one to remain - * unchanged (as far as possible) from the official sdb distribution - * - * This file and associated functionality are a playground for me to - * understand stuff which will later be implemented in more generic places. 
- */ -#include <linux/sdb.h> - -/* This is the union of all currently defined types */ -union sdb_record { - struct sdb_interconnect ic; - struct sdb_device dev; - struct sdb_bridge bridge; - struct sdb_integration integr; - struct sdb_empty empty; - struct sdb_synthesis synthesis; - struct sdb_repo_url repo_url; -}; - -struct fmc_device; - -/* Every sdb table is turned into this structure */ -struct sdb_array { - int len; - int level; - unsigned long baseaddr; - struct fmc_device *fmc; /* the device that hosts it */ - struct sdb_array *parent; /* NULL at root */ - union sdb_record *record; /* copies of the struct */ - struct sdb_array **subtree; /* only valid for bridge items */ -}; - -extern int fmc_scan_sdb_tree(struct fmc_device *fmc, unsigned long address); -extern void fmc_show_sdb_tree(const struct fmc_device *fmc); -extern signed long fmc_find_sdb_device(struct sdb_array *tree, uint64_t vendor, - uint32_t device, unsigned long *sz); -extern int fmc_free_sdb_tree(struct fmc_device *fmc); diff --git a/include/linux/fmc.h b/include/linux/fmc.h deleted file mode 100644 index b355f3806f3f..000000000000 --- a/include/linux/fmc.h +++ /dev/null @@ -1,269 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2012 CERN (www.cern.ch) - * Author: Alessandro Rubini <rubini@gnudd.com> - * - * This work is part of the White Rabbit project, a research effort led - * by CERN, the European Institute for Nuclear Research. - */ -#ifndef __LINUX_FMC_H__ -#define __LINUX_FMC_H__ -#include <linux/types.h> -#include <linux/moduleparam.h> -#include <linux/device.h> -#include <linux/list.h> -#include <linux/interrupt.h> -#include <linux/io.h> - -struct fmc_device; -struct fmc_driver; - -/* - * This bus abstraction is developed separately from drivers, so we need - * to check the version of the data structures we receive. - */ - -#define FMC_MAJOR 3 -#define FMC_MINOR 0 -#define FMC_VERSION ((FMC_MAJOR << 16) | FMC_MINOR) -#define __FMC_MAJOR(x) ((x) >> 16) -#define __FMC_MINOR(x) ((x) & 0xffff) - -/* - * The device identification, as defined by the IPMI FRU (Field Replaceable - * Unit) includes four different strings to describe the device. Here we - * only match the "Board Manufacturer" and the "Board Product Name", - * ignoring the "Board Serial Number" and "Board Part Number". All 4 are - * expected to be strings, so they are treated as zero-terminated C strings. - * Unspecified string (NULL) means "any", so if both are unspecified this - * is a catch-all driver. So null entries are allowed and we use array - * and length. This is unlike pci and usb that use null-terminated arrays - */ -struct fmc_fru_id { - char *manufacturer; - char *product_name; -}; - -/* - * If the FPGA is already programmed (think Etherbone or the second - * SVEC slot), we can match on SDB devices in the memory image. This - * match uses an array of devices that must all be present, and the - * match is based on vendor and device only. Further checks are expected - * to happen in the probe function. Zero means "any" and catch-all is allowed. 
- */ -struct fmc_sdb_one_id { - uint64_t vendor; - uint32_t device; -}; -struct fmc_sdb_id { - struct fmc_sdb_one_id *cores; - int cores_nr; -}; - -struct fmc_device_id { - struct fmc_fru_id *fru_id; - int fru_id_nr; - struct fmc_sdb_id *sdb_id; - int sdb_id_nr; -}; - -/* This sizes the module_param_array used by generic module parameters */ -#define FMC_MAX_CARDS 32 - -/* The driver is a pretty simple thing */ -struct fmc_driver { - unsigned long version; - struct device_driver driver; - int (*probe)(struct fmc_device *); - int (*remove)(struct fmc_device *); - const struct fmc_device_id id_table; - /* What follows is for generic module parameters */ - int busid_n; - int busid_val[FMC_MAX_CARDS]; - int gw_n; - char *gw_val[FMC_MAX_CARDS]; -}; -#define to_fmc_driver(x) container_of((x), struct fmc_driver, driver) - -/* These are the generic parameters, that drivers may instantiate */ -#define FMC_PARAM_BUSID(_d) \ - module_param_array_named(busid, _d.busid_val, int, &_d.busid_n, 0444) -#define FMC_PARAM_GATEWARE(_d) \ - module_param_array_named(gateware, _d.gw_val, charp, &_d.gw_n, 0444) - -/* - * Drivers may need to configure gpio pins in the carrier. To read input - * (a very uncommon operation, and definitely not in the hot paths), just - * configure one gpio only and get 0 or 1 as retval of the config method - */ -struct fmc_gpio { - char *carrier_name; /* name or NULL for virtual pins */ - int gpio; - int _gpio; /* internal use by the carrier */ - int mode; /* GPIOF_DIR_OUT etc, from <linux/gpio.h> */ - int irqmode; /* IRQF_TRIGGER_LOW and so on */ -}; - -/* The numbering of gpio pins allows access to raw pins or virtual roles */ -#define FMC_GPIO_RAW(x) (x) /* 4096 of them */ -#define __FMC_GPIO_IS_RAW(x) ((x) < 0x1000) -#define FMC_GPIO_IRQ(x) ((x) + 0x1000) /* 256 of them */ -#define FMC_GPIO_LED(x) ((x) + 0x1100) /* 256 of them */ -#define FMC_GPIO_KEY(x) ((x) + 0x1200) /* 256 of them */ -#define FMC_GPIO_TP(x) ((x) + 0x1300) /* 256 of them */ -#define FMC_GPIO_USER(x) ((x) + 0x1400) /* 256 of them */ -/* We may add SCL and SDA, or other roles if the need arises */ - -/* GPIOF_DIR_IN etc are missing before 3.0. copy from <linux/gpio.h> */ -#ifndef GPIOF_DIR_IN -# define GPIOF_DIR_OUT (0 << 0) -# define GPIOF_DIR_IN (1 << 0) -# define GPIOF_INIT_LOW (0 << 1) -# define GPIOF_INIT_HIGH (1 << 1) -#endif - -/* - * The operations are offered by each carrier and should make driver - * design completely independent of the carrier. Named GPIO pins may be - * the exception. 
- */ -struct fmc_operations { - uint32_t (*read32)(struct fmc_device *fmc, int offset); - void (*write32)(struct fmc_device *fmc, uint32_t value, int offset); - int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv); - int (*reprogram_raw)(struct fmc_device *f, struct fmc_driver *d, - void *gw, unsigned long len); - int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw); - int (*irq_request)(struct fmc_device *fmc, irq_handler_t h, - char *name, int flags); - void (*irq_ack)(struct fmc_device *fmc); - int (*irq_free)(struct fmc_device *fmc); - int (*gpio_config)(struct fmc_device *fmc, struct fmc_gpio *gpio, - int ngpio); - int (*read_ee)(struct fmc_device *fmc, int pos, void *d, int l); - int (*write_ee)(struct fmc_device *fmc, int pos, const void *d, int l); -}; - -/* Prefer this helper rather than calling of fmc->reprogram directly */ -int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d, - void *gw, unsigned long len, int sdb_entry); -extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw, - int sdb_entry); - -/* - * The device reports all information needed to access hw. - * - * If we have eeprom_len and not contents, the core reads it. - * Then, parsing of identifiers is done by the core which fills fmc_fru_id.. - * Similarly a device that must be matched based on SDB cores must - * fill the entry point and the core will scan the bus (FIXME: sdb match) - */ -struct fmc_device { - unsigned long version; - unsigned long flags; - struct module *owner; /* char device must pin it */ - struct fmc_fru_id id; /* for EEPROM-based match */ - struct fmc_operations *op; /* carrier-provided */ - int irq; /* according to host bus. 0 == none */ - int eeprom_len; /* Usually 8kB, may be less */ - int eeprom_addr; /* 0x50, 0x52 etc */ - uint8_t *eeprom; /* Full contents or leading part */ - char *carrier_name; /* "SPEC" or similar, for special use */ - void *carrier_data; /* "struct spec *" or equivalent */ - __iomem void *fpga_base; /* May be NULL (Etherbone) */ - __iomem void *slot_base; /* Set by the driver */ - struct fmc_device **devarray; /* Allocated by the bus */ - int slot_id; /* Index in the slot array */ - int nr_slots; /* Number of slots in this carrier */ - unsigned long memlen; /* Used for the char device */ - struct device dev; /* For Linux use */ - struct device *hwdev; /* The underlying hardware device */ - unsigned long sdbfs_entry; - struct sdb_array *sdb; - uint32_t device_id; /* Filled by the device */ - char *mezzanine_name; /* Defaults to ``fmc'' */ - void *mezzanine_data; - - struct dentry *dbg_dir; - struct dentry *dbg_sdb_dump; -}; -#define to_fmc_device(x) container_of((x), struct fmc_device, dev) - -#define FMC_DEVICE_HAS_GOLDEN 1 -#define FMC_DEVICE_HAS_CUSTOM 2 -#define FMC_DEVICE_NO_MEZZANINE 4 -#define FMC_DEVICE_MATCH_SDB 8 /* fmc-core must scan sdb in fpga */ - -/* - * If fpga_base can be used, the carrier offers no readl/writel methods, and - * this expands to a single, fast, I/O access. 
- */ -static inline uint32_t fmc_readl(struct fmc_device *fmc, int offset) -{ - if (unlikely(fmc->op->read32)) - return fmc->op->read32(fmc, offset); - return readl(fmc->fpga_base + offset); -} -static inline void fmc_writel(struct fmc_device *fmc, uint32_t val, int off) -{ - if (unlikely(fmc->op->write32)) - fmc->op->write32(fmc, val, off); - else - writel(val, fmc->fpga_base + off); -} - -/* pci-like naming */ -static inline void *fmc_get_drvdata(const struct fmc_device *fmc) -{ - return dev_get_drvdata(&fmc->dev); -} - -static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data) -{ - dev_set_drvdata(&fmc->dev, data); -} - -struct fmc_gateware { - void *bitstream; - unsigned long len; -}; - -/* The 5 access points */ -extern int fmc_driver_register(struct fmc_driver *drv); -extern void fmc_driver_unregister(struct fmc_driver *drv); -extern int fmc_device_register(struct fmc_device *tdev); -extern int fmc_device_register_gw(struct fmc_device *tdev, - struct fmc_gateware *gw); -extern void fmc_device_unregister(struct fmc_device *tdev); - -/* Three more for device sets, all driven by the same FPGA */ -extern int fmc_device_register_n(struct fmc_device **devs, int n); -extern int fmc_device_register_n_gw(struct fmc_device **devs, int n, - struct fmc_gateware *gw); -extern void fmc_device_unregister_n(struct fmc_device **devs, int n); - -/* Internal cross-calls between files; not exported to other modules */ -extern int fmc_match(struct device *dev, struct device_driver *drv); -extern int fmc_fill_id_info(struct fmc_device *fmc); -extern void fmc_free_id_info(struct fmc_device *fmc); -extern void fmc_dump_eeprom(const struct fmc_device *fmc); - -/* helpers for FMC operations */ -extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h, - char *name, int flags); -extern void fmc_irq_free(struct fmc_device *fmc); -extern void fmc_irq_ack(struct fmc_device *fmc); -extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv); -extern int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio, - int ngpio); -extern int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l); -extern int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l); - -/* helpers for FMC operations */ -extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h, - char *name, int flags); -extern void fmc_irq_free(struct fmc_device *fmc); -extern void fmc_irq_ack(struct fmc_device *fmc); -extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv); - -#endif /* __LINUX_FMC_H__ */ diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h new file mode 100644 index 000000000000..7fc95d5c95bb --- /dev/null +++ b/include/linux/fpga/adi-axi-common.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Analog Devices AXI common registers & definitions + * + * Copyright 2019 Analog Devices Inc. 
+ * + * https://wiki.analog.com/resources/fpga/docs/axi_ip + * https://wiki.analog.com/resources/fpga/docs/hdl/regmap + */ + +#ifndef ADI_AXI_COMMON_H_ +#define ADI_AXI_COMMON_H_ + +#define ADI_AXI_REG_VERSION 0x0000 + +#define ADI_AXI_PCORE_VER(major, minor, patch) \ + (((major) << 16) | ((minor) << 8) | (patch)) + +#endif /* ADI_AXI_COMMON_H_ */ diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h index 7d4664730d60..0b08ac20ab16 100644 --- a/include/linux/fpga/altera-pr-ip-core.h +++ b/include/linux/fpga/altera-pr-ip-core.h @@ -13,6 +13,6 @@ #include <linux/io.h> int alt_pr_register(struct device *dev, void __iomem *reg_base); -int alt_pr_unregister(struct device *dev); +void alt_pr_unregister(struct device *dev); #endif /* _ALT_PR_IP_CORE_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index f7fdfe93e25d..e0d909d35763 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -64,6 +64,8 @@ struct workqueue_struct; struct iov_iter; struct fscrypt_info; struct fscrypt_operations; +struct fsverity_info; +struct fsverity_operations; struct fs_context; struct fs_parameter_description; @@ -427,6 +429,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping, * @i_pages: Cached pages. * @gfp_mask: Memory allocation flags to use for allocating pages. * @i_mmap_writable: Number of VM_SHARED mappings. + * @nr_thps: Number of THPs in the pagecache (non-shmem only). * @i_mmap: Tree of private and shared mappings. * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. * @nrpages: Number of page entries, protected by the i_pages lock. @@ -444,6 +447,10 @@ struct address_space { struct xarray i_pages; gfp_t gfp_mask; atomic_t i_mmap_writable; +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + /* number of thp, only for non-shmem files */ + atomic_t nr_thps; +#endif struct rb_root_cached i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; @@ -694,7 +701,7 @@ struct inode { atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; -#ifdef CONFIG_IMA +#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) atomic_t i_readcount; /* struct files open RO */ #endif union { @@ -723,9 +730,15 @@ struct inode { struct fscrypt_info *i_crypt_info; #endif +#ifdef CONFIG_FS_VERITY + struct fsverity_info *i_verity_info; +#endif + void *i_private; /* fs or device private pointer */ } __randomize_layout; +struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode); + static inline unsigned int i_blocksize(const struct inode *node) { return (1 << node->i_blkbits); @@ -1019,8 +1032,6 @@ struct file_lock_operations { }; struct lock_manager_operations { - int (*lm_compare_owner)(struct file_lock *, struct file_lock *); - unsigned long (*lm_owner_key)(struct file_lock *); fl_owner_t (*lm_get_owner)(fl_owner_t); void (*lm_put_owner)(fl_owner_t); void (*lm_notify)(struct file_lock *); /* unblock callback */ @@ -1157,6 +1168,11 @@ extern void lease_get_mtime(struct inode *, struct timespec64 *time); extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); extern int vfs_setlease(struct file *, long, struct file_lock **, void **); extern int lease_modify(struct file_lock *, int, struct list_head *); + +struct notifier_block; +extern int lease_register_notifier(struct notifier_block *); +extern void lease_unregister_notifier(struct notifier_block *); + struct files_struct; extern void show_fd_locks(struct seq_file *f, struct file *filp, struct files_struct *files); @@ -1429,6 +1445,10 @@ struct super_block { 
const struct xattr_handler **s_xattr; #ifdef CONFIG_FS_ENCRYPTION const struct fscrypt_operations *s_cop; + struct key *s_master_keys; /* master crypto keys in use */ +#endif +#ifdef CONFIG_FS_VERITY + const struct fsverity_operations *s_vop; #endif struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ struct list_head s_mounts; /* list of mounts; _not_ for fs use */ @@ -1450,6 +1470,9 @@ struct super_block { /* Granularity of c/m/atime in ns (cannot be worse than a second) */ u32 s_time_gran; + /* Time limits for c/m/atime in seconds */ + time64_t s_time_min; + time64_t s_time_max; #ifdef CONFIG_FSNOTIFY __u32 s_fsnotify_mask; struct fsnotify_mark_connector __rcu *s_fsnotify_marks; @@ -1769,7 +1792,7 @@ struct block_device_operations; /* * These flags control the behavior of the remap_file_range function pointer. * If it is called with len == 0 that means "remap to end of source file". - * See Documentation/filesystems/vfs.txt for more details about this call. + * See Documentation/filesystems/vfs.rst for more details about this call. * * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate) * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request @@ -1889,6 +1912,9 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *, unsigned long, loff_t *, rwf_t); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); +extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + size_t len, unsigned int flags); extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *count, @@ -1964,6 +1990,7 @@ struct super_operations { #endif #define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */ #define S_CASEFOLD 32768 /* Casefolded file */ +#define S_VERITY 65536 /* Verity file (using fs/verity/) */ /* * Note that nosuid etc flags are inode-specific: setting some file-system @@ -2005,6 +2032,7 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags #define IS_DAX(inode) ((inode)->i_flags & S_DAX) #define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED) #define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD) +#define IS_VERITY(inode) ((inode)->i_flags & S_VERITY) #define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ (inode)->i_rdev == WHITEOUT_DEV) @@ -2174,6 +2202,8 @@ static inline void file_accessed(struct file *file) touch_atime(&file->f_path); } +extern int file_modified(struct file *file); + int sync_inode(struct inode *inode, struct writeback_control *wbc); int sync_inode_metadata(struct inode *inode, int wait); @@ -2184,6 +2214,7 @@ struct file_system_type { #define FS_BINARY_MOUNTDATA 2 #define FS_HAS_SUBTYPE 4 #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ +#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. 
*/ int (*init_fs_context)(struct fs_context *); const struct fs_parameter_description *parameters; @@ -2206,9 +2237,6 @@ struct file_system_type { #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) -extern struct dentry *mount_ns(struct file_system_type *fs_type, - int flags, void *data, void *ns, struct user_namespace *user_ns, - int (*fill_super)(struct super_block *, void *, int)); #ifdef CONFIG_BLOCK extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, @@ -2248,28 +2276,10 @@ void free_anon_bdev(dev_t); struct super_block *sget_fc(struct fs_context *fc, int (*test)(struct super_block *, struct fs_context *), int (*set)(struct super_block *, struct fs_context *)); -struct super_block *sget_userns(struct file_system_type *type, - int (*test)(struct super_block *,void *), - int (*set)(struct super_block *,void *), - int flags, struct user_namespace *user_ns, - void *data); struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), int (*set)(struct super_block *,void *), int flags, void *data); -extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *, - const struct super_operations *ops, - const struct xattr_handler **xattr, - const struct dentry_operations *dops, - unsigned long); - -static inline struct dentry * -mount_pseudo(struct file_system_type *fs_type, char *name, - const struct super_operations *ops, - const struct dentry_operations *dops, unsigned long magic) -{ - return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic); -} /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ #define fops_get(fops) \ @@ -2615,6 +2625,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, void *holder); extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); +extern struct block_device *bd_start_claiming(struct block_device *bdev, + void *holder); +extern void bd_finish_claiming(struct block_device *bdev, + struct block_device *whole, void *holder); +extern void bd_abort_claiming(struct block_device *bdev, + struct block_device *whole, void *holder); extern void blkdev_put(struct block_device *bdev, fmode_t mode); extern int __blkdev_reread_part(struct block_device *bdev); extern int blkdev_reread_part(struct block_device *bdev); @@ -2712,6 +2728,8 @@ extern int filemap_flush(struct address_space *); extern int filemap_fdatawait_keep_errors(struct address_space *mapping); extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); +extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping, + loff_t start_byte, loff_t end_byte); static inline int filemap_fdatawait(struct address_space *mapping) { @@ -2790,6 +2808,33 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) return errseq_sample(&mapping->wb_err); } +static inline int filemap_nr_thps(struct address_space *mapping) +{ +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + return atomic_read(&mapping->nr_thps); +#else + return 0; +#endif +} + +static inline void filemap_nr_thps_inc(struct address_space *mapping) +{ +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + atomic_inc(&mapping->nr_thps); +#else + WARN_ON_ONCE(1); +#endif +} + +static inline void filemap_nr_thps_dec(struct address_space *mapping) +{ +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + atomic_dec(&mapping->nr_thps); +#else + WARN_ON_ONCE(1); +#endif +} + extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int 
datasync); extern int vfs_fsync(struct file *file, int datasync); @@ -2890,7 +2935,7 @@ static inline bool inode_is_open_for_write(const struct inode *inode) return atomic_read(&inode->i_writecount) > 0; } -#ifdef CONFIG_IMA +#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) static inline void i_readcount_dec(struct inode *inode) { BUG_ON(!atomic_read(&inode->i_readcount)); @@ -3046,6 +3091,10 @@ extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); extern int generic_remap_checks(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *count, unsigned int remap_flags); +extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); +extern int generic_copy_file_checks(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + size_t *count, unsigned int flags); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); @@ -3536,6 +3585,8 @@ extern void inode_nohighmem(struct inode *inode); /* mm/fadvise.c */ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice); +extern int generic_fadvise(struct file *file, loff_t offset, loff_t len, + int advice); #if defined(CONFIG_IO_URING) extern struct sock *io_uring_get_socket(struct file *file); @@ -3546,4 +3597,27 @@ static inline struct sock *io_uring_get_socket(struct file *file) } #endif +int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags, + unsigned int flags); + +int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa, + struct fsxattr *fa); + +static inline void simple_fill_fsxattr(struct fsxattr *fa, __u32 xflags) +{ + memset(fa, 0, sizeof(*fa)); + fa->fsx_xflags = xflags; +} + +/* + * Flush file data before changing attributes. Caller must hold any locks + * required to prevent further writes to this file until we're done setting + * flags. + */ +static inline int inode_drain_writes(struct inode *inode) +{ + inode_dio_wait(inode); + return filemap_write_and_wait(inode->i_mapping); +} + #endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index d476ff0c10df..e5c14e2c53d3 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -81,24 +81,25 @@ struct fs_parameter { * Superblock creation fills in ->root whereas reconfiguration begins with this * already set. * - * See Documentation/filesystems/mounting.txt + * See Documentation/filesystems/mount_api.txt */ struct fs_context { const struct fs_context_operations *ops; struct mutex uapi_mutex; /* Userspace access mutex */ struct file_system_type *fs_type; void *fs_private; /* The filesystem's context */ + void *sget_key; struct dentry *root; /* The root and superblock */ struct user_namespace *user_ns; /* The user namespace for this mount */ struct net *net_ns; /* The network namespace for this mount */ const struct cred *cred; /* The mounter's credentials */ struct fc_log *log; /* Logging buffer */ const char *source; /* The source name (eg. 
dev path) */ - const char *subtype; /* The subtype to set on the superblock */ void *security; /* Linux S&M options */ void *s_fs_info; /* Proposed s_fs_info */ unsigned int sb_flags; /* Proposed superblock flags (SB_*) */ unsigned int sb_flags_mask; /* Superblock flags that were changed */ + unsigned int s_iflags; /* OR'd with sb->s_iflags */ unsigned int lsm_flags; /* Information flags from the fs to the LSM */ enum fs_context_purpose purpose:8; enum fs_context_phase phase:8; /* The phase the context is in */ @@ -135,10 +136,11 @@ extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); /* - * sget() wrapper to be called from the ->get_tree() op. + * sget() wrappers to be called from the ->get_tree() op. */ enum vfs_get_super_keying { vfs_get_single_super, /* Only one such superblock may exist */ + vfs_get_single_reconf_super, /* As above, but reconfigure if it exists */ vfs_get_keyed_super, /* Superblocks with different s_fs_info keys may exist */ vfs_get_independent_super, /* Multiple independent superblocks may exist */ }; @@ -147,6 +149,24 @@ extern int vfs_get_super(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); +extern int get_tree_nodev(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); +extern int get_tree_single(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); +extern int get_tree_single_reconf(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); +extern int get_tree_keyed(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc), + void *key); + +extern int get_tree_bdev(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); + extern const struct file_operations fscontext_fops; /* diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h index 7cab74d66f85..bdd09fd2520c 100644 --- a/include/linux/fs_pin.h +++ b/include/linux/fs_pin.h @@ -20,6 +20,5 @@ static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *)) } void pin_remove(struct fs_pin *); -void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *); void pin_insert(struct fs_pin *, struct vfsmount *); void pin_kill(struct fs_pin *); diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index f7680ef1abd2..f622f7460ed8 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -16,6 +16,7 @@ #include <linux/fs.h> #include <linux/mm.h> #include <linux/slab.h> +#include <uapi/linux/fscrypt.h> #define FS_CRYPTO_BLOCK_SIZE 16 @@ -42,7 +43,7 @@ struct fscrypt_name { #define fname_len(p) ((p)->disk_name.len) /* Maximum value for the third parameter of fscrypt_operations.set_context(). 
*/ -#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 +#define FSCRYPT_SET_CONTEXT_MAX_SIZE 40 #ifdef CONFIG_FS_ENCRYPTION /* @@ -63,16 +64,13 @@ struct fscrypt_operations { unsigned int max_namelen; }; +/* Decryption work */ struct fscrypt_ctx { union { struct { - struct page *bounce_page; /* Ciphertext page */ - struct page *control_page; /* Original page */ - } w; - struct { struct bio *bio; struct work_struct work; - } r; + }; struct list_head free_list; /* Free list */ }; u8 flags; /* Flags */ @@ -106,29 +104,54 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry) extern void fscrypt_enqueue_decrypt_work(struct work_struct *); extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t); extern void fscrypt_release_ctx(struct fscrypt_ctx *); -extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, - unsigned int, unsigned int, - u64, gfp_t); -extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, - unsigned int, u64); -static inline struct page *fscrypt_control_page(struct page *page) +extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs, + gfp_t gfp_flags); +extern int fscrypt_encrypt_block_inplace(const struct inode *inode, + struct page *page, unsigned int len, + unsigned int offs, u64 lblk_num, + gfp_t gfp_flags); + +extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, + unsigned int offs); +extern int fscrypt_decrypt_block_inplace(const struct inode *inode, + struct page *page, unsigned int len, + unsigned int offs, u64 lblk_num); + +static inline bool fscrypt_is_bounce_page(struct page *page) +{ + return page->mapping == NULL; +} + +static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) { - return ((struct fscrypt_ctx *)page_private(page))->w.control_page; + return (struct page *)page_private(bounce_page); } -extern void fscrypt_restore_control_page(struct page *); +extern void fscrypt_free_bounce_page(struct page *bounce_page); /* policy.c */ extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); extern int fscrypt_ioctl_get_policy(struct file *, void __user *); +extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); -/* keyinfo.c */ +/* keyring.c */ +extern void fscrypt_sb_free(struct super_block *sb); +extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); +extern int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); +extern int fscrypt_ioctl_remove_key_all_users(struct file *filp, + void __user *arg); +extern int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); + +/* keysetup.c */ extern int fscrypt_get_encryption_info(struct inode *); extern void fscrypt_put_encryption_info(struct inode *); extern void fscrypt_free_inode(struct inode *); +extern int fscrypt_drop_inode(struct inode *inode); /* fname.c */ extern int fscrypt_setup_filename(struct inode *, const struct qstr *, @@ -223,7 +246,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname, extern void fscrypt_decrypt_bio(struct bio *); extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio); -extern void fscrypt_pullback_bio_page(struct page **, bool); extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, unsigned int); @@ -283,32 +305,51 @@ static inline void 
fscrypt_release_ctx(struct fscrypt_ctx *ctx) return; } -static inline struct page *fscrypt_encrypt_page(const struct inode *inode, +static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs, + gfp_t gfp_flags) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, unsigned int len, - unsigned int offs, - u64 lblk_num, gfp_t gfp_flags) + unsigned int offs, u64 lblk_num, + gfp_t gfp_flags) { - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; +} + +static inline int fscrypt_decrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs) +{ + return -EOPNOTSUPP; } -static inline int fscrypt_decrypt_page(const struct inode *inode, - struct page *page, - unsigned int len, unsigned int offs, - u64 lblk_num) +static inline int fscrypt_decrypt_block_inplace(const struct inode *inode, + struct page *page, + unsigned int len, + unsigned int offs, u64 lblk_num) { return -EOPNOTSUPP; } -static inline struct page *fscrypt_control_page(struct page *page) +static inline bool fscrypt_is_bounce_page(struct page *page) +{ + return false; +} + +static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) { WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); } -static inline void fscrypt_restore_control_page(struct page *page) +static inline void fscrypt_free_bounce_page(struct page *bounce_page) { - return; } /* policy.c */ @@ -323,6 +364,12 @@ static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) return -EOPNOTSUPP; } +static inline int fscrypt_ioctl_get_policy_ex(struct file *filp, + void __user *arg) +{ + return -EOPNOTSUPP; +} + static inline int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { @@ -336,7 +383,34 @@ static inline int fscrypt_inherit_context(struct inode *parent, return -EOPNOTSUPP; } -/* keyinfo.c */ +/* keyring.c */ +static inline void fscrypt_sb_free(struct super_block *sb) +{ +} + +static inline int fscrypt_ioctl_add_key(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_ioctl_remove_key_all_users(struct file *filp, + void __user *arg) +{ + return -EOPNOTSUPP; +} + +static inline int fscrypt_ioctl_get_key_status(struct file *filp, + void __user *arg) +{ + return -EOPNOTSUPP; +} + +/* keysetup.c */ static inline int fscrypt_get_encryption_info(struct inode *inode) { return -EOPNOTSUPP; @@ -351,6 +425,11 @@ static inline void fscrypt_free_inode(struct inode *inode) { } +static inline int fscrypt_drop_inode(struct inode *inode) +{ + return 0; +} + /* fname.c */ static inline int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, @@ -410,11 +489,6 @@ static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, { } -static inline void fscrypt_pullback_bio_page(struct page **page, bool restore) -{ - return; -} - static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, sector_t pblk, unsigned int len) { @@ -692,4 +766,15 @@ static inline int fscrypt_encrypt_symlink(struct inode *inode, return 0; } +/* If *pagep is a bounce page, free it and set *pagep to the pagecache page */ +static inline void fscrypt_finalize_bounce_page(struct page **pagep) +{ + struct page *page = *pagep; + + if (fscrypt_is_bounce_page(page)) { + *pagep = fscrypt_pagecache_page(page); + 
fscrypt_free_bounce_page(page); + } +} + #endif /* _LINUX_FSCRYPT_H */ diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index cb2b46f57af3..5d231ce8709b 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -98,6 +98,7 @@ struct fsl_usb2_platform_data { unsigned has_fsl_erratum_14:1; unsigned has_fsl_erratum_a005275:1; unsigned has_fsl_erratum_a005697:1; + unsigned has_fsl_erratum_a006918:1; unsigned check_phy_clk_valid:1; /* register save area for suspend/resume */ diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 94972e8eb6d1..a2d5d175d3c1 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -189,6 +189,19 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct } /* + * fsnotify_unlink - 'name' was unlinked + * + * Caller must make sure that dentry->d_name is stable. + */ +static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) +{ + /* Expected to be called before d_delete() */ + WARN_ON_ONCE(d_is_negative(dentry)); + + fsnotify_dirent(dir, dentry, FS_DELETE); +} + +/* * fsnotify_mkdir - directory 'name' was created */ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) @@ -199,6 +212,19 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) } /* + * fsnotify_rmdir - directory 'name' was removed + * + * Caller must make sure that dentry->d_name is stable. + */ +static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) +{ + /* Expected to be called before d_delete() */ + WARN_ON_ONCE(d_is_negative(dentry)); + + fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR); +} + +/* * fsnotify_access - file was read */ static inline void fsnotify_access(struct file *file) diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index d4844cad2c2b..1915bdba2fad 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -357,7 +357,6 @@ extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); -extern void fsnotify_nameremove(struct dentry *dentry, int isdir); extern u32 fsnotify_get_cookie(void); static inline int fsnotify_inode_watches_children(struct inode *inode) @@ -476,6 +475,8 @@ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, extern void fsnotify_detach_mark(struct fsnotify_mark *mark); /* free mark */ extern void fsnotify_free_mark(struct fsnotify_mark *mark); +/* Wait until all marks queued for destruction are destroyed */ +extern void fsnotify_wait_marks_destroyed(void); /* run all the marks in a group, and clear all of the marks attached to given object type */ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type); /* run all the marks in a group, and clear all of the vfsmount marks */ @@ -527,9 +528,6 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) static inline void fsnotify_sb_delete(struct super_block *sb) {} -static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) -{} - static inline void fsnotify_update_flags(struct dentry *dentry) {} diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h new file mode 100644 index 000000000000..3b6b8ccebe7d --- /dev/null +++ b/include/linux/fsverity.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ +/* + * fs-verity: read-only file-based authenticity protection + * + * This header declares the interface between the fs/verity/ support layer and + * filesystems that support fs-verity. + * + * Copyright 2019 Google LLC + */ + +#ifndef _LINUX_FSVERITY_H +#define _LINUX_FSVERITY_H + +#include <linux/fs.h> +#include <uapi/linux/fsverity.h> + +/* Verity operations for filesystems */ +struct fsverity_operations { + + /** + * Begin enabling verity on the given file. + * + * @filp: a readonly file descriptor for the file + * + * The filesystem must do any needed filesystem-specific preparations + * for enabling verity, e.g. evicting inline data. It also must return + * -EBUSY if verity is already being enabled on the given file. + * + * i_rwsem is held for write. + * + * Return: 0 on success, -errno on failure + */ + int (*begin_enable_verity)(struct file *filp); + + /** + * End enabling verity on the given file. + * + * @filp: a readonly file descriptor for the file + * @desc: the verity descriptor to write, or NULL on failure + * @desc_size: size of verity descriptor, or 0 on failure + * @merkle_tree_size: total bytes the Merkle tree took up + * + * If desc == NULL, then enabling verity failed and the filesystem only + * must do any necessary cleanups. Else, it must also store the given + * verity descriptor to a fs-specific location associated with the inode + * and do any fs-specific actions needed to mark the inode as a verity + * inode, e.g. setting a bit in the on-disk inode. The filesystem is + * also responsible for setting the S_VERITY flag in the VFS inode. + * + * i_rwsem is held for write, but it may have been dropped between + * ->begin_enable_verity() and ->end_enable_verity(). + * + * Return: 0 on success, -errno on failure + */ + int (*end_enable_verity)(struct file *filp, const void *desc, + size_t desc_size, u64 merkle_tree_size); + + /** + * Get the verity descriptor of the given inode. + * + * @inode: an inode with the S_VERITY flag set + * @buf: buffer in which to place the verity descriptor + * @bufsize: size of @buf, or 0 to retrieve the size only + * + * If bufsize == 0, then the size of the verity descriptor is returned. + * Otherwise the verity descriptor is written to 'buf' and its actual + * size is returned; -ERANGE is returned if it's too large. This may be + * called by multiple processes concurrently on the same inode. + * + * Return: the size on success, -errno on failure + */ + int (*get_verity_descriptor)(struct inode *inode, void *buf, + size_t bufsize); + + /** + * Read a Merkle tree page of the given inode. + * + * @inode: the inode + * @index: 0-based index of the page within the Merkle tree + * + * This can be called at any time on an open verity file, as well as + * between ->begin_enable_verity() and ->end_enable_verity(). It may be + * called by multiple processes concurrently, even with the same page. + * + * Note that this must retrieve a *page*, not necessarily a *block*. + * + * Return: the page on success, ERR_PTR() on failure + */ + struct page *(*read_merkle_tree_page)(struct inode *inode, + pgoff_t index); + + /** + * Write a Merkle tree block to the given inode. + * + * @inode: the inode for which the Merkle tree is being built + * @buf: block to write + * @index: 0-based index of the block within the Merkle tree + * @log_blocksize: log base 2 of the Merkle tree block size + * + * This is only called between ->begin_enable_verity() and + * ->end_enable_verity(). 
+ * + * Return: 0 on success, -errno on failure + */ + int (*write_merkle_tree_block)(struct inode *inode, const void *buf, + u64 index, int log_blocksize); +}; + +#ifdef CONFIG_FS_VERITY + +static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) +{ + /* pairs with the cmpxchg() in fsverity_set_info() */ + return READ_ONCE(inode->i_verity_info); +} + +/* enable.c */ + +extern int fsverity_ioctl_enable(struct file *filp, const void __user *arg); + +/* measure.c */ + +extern int fsverity_ioctl_measure(struct file *filp, void __user *arg); + +/* open.c */ + +extern int fsverity_file_open(struct inode *inode, struct file *filp); +extern int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr); +extern void fsverity_cleanup_inode(struct inode *inode); + +/* verify.c */ + +extern bool fsverity_verify_page(struct page *page); +extern void fsverity_verify_bio(struct bio *bio); +extern void fsverity_enqueue_verify_work(struct work_struct *work); + +#else /* !CONFIG_FS_VERITY */ + +static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) +{ + return NULL; +} + +/* enable.c */ + +static inline int fsverity_ioctl_enable(struct file *filp, + const void __user *arg) +{ + return -EOPNOTSUPP; +} + +/* measure.c */ + +static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + +/* open.c */ + +static inline int fsverity_file_open(struct inode *inode, struct file *filp) +{ + return IS_VERITY(inode) ? -EOPNOTSUPP : 0; +} + +static inline int fsverity_prepare_setattr(struct dentry *dentry, + struct iattr *attr) +{ + return IS_VERITY(d_inode(dentry)) ? -EOPNOTSUPP : 0; +} + +static inline void fsverity_cleanup_inode(struct inode *inode) +{ +} + +/* verify.c */ + +static inline bool fsverity_verify_page(struct page *page) +{ + WARN_ON(1); + return false; +} + +static inline void fsverity_verify_bio(struct bio *bio) +{ + WARN_ON(1); +} + +static inline void fsverity_enqueue_verify_work(struct work_struct *work) +{ + WARN_ON(1); +} + +#endif /* !CONFIG_FS_VERITY */ + +/** + * fsverity_active() - do reads from the inode need to go through fs-verity? + * + * This checks whether ->i_verity_info has been set. + * + * Filesystems call this from ->readpages() to check whether the pages need to + * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to + * a race condition where the file is being read concurrently with + * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.) 
+ */ +static inline bool fsverity_active(const struct inode *inode) +{ + return fsverity_get_info(inode) != NULL; +} + +#endif /* _LINUX_FSVERITY_H */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 25e2995d4a4c..8a8cb3c401b2 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -427,8 +427,8 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); iter = ftrace_rec_iter_next(iter)) -int ftrace_update_record(struct dyn_ftrace *rec, int enable); -int ftrace_test_record(struct dyn_ftrace *rec, int enable); +int ftrace_update_record(struct dyn_ftrace *rec, bool enable); +int ftrace_test_record(struct dyn_ftrace *rec, bool enable); void ftrace_run_stop_machine(int command); unsigned long ftrace_location(unsigned long ip); unsigned long ftrace_location_range(unsigned long start, unsigned long end); diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index a11c8c56c78b..ababd6bc82f3 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -110,10 +110,11 @@ struct fwnode_operations { (fwnode ? (fwnode_has_op(fwnode, op) ? \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \ -EINVAL) -#define fwnode_call_bool_op(fwnode, op, ...) \ - (fwnode ? (fwnode_has_op(fwnode, op) ? \ - (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) : \ - false) + +#define fwnode_call_bool_op(fwnode, op, ...) \ + (fwnode_has_op(fwnode, op) ? \ + (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) + #define fwnode_call_ptr_op(fwnode, op, ...) \ (fwnode_has_op(fwnode, op) ? \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : NULL) diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 205f62b8d291..4bd583bd6934 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -155,6 +155,15 @@ static inline unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma); +extern void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size, + dma_addr_t *dma, genpool_algo_t algo, void *data); +extern void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size, + dma_addr_t *dma, int align); +extern void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma); +extern void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size, + dma_addr_t *dma, genpool_algo_t algo, void *data); +extern void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size, + dma_addr_t *dma, int align); extern void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size, void **owner); static inline void gen_pool_free(struct gen_pool *pool, unsigned long addr, diff --git a/include/linux/gfp.h b/include/linux/gfp.h index fb07b503dc45..61f2f6ff9467 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -325,6 +325,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) return !!(gfp_flags & __GFP_DIRECT_RECLAIM); } +/** + * gfpflags_normal_context - is gfp_flags a normal sleepable context? + * @gfp_flags: gfp_flags to test + * + * Test whether @gfp_flags indicates that the allocation is from the + * %current context and allowed to sleep. + * + * An allocation being allowed to block doesn't mean it owns the %current + * context. When direct reclaim path tries to allocate memory, the + * allocation context is nested inside whatever %current was doing at the + * time of the original allocation. 
The nested allocation may be allowed + * to block but modifying anything %current owns can corrupt the outer + * context's expectations. + * + * %true result from this function indicates that the allocation context + * can sleep and use anything that's associated with %current. + */ +static inline bool gfpflags_normal_context(const gfp_t gfp_flags) +{ + return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) == + __GFP_DIRECT_RECLAIM; +} + #ifdef CONFIG_HIGHMEM #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM #else diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 39745b8bdd65..2157717c2136 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -106,6 +106,7 @@ void devm_gpio_free(struct device *dev, unsigned int gpio); struct device; struct gpio_chip; +struct pinctrl_dev; static inline bool gpio_is_valid(int number) { @@ -220,19 +221,6 @@ static inline int gpio_to_irq(unsigned gpio) return -EINVAL; } -static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, - unsigned int offset) -{ - WARN_ON(1); - return -EINVAL; -} - -static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, - unsigned int offset) -{ - WARN_ON(1); -} - static inline int irq_to_gpio(unsigned irq) { /* irq can never have been returned from gpio_to_irq() */ @@ -240,30 +228,6 @@ static inline int irq_to_gpio(unsigned irq) return -EINVAL; } -static inline int -gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, - unsigned int gpio_offset, unsigned int pin_offset, - unsigned int npins) -{ - WARN_ON(1); - return -EINVAL; -} - -static inline int -gpiochip_add_pingroup_range(struct gpio_chip *chip, - struct pinctrl_dev *pctldev, - unsigned int gpio_offset, const char *pin_group) -{ - WARN_ON(1); - return -EINVAL; -} - -static inline void -gpiochip_remove_pin_ranges(struct gpio_chip *chip) -{ - WARN_ON(1); -} - static inline int devm_gpio_request(struct device *dev, unsigned gpio, const char *label) { diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 9ddcf50a3c59..b70af921c614 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -170,18 +170,8 @@ struct gpio_desc *gpio_to_desc(unsigned gpio); int desc_to_gpio(const struct gpio_desc *desc); /* Child properties interface */ -struct device_node; struct fwnode_handle; -struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label); -struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, - struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label); struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, const char *propname, int index, enum gpiod_flags dflags, @@ -247,7 +237,7 @@ static inline void gpiod_put(struct gpio_desc *desc) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void devm_gpiod_unhinge(struct device *dev, @@ -256,7 +246,7 @@ static inline void devm_gpiod_unhinge(struct device *dev, might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void gpiod_put_array(struct gpio_descs *descs) @@ -264,7 +254,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs) might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(descs); } static inline struct gpio_desc *__must_check @@ -317,7 +307,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) 
might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline void devm_gpiod_put_array(struct device *dev, @@ -326,32 +316,32 @@ static inline void devm_gpiod_put_array(struct device *dev, might_sleep(); /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(descs); } static inline int gpiod_get_direction(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_input(struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_output(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } @@ -359,7 +349,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) static inline int gpiod_get_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_array_value(unsigned int array_size, @@ -368,13 +358,13 @@ static inline int gpiod_get_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -382,13 +372,13 @@ static inline int gpiod_set_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_raw_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_raw_array_value(unsigned int array_size, @@ -397,13 +387,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, @@ -411,14 +401,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_array_value_cansleep(unsigned int array_size, @@ -427,13 +417,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_array_value_cansleep(unsigned int array_size, struct gpio_desc 
**desc_array, @@ -441,13 +431,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, @@ -456,14 +446,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); } static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, @@ -471,41 +461,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, unsigned long *value_bitmap) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc_array); return 0; } static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -ENOSYS; } static inline int gpiod_is_active_low(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return 0; } static inline int gpiod_to_irq(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } @@ -513,7 +503,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } @@ -525,34 +515,14 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio) static inline int desc_to_gpio(const struct gpio_desc *desc) { /* GPIO can never have been requested */ - WARN_ON(1); + WARN_ON(desc); return -EINVAL; } /* Child properties interface */ -struct device_node; struct fwnode_handle; static inline -struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label) -{ - return ERR_PTR(-ENOSYS); -} - -static inline -struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, - struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label) -{ - return ERR_PTR(-ENOSYS); -} - -static inline struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, const char *propname, int index, enum gpiod_flags dflags, @@ -584,6 +554,111 @@ struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev, flags, label); } +#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO) +struct device_node; + +struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); + +#else /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */ + +struct device_node; + +static inline +struct gpio_desc *gpiod_get_from_of_node(struct 
device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +#endif /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */ + +#ifdef CONFIG_GPIOLIB +struct device_node; + +struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, + struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); + +#else /* CONFIG_GPIOLIB */ + +struct device_node; + +static inline +struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, + struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +#endif /* CONFIG_GPIOLIB */ + +struct acpi_gpio_params { + unsigned int crs_entry_index; + unsigned int line_index; + bool active_low; +}; + +struct acpi_gpio_mapping { + const char *name; + const struct acpi_gpio_params *data; + unsigned int size; + +/* Ignore IoRestriction field */ +#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) +/* + * When ACPI GPIO mapping table is in use the index parameter inside it + * refers to the GPIO resource in _CRS method. That index has no + * distinction of actual type of the resource. When consumer wants to + * get GpioIo type explicitly, this quirk may be used. + */ +#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1) + + unsigned int quirks; +}; + +#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI) + +struct acpi_device; + +int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios); +void acpi_dev_remove_driver_gpios(struct acpi_device *adev); + +int devm_acpi_dev_add_driver_gpios(struct device *dev, + const struct acpi_gpio_mapping *gpios); +void devm_acpi_dev_remove_driver_gpios(struct device *dev); + +#else /* CONFIG_GPIOLIB && CONFIG_ACPI */ + +struct acpi_device; + +static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios) +{ + return -ENXIO; +} +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} + +static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, + const struct acpi_gpio_mapping *gpios) +{ + return -ENXIO; +} +static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {} + +#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */ + + #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) int gpiod_export(struct gpio_desc *desc, bool direction_may_change); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index a1d273c96016..5dd9c982e2cb 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -18,10 +18,10 @@ struct seq_file; struct gpio_device; struct module; enum gpiod_flags; +enum gpio_lookup_flags; -#ifdef CONFIG_GPIOLIB +struct gpio_chip; -#ifdef CONFIG_GPIOLIB_IRQCHIP /** * struct gpio_irq_chip - GPIO interrupt controller */ @@ -48,6 +48,84 @@ struct gpio_irq_chip { */ const struct irq_domain_ops *domain_ops; +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + /** + * @fwnode: + * + * Firmware node corresponding to this gpiochip/irqchip, necessary + * for hierarchical irqdomain support. + */ + struct fwnode_handle *fwnode; + + /** + * @parent_domain: + * + * If non-NULL, will be set as the parent of this GPIO interrupt + * controller's IRQ domain to establish a hierarchical interrupt + * domain. The presence of this will activate the hierarchical + * interrupt support. 
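Picking up the acpi_gpio_mapping and devm_acpi_dev_add_driver_gpios() declarations above: a hedged sketch of the usual table-driven usage, with every name below ("reset-gpios", example_*) made up for illustration.

#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* First GpioIo resource in _CRS, line 0, active high
 * (crs_entry_index, line_index, active_low). */
static const struct acpi_gpio_params example_reset_gpio = { 0, 0, false };

static const struct acpi_gpio_mapping example_acpi_gpios[] = {
	{ "reset-gpios", &example_reset_gpio, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
	{ }
};

/* Typically called from probe(); the devm_ variant is cleaned up with the device. */
static int example_register_acpi_gpios(struct device *dev)
{
	return devm_acpi_dev_add_driver_gpios(dev, example_acpi_gpios);
}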
+ */ + struct irq_domain *parent_domain; + + /** + * @child_to_parent_hwirq: + * + * This callback translates a child hardware IRQ offset to a parent + * hardware IRQ offset on a hierarchical interrupt chip. The child + * hardware IRQs correspond to the GPIO index 0..ngpio-1 (see the + * ngpio field of struct gpio_chip) and the corresponding parent + * hardware IRQ and type (such as IRQ_TYPE_*) shall be returned by + * the driver. The driver can calculate this from an offset or using + * a lookup table or whatever method is best for this chip. Return + * 0 on successful translation in the driver. + * + * If some ranges of hardware IRQs do not have a corresponding parent + * HWIRQ, return -EINVAL, but also make sure to fill in @valid_mask and + * @need_valid_mask to make these GPIO lines unavailable for + * translation. + */ + int (*child_to_parent_hwirq)(struct gpio_chip *chip, + unsigned int child_hwirq, + unsigned int child_type, + unsigned int *parent_hwirq, + unsigned int *parent_type); + + /** + * @populate_parent_fwspec: + * + * This optional callback populates the &struct irq_fwspec for the + * parent's IRQ domain. If this is not specified, then + * &gpiochip_populate_parent_fwspec_twocell will be used. A four-cell + * variant named &gpiochip_populate_parent_fwspec_fourcell is also + * available. + */ + void (*populate_parent_fwspec)(struct gpio_chip *chip, + struct irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type); + + /** + * @child_offset_to_irq: + * + * This optional callback is used to translate the child's GPIO line + * offset on the GPIO chip to an IRQ number for the GPIO to_irq() + * callback. If this is not specified, then a default callback will be + * provided that returns the line offset. + */ + unsigned int (*child_offset_to_irq)(struct gpio_chip *chip, + unsigned int pin); + + /** + * @child_irq_domain_ops: + * + * The IRQ domain operations that will be used for this GPIO IRQ + * chip. If no operations are provided, then default callbacks will + * be populated to setup the IRQ hierarchy. Some drivers need to + * supply their own translate function. + */ + struct irq_domain_ops child_irq_domain_ops; +#endif + /** * @handler: * @@ -102,13 +180,6 @@ struct gpio_irq_chip { unsigned int num_parents; /** - * @parent_irq: - * - * For use by gpiochip_set_cascaded_irqchip() - */ - unsigned int parent_irq; - - /** * @parents: * * A list of interrupt parents of a GPIO chip. This is owned by the @@ -131,11 +202,25 @@ struct gpio_irq_chip { bool threaded; /** - * @need_valid_mask: - * - * If set core allocates @valid_mask with all bits set to one. + * @init_hw: optional routine to initialize hardware before + * an IRQ chip will be added. This is quite useful when + * a particular driver wants to clear IRQ related registers + * in order to avoid undesired events. */ - bool need_valid_mask; + int (*init_hw)(struct gpio_chip *chip); + + /** + * @init_valid_mask: optional routine to initialize @valid_mask, to be + * used if not all GPIO lines are valid interrupts. Sometimes some + * lines just cannot fire interrupts, and this routine, when defined, + * is passed a bitmap in "valid_mask" and it will have ngpios + * bits from 0..(ngpios-1) set to "1" as in valid. The callback can + * then directly set some bits to "0" if they cannot be used for + * interrupts. 
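A compact sketch of the two new hooks described here, assuming a controller whose GPIO line n maps to parent hwirq n + 32 and whose last line cannot raise an interrupt; all function names are invented.

#include <linux/bitops.h>
#include <linux/gpio/driver.h>

static int example_child_to_parent_hwirq(struct gpio_chip *chip,
					 unsigned int child_hwirq,
					 unsigned int child_type,
					 unsigned int *parent_hwirq,
					 unsigned int *parent_type)
{
	*parent_hwirq = child_hwirq + 32;	/* fixed offset into the parent domain */
	*parent_type = child_type;
	return 0;
}

static void example_init_valid_mask(struct gpio_chip *chip,
				    unsigned long *valid_mask,
				    unsigned int ngpios)
{
	/* The core hands in a mask with all ngpios bits set; clear what cannot fire. */
	clear_bit(ngpios - 1, valid_mask);
}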
+ */ + void (*init_valid_mask)(struct gpio_chip *chip, + unsigned long *valid_mask, + unsigned int ngpios); /** * @valid_mask: @@ -167,7 +252,6 @@ struct gpio_irq_chip { */ void (*irq_disable)(struct irq_data *data); }; -#endif /** * struct gpio_chip - abstract a GPIO controller @@ -200,6 +284,8 @@ struct gpio_irq_chip { * @dbg_show: optional routine to show contents in debugfs; default code * will be used when this is omitted, but custom code can show extra * state (such as pullup/pulldown configuration). + * @init_valid_mask: optional routine to initialize @valid_mask, to be used if + * not all GPIOs are valid. * @base: identifies the first GPIO number handled by this chip; * or, if negative during registration, requests dynamic ID allocation. * DEPRECATION: providing anything non-negative and nailing the base @@ -286,7 +372,9 @@ struct gpio_chip { void (*dbg_show)(struct seq_file *s, struct gpio_chip *chip); - int (*init_valid_mask)(struct gpio_chip *chip); + int (*init_valid_mask)(struct gpio_chip *chip, + unsigned long *valid_mask, + unsigned int ngpios); int base; u16 ngpio; @@ -307,7 +395,7 @@ struct gpio_chip { spinlock_t bgpio_lock; unsigned long bgpio_data; unsigned long bgpio_dir; -#endif +#endif /* CONFIG_GPIO_GENERIC */ #ifdef CONFIG_GPIOLIB_IRQCHIP /* @@ -322,16 +410,7 @@ struct gpio_chip { * used to handle IRQs for most practical cases. */ struct gpio_irq_chip irq; -#endif - - /** - * @need_valid_mask: - * - * If set core allocates @valid_mask with all its values initialized - * with init_valid_mask() or set to one if init_valid_mask() is not - * defined - */ - bool need_valid_mask; +#endif /* CONFIG_GPIOLIB_IRQCHIP */ /** * @valid_mask: @@ -369,7 +448,7 @@ struct gpio_chip { */ int (*of_xlate)(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, u32 *flags); -#endif +#endif /* CONFIG_OF_GPIO */ }; extern const char *gpiochip_is_requested(struct gpio_chip *chip, @@ -412,7 +491,7 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, }) #else #define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL) -#endif +#endif /* CONFIG_LOCKDEP */ static inline int gpiochip_add(struct gpio_chip *chip) { @@ -425,9 +504,6 @@ extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip, extern struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *chip, void *data)); -/* lock/unlock as IRQ */ -int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); -void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset); int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset); void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset); @@ -445,15 +521,40 @@ bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset); /* get driver data */ void *gpiochip_get_data(struct gpio_chip *chip); -struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); - struct bgpio_pdata { const char *label; int base; int ngpio; }; -#if IS_ENABLED(CONFIG_GPIO_GENERIC) +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + +void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, + struct irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type); +void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, + struct irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type); + +#else + +static inline void 
gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, + struct irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type) +{ +} + +static inline void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, + struct irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type) +{ +} + +#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ int bgpio_init(struct gpio_chip *gc, struct device *dev, unsigned long sz, void __iomem *dat, void __iomem *set, @@ -467,10 +568,6 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, #define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ #define BGPIOF_NO_OUTPUT BIT(5) /* only input */ -#endif - -#ifdef CONFIG_GPIOLIB_IRQCHIP - int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq); void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq); @@ -537,7 +634,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, handler, type, true, &lock_key, &request_key); } -#else +#else /* ! CONFIG_LOCKDEP */ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, struct irq_chip *irqchip, unsigned int first_irq, @@ -559,15 +656,11 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, } #endif /* CONFIG_LOCKDEP */ -#endif /* CONFIG_GPIOLIB_IRQCHIP */ - int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset, unsigned long config); -#ifdef CONFIG_PINCTRL - /** * struct gpio_pin_range - pin range controlled by a gpio chip * @node: list for maintaining set of pin ranges, used internally @@ -580,6 +673,8 @@ struct gpio_pin_range { struct pinctrl_gpio_range range; }; +#ifdef CONFIG_PINCTRL + int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins); @@ -588,7 +683,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip, unsigned int gpio_offset, const char *pin_group); void gpiochip_remove_pin_ranges(struct gpio_chip *chip); -#else +#else /* ! 
CONFIG_PINCTRL */ static inline int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, @@ -614,12 +709,22 @@ gpiochip_remove_pin_ranges(struct gpio_chip *chip) struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum, const char *label, - enum gpiod_flags flags); + enum gpio_lookup_flags lflags, + enum gpiod_flags dflags); void gpiochip_free_own_desc(struct gpio_desc *desc); void devprop_gpiochip_set_names(struct gpio_chip *chip, const struct fwnode_handle *fwnode); +#ifdef CONFIG_GPIOLIB + +/* lock/unlock as IRQ */ +int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); +void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); + + +struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); + #else /* CONFIG_GPIOLIB */ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) @@ -629,6 +734,18 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) return ERR_PTR(-ENODEV); } +static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, + unsigned int offset) +{ + WARN_ON(1); + return -EINVAL; +} + +static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, + unsigned int offset) +{ + WARN_ON(1); +} #endif /* CONFIG_GPIOLIB */ -#endif +#endif /* __LINUX_GPIO_DRIVER_H */ diff --git a/include/linux/gpio/gpio-reg.h b/include/linux/gpio/gpio-reg.h index 5c6efd394cb0..39b888c40b39 100644 --- a/include/linux/gpio/gpio-reg.h +++ b/include/linux/gpio/gpio-reg.h @@ -11,4 +11,4 @@ struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg, int gpio_reg_resume(struct gpio_chip *gc); -#endif +#endif /* GPIO_REG_H */ diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index 35f299d1f6a7..1ebe5be05d5f 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h @@ -97,7 +97,7 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table); void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n); void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); void gpiod_add_hogs(struct gpiod_hog *hogs); -#else +#else /* ! CONFIG_GPIOLIB */ static inline void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} static inline @@ -105,6 +105,6 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {} static inline void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {} -#endif +#endif /* CONFIG_GPIOLIB */ #endif /* __LINUX_GPIO_MACHINE_H */ diff --git a/include/linux/greybus.h b/include/linux/greybus.h new file mode 100644 index 000000000000..18c0fb958b74 --- /dev/null +++ b/include/linux/greybus.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus driver and device API + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. 
+ */ + +#ifndef __LINUX_GREYBUS_H +#define __LINUX_GREYBUS_H + +#ifdef __KERNEL__ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <linux/idr.h> + +#include <linux/greybus/greybus_id.h> +#include <linux/greybus/greybus_manifest.h> +#include <linux/greybus/greybus_protocols.h> +#include <linux/greybus/manifest.h> +#include <linux/greybus/hd.h> +#include <linux/greybus/svc.h> +#include <linux/greybus/control.h> +#include <linux/greybus/module.h> +#include <linux/greybus/interface.h> +#include <linux/greybus/bundle.h> +#include <linux/greybus/connection.h> +#include <linux/greybus/operation.h> + +/* Matches up with the Greybus Protocol specification document */ +#define GREYBUS_VERSION_MAJOR 0x00 +#define GREYBUS_VERSION_MINOR 0x01 + +#define GREYBUS_ID_MATCH_DEVICE \ + (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT) + +#define GREYBUS_DEVICE(v, p) \ + .match_flags = GREYBUS_ID_MATCH_DEVICE, \ + .vendor = (v), \ + .product = (p), + +#define GREYBUS_DEVICE_CLASS(c) \ + .match_flags = GREYBUS_ID_MATCH_CLASS, \ + .class = (c), + +/* Maximum number of CPorts */ +#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */ +#define CPORT_ID_BAD U16_MAX + +struct greybus_driver { + const char *name; + + int (*probe)(struct gb_bundle *bundle, + const struct greybus_bundle_id *id); + void (*disconnect)(struct gb_bundle *bundle); + + const struct greybus_bundle_id *id_table; + + struct device_driver driver; +}; +#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver) + +static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data) +{ + dev_set_drvdata(&bundle->dev, data); +} + +static inline void *greybus_get_drvdata(struct gb_bundle *bundle) +{ + return dev_get_drvdata(&bundle->dev); +} + +/* Don't call these directly, use the module_greybus_driver() macro instead */ +int greybus_register_driver(struct greybus_driver *driver, + struct module *module, const char *mod_name); +void greybus_deregister_driver(struct greybus_driver *driver); + +/* define to get proper THIS_MODULE and KBUILD_MODNAME values */ +#define greybus_register(driver) \ + greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) +#define greybus_deregister(driver) \ + greybus_deregister_driver(driver) + +/** + * module_greybus_driver() - Helper macro for registering a Greybus driver + * @__greybus_driver: greybus_driver structure + * + * Helper macro for Greybus drivers to set up proper module init / exit + * functions. Replaces module_init() and module_exit() and keeps people from + * printing pointless things to the kernel log when their driver is loaded. 
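For reference, a minimal bundle-driver skeleton built around the macro documented here; the driver name, the class match, and the empty callbacks are illustrative only.

#include <linux/module.h>
#include <linux/greybus.h>

static int example_probe(struct gb_bundle *bundle,
			 const struct greybus_bundle_id *id)
{
	return 0;	/* claim the bundle */
}

static void example_disconnect(struct gb_bundle *bundle)
{
}

static const struct greybus_bundle_id example_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
	{ }
};

static struct greybus_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};
module_greybus_driver(example_driver);

MODULE_LICENSE("GPL");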
+ */ +#define module_greybus_driver(__greybus_driver) \ + module_driver(__greybus_driver, greybus_register, greybus_deregister) + +int greybus_disabled(void); + +void gb_debugfs_init(void); +void gb_debugfs_cleanup(void); +struct dentry *gb_debugfs_get(void); + +extern struct bus_type greybus_bus_type; + +extern struct device_type greybus_hd_type; +extern struct device_type greybus_module_type; +extern struct device_type greybus_interface_type; +extern struct device_type greybus_control_type; +extern struct device_type greybus_bundle_type; +extern struct device_type greybus_svc_type; + +static inline int is_gb_host_device(const struct device *dev) +{ + return dev->type == &greybus_hd_type; +} + +static inline int is_gb_module(const struct device *dev) +{ + return dev->type == &greybus_module_type; +} + +static inline int is_gb_interface(const struct device *dev) +{ + return dev->type == &greybus_interface_type; +} + +static inline int is_gb_control(const struct device *dev) +{ + return dev->type == &greybus_control_type; +} + +static inline int is_gb_bundle(const struct device *dev) +{ + return dev->type == &greybus_bundle_type; +} + +static inline int is_gb_svc(const struct device *dev) +{ + return dev->type == &greybus_svc_type; +} + +static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id) +{ + return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports; +} + +#endif /* __KERNEL__ */ +#endif /* __LINUX_GREYBUS_H */ diff --git a/include/linux/greybus/bundle.h b/include/linux/greybus/bundle.h new file mode 100644 index 000000000000..df8d88424cb7 --- /dev/null +++ b/include/linux/greybus/bundle.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus bundles + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. 
+ */ + +#ifndef __BUNDLE_H +#define __BUNDLE_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/pm_runtime.h> +#include <linux/device.h> + +#define BUNDLE_ID_NONE U8_MAX + +/* Greybus "public" definitions" */ +struct gb_bundle { + struct device dev; + struct gb_interface *intf; + + u8 id; + u8 class; + u8 class_major; + u8 class_minor; + + size_t num_cports; + struct greybus_descriptor_cport *cport_desc; + + struct list_head connections; + u8 *state; + + struct list_head links; /* interface->bundles */ +}; +#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev) + +/* Greybus "private" definitions" */ +struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id, + u8 class); +int gb_bundle_add(struct gb_bundle *bundle); +void gb_bundle_destroy(struct gb_bundle *bundle); + +/* Bundle Runtime PM wrappers */ +#ifdef CONFIG_PM +static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle) +{ + int retval; + + retval = pm_runtime_get_sync(&bundle->dev); + if (retval < 0) { + dev_err(&bundle->dev, + "pm_runtime_get_sync failed: %d\n", retval); + pm_runtime_put_noidle(&bundle->dev); + return retval; + } + + return 0; +} + +static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle) +{ + int retval; + + pm_runtime_mark_last_busy(&bundle->dev); + retval = pm_runtime_put_autosuspend(&bundle->dev); + + return retval; +} + +static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) +{ + pm_runtime_get_noresume(&bundle->dev); +} + +static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) +{ + pm_runtime_put_noidle(&bundle->dev); +} + +#else +static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle) +{ return 0; } +static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle) +{ return 0; } + +static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {} +static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {} +#endif + +#endif /* __BUNDLE_H */ diff --git a/include/linux/greybus/connection.h b/include/linux/greybus/connection.h new file mode 100644 index 000000000000..d59b7fc1de3e --- /dev/null +++ b/include/linux/greybus/connection.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus connections + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. 
+ */ + +#ifndef __CONNECTION_H +#define __CONNECTION_H + +#include <linux/bits.h> +#include <linux/list.h> +#include <linux/kfifo.h> +#include <linux/kref.h> +#include <linux/workqueue.h> + +#define GB_CONNECTION_FLAG_CSD BIT(0) +#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1) +#define GB_CONNECTION_FLAG_OFFLOADED BIT(2) +#define GB_CONNECTION_FLAG_CDSI1 BIT(3) +#define GB_CONNECTION_FLAG_CONTROL BIT(4) +#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5) + +#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL + +enum gb_connection_state { + GB_CONNECTION_STATE_DISABLED = 0, + GB_CONNECTION_STATE_ENABLED_TX = 1, + GB_CONNECTION_STATE_ENABLED = 2, + GB_CONNECTION_STATE_DISCONNECTING = 3, +}; + +struct gb_operation; + +typedef int (*gb_request_handler_t)(struct gb_operation *); + +struct gb_connection { + struct gb_host_device *hd; + struct gb_interface *intf; + struct gb_bundle *bundle; + struct kref kref; + u16 hd_cport_id; + u16 intf_cport_id; + + struct list_head hd_links; + struct list_head bundle_links; + + gb_request_handler_t handler; + unsigned long flags; + + struct mutex mutex; + spinlock_t lock; + enum gb_connection_state state; + struct list_head operations; + + char name[16]; + struct workqueue_struct *wq; + + atomic_t op_cycle; + + void *private; + + bool mode_switch; +}; + +struct gb_connection *gb_connection_create_static(struct gb_host_device *hd, + u16 hd_cport_id, gb_request_handler_t handler); +struct gb_connection *gb_connection_create_control(struct gb_interface *intf); +struct gb_connection *gb_connection_create(struct gb_bundle *bundle, + u16 cport_id, gb_request_handler_t handler); +struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle, + u16 cport_id, gb_request_handler_t handler, + unsigned long flags); +struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle, + u16 cport_id, unsigned long flags); +void gb_connection_destroy(struct gb_connection *connection); + +static inline bool gb_connection_is_static(struct gb_connection *connection) +{ + return !connection->intf; +} + +int gb_connection_enable(struct gb_connection *connection); +int gb_connection_enable_tx(struct gb_connection *connection); +void gb_connection_disable_rx(struct gb_connection *connection); +void gb_connection_disable(struct gb_connection *connection); +void gb_connection_disable_forced(struct gb_connection *connection); + +void gb_connection_mode_switch_prepare(struct gb_connection *connection); +void gb_connection_mode_switch_complete(struct gb_connection *connection); + +void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, + u8 *data, size_t length); + +void gb_connection_latency_tag_enable(struct gb_connection *connection); +void gb_connection_latency_tag_disable(struct gb_connection *connection); + +static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection) +{ + return !(connection->flags & GB_CONNECTION_FLAG_CSD); +} + +static inline bool +gb_connection_flow_control_disabled(struct gb_connection *connection) +{ + return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL; +} + +static inline bool gb_connection_is_offloaded(struct gb_connection *connection) +{ + return connection->flags & GB_CONNECTION_FLAG_OFFLOADED; +} + +static inline bool gb_connection_is_control(struct gb_connection *connection) +{ + return connection->flags & GB_CONNECTION_FLAG_CONTROL; +} + +static inline void *gb_connection_get_data(struct gb_connection *connection) +{ + return connection->private; +} + +static inline void 
gb_connection_set_data(struct gb_connection *connection, + void *data) +{ + connection->private = data; +} + +#endif /* __CONNECTION_H */ diff --git a/include/linux/greybus/control.h b/include/linux/greybus/control.h new file mode 100644 index 000000000000..da11fe871653 --- /dev/null +++ b/include/linux/greybus/control.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus CPort control protocol + * + * Copyright 2015 Google Inc. + * Copyright 2015 Linaro Ltd. + */ + +#ifndef __CONTROL_H +#define __CONTROL_H + +#include <linux/types.h> +#include <linux/device.h> + +struct gb_control { + struct device dev; + struct gb_interface *intf; + + struct gb_connection *connection; + + u8 protocol_major; + u8 protocol_minor; + + bool has_bundle_activate; + bool has_bundle_version; + + char *vendor_string; + char *product_string; +}; +#define to_gb_control(d) container_of(d, struct gb_control, dev) + +struct gb_control *gb_control_create(struct gb_interface *intf); +int gb_control_enable(struct gb_control *control); +void gb_control_disable(struct gb_control *control); +int gb_control_suspend(struct gb_control *control); +int gb_control_resume(struct gb_control *control); +int gb_control_add(struct gb_control *control); +void gb_control_del(struct gb_control *control); +struct gb_control *gb_control_get(struct gb_control *control); +void gb_control_put(struct gb_control *control); + +int gb_control_get_bundle_versions(struct gb_control *control); +int gb_control_connected_operation(struct gb_control *control, u16 cport_id); +int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id); +int gb_control_disconnecting_operation(struct gb_control *control, + u16 cport_id); +int gb_control_mode_switch_operation(struct gb_control *control); +void gb_control_mode_switch_prepare(struct gb_control *control); +void gb_control_mode_switch_complete(struct gb_control *control); +int gb_control_get_manifest_size_operation(struct gb_interface *intf); +int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest, + size_t size); +int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id); +int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id); +int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id); +int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id); +int gb_control_interface_suspend_prepare(struct gb_control *control); +int gb_control_interface_deactivate_prepare(struct gb_control *control); +int gb_control_interface_hibernate_abort(struct gb_control *control); +#endif /* __CONTROL_H */ diff --git a/include/linux/greybus/greybus_id.h b/include/linux/greybus/greybus_id.h new file mode 100644 index 000000000000..f4c8440093e4 --- /dev/null +++ b/include/linux/greybus/greybus_id.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* FIXME + * move this to include/linux/mod_devicetable.h when merging + */ + +#ifndef __LINUX_GREYBUS_ID_H +#define __LINUX_GREYBUS_ID_H + +#include <linux/types.h> +#include <linux/mod_devicetable.h> + + +struct greybus_bundle_id { + __u16 match_flags; + __u32 vendor; + __u32 product; + __u8 class; + + kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t)); +}; + +/* Used to match the greybus_bundle_id */ +#define GREYBUS_ID_MATCH_VENDOR BIT(0) +#define GREYBUS_ID_MATCH_PRODUCT BIT(1) +#define GREYBUS_ID_MATCH_CLASS BIT(2) + +#endif /* __LINUX_GREYBUS_ID_H */ diff --git a/include/linux/greybus/greybus_manifest.h 
b/include/linux/greybus/greybus_manifest.h new file mode 100644 index 000000000000..6e62fe478712 --- /dev/null +++ b/include/linux/greybus/greybus_manifest.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus manifest definition + * + * See "Greybus Application Protocol" document (version 0.1) for + * details on these values and structures. + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + * + * Released under the GPLv2 and BSD licenses. + */ + +#ifndef __GREYBUS_MANIFEST_H +#define __GREYBUS_MANIFEST_H + +#include <linux/bits.h> +#include <linux/types.h> + +enum greybus_descriptor_type { + GREYBUS_TYPE_INVALID = 0x00, + GREYBUS_TYPE_INTERFACE = 0x01, + GREYBUS_TYPE_STRING = 0x02, + GREYBUS_TYPE_BUNDLE = 0x03, + GREYBUS_TYPE_CPORT = 0x04, +}; + +enum greybus_protocol { + GREYBUS_PROTOCOL_CONTROL = 0x00, + /* 0x01 is unused */ + GREYBUS_PROTOCOL_GPIO = 0x02, + GREYBUS_PROTOCOL_I2C = 0x03, + GREYBUS_PROTOCOL_UART = 0x04, + GREYBUS_PROTOCOL_HID = 0x05, + GREYBUS_PROTOCOL_USB = 0x06, + GREYBUS_PROTOCOL_SDIO = 0x07, + GREYBUS_PROTOCOL_POWER_SUPPLY = 0x08, + GREYBUS_PROTOCOL_PWM = 0x09, + /* 0x0a is unused */ + GREYBUS_PROTOCOL_SPI = 0x0b, + GREYBUS_PROTOCOL_DISPLAY = 0x0c, + GREYBUS_PROTOCOL_CAMERA_MGMT = 0x0d, + GREYBUS_PROTOCOL_SENSOR = 0x0e, + GREYBUS_PROTOCOL_LIGHTS = 0x0f, + GREYBUS_PROTOCOL_VIBRATOR = 0x10, + GREYBUS_PROTOCOL_LOOPBACK = 0x11, + GREYBUS_PROTOCOL_AUDIO_MGMT = 0x12, + GREYBUS_PROTOCOL_AUDIO_DATA = 0x13, + GREYBUS_PROTOCOL_SVC = 0x14, + GREYBUS_PROTOCOL_BOOTROM = 0x15, + GREYBUS_PROTOCOL_CAMERA_DATA = 0x16, + GREYBUS_PROTOCOL_FW_DOWNLOAD = 0x17, + GREYBUS_PROTOCOL_FW_MANAGEMENT = 0x18, + GREYBUS_PROTOCOL_AUTHENTICATION = 0x19, + GREYBUS_PROTOCOL_LOG = 0x1a, + /* ... */ + GREYBUS_PROTOCOL_RAW = 0xfe, + GREYBUS_PROTOCOL_VENDOR = 0xff, +}; + +enum greybus_class_type { + GREYBUS_CLASS_CONTROL = 0x00, + /* 0x01 is unused */ + /* 0x02 is unused */ + /* 0x03 is unused */ + /* 0x04 is unused */ + GREYBUS_CLASS_HID = 0x05, + /* 0x06 is unused */ + /* 0x07 is unused */ + GREYBUS_CLASS_POWER_SUPPLY = 0x08, + /* 0x09 is unused */ + GREYBUS_CLASS_BRIDGED_PHY = 0x0a, + /* 0x0b is unused */ + GREYBUS_CLASS_DISPLAY = 0x0c, + GREYBUS_CLASS_CAMERA = 0x0d, + GREYBUS_CLASS_SENSOR = 0x0e, + GREYBUS_CLASS_LIGHTS = 0x0f, + GREYBUS_CLASS_VIBRATOR = 0x10, + GREYBUS_CLASS_LOOPBACK = 0x11, + GREYBUS_CLASS_AUDIO = 0x12, + /* 0x13 is unused */ + /* 0x14 is unused */ + GREYBUS_CLASS_BOOTROM = 0x15, + GREYBUS_CLASS_FW_MANAGEMENT = 0x16, + GREYBUS_CLASS_LOG = 0x17, + /* ... */ + GREYBUS_CLASS_RAW = 0xfe, + GREYBUS_CLASS_VENDOR = 0xff, +}; + +enum { + GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0), +}; + +/* + * The string in a string descriptor is not NUL-terminated. The + * size of the descriptor will be rounded up to a multiple of 4 + * bytes, by padding the string with 0x00 bytes if necessary. + */ +struct greybus_descriptor_string { + __u8 length; + __u8 id; + __u8 string[0]; +} __packed; + +/* + * An interface descriptor describes information about an interface as a whole, + * *not* the functions within it. + */ +struct greybus_descriptor_interface { + __u8 vendor_stringid; + __u8 product_stringid; + __u8 features; + __u8 pad; +} __packed; + +/* + * An bundle descriptor defines an identification number and a class for + * each bundle. + * + * @id: Uniquely identifies a bundle within a interface, its sole purpose is to + * allow CPort descriptors to specify which bundle they are associated with. 
+ * The first bundle will have id 0, second will have 1 and so on. + * + * The largest CPort id associated with an bundle (defined by a + * CPort descriptor in the manifest) is used to determine how to + * encode the device id and module number in UniPro packets + * that use the bundle. + * + * @class: It is used by kernel to know the functionality provided by the + * bundle and will be matched against drivers functinality while probing greybus + * driver. It should contain one of the values defined in + * 'enum greybus_class_type'. + * + */ +struct greybus_descriptor_bundle { + __u8 id; /* interface-relative id (0..) */ + __u8 class; + __u8 pad[2]; +} __packed; + +/* + * A CPort descriptor indicates the id of the bundle within the + * module it's associated with, along with the CPort id used to + * address the CPort. The protocol id defines the format of messages + * exchanged using the CPort. + */ +struct greybus_descriptor_cport { + __le16 id; + __u8 bundle; + __u8 protocol_id; /* enum greybus_protocol */ +} __packed; + +struct greybus_descriptor_header { + __le16 size; + __u8 type; /* enum greybus_descriptor_type */ + __u8 pad; +} __packed; + +struct greybus_descriptor { + struct greybus_descriptor_header header; + union { + struct greybus_descriptor_string string; + struct greybus_descriptor_interface interface; + struct greybus_descriptor_bundle bundle; + struct greybus_descriptor_cport cport; + }; +} __packed; + +struct greybus_manifest_header { + __le16 size; + __u8 version_major; + __u8 version_minor; +} __packed; + +struct greybus_manifest { + struct greybus_manifest_header header; + struct greybus_descriptor descriptors[0]; +} __packed; + +#endif /* __GREYBUS_MANIFEST_H */ diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h new file mode 100644 index 000000000000..dfbc6c39a74b --- /dev/null +++ b/include/linux/greybus/greybus_protocols.h @@ -0,0 +1,2178 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* + * Copyright(c) 2014 - 2015 Google Inc. All rights reserved. + * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved. + */ + +#ifndef __GREYBUS_PROTOCOLS_H +#define __GREYBUS_PROTOCOLS_H + +#include <linux/types.h> + +/* Fixed IDs for control/svc protocols */ + +/* SVC switch-port device ids */ +#define GB_SVC_DEVICE_ID_SVC 0 +#define GB_SVC_DEVICE_ID_AP 1 +#define GB_SVC_DEVICE_ID_MIN 2 +#define GB_SVC_DEVICE_ID_MAX 31 + +#define GB_SVC_CPORT_ID 0 +#define GB_CONTROL_BUNDLE_ID 0 +#define GB_CONTROL_CPORT_ID 0 + + +/* + * All operation messages (both requests and responses) begin with + * a header that encodes the size of the message (header included). + * This header also contains a unique identifier, that associates a + * response message with its operation. The header contains an + * operation type field, whose interpretation is dependent on what + * type of protocol is used over the connection. The high bit + * (0x80) of the operation type field is used to indicate whether + * the message is a request (clear) or a response (set). + * + * Response messages include an additional result byte, which + * communicates the result of the corresponding request. A zero + * result value means the operation completed successfully. Any + * other value indicates an error; in this case, the payload of the + * response message (if any) is ignored. The result byte must be + * zero in the header for a request message. + * + * The wire format for all numeric fields in the header is little + * endian. 
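A hedged sketch of how a receiver might interpret the header laid out just below; EXAMPLE_TYPE_RESPONSE merely names the 0x80 bit described above and is not a define from this patch.

#include <asm/byteorder.h>
#include <linux/greybus/greybus_protocols.h>

#define EXAMPLE_TYPE_RESPONSE	0x80	/* high bit of the type field */

static bool example_msg_is_response(const struct gb_operation_msg_hdr *hdr)
{
	return hdr->type & EXAMPLE_TYPE_RESPONSE;
}

static size_t example_payload_bytes(const struct gb_operation_msg_hdr *hdr)
{
	/* size is little-endian on the wire and covers header + payload */
	return le16_to_cpu(hdr->size) - sizeof(*hdr);
}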
Any operation-specific data begins immediately after the + * header. + */ +struct gb_operation_msg_hdr { + __le16 size; /* Size in bytes of header + payload */ + __le16 operation_id; /* Operation unique id */ + __u8 type; /* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */ + __u8 result; /* Result of request (in responses only) */ + __u8 pad[2]; /* must be zero (ignore when read) */ +} __packed; + + +/* Generic request types */ +#define GB_REQUEST_TYPE_CPORT_SHUTDOWN 0x00 +#define GB_REQUEST_TYPE_INVALID 0x7f + +struct gb_cport_shutdown_request { + __u8 phase; +} __packed; + + +/* Control Protocol */ + +/* Greybus control request types */ +#define GB_CONTROL_TYPE_VERSION 0x01 +#define GB_CONTROL_TYPE_PROBE_AP 0x02 +#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE 0x03 +#define GB_CONTROL_TYPE_GET_MANIFEST 0x04 +#define GB_CONTROL_TYPE_CONNECTED 0x05 +#define GB_CONTROL_TYPE_DISCONNECTED 0x06 +#define GB_CONTROL_TYPE_TIMESYNC_ENABLE 0x07 +#define GB_CONTROL_TYPE_TIMESYNC_DISABLE 0x08 +#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09 +/* Unused 0x0a */ +#define GB_CONTROL_TYPE_BUNDLE_VERSION 0x0b +#define GB_CONTROL_TYPE_DISCONNECTING 0x0c +#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT 0x0d +#define GB_CONTROL_TYPE_MODE_SWITCH 0x0e +#define GB_CONTROL_TYPE_BUNDLE_SUSPEND 0x0f +#define GB_CONTROL_TYPE_BUNDLE_RESUME 0x10 +#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE 0x11 +#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE 0x12 +#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE 0x13 +#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE 0x14 +#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT 0x15 + +struct gb_control_version_request { + __u8 major; + __u8 minor; +} __packed; + +struct gb_control_version_response { + __u8 major; + __u8 minor; +} __packed; + +struct gb_control_bundle_version_request { + __u8 bundle_id; +} __packed; + +struct gb_control_bundle_version_response { + __u8 major; + __u8 minor; +} __packed; + +/* Control protocol manifest get size request has no payload*/ +struct gb_control_get_manifest_size_response { + __le16 size; +} __packed; + +/* Control protocol manifest get request has no payload */ +struct gb_control_get_manifest_response { + __u8 data[0]; +} __packed; + +/* Control protocol [dis]connected request */ +struct gb_control_connected_request { + __le16 cport_id; +} __packed; + +struct gb_control_disconnecting_request { + __le16 cport_id; +} __packed; +/* disconnecting response has no payload */ + +struct gb_control_disconnected_request { + __le16 cport_id; +} __packed; +/* Control protocol [dis]connected response has no payload */ + +/* + * All Bundle power management operations use the same request and response + * layout and status codes. + */ + +#define GB_CONTROL_BUNDLE_PM_OK 0x00 +#define GB_CONTROL_BUNDLE_PM_INVAL 0x01 +#define GB_CONTROL_BUNDLE_PM_BUSY 0x02 +#define GB_CONTROL_BUNDLE_PM_FAIL 0x03 +#define GB_CONTROL_BUNDLE_PM_NA 0x04 + +struct gb_control_bundle_pm_request { + __u8 bundle_id; +} __packed; + +struct gb_control_bundle_pm_response { + __u8 status; +} __packed; + +/* + * Interface Suspend Prepare and Deactivate Prepare operations use the same + * response layout and error codes. Define a single response structure and reuse + * it. Both operations have no payload. 
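Illustrative only: one way a caller could fold the interface PM status byte (codes defined just below) into a negative errno; the exact mapping is this sketch's choice, not something mandated by the header.

#include <linux/errno.h>
#include <linux/greybus/greybus_protocols.h>

static int example_intf_pm_status_to_errno(u8 status)
{
	switch (status) {
	case GB_CONTROL_INTF_PM_OK:
		return 0;
	case GB_CONTROL_INTF_PM_BUSY:
		return -EAGAIN;
	case GB_CONTROL_INTF_PM_NA:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}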
+ */ + +#define GB_CONTROL_INTF_PM_OK 0x00 +#define GB_CONTROL_INTF_PM_BUSY 0x01 +#define GB_CONTROL_INTF_PM_NA 0x02 + +struct gb_control_intf_pm_response { + __u8 status; +} __packed; + +/* APBridge protocol */ + +/* request APB1 log */ +#define GB_APB_REQUEST_LOG 0x02 + +/* request to map a cport to bulk in and bulk out endpoints */ +#define GB_APB_REQUEST_EP_MAPPING 0x03 + +/* request to get the number of cports available */ +#define GB_APB_REQUEST_CPORT_COUNT 0x04 + +/* request to reset a cport state */ +#define GB_APB_REQUEST_RESET_CPORT 0x05 + +/* request to time the latency of messages on a given cport */ +#define GB_APB_REQUEST_LATENCY_TAG_EN 0x06 +#define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07 + +/* request to control the CSI transmitter */ +#define GB_APB_REQUEST_CSI_TX_CONTROL 0x08 + +/* request to control audio streaming */ +#define GB_APB_REQUEST_AUDIO_CONTROL 0x09 + +/* TimeSync requests */ +#define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d +#define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e +#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f +#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10 + +/* requests to set Greybus CPort flags */ +#define GB_APB_REQUEST_CPORT_FLAGS 0x11 + +/* ARPC request */ +#define GB_APB_REQUEST_ARPC_RUN 0x12 + +struct gb_apb_request_cport_flags { + __le32 flags; +#define GB_APB_CPORT_FLAG_CONTROL 0x01 +#define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02 +} __packed; + + +/* Firmware Download Protocol */ + +/* Request Types */ +#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01 +#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02 +#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03 + +#define GB_FIRMWARE_TAG_MAX_SIZE 10 + +/* firmware download find firmware request/response */ +struct gb_fw_download_find_firmware_request { + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; + +struct gb_fw_download_find_firmware_response { + __u8 firmware_id; + __le32 size; +} __packed; + +/* firmware download fetch firmware request/response */ +struct gb_fw_download_fetch_firmware_request { + __u8 firmware_id; + __le32 offset; + __le32 size; +} __packed; + +struct gb_fw_download_fetch_firmware_response { + __u8 data[0]; +} __packed; + +/* firmware download release firmware request */ +struct gb_fw_download_release_firmware_request { + __u8 firmware_id; +} __packed; +/* firmware download release firmware response has no payload */ + + +/* Firmware Management Protocol */ + +/* Request Types */ +#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01 +#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02 +#define GB_FW_MGMT_TYPE_LOADED_FW 0x03 +#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04 +#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05 +#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06 + +#define GB_FW_LOAD_METHOD_UNIPRO 0x01 +#define GB_FW_LOAD_METHOD_INTERNAL 0x02 + +#define GB_FW_LOAD_STATUS_FAILED 0x00 +#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01 +#define GB_FW_LOAD_STATUS_VALIDATED 0x02 +#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03 + +#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01 +#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02 +#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03 +#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04 +#define GB_FW_BACKEND_FW_STATUS_INT 0x05 +#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06 +#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07 + +#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01 +#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02 +#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03 +#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04 +#define 
GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05 + +/* firmware management interface firmware version request has no payload */ +struct gb_fw_mgmt_interface_fw_version_response { + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; + __le16 major; + __le16 minor; +} __packed; + +/* firmware management load and validate firmware request/response */ +struct gb_fw_mgmt_load_and_validate_fw_request { + __u8 request_id; + __u8 load_method; + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; +/* firmware management load and validate firmware response has no payload*/ + +/* firmware management loaded firmware request */ +struct gb_fw_mgmt_loaded_fw_request { + __u8 request_id; + __u8 status; + __le16 major; + __le16 minor; +} __packed; +/* firmware management loaded firmware response has no payload */ + +/* firmware management backend firmware version request/response */ +struct gb_fw_mgmt_backend_fw_version_request { + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; + +struct gb_fw_mgmt_backend_fw_version_response { + __le16 major; + __le16 minor; + __u8 status; +} __packed; + +/* firmware management backend firmware update request */ +struct gb_fw_mgmt_backend_fw_update_request { + __u8 request_id; + __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE]; +} __packed; +/* firmware management backend firmware update response has no payload */ + +/* firmware management backend firmware updated request */ +struct gb_fw_mgmt_backend_fw_updated_request { + __u8 request_id; + __u8 status; +} __packed; +/* firmware management backend firmware updated response has no payload */ + + +/* Component Authentication Protocol (CAP) */ + +/* Request Types */ +#define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01 +#define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02 +#define GB_CAP_TYPE_AUTHENTICATE 0x03 + +/* CAP get endpoint uid request has no payload */ +struct gb_cap_get_endpoint_uid_response { + __u8 uid[8]; +} __packed; + +/* CAP get endpoint ims certificate request/response */ +struct gb_cap_get_ims_certificate_request { + __le32 certificate_class; + __le32 certificate_id; +} __packed; + +struct gb_cap_get_ims_certificate_response { + __u8 result_code; + __u8 certificate[0]; +} __packed; + +/* CAP authenticate request/response */ +struct gb_cap_authenticate_request { + __le32 auth_type; + __u8 uid[8]; + __u8 challenge[32]; +} __packed; + +struct gb_cap_authenticate_response { + __u8 result_code; + __u8 response[64]; + __u8 signature[0]; +} __packed; + + +/* Bootrom Protocol */ + +/* Version of the Greybus bootrom protocol we support */ +#define GB_BOOTROM_VERSION_MAJOR 0x00 +#define GB_BOOTROM_VERSION_MINOR 0x01 + +/* Greybus bootrom request types */ +#define GB_BOOTROM_TYPE_VERSION 0x01 +#define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02 +#define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03 +#define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04 +#define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no-payload */ +#define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no-payload */ + +/* Greybus bootrom boot stages */ +#define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */ +#define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */ +#define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */ + +/* Greybus bootrom ready to boot status */ +#define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */ +#define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */ +#define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob 
is valid and secure */ + +/* Max bootrom data fetch size in bytes */ +#define GB_BOOTROM_FETCH_MAX 2000 + +struct gb_bootrom_version_request { + __u8 major; + __u8 minor; +} __packed; + +struct gb_bootrom_version_response { + __u8 major; + __u8 minor; +} __packed; + +/* Bootrom protocol firmware size request/response */ +struct gb_bootrom_firmware_size_request { + __u8 stage; +} __packed; + +struct gb_bootrom_firmware_size_response { + __le32 size; +} __packed; + +/* Bootrom protocol get firmware request/response */ +struct gb_bootrom_get_firmware_request { + __le32 offset; + __le32 size; +} __packed; + +struct gb_bootrom_get_firmware_response { + __u8 data[0]; +} __packed; + +/* Bootrom protocol Ready to boot request */ +struct gb_bootrom_ready_to_boot_request { + __u8 status; +} __packed; +/* Bootrom protocol Ready to boot response has no payload */ + +/* Bootrom protocol get VID/PID request has no payload */ +struct gb_bootrom_get_vid_pid_response { + __le32 vendor_id; + __le32 product_id; +} __packed; + + +/* Power Supply */ + +/* Greybus power supply request types */ +#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02 +#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03 +#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04 +#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05 +#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06 +#define GB_POWER_SUPPLY_TYPE_EVENT 0x07 + +/* Greybus power supply battery technologies types */ +#define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_TECH_NiMH 0x0001 +#define GB_POWER_SUPPLY_TECH_LION 0x0002 +#define GB_POWER_SUPPLY_TECH_LIPO 0x0003 +#define GB_POWER_SUPPLY_TECH_LiFe 0x0004 +#define GB_POWER_SUPPLY_TECH_NiCd 0x0005 +#define GB_POWER_SUPPLY_TECH_LiMn 0x0006 + +/* Greybus power supply types */ +#define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000 +#define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001 +#define GB_POWER_SUPPLY_UPS_TYPE 0x0002 +#define GB_POWER_SUPPLY_MAINS_TYPE 0x0003 +#define GB_POWER_SUPPLY_USB_TYPE 0x0004 +#define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005 +#define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006 +#define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007 + +/* Greybus power supply health values */ +#define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001 +#define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002 +#define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003 +#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004 +#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005 +#define GB_POWER_SUPPLY_HEALTH_COLD 0x0006 +#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007 +#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008 + +/* Greybus power supply status values */ +#define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001 +#define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002 +#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003 +#define GB_POWER_SUPPLY_STATUS_FULL 0x0004 + +/* Greybus power supply capacity level values */ +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004 +#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005 + +/* Greybus power supply scope values */ +#define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000 +#define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001 +#define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002 + +struct gb_power_supply_get_supplies_response { + __u8 supplies_count; +} __packed; + +struct 
gb_power_supply_get_description_request { + __u8 psy_id; +} __packed; + +struct gb_power_supply_get_description_response { + __u8 manufacturer[32]; + __u8 model[32]; + __u8 serial_number[32]; + __le16 type; + __u8 properties_count; +} __packed; + +struct gb_power_supply_props_desc { + __u8 property; +#define GB_POWER_SUPPLY_PROP_STATUS 0x00 +#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01 +#define GB_POWER_SUPPLY_PROP_HEALTH 0x02 +#define GB_POWER_SUPPLY_PROP_PRESENT 0x03 +#define GB_POWER_SUPPLY_PROP_ONLINE 0x04 +#define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05 +#define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06 +#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07 +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08 +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09 +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A +#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B +#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C +#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D +#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E +#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F +#define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10 +#define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11 +#define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12 +#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13 +#define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14 +#define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15 +#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16 +#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17 +#define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18 +#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19 +#define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A +#define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B +#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F +#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20 +#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21 +#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22 +#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23 +#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24 +#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25 +#define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26 +#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27 +#define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28 +#define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29 +#define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A +#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B +#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C +#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D +#define GB_POWER_SUPPLY_PROP_TEMP 0x2E +#define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F +#define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30 +#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31 +#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32 +#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33 +#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34 +#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35 +#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36 +#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37 +#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38 +#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39 +#define GB_POWER_SUPPLY_PROP_TYPE 0x3A +#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B +#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C +#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D + __u8 is_writeable; +} __packed; + +struct gb_power_supply_get_property_descriptors_request { + __u8 psy_id; +} __packed; + +struct gb_power_supply_get_property_descriptors_response { + 
__u8 properties_count; + struct gb_power_supply_props_desc props[]; +} __packed; + +struct gb_power_supply_get_property_request { + __u8 psy_id; + __u8 property; +} __packed; + +struct gb_power_supply_get_property_response { + __le32 prop_val; +}; + +struct gb_power_supply_set_property_request { + __u8 psy_id; + __u8 property; + __le32 prop_val; +} __packed; + +struct gb_power_supply_event_request { + __u8 psy_id; + __u8 event; +#define GB_POWER_SUPPLY_UPDATE 0x01 +} __packed; + + +/* HID */ + +/* Greybus HID operation types */ +#define GB_HID_TYPE_GET_DESC 0x02 +#define GB_HID_TYPE_GET_REPORT_DESC 0x03 +#define GB_HID_TYPE_PWR_ON 0x04 +#define GB_HID_TYPE_PWR_OFF 0x05 +#define GB_HID_TYPE_GET_REPORT 0x06 +#define GB_HID_TYPE_SET_REPORT 0x07 +#define GB_HID_TYPE_IRQ_EVENT 0x08 + +/* Report type */ +#define GB_HID_INPUT_REPORT 0 +#define GB_HID_OUTPUT_REPORT 1 +#define GB_HID_FEATURE_REPORT 2 + +/* Different request/response structures */ +/* HID get descriptor response */ +struct gb_hid_desc_response { + __u8 bLength; + __le16 wReportDescLength; + __le16 bcdHID; + __le16 wProductID; + __le16 wVendorID; + __u8 bCountryCode; +} __packed; + +/* HID get report request/response */ +struct gb_hid_get_report_request { + __u8 report_type; + __u8 report_id; +} __packed; + +/* HID set report request */ +struct gb_hid_set_report_request { + __u8 report_type; + __u8 report_id; + __u8 report[0]; +} __packed; + +/* HID input report request, via interrupt pipe */ +struct gb_hid_input_report_request { + __u8 report[0]; +} __packed; + + +/* I2C */ + +/* Greybus i2c request types */ +#define GB_I2C_TYPE_FUNCTIONALITY 0x02 +#define GB_I2C_TYPE_TRANSFER 0x05 + +/* functionality request has no payload */ +struct gb_i2c_functionality_response { + __le32 functionality; +} __packed; + +/* + * Outgoing data immediately follows the op count and ops array. + * The data for each write (master -> slave) op in the array is sent + * in order, with no (e.g. pad) bytes separating them. + * + * Short reads cause the entire transfer request to fail, so the response + * payload consists only of the bytes read, and the number of bytes is + * exactly what was specified in the corresponding op. Like + * outgoing data, the incoming data is in order and contiguous. 
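+ *
+ * As a rough illustration (not part of the protocol definition): a
+ * two-byte write followed by a four-byte read of the same device would be
+ * encoded as op_count = 2, two struct gb_i2c_transfer_op entries of six
+ * bytes each and the two outgoing bytes, i.e. a 16-byte request payload
+ * in total. The read op would carry size = 4 and a read bit in its flags
+ * (mirroring I2C_M_RD), and the response payload would be exactly those
+ * four bytes.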
+ */ +struct gb_i2c_transfer_op { + __le16 addr; + __le16 flags; + __le16 size; +} __packed; + +struct gb_i2c_transfer_request { + __le16 op_count; + struct gb_i2c_transfer_op ops[0]; /* op_count of these */ +} __packed; +struct gb_i2c_transfer_response { + __u8 data[0]; /* inbound data */ +} __packed; + + +/* GPIO */ + +/* Greybus GPIO request types */ +#define GB_GPIO_TYPE_LINE_COUNT 0x02 +#define GB_GPIO_TYPE_ACTIVATE 0x03 +#define GB_GPIO_TYPE_DEACTIVATE 0x04 +#define GB_GPIO_TYPE_GET_DIRECTION 0x05 +#define GB_GPIO_TYPE_DIRECTION_IN 0x06 +#define GB_GPIO_TYPE_DIRECTION_OUT 0x07 +#define GB_GPIO_TYPE_GET_VALUE 0x08 +#define GB_GPIO_TYPE_SET_VALUE 0x09 +#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a +#define GB_GPIO_TYPE_IRQ_TYPE 0x0b +#define GB_GPIO_TYPE_IRQ_MASK 0x0c +#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d +#define GB_GPIO_TYPE_IRQ_EVENT 0x0e + +#define GB_GPIO_IRQ_TYPE_NONE 0x00 +#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01 +#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02 +#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03 +#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04 +#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08 + +/* line count request has no payload */ +struct gb_gpio_line_count_response { + __u8 count; +} __packed; + +struct gb_gpio_activate_request { + __u8 which; +} __packed; +/* activate response has no payload */ + +struct gb_gpio_deactivate_request { + __u8 which; +} __packed; +/* deactivate response has no payload */ + +struct gb_gpio_get_direction_request { + __u8 which; +} __packed; +struct gb_gpio_get_direction_response { + __u8 direction; +} __packed; + +struct gb_gpio_direction_in_request { + __u8 which; +} __packed; +/* direction in response has no payload */ + +struct gb_gpio_direction_out_request { + __u8 which; + __u8 value; +} __packed; +/* direction out response has no payload */ + +struct gb_gpio_get_value_request { + __u8 which; +} __packed; +struct gb_gpio_get_value_response { + __u8 value; +} __packed; + +struct gb_gpio_set_value_request { + __u8 which; + __u8 value; +} __packed; +/* set value response has no payload */ + +struct gb_gpio_set_debounce_request { + __u8 which; + __le16 usec; +} __packed; +/* debounce response has no payload */ + +struct gb_gpio_irq_type_request { + __u8 which; + __u8 type; +} __packed; +/* irq type response has no payload */ + +struct gb_gpio_irq_mask_request { + __u8 which; +} __packed; +/* irq mask response has no payload */ + +struct gb_gpio_irq_unmask_request { + __u8 which; +} __packed; +/* irq unmask response has no payload */ + +/* irq event requests originate on another module and are handled on the AP */ +struct gb_gpio_irq_event_request { + __u8 which; +} __packed; +/* irq event has no response */ + + +/* PWM */ + +/* Greybus PWM operation types */ +#define GB_PWM_TYPE_PWM_COUNT 0x02 +#define GB_PWM_TYPE_ACTIVATE 0x03 +#define GB_PWM_TYPE_DEACTIVATE 0x04 +#define GB_PWM_TYPE_CONFIG 0x05 +#define GB_PWM_TYPE_POLARITY 0x06 +#define GB_PWM_TYPE_ENABLE 0x07 +#define GB_PWM_TYPE_DISABLE 0x08 + +/* pwm count request has no payload */ +struct gb_pwm_count_response { + __u8 count; +} __packed; + +struct gb_pwm_activate_request { + __u8 which; +} __packed; + +struct gb_pwm_deactivate_request { + __u8 which; +} __packed; + +struct gb_pwm_config_request { + __u8 which; + __le32 duty; + __le32 period; +} __packed; + +struct gb_pwm_polarity_request { + __u8 which; + __u8 polarity; +} __packed; + +struct gb_pwm_enable_request { + __u8 which; +} __packed; + +struct gb_pwm_disable_request { + __u8 which; +} __packed; + +/* SPI */ + +/* Should match up with modes in 
linux/spi/spi.h */ +#define GB_SPI_MODE_CPHA 0x01 /* clock phase */ +#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */ +#define GB_SPI_MODE_MODE_0 (0 | 0) /* (original MicroWire) */ +#define GB_SPI_MODE_MODE_1 (0 | GB_SPI_MODE_CPHA) +#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL | 0) +#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL | GB_SPI_MODE_CPHA) +#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */ +#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */ +#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */ +#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */ +#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */ +#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */ + +/* Should match up with flags in linux/spi/spi.h */ +#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */ +#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */ +#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */ + +/* Greybus spi operation types */ +#define GB_SPI_TYPE_MASTER_CONFIG 0x02 +#define GB_SPI_TYPE_DEVICE_CONFIG 0x03 +#define GB_SPI_TYPE_TRANSFER 0x04 + +/* mode request has no payload */ +struct gb_spi_master_config_response { + __le32 bits_per_word_mask; + __le32 min_speed_hz; + __le32 max_speed_hz; + __le16 mode; + __le16 flags; + __u8 num_chipselect; +} __packed; + +struct gb_spi_device_config_request { + __u8 chip_select; +} __packed; + +struct gb_spi_device_config_response { + __le16 mode; + __u8 bits_per_word; + __le32 max_speed_hz; + __u8 device_type; +#define GB_SPI_SPI_DEV 0x00 +#define GB_SPI_SPI_NOR 0x01 +#define GB_SPI_SPI_MODALIAS 0x02 + __u8 name[32]; +} __packed; + +/** + * struct gb_spi_transfer - a read/write buffer pair + * @speed_hz: Select a speed other than the device default for this transfer. If + * 0 the default (from @spi_device) is used. + * @len: size of rx and tx buffers (in bytes) + * @delay_usecs: microseconds to delay after this transfer before (optionally) + * changing the chipselect status, then starting the next transfer or + * completing this spi_message. + * @cs_change: affects chipselect after this transfer completes + * @bits_per_word: select a bits_per_word other than the device default for this + * transfer. If 0 the default (from @spi_device) is used. 
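+ * @xfer_flags: bitmask of the GB_SPI_XFER_* values defined below;
+ *	GB_SPI_XFER_WRITE marks a transfer carrying outgoing data,
+ *	GB_SPI_XFER_READ one expecting data back, and GB_SPI_XFER_INPROGRESS
+ *	appears intended for a transfer continued across several requests
+ *	(an interpretation of the flag names, not stated elsewhere here).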
+ */ +struct gb_spi_transfer { + __le32 speed_hz; + __le32 len; + __le16 delay_usecs; + __u8 cs_change; + __u8 bits_per_word; + __u8 xfer_flags; +#define GB_SPI_XFER_READ 0x01 +#define GB_SPI_XFER_WRITE 0x02 +#define GB_SPI_XFER_INPROGRESS 0x04 +} __packed; + +struct gb_spi_transfer_request { + __u8 chip_select; /* of the spi device */ + __u8 mode; /* of the spi device */ + __le16 count; + struct gb_spi_transfer transfers[0]; /* count of these */ +} __packed; + +struct gb_spi_transfer_response { + __u8 data[0]; /* inbound data */ +} __packed; + +/* Version of the Greybus SVC protocol we support */ +#define GB_SVC_VERSION_MAJOR 0x00 +#define GB_SVC_VERSION_MINOR 0x01 + +/* Greybus SVC request types */ +#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01 +#define GB_SVC_TYPE_SVC_HELLO 0x02 +#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03 +#define GB_SVC_TYPE_INTF_RESET 0x06 +#define GB_SVC_TYPE_CONN_CREATE 0x07 +#define GB_SVC_TYPE_CONN_DESTROY 0x08 +#define GB_SVC_TYPE_DME_PEER_GET 0x09 +#define GB_SVC_TYPE_DME_PEER_SET 0x0a +#define GB_SVC_TYPE_ROUTE_CREATE 0x0b +#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c +#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d +#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e +#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f +#define GB_SVC_TYPE_INTF_SET_PWRM 0x10 +#define GB_SVC_TYPE_INTF_EJECT 0x11 +#define GB_SVC_TYPE_PING 0x13 +#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14 +#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15 +#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16 +#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17 +#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18 +#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19 +#define GB_SVC_TYPE_TIMESYNC_PING 0x1a +#define GB_SVC_TYPE_MODULE_INSERTED 0x1f +#define GB_SVC_TYPE_MODULE_REMOVED 0x20 +#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21 +#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22 +#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23 +#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24 +#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25 +#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26 +#define GB_SVC_TYPE_INTF_ACTIVATE 0x27 +#define GB_SVC_TYPE_INTF_RESUME 0x28 +#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29 +#define GB_SVC_TYPE_INTF_OOPS 0x2a + +/* Greybus SVC protocol status values */ +#define GB_SVC_OP_SUCCESS 0x00 +#define GB_SVC_OP_UNKNOWN_ERROR 0x01 +#define GB_SVC_INTF_NOT_DETECTED 0x02 +#define GB_SVC_INTF_NO_UPRO_LINK 0x03 +#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04 +#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05 +#define GB_SVC_INTF_NO_V_SYS 0x06 +#define GB_SVC_INTF_V_CHG 0x07 +#define GB_SVC_INTF_WAKE_BUSY 0x08 +#define GB_SVC_INTF_NO_REFCLK 0x09 +#define GB_SVC_INTF_RELEASING 0x0a +#define GB_SVC_INTF_NO_ORDER 0x0b +#define GB_SVC_INTF_MBOX_SET 0x0c +#define GB_SVC_INTF_BAD_MBOX 0x0d +#define GB_SVC_INTF_OP_TIMEOUT 0x0e +#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f + +struct gb_svc_version_request { + __u8 major; + __u8 minor; +} __packed; + +struct gb_svc_version_response { + __u8 major; + __u8 minor; +} __packed; + +/* SVC protocol hello request */ +struct gb_svc_hello_request { + __le16 endo_id; + __u8 interface_id; +} __packed; +/* hello response has no payload */ + +struct gb_svc_intf_device_id_request { + __u8 intf_id; + __u8 device_id; +} __packed; +/* device id response has no payload */ + +struct gb_svc_intf_reset_request { + __u8 intf_id; +} __packed; +/* interface reset response has no payload */ + +struct gb_svc_intf_eject_request { + __u8 intf_id; +} __packed; +/* interface eject response has no payload */ + +struct gb_svc_conn_create_request { + __u8 intf1_id; + 
__le16 cport1_id; + __u8 intf2_id; + __le16 cport2_id; + __u8 tc; + __u8 flags; +} __packed; +/* connection create response has no payload */ + +struct gb_svc_conn_destroy_request { + __u8 intf1_id; + __le16 cport1_id; + __u8 intf2_id; + __le16 cport2_id; +} __packed; +/* connection destroy response has no payload */ + +struct gb_svc_dme_peer_get_request { + __u8 intf_id; + __le16 attr; + __le16 selector; +} __packed; + +struct gb_svc_dme_peer_get_response { + __le16 result_code; + __le32 attr_value; +} __packed; + +struct gb_svc_dme_peer_set_request { + __u8 intf_id; + __le16 attr; + __le16 selector; + __le32 value; +} __packed; + +struct gb_svc_dme_peer_set_response { + __le16 result_code; +} __packed; + +/* Greybus init-status values, currently retrieved using DME peer gets. */ +#define GB_INIT_SPI_BOOT_STARTED 0x02 +#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03 +#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04 +#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06 +#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09 +#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D + +struct gb_svc_route_create_request { + __u8 intf1_id; + __u8 dev1_id; + __u8 intf2_id; + __u8 dev2_id; +} __packed; +/* route create response has no payload */ + +struct gb_svc_route_destroy_request { + __u8 intf1_id; + __u8 intf2_id; +} __packed; +/* route destroy response has no payload */ + +/* used for svc_intf_vsys_{enable,disable} */ +struct gb_svc_intf_vsys_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_vsys_response { + __u8 result_code; +#define GB_SVC_INTF_VSYS_OK 0x00 + /* 0x01 is reserved */ +#define GB_SVC_INTF_VSYS_FAIL 0x02 +} __packed; + +/* used for svc_intf_refclk_{enable,disable} */ +struct gb_svc_intf_refclk_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_refclk_response { + __u8 result_code; +#define GB_SVC_INTF_REFCLK_OK 0x00 + /* 0x01 is reserved */ +#define GB_SVC_INTF_REFCLK_FAIL 0x02 +} __packed; + +/* used for svc_intf_unipro_{enable,disable} */ +struct gb_svc_intf_unipro_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_unipro_response { + __u8 result_code; +#define GB_SVC_INTF_UNIPRO_OK 0x00 + /* 0x01 is reserved */ +#define GB_SVC_INTF_UNIPRO_FAIL 0x02 +#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03 +} __packed; + +#define GB_SVC_UNIPRO_FAST_MODE 0x01 +#define GB_SVC_UNIPRO_SLOW_MODE 0x02 +#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04 +#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05 +#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07 +#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11 +#define GB_SVC_UNIPRO_OFF_MODE 0x12 + +#define GB_SVC_SMALL_AMPLITUDE 0x01 +#define GB_SVC_LARGE_AMPLITUDE 0x02 + +#define GB_SVC_NO_DE_EMPHASIS 0x00 +#define GB_SVC_SMALL_DE_EMPHASIS 0x01 +#define GB_SVC_LARGE_DE_EMPHASIS 0x02 + +#define GB_SVC_PWRM_RXTERMINATION 0x01 +#define GB_SVC_PWRM_TXTERMINATION 0x02 +#define GB_SVC_PWRM_LINE_RESET 0x04 +#define GB_SVC_PWRM_SCRAMBLING 0x20 + +#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001 + +#define GB_SVC_UNIPRO_HS_SERIES_A 0x01 +#define GB_SVC_UNIPRO_HS_SERIES_B 0x02 + +#define GB_SVC_SETPWRM_PWR_OK 0x00 +#define GB_SVC_SETPWRM_PWR_LOCAL 0x01 +#define GB_SVC_SETPWRM_PWR_REMOTE 0x02 +#define GB_SVC_SETPWRM_PWR_BUSY 0x03 +#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04 +#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05 + +struct gb_svc_l2_timer_cfg { + __le16 tsb_fc0_protection_timeout; + __le16 tsb_tc0_replay_timeout; + __le16 tsb_afc0_req_timeout; + __le16 tsb_fc1_protection_timeout; + __le16 tsb_tc1_replay_timeout; + __le16 tsb_afc1_req_timeout; + __le16 reserved_for_tc2[3]; + 
__le16 reserved_for_tc3[3]; +} __packed; + +struct gb_svc_intf_set_pwrm_request { + __u8 intf_id; + __u8 hs_series; + __u8 tx_mode; + __u8 tx_gear; + __u8 tx_nlanes; + __u8 tx_amplitude; + __u8 tx_hs_equalizer; + __u8 rx_mode; + __u8 rx_gear; + __u8 rx_nlanes; + __u8 flags; + __le32 quirks; + struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata; +} __packed; + +struct gb_svc_intf_set_pwrm_response { + __u8 result_code; +} __packed; + +struct gb_svc_key_event_request { + __le16 key_code; +#define GB_KEYCODE_ARA 0x00 + + __u8 key_event; +#define GB_SVC_KEY_RELEASED 0x00 +#define GB_SVC_KEY_PRESSED 0x01 +} __packed; + +#define GB_SVC_PWRMON_MAX_RAIL_COUNT 254 + +struct gb_svc_pwrmon_rail_count_get_response { + __u8 rail_count; +} __packed; + +#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE 32 + +struct gb_svc_pwrmon_rail_names_get_response { + __u8 status; + __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; +} __packed; + +#define GB_SVC_PWRMON_TYPE_CURR 0x01 +#define GB_SVC_PWRMON_TYPE_VOL 0x02 +#define GB_SVC_PWRMON_TYPE_PWR 0x03 + +#define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00 +#define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01 +#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02 +#define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03 + +struct gb_svc_pwrmon_sample_get_request { + __u8 rail_id; + __u8 measurement_type; +} __packed; + +struct gb_svc_pwrmon_sample_get_response { + __u8 result; + __le32 measurement; +} __packed; + +struct gb_svc_pwrmon_intf_sample_get_request { + __u8 intf_id; + __u8 measurement_type; +} __packed; + +struct gb_svc_pwrmon_intf_sample_get_response { + __u8 result; + __le32 measurement; +} __packed; + +#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001 + +struct gb_svc_module_inserted_request { + __u8 primary_intf_id; + __u8 intf_count; + __le16 flags; +} __packed; +/* module_inserted response has no payload */ + +struct gb_svc_module_removed_request { + __u8 primary_intf_id; +} __packed; +/* module_removed response has no payload */ + +struct gb_svc_intf_activate_request { + __u8 intf_id; +} __packed; + +#define GB_SVC_INTF_TYPE_UNKNOWN 0x00 +#define GB_SVC_INTF_TYPE_DUMMY 0x01 +#define GB_SVC_INTF_TYPE_UNIPRO 0x02 +#define GB_SVC_INTF_TYPE_GREYBUS 0x03 + +struct gb_svc_intf_activate_response { + __u8 status; + __u8 intf_type; +} __packed; + +struct gb_svc_intf_resume_request { + __u8 intf_id; +} __packed; + +struct gb_svc_intf_resume_response { + __u8 status; +} __packed; + +#define GB_SVC_INTF_MAILBOX_NONE 0x00 +#define GB_SVC_INTF_MAILBOX_AP 0x01 +#define GB_SVC_INTF_MAILBOX_GREYBUS 0x02 + +struct gb_svc_intf_mailbox_event_request { + __u8 intf_id; + __le16 result_code; + __le32 mailbox; +} __packed; +/* intf_mailbox_event response has no payload */ + +struct gb_svc_intf_oops_request { + __u8 intf_id; + __u8 reason; +} __packed; +/* intf_oops response has no payload */ + + +/* RAW */ + +/* Greybus raw request types */ +#define GB_RAW_TYPE_SEND 0x02 + +struct gb_raw_send_request { + __le32 len; + __u8 data[0]; +} __packed; + + +/* UART */ + +/* Greybus UART operation types */ +#define GB_UART_TYPE_SEND_DATA 0x02 +#define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */ +#define GB_UART_TYPE_SET_LINE_CODING 0x04 +#define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05 +#define GB_UART_TYPE_SEND_BREAK 0x06 +#define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */ +#define GB_UART_TYPE_RECEIVE_CREDITS 0x08 +#define GB_UART_TYPE_FLUSH_FIFOS 0x09 + +/* Represents data from AP -> Module */ +struct gb_uart_send_data_request { + __le16 size; + __u8 data[0]; +} __packed; + +/* 
recv-data-request flags */ +#define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */ +#define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */ +#define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */ +#define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */ + +/* Represents data from Module -> AP */ +struct gb_uart_recv_data_request { + __le16 size; + __u8 flags; + __u8 data[0]; +} __packed; + +struct gb_uart_receive_credits_request { + __le16 count; +} __packed; + +struct gb_uart_set_line_coding_request { + __le32 rate; + __u8 format; +#define GB_SERIAL_1_STOP_BITS 0 +#define GB_SERIAL_1_5_STOP_BITS 1 +#define GB_SERIAL_2_STOP_BITS 2 + + __u8 parity; +#define GB_SERIAL_NO_PARITY 0 +#define GB_SERIAL_ODD_PARITY 1 +#define GB_SERIAL_EVEN_PARITY 2 +#define GB_SERIAL_MARK_PARITY 3 +#define GB_SERIAL_SPACE_PARITY 4 + + __u8 data_bits; + + __u8 flow_control; +#define GB_SERIAL_AUTO_RTSCTS_EN 0x1 +} __packed; + +/* output control lines */ +#define GB_UART_CTRL_DTR 0x01 +#define GB_UART_CTRL_RTS 0x02 + +struct gb_uart_set_control_line_state_request { + __u8 control; +} __packed; + +struct gb_uart_set_break_request { + __u8 state; +} __packed; + +/* input control lines and line errors */ +#define GB_UART_CTRL_DCD 0x01 +#define GB_UART_CTRL_DSR 0x02 +#define GB_UART_CTRL_RI 0x04 + +struct gb_uart_serial_state_request { + __u8 control; +} __packed; + +struct gb_uart_serial_flush_request { + __u8 flags; +#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01 +#define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02 +} __packed; + +/* Loopback */ + +/* Greybus loopback request types */ +#define GB_LOOPBACK_TYPE_PING 0x02 +#define GB_LOOPBACK_TYPE_TRANSFER 0x03 +#define GB_LOOPBACK_TYPE_SINK 0x04 + +/* + * Loopback request/response header format should be identical + * to simplify bandwidth and data movement analysis. 
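+ *
+ * (The usual reading of the request types above: a transfer is expected to
+ * echo 'len' bytes of payload back in its response, a sink only consumes
+ * the payload, and a ping carries no payload at all. Nothing in the
+ * structures below enforces this.)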
+ */ +struct gb_loopback_transfer_request { + __le32 len; + __le32 reserved0; + __le32 reserved1; + __u8 data[0]; +} __packed; + +struct gb_loopback_transfer_response { + __le32 len; + __le32 reserved0; + __le32 reserved1; + __u8 data[0]; +} __packed; + +/* SDIO */ +/* Greybus SDIO operation types */ +#define GB_SDIO_TYPE_GET_CAPABILITIES 0x02 +#define GB_SDIO_TYPE_SET_IOS 0x03 +#define GB_SDIO_TYPE_COMMAND 0x04 +#define GB_SDIO_TYPE_TRANSFER 0x05 +#define GB_SDIO_TYPE_EVENT 0x06 + +/* get caps response: request has no payload */ +struct gb_sdio_get_caps_response { + __le32 caps; +#define GB_SDIO_CAP_NONREMOVABLE 0x00000001 +#define GB_SDIO_CAP_4_BIT_DATA 0x00000002 +#define GB_SDIO_CAP_8_BIT_DATA 0x00000004 +#define GB_SDIO_CAP_MMC_HS 0x00000008 +#define GB_SDIO_CAP_SD_HS 0x00000010 +#define GB_SDIO_CAP_ERASE 0x00000020 +#define GB_SDIO_CAP_1_2V_DDR 0x00000040 +#define GB_SDIO_CAP_1_8V_DDR 0x00000080 +#define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100 +#define GB_SDIO_CAP_UHS_SDR12 0x00000200 +#define GB_SDIO_CAP_UHS_SDR25 0x00000400 +#define GB_SDIO_CAP_UHS_SDR50 0x00000800 +#define GB_SDIO_CAP_UHS_SDR104 0x00001000 +#define GB_SDIO_CAP_UHS_DDR50 0x00002000 +#define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000 +#define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000 +#define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000 +#define GB_SDIO_CAP_HS200_1_2V 0x00020000 +#define GB_SDIO_CAP_HS200_1_8V 0x00040000 +#define GB_SDIO_CAP_HS400_1_2V 0x00080000 +#define GB_SDIO_CAP_HS400_1_8V 0x00100000 + + /* see possible values below at vdd */ + __le32 ocr; + __le32 f_min; + __le32 f_max; + __le16 max_blk_count; + __le16 max_blk_size; +} __packed; + +/* set ios request: response has no payload */ +struct gb_sdio_set_ios_request { + __le32 clock; + __le32 vdd; +#define GB_SDIO_VDD_165_195 0x00000001 +#define GB_SDIO_VDD_20_21 0x00000002 +#define GB_SDIO_VDD_21_22 0x00000004 +#define GB_SDIO_VDD_22_23 0x00000008 +#define GB_SDIO_VDD_23_24 0x00000010 +#define GB_SDIO_VDD_24_25 0x00000020 +#define GB_SDIO_VDD_25_26 0x00000040 +#define GB_SDIO_VDD_26_27 0x00000080 +#define GB_SDIO_VDD_27_28 0x00000100 +#define GB_SDIO_VDD_28_29 0x00000200 +#define GB_SDIO_VDD_29_30 0x00000400 +#define GB_SDIO_VDD_30_31 0x00000800 +#define GB_SDIO_VDD_31_32 0x00001000 +#define GB_SDIO_VDD_32_33 0x00002000 +#define GB_SDIO_VDD_33_34 0x00004000 +#define GB_SDIO_VDD_34_35 0x00008000 +#define GB_SDIO_VDD_35_36 0x00010000 + + __u8 bus_mode; +#define GB_SDIO_BUSMODE_OPENDRAIN 0x00 +#define GB_SDIO_BUSMODE_PUSHPULL 0x01 + + __u8 power_mode; +#define GB_SDIO_POWER_OFF 0x00 +#define GB_SDIO_POWER_UP 0x01 +#define GB_SDIO_POWER_ON 0x02 +#define GB_SDIO_POWER_UNDEFINED 0x03 + + __u8 bus_width; +#define GB_SDIO_BUS_WIDTH_1 0x00 +#define GB_SDIO_BUS_WIDTH_4 0x02 +#define GB_SDIO_BUS_WIDTH_8 0x03 + + __u8 timing; +#define GB_SDIO_TIMING_LEGACY 0x00 +#define GB_SDIO_TIMING_MMC_HS 0x01 +#define GB_SDIO_TIMING_SD_HS 0x02 +#define GB_SDIO_TIMING_UHS_SDR12 0x03 +#define GB_SDIO_TIMING_UHS_SDR25 0x04 +#define GB_SDIO_TIMING_UHS_SDR50 0x05 +#define GB_SDIO_TIMING_UHS_SDR104 0x06 +#define GB_SDIO_TIMING_UHS_DDR50 0x07 +#define GB_SDIO_TIMING_MMC_DDR52 0x08 +#define GB_SDIO_TIMING_MMC_HS200 0x09 +#define GB_SDIO_TIMING_MMC_HS400 0x0A + + __u8 signal_voltage; +#define GB_SDIO_SIGNAL_VOLTAGE_330 0x00 +#define GB_SDIO_SIGNAL_VOLTAGE_180 0x01 +#define GB_SDIO_SIGNAL_VOLTAGE_120 0x02 + + __u8 drv_type; +#define GB_SDIO_SET_DRIVER_TYPE_B 0x00 +#define GB_SDIO_SET_DRIVER_TYPE_A 0x01 +#define GB_SDIO_SET_DRIVER_TYPE_C 0x02 +#define GB_SDIO_SET_DRIVER_TYPE_D 0x03 +} __packed; + +/* 
command request */ +struct gb_sdio_command_request { + __u8 cmd; + __u8 cmd_flags; +#define GB_SDIO_RSP_NONE 0x00 +#define GB_SDIO_RSP_PRESENT 0x01 +#define GB_SDIO_RSP_136 0x02 +#define GB_SDIO_RSP_CRC 0x04 +#define GB_SDIO_RSP_BUSY 0x08 +#define GB_SDIO_RSP_OPCODE 0x10 + + __u8 cmd_type; +#define GB_SDIO_CMD_AC 0x00 +#define GB_SDIO_CMD_ADTC 0x01 +#define GB_SDIO_CMD_BC 0x02 +#define GB_SDIO_CMD_BCR 0x03 + + __le32 cmd_arg; + __le16 data_blocks; + __le16 data_blksz; +} __packed; + +struct gb_sdio_command_response { + __le32 resp[4]; +} __packed; + +/* transfer request */ +struct gb_sdio_transfer_request { + __u8 data_flags; +#define GB_SDIO_DATA_WRITE 0x01 +#define GB_SDIO_DATA_READ 0x02 +#define GB_SDIO_DATA_STREAM 0x04 + + __le16 data_blocks; + __le16 data_blksz; + __u8 data[0]; +} __packed; + +struct gb_sdio_transfer_response { + __le16 data_blocks; + __le16 data_blksz; + __u8 data[0]; +} __packed; + +/* event request: generated by module and is defined as unidirectional */ +struct gb_sdio_event_request { + __u8 event; +#define GB_SDIO_CARD_INSERTED 0x01 +#define GB_SDIO_CARD_REMOVED 0x02 +#define GB_SDIO_WP 0x04 +} __packed; + +/* Camera */ + +/* Greybus Camera request types */ +#define GB_CAMERA_TYPE_CAPABILITIES 0x02 +#define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03 +#define GB_CAMERA_TYPE_CAPTURE 0x04 +#define GB_CAMERA_TYPE_FLUSH 0x05 +#define GB_CAMERA_TYPE_METADATA 0x06 + +#define GB_CAMERA_MAX_STREAMS 4 +#define GB_CAMERA_MAX_SETTINGS_SIZE 8192 + +/* Greybus Camera Configure Streams request payload */ +struct gb_camera_stream_config_request { + __le16 width; + __le16 height; + __le16 format; + __le16 padding; +} __packed; + +struct gb_camera_configure_streams_request { + __u8 num_streams; + __u8 flags; +#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01 + __le16 padding; + struct gb_camera_stream_config_request config[0]; +} __packed; + +/* Greybus Camera Configure Streams response payload */ +struct gb_camera_stream_config_response { + __le16 width; + __le16 height; + __le16 format; + __u8 virtual_channel; + __u8 data_type[2]; + __le16 max_pkt_size; + __u8 padding; + __le32 max_size; +} __packed; + +struct gb_camera_configure_streams_response { + __u8 num_streams; +#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01 + __u8 flags; + __u8 padding[2]; + __le32 data_rate; + struct gb_camera_stream_config_response config[0]; +}; + +/* Greybus Camera Capture request payload - response has no payload */ +struct gb_camera_capture_request { + __le32 request_id; + __u8 streams; + __u8 padding; + __le16 num_frames; + __u8 settings[0]; +} __packed; + +/* Greybus Camera Flush response payload - request has no payload */ +struct gb_camera_flush_response { + __le32 request_id; +} __packed; + +/* Greybus Camera Metadata request payload - operation has no response */ +struct gb_camera_metadata_request { + __le32 request_id; + __le16 frame_number; + __u8 stream; + __u8 padding; + __u8 metadata[0]; +} __packed; + +/* Lights */ + +/* Greybus Lights request types */ +#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02 +#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03 +#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04 +#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05 +#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06 +#define GB_LIGHTS_TYPE_SET_BLINK 0x07 +#define GB_LIGHTS_TYPE_SET_COLOR 0x08 +#define GB_LIGHTS_TYPE_SET_FADE 0x09 +#define GB_LIGHTS_TYPE_EVENT 0x0A +#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B +#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C +#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D +#define 
GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E + +/* Greybus Light modes */ + +/* + * if you add any specific mode below, update also the + * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly + */ +#define GB_CHANNEL_MODE_NONE 0x00000000 +#define GB_CHANNEL_MODE_BATTERY 0x00000001 +#define GB_CHANNEL_MODE_POWER 0x00000002 +#define GB_CHANNEL_MODE_WIRELESS 0x00000004 +#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008 +#define GB_CHANNEL_MODE_KEYBOARD 0x00000010 +#define GB_CHANNEL_MODE_BUTTONS 0x00000020 +#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040 +#define GB_CHANNEL_MODE_ATTENTION 0x00000080 +#define GB_CHANNEL_MODE_FLASH 0x00000100 +#define GB_CHANNEL_MODE_TORCH 0x00000200 +#define GB_CHANNEL_MODE_INDICATOR 0x00000400 + +/* Lights Mode valid bit values */ +#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF +#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000 + +/* Greybus Light Channels Flags */ +#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001 +#define GB_LIGHT_CHANNEL_FADER 0x00000002 +#define GB_LIGHT_CHANNEL_BLINK 0x00000004 + +/* get count of lights in module */ +struct gb_lights_get_lights_response { + __u8 lights_count; +} __packed; + +/* light config request payload */ +struct gb_lights_get_light_config_request { + __u8 id; +} __packed; + +/* light config response payload */ +struct gb_lights_get_light_config_response { + __u8 channel_count; + __u8 name[32]; +} __packed; + +/* channel config request payload */ +struct gb_lights_get_channel_config_request { + __u8 light_id; + __u8 channel_id; +} __packed; + +/* channel flash config request payload */ +struct gb_lights_get_channel_flash_config_request { + __u8 light_id; + __u8 channel_id; +} __packed; + +/* channel config response payload */ +struct gb_lights_get_channel_config_response { + __u8 max_brightness; + __le32 flags; + __le32 color; + __u8 color_name[32]; + __le32 mode; + __u8 mode_name[32]; +} __packed; + +/* channel flash config response payload */ +struct gb_lights_get_channel_flash_config_response { + __le32 intensity_min_uA; + __le32 intensity_max_uA; + __le32 intensity_step_uA; + __le32 timeout_min_us; + __le32 timeout_max_us; + __le32 timeout_step_us; +} __packed; + +/* blink request payload: response have no payload */ +struct gb_lights_blink_request { + __u8 light_id; + __u8 channel_id; + __le16 time_on_ms; + __le16 time_off_ms; +} __packed; + +/* set brightness request payload: response have no payload */ +struct gb_lights_set_brightness_request { + __u8 light_id; + __u8 channel_id; + __u8 brightness; +} __packed; + +/* set color request payload: response have no payload */ +struct gb_lights_set_color_request { + __u8 light_id; + __u8 channel_id; + __le32 color; +} __packed; + +/* set fade request payload: response have no payload */ +struct gb_lights_set_fade_request { + __u8 light_id; + __u8 channel_id; + __u8 fade_in; + __u8 fade_out; +} __packed; + +/* event request: generated by module */ +struct gb_lights_event_request { + __u8 light_id; + __u8 event; +#define GB_LIGHTS_LIGHT_CONFIG 0x01 +} __packed; + +/* set flash intensity request payload: response have no payload */ +struct gb_lights_set_flash_intensity_request { + __u8 light_id; + __u8 channel_id; + __le32 intensity_uA; +} __packed; + +/* set flash strobe state request payload: response have no payload */ +struct gb_lights_set_flash_strobe_request { + __u8 light_id; + __u8 channel_id; + __u8 state; +} __packed; + +/* set flash timeout request payload: response have no payload */ +struct gb_lights_set_flash_timeout_request { + __u8 light_id; + __u8 channel_id; + __le32 
timeout_us; +} __packed; + +/* get flash fault request payload */ +struct gb_lights_get_flash_fault_request { + __u8 light_id; + __u8 channel_id; +} __packed; + +/* get flash fault response payload */ +struct gb_lights_get_flash_fault_response { + __le32 fault; +#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000 +#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001 +#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002 +#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004 +#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008 +#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010 +#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020 +#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040 +#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080 +} __packed; + +/* Audio */ + +#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02 +#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03 +#define GB_AUDIO_TYPE_GET_CONTROL 0x04 +#define GB_AUDIO_TYPE_SET_CONTROL 0x05 +#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06 +#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07 +#define GB_AUDIO_TYPE_GET_PCM 0x08 +#define GB_AUDIO_TYPE_SET_PCM 0x09 +#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a + /* 0x0b unused */ +#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c +#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d +#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e + /* 0x0f unused */ +#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10 +#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11 +#define GB_AUDIO_TYPE_JACK_EVENT 0x12 +#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13 +#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14 +#define GB_AUDIO_TYPE_SEND_DATA 0x15 + +/* Module must be able to buffer 10ms of audio data, minimum */ +#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000 + +#define GB_AUDIO_PCM_NAME_MAX 32 +#define AUDIO_DAI_NAME_MAX 32 +#define AUDIO_CONTROL_NAME_MAX 32 +#define AUDIO_CTL_ELEM_NAME_MAX 44 +#define AUDIO_ENUM_NAME_MAX 64 +#define AUDIO_WIDGET_NAME_MAX 32 + +/* See SNDRV_PCM_FMTBIT_* in Linux source */ +#define GB_AUDIO_PCM_FMT_S8 BIT(0) +#define GB_AUDIO_PCM_FMT_U8 BIT(1) +#define GB_AUDIO_PCM_FMT_S16_LE BIT(2) +#define GB_AUDIO_PCM_FMT_S16_BE BIT(3) +#define GB_AUDIO_PCM_FMT_U16_LE BIT(4) +#define GB_AUDIO_PCM_FMT_U16_BE BIT(5) +#define GB_AUDIO_PCM_FMT_S24_LE BIT(6) +#define GB_AUDIO_PCM_FMT_S24_BE BIT(7) +#define GB_AUDIO_PCM_FMT_U24_LE BIT(8) +#define GB_AUDIO_PCM_FMT_U24_BE BIT(9) +#define GB_AUDIO_PCM_FMT_S32_LE BIT(10) +#define GB_AUDIO_PCM_FMT_S32_BE BIT(11) +#define GB_AUDIO_PCM_FMT_U32_LE BIT(12) +#define GB_AUDIO_PCM_FMT_U32_BE BIT(13) + +/* See SNDRV_PCM_RATE_* in Linux source */ +#define GB_AUDIO_PCM_RATE_5512 BIT(0) +#define GB_AUDIO_PCM_RATE_8000 BIT(1) +#define GB_AUDIO_PCM_RATE_11025 BIT(2) +#define GB_AUDIO_PCM_RATE_16000 BIT(3) +#define GB_AUDIO_PCM_RATE_22050 BIT(4) +#define GB_AUDIO_PCM_RATE_32000 BIT(5) +#define GB_AUDIO_PCM_RATE_44100 BIT(6) +#define GB_AUDIO_PCM_RATE_48000 BIT(7) +#define GB_AUDIO_PCM_RATE_64000 BIT(8) +#define GB_AUDIO_PCM_RATE_88200 BIT(9) +#define GB_AUDIO_PCM_RATE_96000 BIT(10) +#define GB_AUDIO_PCM_RATE_176400 BIT(11) +#define GB_AUDIO_PCM_RATE_192000 BIT(12) + +#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1 +#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2 + +#define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0) +#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1) + +/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */ +#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01 +#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02 +#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03 +#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06 + +/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */ +#define 
GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00 +#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01 +#define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02 +#define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03 +#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04 +#define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05 +#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06 + +/* SNDRV_CTL_ELEM_ACCESS_* in Linux source */ +#define GB_AUDIO_ACCESS_READ BIT(0) +#define GB_AUDIO_ACCESS_WRITE BIT(1) +#define GB_AUDIO_ACCESS_VOLATILE BIT(2) +#define GB_AUDIO_ACCESS_TIMESTAMP BIT(3) +#define GB_AUDIO_ACCESS_TLV_READ BIT(4) +#define GB_AUDIO_ACCESS_TLV_WRITE BIT(5) +#define GB_AUDIO_ACCESS_TLV_COMMAND BIT(6) +#define GB_AUDIO_ACCESS_INACTIVE BIT(7) +#define GB_AUDIO_ACCESS_LOCK BIT(8) +#define GB_AUDIO_ACCESS_OWNER BIT(9) + +/* enum snd_soc_dapm_type */ +#define GB_AUDIO_WIDGET_TYPE_INPUT 0x0 +#define GB_AUDIO_WIDGET_TYPE_OUTPUT 0x1 +#define GB_AUDIO_WIDGET_TYPE_MUX 0x2 +#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX 0x3 +#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX 0x4 +#define GB_AUDIO_WIDGET_TYPE_MIXER 0x5 +#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL 0x6 +#define GB_AUDIO_WIDGET_TYPE_PGA 0x7 +#define GB_AUDIO_WIDGET_TYPE_OUT_DRV 0x8 +#define GB_AUDIO_WIDGET_TYPE_ADC 0x9 +#define GB_AUDIO_WIDGET_TYPE_DAC 0xa +#define GB_AUDIO_WIDGET_TYPE_MICBIAS 0xb +#define GB_AUDIO_WIDGET_TYPE_MIC 0xc +#define GB_AUDIO_WIDGET_TYPE_HP 0xd +#define GB_AUDIO_WIDGET_TYPE_SPK 0xe +#define GB_AUDIO_WIDGET_TYPE_LINE 0xf +#define GB_AUDIO_WIDGET_TYPE_SWITCH 0x10 +#define GB_AUDIO_WIDGET_TYPE_VMID 0x11 +#define GB_AUDIO_WIDGET_TYPE_PRE 0x12 +#define GB_AUDIO_WIDGET_TYPE_POST 0x13 +#define GB_AUDIO_WIDGET_TYPE_SUPPLY 0x14 +#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY 0x15 +#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY 0x16 +#define GB_AUDIO_WIDGET_TYPE_AIF_IN 0x17 +#define GB_AUDIO_WIDGET_TYPE_AIF_OUT 0x18 +#define GB_AUDIO_WIDGET_TYPE_SIGGEN 0x19 +#define GB_AUDIO_WIDGET_TYPE_DAI_IN 0x1a +#define GB_AUDIO_WIDGET_TYPE_DAI_OUT 0x1b +#define GB_AUDIO_WIDGET_TYPE_DAI_LINK 0x1c + +#define GB_AUDIO_WIDGET_STATE_DISABLED 0x01 +#define GB_AUDIO_WIDGET_STATE_ENAABLED 0x02 + +#define GB_AUDIO_JACK_EVENT_INSERTION 0x1 +#define GB_AUDIO_JACK_EVENT_REMOVAL 0x2 + +#define GB_AUDIO_BUTTON_EVENT_PRESS 0x1 +#define GB_AUDIO_BUTTON_EVENT_RELEASE 0x2 + +#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED 0x1 +#define GB_AUDIO_STREAMING_EVENT_HALT 0x2 +#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR 0x3 +#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR 0x4 +#define GB_AUDIO_STREAMING_EVENT_FAILURE 0x5 +#define GB_AUDIO_STREAMING_EVENT_UNDERRUN 0x6 +#define GB_AUDIO_STREAMING_EVENT_OVERRUN 0x7 +#define GB_AUDIO_STREAMING_EVENT_CLOCKING 0x8 +#define GB_AUDIO_STREAMING_EVENT_DATA_LEN 0x9 + +#define GB_AUDIO_INVALID_INDEX 0xff + +/* enum snd_jack_types */ +#define GB_AUDIO_JACK_HEADPHONE 0x0000001 +#define GB_AUDIO_JACK_MICROPHONE 0x0000002 +#define GB_AUDIO_JACK_HEADSET (GB_AUDIO_JACK_HEADPHONE | \ + GB_AUDIO_JACK_MICROPHONE) +#define GB_AUDIO_JACK_LINEOUT 0x0000004 +#define GB_AUDIO_JACK_MECHANICAL 0x0000008 +#define GB_AUDIO_JACK_VIDEOOUT 0x0000010 +#define GB_AUDIO_JACK_AVOUT (GB_AUDIO_JACK_LINEOUT | \ + GB_AUDIO_JACK_VIDEOOUT) +#define GB_AUDIO_JACK_LINEIN 0x0000020 +#define GB_AUDIO_JACK_OC_HPHL 0x0000040 +#define GB_AUDIO_JACK_OC_HPHR 0x0000080 +#define GB_AUDIO_JACK_MICROPHONE2 0x0000200 +#define GB_AUDIO_JACK_ANC_HEADPHONE (GB_AUDIO_JACK_HEADPHONE | \ + GB_AUDIO_JACK_MICROPHONE | \ + GB_AUDIO_JACK_MICROPHONE2) +/* Kept separate from switches to facilitate implementation */ +#define GB_AUDIO_JACK_BTN_0 0x4000000 +#define 
GB_AUDIO_JACK_BTN_1 0x2000000 +#define GB_AUDIO_JACK_BTN_2 0x1000000 +#define GB_AUDIO_JACK_BTN_3 0x0800000 + +struct gb_audio_pcm { + __u8 stream_name[GB_AUDIO_PCM_NAME_MAX]; + __le32 formats; /* GB_AUDIO_PCM_FMT_* */ + __le32 rates; /* GB_AUDIO_PCM_RATE_* */ + __u8 chan_min; + __u8 chan_max; + __u8 sig_bits; /* number of bits of content */ +} __packed; + +struct gb_audio_dai { + __u8 name[AUDIO_DAI_NAME_MAX]; + __le16 data_cport; + struct gb_audio_pcm capture; + struct gb_audio_pcm playback; +} __packed; + +struct gb_audio_integer { + __le32 min; + __le32 max; + __le32 step; +} __packed; + +struct gb_audio_integer64 { + __le64 min; + __le64 max; + __le64 step; +} __packed; + +struct gb_audio_enumerated { + __le32 items; + __le16 names_length; + __u8 names[0]; +} __packed; + +struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */ + __u8 type; /* GB_AUDIO_CTL_ELEM_TYPE_* */ + __le16 dimen[4]; + union { + struct gb_audio_integer integer; + struct gb_audio_integer64 integer64; + struct gb_audio_enumerated enumerated; + } value; +} __packed; + +struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */ + __le64 timestamp; /* XXX needed? */ + union { + __le32 integer_value[2]; /* consider CTL_DOUBLE_xxx */ + __le64 integer64_value[2]; + __le32 enumerated_item[2]; + } value; +} __packed; + +struct gb_audio_control { + __u8 name[AUDIO_CONTROL_NAME_MAX]; + __u8 id; /* 0-63 */ + __u8 iface; /* GB_AUDIO_IFACE_* */ + __le16 data_cport; + __le32 access; /* GB_AUDIO_ACCESS_* */ + __u8 count; /* count of same elements */ + __u8 count_values; /* count of values, max=2 for CTL_DOUBLE_xxx */ + struct gb_audio_ctl_elem_info info; +} __packed; + +struct gb_audio_widget { + __u8 name[AUDIO_WIDGET_NAME_MAX]; + __u8 sname[AUDIO_WIDGET_NAME_MAX]; + __u8 id; + __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */ + __u8 state; /* GB_AUDIO_WIDGET_STATE_* */ + __u8 ncontrols; + struct gb_audio_control ctl[0]; /* 'ncontrols' entries */ +} __packed; + +struct gb_audio_route { + __u8 source_id; /* widget id */ + __u8 destination_id; /* widget id */ + __u8 control_id; /* 0-63 */ + __u8 index; /* Selection within the control */ +} __packed; + +struct gb_audio_topology { + __u8 num_dais; + __u8 num_controls; + __u8 num_widgets; + __u8 num_routes; + __le32 size_dais; + __le32 size_controls; + __le32 size_widgets; + __le32 size_routes; + __le32 jack_type; + /* + * struct gb_audio_dai dai[num_dais]; + * struct gb_audio_control controls[num_controls]; + * struct gb_audio_widget widgets[num_widgets]; + * struct gb_audio_route routes[num_routes]; + */ + __u8 data[0]; +} __packed; + +struct gb_audio_get_topology_size_response { + __le16 size; +} __packed; + +struct gb_audio_get_topology_response { + struct gb_audio_topology topology; +} __packed; + +struct gb_audio_get_control_request { + __u8 control_id; + __u8 index; +} __packed; + +struct gb_audio_get_control_response { + struct gb_audio_ctl_elem_value value; +} __packed; + +struct gb_audio_set_control_request { + __u8 control_id; + __u8 index; + struct gb_audio_ctl_elem_value value; +} __packed; + +struct gb_audio_enable_widget_request { + __u8 widget_id; +} __packed; + +struct gb_audio_disable_widget_request { + __u8 widget_id; +} __packed; + +struct gb_audio_get_pcm_request { + __le16 data_cport; +} __packed; + +struct gb_audio_get_pcm_response { + __le32 format; + __le32 rate; + __u8 channels; + __u8 sig_bits; +} __packed; + +struct gb_audio_set_pcm_request { + __le16 data_cport; + __le32 format; + __le32 rate; + __u8 channels; + __u8 sig_bits; 
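+	/* presumably the number of significant bits, as for struct gb_audio_pcm */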
+} __packed; + +struct gb_audio_set_tx_data_size_request { + __le16 data_cport; + __le16 size; +} __packed; + +struct gb_audio_activate_tx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_deactivate_tx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_set_rx_data_size_request { + __le16 data_cport; + __le16 size; +} __packed; + +struct gb_audio_activate_rx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_deactivate_rx_request { + __le16 data_cport; +} __packed; + +struct gb_audio_jack_event_request { + __u8 widget_id; + __u8 jack_attribute; + __u8 event; +} __packed; + +struct gb_audio_button_event_request { + __u8 widget_id; + __u8 button_id; + __u8 event; +} __packed; + +struct gb_audio_streaming_event_request { + __le16 data_cport; + __u8 event; +} __packed; + +struct gb_audio_send_data_request { + __le64 timestamp; + __u8 data[0]; +} __packed; + + +/* Log */ + +/* operations */ +#define GB_LOG_TYPE_SEND_LOG 0x02 + +/* length */ +#define GB_LOG_MAX_LEN 1024 + +struct gb_log_send_log_request { + __le16 len; + __u8 msg[0]; +} __packed; + +#endif /* __GREYBUS_PROTOCOLS_H */ + diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h new file mode 100644 index 000000000000..d3faf0c1a569 --- /dev/null +++ b/include/linux/greybus/hd.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus Host Device + * + * Copyright 2014-2015 Google Inc. + * Copyright 2014-2015 Linaro Ltd. + */ + +#ifndef __HD_H +#define __HD_H + +#include <linux/types.h> +#include <linux/device.h> + +struct gb_host_device; +struct gb_message; + +struct gb_hd_driver { + size_t hd_priv_size; + + int (*cport_allocate)(struct gb_host_device *hd, int cport_id, + unsigned long flags); + void (*cport_release)(struct gb_host_device *hd, u16 cport_id); + int (*cport_enable)(struct gb_host_device *hd, u16 cport_id, + unsigned long flags); + int (*cport_disable)(struct gb_host_device *hd, u16 cport_id); + int (*cport_connected)(struct gb_host_device *hd, u16 cport_id); + int (*cport_flush)(struct gb_host_device *hd, u16 cport_id); + int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id, + u8 phase, unsigned int timeout); + int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id, + size_t peer_space, unsigned int timeout); + int (*cport_clear)(struct gb_host_device *hd, u16 cport_id); + + int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id, + struct gb_message *message, gfp_t gfp_mask); + void (*message_cancel)(struct gb_message *message); + int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id); + int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id); + int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd, + bool async); +}; + +struct gb_host_device { + struct device dev; + int bus_id; + const struct gb_hd_driver *driver; + + struct list_head modules; + struct list_head connections; + struct ida cport_id_map; + + /* Number of CPorts supported by the UniPro IP */ + size_t num_cports; + + /* Host device buffer constraints */ + size_t buffer_size_max; + + struct gb_svc *svc; + /* Private data for the host driver */ + unsigned long hd_priv[0] __aligned(sizeof(s64)); +}; +#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev) + +int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id); +void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id); +int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id, + unsigned long flags); +void 
gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id); + +struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver, + struct device *parent, + size_t buffer_size_max, + size_t num_cports); +int gb_hd_add(struct gb_host_device *hd); +void gb_hd_del(struct gb_host_device *hd); +void gb_hd_shutdown(struct gb_host_device *hd); +void gb_hd_put(struct gb_host_device *hd); +int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, + bool in_irq); + +int gb_hd_init(void); +void gb_hd_exit(void); + +#endif /* __HD_H */ diff --git a/include/linux/greybus/interface.h b/include/linux/greybus/interface.h new file mode 100644 index 000000000000..ce4def881e6f --- /dev/null +++ b/include/linux/greybus/interface.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus Interface Block code + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. + */ + +#ifndef __INTERFACE_H +#define __INTERFACE_H + +#include <linux/types.h> +#include <linux/device.h> + +enum gb_interface_type { + GB_INTERFACE_TYPE_INVALID = 0, + GB_INTERFACE_TYPE_UNKNOWN, + GB_INTERFACE_TYPE_DUMMY, + GB_INTERFACE_TYPE_UNIPRO, + GB_INTERFACE_TYPE_GREYBUS, +}; + +#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES BIT(0) +#define GB_INTERFACE_QUIRK_NO_INIT_STATUS BIT(1) +#define GB_INTERFACE_QUIRK_NO_GMP_IDS BIT(2) +#define GB_INTERFACE_QUIRK_FORCED_DISABLE BIT(3) +#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH BIT(4) +#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE BIT(5) +#define GB_INTERFACE_QUIRK_NO_PM BIT(6) + +struct gb_interface { + struct device dev; + struct gb_control *control; + + struct list_head bundles; + struct list_head module_node; + struct list_head manifest_descs; + u8 interface_id; /* Physical location within the Endo */ + u8 device_id; + u8 features; /* Feature flags set in the manifest */ + + enum gb_interface_type type; + + u32 ddbl1_manufacturer_id; + u32 ddbl1_product_id; + u32 vendor_id; + u32 product_id; + u64 serial_number; + + struct gb_host_device *hd; + struct gb_module *module; + + unsigned long quirks; + + struct mutex mutex; + + bool disconnected; + + bool ejected; + bool removed; + bool active; + bool enabled; + bool mode_switch; + bool dme_read; + + struct work_struct mode_switch_work; + struct completion mode_switch_completion; +}; +#define to_gb_interface(d) container_of(d, struct gb_interface, dev) + +struct gb_interface *gb_interface_create(struct gb_module *module, + u8 interface_id); +int gb_interface_activate(struct gb_interface *intf); +void gb_interface_deactivate(struct gb_interface *intf); +int gb_interface_enable(struct gb_interface *intf); +void gb_interface_disable(struct gb_interface *intf); +int gb_interface_add(struct gb_interface *intf); +void gb_interface_del(struct gb_interface *intf); +void gb_interface_put(struct gb_interface *intf); +void gb_interface_mailbox_event(struct gb_interface *intf, u16 result, + u32 mailbox); + +int gb_interface_request_mode_switch(struct gb_interface *intf); + +#endif /* __INTERFACE_H */ diff --git a/include/linux/greybus/manifest.h b/include/linux/greybus/manifest.h new file mode 100644 index 000000000000..830301b7a8bc --- /dev/null +++ b/include/linux/greybus/manifest.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus manifest parsing + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. 
+ */ + +#ifndef __MANIFEST_H +#define __MANIFEST_H + +#include <linux/types.h> + +struct gb_interface; +bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size); + +#endif /* __MANIFEST_H */ diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h new file mode 100644 index 000000000000..47b839af145d --- /dev/null +++ b/include/linux/greybus/module.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus Module code + * + * Copyright 2016 Google Inc. + * Copyright 2016 Linaro Ltd. + */ + +#ifndef __MODULE_H +#define __MODULE_H + +#include <linux/types.h> +#include <linux/device.h> + +struct gb_module { + struct device dev; + struct gb_host_device *hd; + + struct list_head hd_node; + + u8 module_id; + size_t num_interfaces; + + bool disconnected; + + struct gb_interface *interfaces[0]; +}; +#define to_gb_module(d) container_of(d, struct gb_module, dev) + +struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, + size_t num_interfaces); +int gb_module_add(struct gb_module *module); +void gb_module_del(struct gb_module *module); +void gb_module_put(struct gb_module *module); + +#endif /* __MODULE_H */ diff --git a/include/linux/greybus/operation.h b/include/linux/greybus/operation.h new file mode 100644 index 000000000000..cb8e4ef45222 --- /dev/null +++ b/include/linux/greybus/operation.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus operations + * + * Copyright 2014 Google Inc. + * Copyright 2014 Linaro Ltd. + */ + +#ifndef __OPERATION_H +#define __OPERATION_H + +#include <linux/completion.h> +#include <linux/kref.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +struct gb_host_device; +struct gb_operation; + +/* The default amount of time a request is given to complete */ +#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */ + +/* + * The top bit of the type in an operation message header indicates + * whether the message is a request (bit clear) or response (bit set) + */ +#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80) + +enum gb_operation_result { + GB_OP_SUCCESS = 0x00, + GB_OP_INTERRUPTED = 0x01, + GB_OP_TIMEOUT = 0x02, + GB_OP_NO_MEMORY = 0x03, + GB_OP_PROTOCOL_BAD = 0x04, + GB_OP_OVERFLOW = 0x05, + GB_OP_INVALID = 0x06, + GB_OP_RETRY = 0x07, + GB_OP_NONEXISTENT = 0x08, + GB_OP_UNKNOWN_ERROR = 0xfe, + GB_OP_MALFUNCTION = 0xff, +}; + +#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr) +#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX + +/* + * Protocol code should only examine the payload and payload_size fields, and + * host-controller drivers may use the hcpriv field. All other fields are + * intended to be private to the operations core code. + */ +struct gb_message { + struct gb_operation *operation; + struct gb_operation_msg_hdr *header; + + void *payload; + size_t payload_size; + + void *buffer; + + void *hcpriv; +}; + +#define GB_OPERATION_FLAG_INCOMING BIT(0) +#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1) +#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2) +#define GB_OPERATION_FLAG_CORE BIT(3) + +#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \ + GB_OPERATION_FLAG_UNIDIRECTIONAL) + +/* + * A Greybus operation is a remote procedure call performed over a + * connection between two UniPro interfaces. + * + * Every operation consists of a request message sent to the other + * end of the connection coupled with a reply message returned to + * the sender. 
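+ * (Unidirectional operations, flagged with GB_OPERATION_FLAG_UNIDIRECTIONAL
+ * below, are the exception and carry no response.) A minimal synchronous
+ * exchange on an established connection, sketched here with hypothetical
+ * request/response structures and a hypothetical operation type, would
+ * look roughly like:
+ *
+ *	struct foo_request request = { .value = cpu_to_le32(1) };
+ *	struct foo_response response;
+ *	int ret;
+ *
+ *	ret = gb_operation_sync(connection, GB_FOO_TYPE_GET,
+ *				&request, sizeof(request),
+ *				&response, sizeof(response));
+ *
+ * using the helpers declared near the end of this header.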
Every operation has a type, whose interpretation is + * dependent on the protocol associated with the connection. + * + * Only four things in an operation structure are intended to be + * directly usable by protocol handlers: the operation's connection + * pointer; the operation type; the request message payload (and + * size); and the response message payload (and size). Note that a + * message with a 0-byte payload has a null message payload pointer. + * + * In addition, every operation has a result, which is an errno + * value. Protocol handlers access the operation result using + * gb_operation_result(). + */ +typedef void (*gb_operation_callback)(struct gb_operation *); +struct gb_operation { + struct gb_connection *connection; + struct gb_message *request; + struct gb_message *response; + + unsigned long flags; + u8 type; + u16 id; + int errno; /* Operation result */ + + struct work_struct work; + gb_operation_callback callback; + struct completion completion; + struct timer_list timer; + + struct kref kref; + atomic_t waiters; + + int active; + struct list_head links; /* connection->operations */ + + void *private; +}; + +static inline bool +gb_operation_is_incoming(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_INCOMING; +} + +static inline bool +gb_operation_is_unidirectional(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL; +} + +static inline bool +gb_operation_short_response_allowed(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE; +} + +static inline bool gb_operation_is_core(struct gb_operation *operation) +{ + return operation->flags & GB_OPERATION_FLAG_CORE; +} + +void gb_connection_recv(struct gb_connection *connection, + void *data, size_t size); + +int gb_operation_result(struct gb_operation *operation); + +size_t gb_operation_get_payload_size_max(struct gb_connection *connection); +struct gb_operation * +gb_operation_create_flags(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, unsigned long flags, + gfp_t gfp); + +static inline struct gb_operation * +gb_operation_create(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, gfp_t gfp) +{ + return gb_operation_create_flags(connection, type, request_size, + response_size, 0, gfp); +} + +struct gb_operation * +gb_operation_create_core(struct gb_connection *connection, + u8 type, size_t request_size, + size_t response_size, unsigned long flags, + gfp_t gfp); + +void gb_operation_get(struct gb_operation *operation); +void gb_operation_put(struct gb_operation *operation); + +bool gb_operation_response_alloc(struct gb_operation *operation, + size_t response_size, gfp_t gfp); + +int gb_operation_request_send(struct gb_operation *operation, + gb_operation_callback callback, + unsigned int timeout, + gfp_t gfp); +int gb_operation_request_send_sync_timeout(struct gb_operation *operation, + unsigned int timeout); +static inline int +gb_operation_request_send_sync(struct gb_operation *operation) +{ + return gb_operation_request_send_sync_timeout(operation, + GB_OPERATION_TIMEOUT_DEFAULT); +} + +void gb_operation_cancel(struct gb_operation *operation, int errno); +void gb_operation_cancel_incoming(struct gb_operation *operation, int errno); + +void greybus_message_sent(struct gb_host_device *hd, + struct gb_message *message, int status); + +int gb_operation_sync_timeout(struct gb_connection *connection, int type, + void *request, 
int request_size, + void *response, int response_size, + unsigned int timeout); +int gb_operation_unidirectional_timeout(struct gb_connection *connection, + int type, void *request, int request_size, + unsigned int timeout); + +static inline int gb_operation_sync(struct gb_connection *connection, int type, + void *request, int request_size, + void *response, int response_size) +{ + return gb_operation_sync_timeout(connection, type, + request, request_size, response, response_size, + GB_OPERATION_TIMEOUT_DEFAULT); +} + +static inline int gb_operation_unidirectional(struct gb_connection *connection, + int type, void *request, int request_size) +{ + return gb_operation_unidirectional_timeout(connection, type, + request, request_size, GB_OPERATION_TIMEOUT_DEFAULT); +} + +static inline void *gb_operation_get_data(struct gb_operation *operation) +{ + return operation->private; +} + +static inline void gb_operation_set_data(struct gb_operation *operation, + void *data) +{ + operation->private = data; +} + +int gb_operation_init(void); +void gb_operation_exit(void); + +#endif /* !__OPERATION_H */ diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h new file mode 100644 index 000000000000..5afaf5f06856 --- /dev/null +++ b/include/linux/greybus/svc.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Greybus SVC code + * + * Copyright 2015 Google Inc. + * Copyright 2015 Linaro Ltd. + */ + +#ifndef __SVC_H +#define __SVC_H + +#include <linux/types.h> +#include <linux/device.h> + +struct gb_svc_l2_timer_cfg; + +#define GB_SVC_CPORT_FLAG_E2EFC BIT(0) +#define GB_SVC_CPORT_FLAG_CSD_N BIT(1) +#define GB_SVC_CPORT_FLAG_CSV_N BIT(2) + +enum gb_svc_state { + GB_SVC_STATE_RESET, + GB_SVC_STATE_PROTOCOL_VERSION, + GB_SVC_STATE_SVC_HELLO, +}; + +enum gb_svc_watchdog_bite { + GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0, + GB_SVC_WATCHDOG_BITE_PANIC_KERNEL, +}; + +struct gb_svc_watchdog; + +struct svc_debugfs_pwrmon_rail { + u8 id; + struct gb_svc *svc; +}; + +struct gb_svc { + struct device dev; + + struct gb_host_device *hd; + struct gb_connection *connection; + enum gb_svc_state state; + struct ida device_id_map; + struct workqueue_struct *wq; + + u16 endo_id; + u8 ap_intf_id; + + u8 protocol_major; + u8 protocol_minor; + + struct gb_svc_watchdog *watchdog; + enum gb_svc_watchdog_bite action; + + struct dentry *debugfs_dentry; + struct svc_debugfs_pwrmon_rail *pwrmon_rails; +}; +#define to_gb_svc(d) container_of(d, struct gb_svc, dev) + +struct gb_svc *gb_svc_create(struct gb_host_device *hd); +int gb_svc_add(struct gb_svc *svc); +void gb_svc_del(struct gb_svc *svc); +void gb_svc_put(struct gb_svc *svc); + +int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id, + u8 measurement_type, u32 *value); +int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id); +int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id, + u8 intf2_id, u8 dev2_id); +void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id); +int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, + u8 intf2_id, u16 cport2_id, u8 cport_flags); +void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id, + u8 intf2_id, u16 cport2_id); +int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id); +int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable); +int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable); +int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable); +int gb_svc_intf_activate(struct 
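As a usage note (not part of this patch), a Greybus protocol driver would typically wrap gb_operation_sync() roughly as below. The operation type, the request/response message layouts, and the helper name are hypothetical; only the gb_operation_sync() call itself comes from the header above.

#include <linux/greybus.h>
#include <linux/types.h>

#define GB_EXAMPLE_TYPE_GET_VALUE	0x02	/* hypothetical operation type */

struct gb_example_get_value_request {
	__le16	index;
} __packed;

struct gb_example_get_value_response {
	__le32	value;
} __packed;

static int gb_example_get_value(struct gb_connection *connection,
				u16 index, u32 *value)
{
	struct gb_example_get_value_request request;
	struct gb_example_get_value_response response;
	int ret;

	request.index = cpu_to_le16(index);

	/* Sends the request and waits up to GB_OPERATION_TIMEOUT_DEFAULT ms. */
	ret = gb_operation_sync(connection, GB_EXAMPLE_TYPE_GET_VALUE,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret)
		return ret;

	*value = le32_to_cpu(response.value);

	return 0;
}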
gb_svc *svc, u8 intf_id, u8 *intf_type); +int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id); + +int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, + u32 *value); +int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector, + u32 value); +int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series, + u8 tx_mode, u8 tx_gear, u8 tx_nlanes, + u8 tx_amplitude, u8 tx_hs_equalizer, + u8 rx_mode, u8 rx_gear, u8 rx_nlanes, + u8 flags, u32 quirks, + struct gb_svc_l2_timer_cfg *local, + struct gb_svc_l2_timer_cfg *remote); +int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id); +int gb_svc_ping(struct gb_svc *svc); +int gb_svc_watchdog_create(struct gb_svc *svc); +void gb_svc_watchdog_destroy(struct gb_svc *svc); +bool gb_svc_watchdog_enabled(struct gb_svc *svc); +int gb_svc_watchdog_enable(struct gb_svc *svc); +int gb_svc_watchdog_disable(struct gb_svc *svc); + +int gb_svc_protocol_init(void); +void gb_svc_protocol_exit(void); + +#endif /* __SVC_H */ diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 927ad6451105..9918a6c910c5 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -47,6 +47,7 @@ enum hdmi_infoframe_type { HDMI_INFOFRAME_TYPE_AVI = 0x82, HDMI_INFOFRAME_TYPE_SPD = 0x83, HDMI_INFOFRAME_TYPE_AUDIO = 0x84, + HDMI_INFOFRAME_TYPE_DRM = 0x87, }; #define HDMI_IEEE_OUI 0x000c03 @@ -55,6 +56,7 @@ enum hdmi_infoframe_type { #define HDMI_AVI_INFOFRAME_SIZE 13 #define HDMI_SPD_INFOFRAME_SIZE 25 #define HDMI_AUDIO_INFOFRAME_SIZE 10 +#define HDMI_DRM_INFOFRAME_SIZE 26 #define HDMI_INFOFRAME_SIZE(type) \ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE) @@ -152,6 +154,17 @@ enum hdmi_content_type { HDMI_CONTENT_TYPE_GAME, }; +enum hdmi_metadata_type { + HDMI_STATIC_METADATA_TYPE1 = 1, +}; + +enum hdmi_eotf { + HDMI_EOTF_TRADITIONAL_GAMMA_SDR, + HDMI_EOTF_TRADITIONAL_GAMMA_HDR, + HDMI_EOTF_SMPTE_ST2084, + HDMI_EOTF_BT_2100_HLG, +}; + struct hdmi_avi_infoframe { enum hdmi_infoframe_type type; unsigned char version; @@ -175,12 +188,37 @@ struct hdmi_avi_infoframe { unsigned short right_bar; }; +/* DRM Infoframe as per CTA 861.G spec */ +struct hdmi_drm_infoframe { + enum hdmi_infoframe_type type; + unsigned char version; + unsigned char length; + enum hdmi_eotf eotf; + enum hdmi_metadata_type metadata_type; + struct { + u16 x, y; + } display_primaries[3]; + struct { + u16 x, y; + } white_point; + u16 max_display_mastering_luminance; + u16 min_display_mastering_luminance; + u16 max_cll; + u16 max_fall; +}; + int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame); ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, size_t size); ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame, void *buffer, size_t size); int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame); +int hdmi_drm_infoframe_init(struct hdmi_drm_infoframe *frame); +ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer, + size_t size); +ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame, + void *buffer, size_t size); +int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame); enum hdmi_spd_sdi { HDMI_SPD_SDI_UNKNOWN, @@ -320,6 +358,33 @@ struct hdmi_vendor_infoframe { unsigned int s3d_ext_data; }; +/* HDR Metadata as per 861.G spec */ +struct hdr_static_metadata { + __u8 eotf; + __u8 metadata_type; + __u16 max_cll; + __u16 max_fall; + __u16 min_cll; +}; + +/** + * struct hdr_sink_metadata - HDR sink 
metadata + * + * Metadata Information read from Sink's EDID + */ +struct hdr_sink_metadata { + /** + * @metadata_type: Static_Metadata_Descriptor_ID. + */ + __u32 metadata_type; + /** + * @hdmi_type1: HDR Metadata Infoframe. + */ + union { + struct hdr_static_metadata hdmi_type1; + }; +}; + int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame); ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, void *buffer, size_t size); @@ -344,6 +409,7 @@ union hdmi_vendor_any_infoframe { * @spd: spd infoframe * @vendor: union of all vendor infoframes * @audio: audio infoframe + * @drm: Dynamic Range and Mastering infoframe * * This is used by the generic pack function. This works since all infoframes * have the same header which also indicates which type of infoframe should be @@ -355,6 +421,7 @@ union hdmi_infoframe { struct hdmi_spd_infoframe spd; union hdmi_vendor_any_infoframe vendor; struct hdmi_audio_infoframe audio; + struct hdmi_drm_infoframe drm; }; ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, diff --git a/include/linux/hid.h b/include/linux/hid.h index d770ab1a0479..cd41f209043f 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -1154,29 +1154,32 @@ int hid_pidff_init(struct hid_device *hid); #define hid_pidff_init NULL #endif -#define dbg_hid(format, arg...) \ +#define dbg_hid(fmt, ...) \ do { \ if (hid_debug) \ - printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \ + printk(KERN_DEBUG "%s: " fmt, __FILE__, ##__VA_ARGS__); \ } while (0) -#define hid_printk(level, hid, fmt, arg...) \ - dev_printk(level, &(hid)->dev, fmt, ##arg) -#define hid_emerg(hid, fmt, arg...) \ - dev_emerg(&(hid)->dev, fmt, ##arg) -#define hid_crit(hid, fmt, arg...) \ - dev_crit(&(hid)->dev, fmt, ##arg) -#define hid_alert(hid, fmt, arg...) \ - dev_alert(&(hid)->dev, fmt, ##arg) -#define hid_err(hid, fmt, arg...) \ - dev_err(&(hid)->dev, fmt, ##arg) -#define hid_notice(hid, fmt, arg...) \ - dev_notice(&(hid)->dev, fmt, ##arg) -#define hid_warn(hid, fmt, arg...) \ - dev_warn(&(hid)->dev, fmt, ##arg) -#define hid_info(hid, fmt, arg...) \ - dev_info(&(hid)->dev, fmt, ##arg) -#define hid_dbg(hid, fmt, arg...) \ - dev_dbg(&(hid)->dev, fmt, ##arg) +#define hid_err(hid, fmt, ...) \ + dev_err(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_notice(hid, fmt, ...) \ + dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_warn(hid, fmt, ...) \ + dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_info(hid, fmt, ...) \ + dev_info(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_dbg(hid, fmt, ...) \ + dev_dbg(&(hid)->dev, fmt, ##__VA_ARGS__) + +#define hid_err_once(hid, fmt, ...) \ + dev_err_once(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_notice_once(hid, fmt, ...) \ + dev_notice_once(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_warn_once(hid, fmt, ...) \ + dev_warn_once(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_info_once(hid, fmt, ...) \ + dev_info_once(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_dbg_once(hid, fmt, ...) \ + dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__) #endif diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 044a36d7c3f8..3fec513b9c00 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -21,8 +21,8 @@ * * HMM address space mirroring API: * - * Use HMM address space mirroring if you want to mirror range of the CPU page - * table of a process into a device page table. 
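For illustration (not part of this patch), packing the new Dynamic Range and Mastering infoframe might look like the sketch below. The luminance and content-light-level numbers are arbitrary placeholders; a real driver would derive them from the HDR source metadata.

#include <linux/hdmi.h>
#include <linux/types.h>

static ssize_t example_pack_hdr_infoframe(void *buf, size_t len)
{
	struct hdmi_drm_infoframe frame;
	int ret;

	ret = hdmi_drm_infoframe_init(&frame);
	if (ret)
		return ret;

	/* Example static HDR metadata; real values come from the HDR source. */
	frame.eotf = HDMI_EOTF_SMPTE_ST2084;
	frame.metadata_type = HDMI_STATIC_METADATA_TYPE1;
	frame.max_display_mastering_luminance = 1000;
	frame.min_display_mastering_luminance = 50;
	frame.max_cll = 1000;
	frame.max_fall = 400;

	/* The caller must provide at least HDMI_INFOFRAME_SIZE(DRM) bytes. */
	return hdmi_drm_infoframe_pack(&frame, buf, len);
}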
Here, "mirror" means "keep + * Use HMM address space mirroring if you want to mirror a range of the CPU + * page tables of a process into a device page table. Here, "mirror" means "keep * synchronized". Prerequisites: the device must provide the ability to write- * protect its page tables (at PAGE_SIZE granularity), and must be able to * recover from the resulting potential page faults. @@ -62,7 +62,7 @@ #include <linux/kconfig.h> #include <asm/pgtable.h> -#if IS_ENABLED(CONFIG_HMM) +#ifdef CONFIG_HMM_MIRROR #include <linux/device.h> #include <linux/migrate.h> @@ -82,19 +82,15 @@ * @mirrors_sem: read/write semaphore protecting the mirrors list * @wq: wait queue for user waiting on a range invalidation * @notifiers: count of active mmu notifiers - * @dead: is the mm dead ? */ struct hmm { - struct mm_struct *mm; - struct kref kref; - struct mutex lock; + struct mmu_notifier mmu_notifier; + spinlock_t ranges_lock; struct list_head ranges; struct list_head mirrors; - struct mmu_notifier mmu_notifier; struct rw_semaphore mirrors_sem; wait_queue_head_t wq; long notifiers; - bool dead; }; /* @@ -105,10 +101,11 @@ struct hmm { * HMM_PFN_WRITE: CPU page table has write permission set * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE) * - * The driver provide a flags array, if driver valid bit for an entry is bit - * 3 ie (entry & (1 << 3)) is true if entry is valid then driver must provide + * The driver provides a flags array for mapping page protections to device + * PTE bits. If the driver valid bit for an entry is bit 3, + * i.e., (entry & (1 << 3)), then the driver must provide * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3. - * Same logic apply to all flags. This is same idea as vm_page_prot in vma + * Same logic apply to all flags. This is the same idea as vm_page_prot in vma * except that this is per device driver rather than per architecture. */ enum hmm_pfn_flag_e { @@ -129,13 +126,13 @@ enum hmm_pfn_flag_e { * be mirrored by a device, because the entry will never have HMM_PFN_VALID * set and the pfn value is undefined. * - * Driver provide entry value for none entry, error entry and special entry, - * driver can alias (ie use same value for error and special for instance). It - * should not alias none and error or special. + * Driver provides values for none entry, error entry, and special entry. + * Driver can alias (i.e., use same value) error and special, but + * it should not alias none with error or special. 
* * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be: * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous, - * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table + * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry, * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one */ enum hmm_pfn_value_e { @@ -163,7 +160,6 @@ enum hmm_pfn_value_e { */ struct hmm_range { struct hmm *hmm; - struct vm_area_struct *vma; struct list_head list; unsigned long start; unsigned long end; @@ -172,57 +168,27 @@ struct hmm_range { const uint64_t *values; uint64_t default_flags; uint64_t pfn_flags_mask; - uint8_t page_shift; uint8_t pfn_shift; bool valid; }; /* - * hmm_range_page_shift() - return the page shift for the range - * @range: range being queried - * Returns: page shift (page size = 1 << page shift) for the range - */ -static inline unsigned hmm_range_page_shift(const struct hmm_range *range) -{ - return range->page_shift; -} - -/* - * hmm_range_page_size() - return the page size for the range - * @range: range being queried - * Returns: page size for the range in bytes - */ -static inline unsigned long hmm_range_page_size(const struct hmm_range *range) -{ - return 1UL << hmm_range_page_shift(range); -} - -/* * hmm_range_wait_until_valid() - wait for range to be valid * @range: range affected by invalidation to wait on * @timeout: time out for wait in ms (ie abort wait after that period of time) - * Returns: true if the range is valid, false otherwise. + * Return: true if the range is valid, false otherwise. */ static inline bool hmm_range_wait_until_valid(struct hmm_range *range, unsigned long timeout) { - /* Check if mm is dead ? */ - if (range->hmm == NULL || range->hmm->dead || range->hmm->mm == NULL) { - range->valid = false; - return false; - } - if (range->valid) - return true; - wait_event_timeout(range->hmm->wq, range->valid || range->hmm->dead, - msecs_to_jiffies(timeout)); - /* Return current valid status just in case we get lucky */ - return range->valid; + return wait_event_timeout(range->hmm->wq, range->valid, + msecs_to_jiffies(timeout)) != 0; } /* * hmm_range_valid() - test if a range is valid or not * @range: range - * Returns: true if the range is valid, false otherwise. + * Return: true if the range is valid, false otherwise. */ static inline bool hmm_range_valid(struct hmm_range *range) { @@ -233,7 +199,7 @@ static inline bool hmm_range_valid(struct hmm_range *range) * hmm_device_entry_to_page() - return struct page pointed to by a device entry * @range: range use to decode device entry value * @entry: device entry value to get corresponding struct page from - * Returns: struct page pointer if entry is a valid, NULL otherwise + * Return: struct page pointer if entry is a valid, NULL otherwise * * If the device entry is valid (ie valid flag set) then return the struct page * matching the entry value. Otherwise return NULL. 
@@ -256,7 +222,7 @@ static inline struct page *hmm_device_entry_to_page(const struct hmm_range *rang * hmm_device_entry_to_pfn() - return pfn value store in a device entry * @range: range use to decode device entry value * @entry: device entry to extract pfn from - * Returns: pfn value if device entry is valid, -1UL otherwise + * Return: pfn value if device entry is valid, -1UL otherwise */ static inline unsigned long hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn) @@ -276,7 +242,7 @@ hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn) * hmm_device_entry_from_page() - create a valid device entry for a page * @range: range use to encode HMM pfn value * @page: page for which to create the device entry - * Returns: valid device entry for the page + * Return: valid device entry for the page */ static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range, struct page *page) @@ -289,7 +255,7 @@ static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range, * hmm_device_entry_from_pfn() - create a valid device entry value from pfn * @range: range use to encode HMM pfn value * @pfn: pfn value for which to create the device entry - * Returns: valid device entry for the pfn + * Return: valid device entry for the pfn */ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range, unsigned long pfn) @@ -299,43 +265,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range, } /* - * Old API: - * hmm_pfn_to_page() - * hmm_pfn_to_pfn() - * hmm_pfn_from_page() - * hmm_pfn_from_pfn() - * - * This are the OLD API please use new API, it is here to avoid cross-tree - * merge painfullness ie we convert things to new API in stages. - */ -static inline struct page *hmm_pfn_to_page(const struct hmm_range *range, - uint64_t pfn) -{ - return hmm_device_entry_to_page(range, pfn); -} - -static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range, - uint64_t pfn) -{ - return hmm_device_entry_to_pfn(range, pfn); -} - -static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range, - struct page *page) -{ - return hmm_device_entry_from_page(range, page); -} - -static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, - unsigned long pfn) -{ - return hmm_device_entry_from_pfn(range, pfn); -} - - - -#if IS_ENABLED(CONFIG_HMM_MIRROR) -/* * Mirroring: how to synchronize device page table with CPU page table. * * A device driver that is participating in HMM mirroring must always @@ -386,29 +315,6 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, struct hmm_mirror; /* - * enum hmm_update_event - type of update - * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why) - */ -enum hmm_update_event { - HMM_UPDATE_INVALIDATE, -}; - -/* - * struct hmm_update - HMM update informations for callback - * - * @start: virtual start address of the range to update - * @end: virtual end address of the range to update - * @event: event triggering the update (what is happening) - * @blockable: can the callback block/sleep ? - */ -struct hmm_update { - unsigned long start; - unsigned long end; - enum hmm_update_event event; - bool blockable; -}; - -/* * struct hmm_mirror_ops - HMM mirror device operations callback * * @update: callback to update range on a device @@ -418,18 +324,19 @@ struct hmm_mirror_ops { * * @mirror: pointer to struct hmm_mirror * - * This is called when the mm_struct is being released. 
- * The callback should make sure no references to the mirror occur - * after the callback returns. + * This is called when the mm_struct is being released. The callback + * must ensure that all access to any pages obtained from this mirror + * is halted before the callback returns. All future access should + * fault. */ void (*release)(struct hmm_mirror *mirror); /* sync_cpu_device_pagetables() - synchronize page tables * * @mirror: pointer to struct hmm_mirror - * @update: update informations (see struct hmm_update) - * Returns: -EAGAIN if update.blockable false and callback need to - * block, 0 otherwise. + * @update: update information (see struct mmu_notifier_range) + * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false + * and callback needs to block, 0 otherwise. * * This callback ultimately originates from mmu_notifiers when the CPU * page table is updated. The device driver must update its page table @@ -440,8 +347,9 @@ struct hmm_mirror_ops { * page tables are completely updated (TLBs flushed, etc); this is a * synchronous call. */ - int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror, - const struct hmm_update *update); + int (*sync_cpu_device_pagetables)( + struct hmm_mirror *mirror, + const struct mmu_notifier_range *update); }; /* @@ -465,47 +373,26 @@ int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm); void hmm_mirror_unregister(struct hmm_mirror *mirror); /* - * hmm_mirror_mm_is_alive() - test if mm is still alive - * @mirror: the HMM mm mirror for which we want to lock the mmap_sem - * Returns: false if the mm is dead, true otherwise - * - * This is an optimization it will not accurately always return -EINVAL if the - * mm is dead ie there can be false negative (process is being kill but HMM is - * not yet inform of that). It is only intented to be use to optimize out case - * where driver is about to do something time consuming and it would be better - * to skip it if the mm is dead. + * Please see Documentation/vm/hmm.rst for how to use the range API. */ -static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror) -{ - struct mm_struct *mm; +int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror); +void hmm_range_unregister(struct hmm_range *range); - if (!mirror || !mirror->hmm) - return false; - mm = READ_ONCE(mirror->hmm->mm); - if (mirror->hmm->dead || !mm) - return false; +/* + * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case. + */ +#define HMM_FAULT_ALLOW_RETRY (1 << 0) - return true; -} +/* Don't fault in missing PTEs, just snapshot the current state. */ +#define HMM_FAULT_SNAPSHOT (1 << 1) +long hmm_range_fault(struct hmm_range *range, unsigned int flags); -/* - * Please see Documentation/vm/hmm.rst for how to use the range API. 
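A hedged sketch (not from this patch) of a driver-side mirror built on the reworked callback signature. The example_mirror structure, its mutex, and the helper names are hypothetical; only hmm_mirror_register(), the ops layout, and mmu_notifier_range_blockable() come from the kernel headers.

#include <linux/hmm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/kernel.h>

struct example_mirror {
	struct hmm_mirror	mirror;
	struct mutex		lock;	/* protects the device page table */
};

static int example_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
				const struct mmu_notifier_range *update)
{
	struct example_mirror *em = container_of(mirror, struct example_mirror,
						 mirror);

	/*
	 * A non-blocking invalidation must not sleep; ask the core to retry
	 * rather than taking the sleeping lock.
	 */
	if (!mmu_notifier_range_blockable(update))
		return -EAGAIN;

	mutex_lock(&em->lock);
	/* ...invalidate device PTEs covering [update->start, update->end)... */
	mutex_unlock(&em->lock);

	return 0;
}

static void example_release(struct hmm_mirror *mirror)
{
	/* Stop all device access to pages obtained through this mirror. */
}

static const struct hmm_mirror_ops example_mirror_ops = {
	.release			= example_release,
	.sync_cpu_device_pagetables	= example_sync_cpu_device_pagetables,
};

static int example_mirror_register(struct example_mirror *em,
				   struct mm_struct *mm)
{
	mutex_init(&em->lock);
	em->mirror.ops = &example_mirror_ops;
	return hmm_mirror_register(&em->mirror, mm);
}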
- */ -int hmm_range_register(struct hmm_range *range, - struct mm_struct *mm, - unsigned long start, - unsigned long end, - unsigned page_shift); -void hmm_range_unregister(struct hmm_range *range); -long hmm_range_snapshot(struct hmm_range *range); -long hmm_range_fault(struct hmm_range *range, bool block); long hmm_range_dma_map(struct hmm_range *range, struct device *device, dma_addr_t *daddrs, - bool block); + unsigned int flags); long hmm_range_dma_unmap(struct hmm_range *range, - struct vm_area_struct *vma, struct device *device, dma_addr_t *daddrs, bool dirty); @@ -519,262 +406,6 @@ long hmm_range_dma_unmap(struct hmm_range *range, */ #define HMM_RANGE_DEFAULT_TIMEOUT 1000 -/* This is a temporary helper to avoid merge conflict between trees. */ -static inline bool hmm_vma_range_done(struct hmm_range *range) -{ - bool ret = hmm_range_valid(range); - - hmm_range_unregister(range); - return ret; -} - -/* This is a temporary helper to avoid merge conflict between trees. */ -static inline int hmm_vma_fault(struct hmm_range *range, bool block) -{ - long ret; - - /* - * With the old API the driver must set each individual entries with - * the requested flags (valid, write, ...). So here we set the mask to - * keep intact the entries provided by the driver and zero out the - * default_flags. - */ - range->default_flags = 0; - range->pfn_flags_mask = -1UL; - - ret = hmm_range_register(range, range->vma->vm_mm, - range->start, range->end, - PAGE_SHIFT); - if (ret) - return (int)ret; - - if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { - /* - * The mmap_sem was taken by driver we release it here and - * returns -EAGAIN which correspond to mmap_sem have been - * drop in the old API. - */ - up_read(&range->vma->vm_mm->mmap_sem); - return -EAGAIN; - } - - ret = hmm_range_fault(range, block); - if (ret <= 0) { - if (ret == -EBUSY || !ret) { - /* Same as above drop mmap_sem to match old API. */ - up_read(&range->vma->vm_mm->mmap_sem); - ret = -EBUSY; - } else if (ret == -EAGAIN) - ret = -EBUSY; - hmm_range_unregister(range); - return ret; - } - return 0; -} - -/* Below are for HMM internal use only! Not to be used by device driver! */ -void hmm_mm_destroy(struct mm_struct *mm); - -static inline void hmm_mm_init(struct mm_struct *mm) -{ - mm->hmm = NULL; -} -#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */ -static inline void hmm_mm_destroy(struct mm_struct *mm) {} -static inline void hmm_mm_init(struct mm_struct *mm) {} #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ -#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC) -struct hmm_devmem; - -struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma, - unsigned long addr); - -/* - * struct hmm_devmem_ops - callback for ZONE_DEVICE memory events - * - * @free: call when refcount on page reach 1 and thus is no longer use - * @fault: call when there is a page fault to unaddressable memory - * - * Both callback happens from page_free() and page_fault() callback of struct - * dev_pagemap respectively. See include/linux/memremap.h for more details on - * those. - * - * The hmm_devmem_ops callback are just here to provide a coherent and - * uniq API to device driver and device driver should not register their - * own page_free() or page_fault() but rely on the hmm_devmem_ops call- - * back. 
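And a corresponding sketch (again not from this patch) of the updated range API faulting a single page through an already-registered mirror. The device PTE bit encodings in the flags/values arrays and all example_* names are made up for the illustration; the register/wait/fault/unregister sequence follows the declarations above.

#include <linux/hmm.h>
#include <linux/mm.h>

/* Hypothetical device PTE encodings required by hmm_range_fault(). */
static const uint64_t example_flags[HMM_PFN_FLAG_MAX] = {
	[HMM_PFN_VALID]		 = 1UL << 0,
	[HMM_PFN_WRITE]		 = 1UL << 1,
	[HMM_PFN_DEVICE_PRIVATE] = 1UL << 2,
};

static const uint64_t example_values[HMM_PFN_VALUE_MAX] = {
	[HMM_PFN_ERROR]		 = 1UL << 3,
	[HMM_PFN_NONE]		 = 0,
	[HMM_PFN_SPECIAL]	 = 1UL << 4,
};

static int example_fault_one_page(struct hmm_mirror *mirror,
				  struct mm_struct *mm, unsigned long addr)
{
	uint64_t pfns[1];
	struct hmm_range range = {
		.start		= addr & PAGE_MASK,
		.end		= (addr & PAGE_MASK) + PAGE_SIZE,
		.pfns		= pfns,
		.flags		= example_flags,
		.values		= example_values,
		.pfn_shift	= 0,
		/* Fault the page in rather than only snapshotting it. */
		.default_flags	= example_flags[HMM_PFN_VALID],
		.pfn_flags_mask	= 0,
	};
	long ret;

	ret = hmm_range_register(&range, mirror);
	if (ret)
		return ret;

	if (!hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		hmm_range_unregister(&range);
		return -EBUSY;
	}

	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(&range, 0);	/* or HMM_FAULT_SNAPSHOT */
	up_read(&mm->mmap_sem);
	if (ret <= 0) {
		hmm_range_unregister(&range);
		return ret ? ret : -EBUSY;
	}

	/*
	 * pfns[0] now holds a device entry; hmm_device_entry_to_page() can
	 * turn it back into a struct page. Re-check hmm_range_valid() under
	 * the driver lock before committing it to the device page table.
	 */
	hmm_range_unregister(&range);
	return 0;
}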
- */ -struct hmm_devmem_ops { - /* - * free() - free a device page - * @devmem: device memory structure (see struct hmm_devmem) - * @page: pointer to struct page being freed - * - * Call back occurs whenever a device page refcount reach 1 which - * means that no one is holding any reference on the page anymore - * (ZONE_DEVICE page have an elevated refcount of 1 as default so - * that they are not release to the general page allocator). - * - * Note that callback has exclusive ownership of the page (as no - * one is holding any reference). - */ - void (*free)(struct hmm_devmem *devmem, struct page *page); - /* - * fault() - CPU page fault or get user page (GUP) - * @devmem: device memory structure (see struct hmm_devmem) - * @vma: virtual memory area containing the virtual address - * @addr: virtual address that faulted or for which there is a GUP - * @page: pointer to struct page backing virtual address (unreliable) - * @flags: FAULT_FLAG_* (see include/linux/mm.h) - * @pmdp: page middle directory - * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR - * on error - * - * The callback occurs whenever there is a CPU page fault or GUP on a - * virtual address. This means that the device driver must migrate the - * page back to regular memory (CPU accessible). - * - * The device driver is free to migrate more than one page from the - * fault() callback as an optimization. However if device decide to - * migrate more than one page it must always priotirize the faulting - * address over the others. - * - * The struct page pointer is only given as an hint to allow quick - * lookup of internal device driver data. A concurrent migration - * might have already free that page and the virtual address might - * not longer be back by it. So it should not be modified by the - * callback. - * - * Note that mmap semaphore is held in read mode at least when this - * callback occurs, hence the vma is valid upon callback entry. - */ - vm_fault_t (*fault)(struct hmm_devmem *devmem, - struct vm_area_struct *vma, - unsigned long addr, - const struct page *page, - unsigned int flags, - pmd_t *pmdp); -}; - -/* - * struct hmm_devmem - track device memory - * - * @completion: completion object for device memory - * @pfn_first: first pfn for this resource (set by hmm_devmem_add()) - * @pfn_last: last pfn for this resource (set by hmm_devmem_add()) - * @resource: IO resource reserved for this chunk of memory - * @pagemap: device page map for that chunk - * @device: device to bind resource to - * @ops: memory operations callback - * @ref: per CPU refcount - * @page_fault: callback when CPU fault on an unaddressable device page - * - * This an helper structure for device drivers that do not wish to implement - * the gory details related to hotplugging new memoy and allocating struct - * pages. - * - * Device drivers can directly use ZONE_DEVICE memory on their own if they - * wish to do so. - * - * The page_fault() callback must migrate page back, from device memory to - * system memory, so that the CPU can access it. This might fail for various - * reasons (device issues, device have been unplugged, ...). When such error - * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and - * set the CPU page table entry to "poisoned". - * - * Note that because memory cgroup charges are transferred to the device memory, - * this should never fail due to memory restrictions. However, allocation - * of a regular system page might still fail because we are out of memory. 
If - * that happens, the page_fault() callback must return VM_FAULT_OOM. - * - * The page_fault() callback can also try to migrate back multiple pages in one - * chunk, as an optimization. It must, however, prioritize the faulting address - * over all the others. - */ -typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma, - unsigned long addr, - const struct page *page, - unsigned int flags, - pmd_t *pmdp); - -struct hmm_devmem { - struct completion completion; - unsigned long pfn_first; - unsigned long pfn_last; - struct resource *resource; - struct device *device; - struct dev_pagemap pagemap; - const struct hmm_devmem_ops *ops; - struct percpu_ref ref; - dev_page_fault_t page_fault; -}; - -/* - * To add (hotplug) device memory, HMM assumes that there is no real resource - * that reserves a range in the physical address space (this is intended to be - * use by unaddressable device memory). It will reserve a physical range big - * enough and allocate struct page for it. - * - * The device driver can wrap the hmm_devmem struct inside a private device - * driver struct. - */ -struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, - struct device *device, - unsigned long size); -struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, - struct device *device, - struct resource *res); - -/* - * hmm_devmem_page_set_drvdata - set per-page driver data field - * - * @page: pointer to struct page - * @data: driver data value to set - * - * Because page can not be on lru we have an unsigned long that driver can use - * to store a per page field. This just a simple helper to do that. - */ -static inline void hmm_devmem_page_set_drvdata(struct page *page, - unsigned long data) -{ - page->hmm_data = data; -} - -/* - * hmm_devmem_page_get_drvdata - get per page driver data field - * - * @page: pointer to struct page - * Return: driver data value - */ -static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page) -{ - return page->hmm_data; -} - - -/* - * struct hmm_device - fake device to hang device memory onto - * - * @device: device struct - * @minor: device minor number - */ -struct hmm_device { - struct device device; - unsigned int minor; -}; - -/* - * A device driver that wants to handle multiple devices memory through a - * single fake device can use hmm_device to do so. This is purely a helper and - * it is not strictly needed, in order to make use of any HMM functionality. 
- */ -struct hmm_device *hmm_device_new(void *drvdata); -void hmm_device_put(struct hmm_device *hmm_device); -#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ -#else /* IS_ENABLED(CONFIG_HMM) */ -static inline void hmm_mm_destroy(struct mm_struct *mm) {} -static inline void hmm_mm_init(struct mm_struct *mm) {} -#endif /* IS_ENABLED(CONFIG_HMM) */ - #endif /* LINUX_HMM_H */ diff --git a/include/linux/host1x.h b/include/linux/host1x.h index cfff30b9a62e..e6eea45e1154 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -297,6 +297,8 @@ struct host1x_device { struct list_head clients; bool registered; + + struct device_dma_parameters dma_parms; }; static inline struct host1x_device *to_host1x_device(struct device *dev) diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 2e8957eac4d4..1b9a51a1bccb 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -12,8 +12,8 @@ #ifndef _LINUX_HRTIMER_H #define _LINUX_HRTIMER_H +#include <linux/hrtimer_defs.h> #include <linux/rbtree.h> -#include <linux/ktime.h> #include <linux/init.h> #include <linux/list.h> #include <linux/percpu.h> @@ -32,12 +32,15 @@ struct hrtimer_cpu_base; * when starting the timer) * HRTIMER_MODE_SOFT - Timer callback function will be executed in * soft irq context + * HRTIMER_MODE_HARD - Timer callback function will be executed in + * hard irq context even on PREEMPT_RT. */ enum hrtimer_mode { HRTIMER_MODE_ABS = 0x00, HRTIMER_MODE_REL = 0x01, HRTIMER_MODE_PINNED = 0x02, HRTIMER_MODE_SOFT = 0x04, + HRTIMER_MODE_HARD = 0x08, HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED, HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED, @@ -48,6 +51,11 @@ enum hrtimer_mode { HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT, HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT, + HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD, + HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD, + + HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD, + HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD, }; /* @@ -101,6 +109,8 @@ enum hrtimer_restart { * @state: state information (See bit values above) * @is_rel: Set if the timer was armed relative * @is_soft: Set if hrtimer will be expired in soft interrupt context. + * @is_hard: Set if hrtimer will be expired in hard interrupt context + * even on RT. * * The hrtimer structure must be initialized by hrtimer_init() */ @@ -112,6 +122,7 @@ struct hrtimer { u8 state; u8 is_rel; u8 is_soft; + u8 is_hard; }; /** @@ -183,6 +194,10 @@ enum hrtimer_base_type { * @nr_retries: Total number of hrtimer interrupt retries * @nr_hangs: Total number of hrtimer interrupt hangs * @max_hang_time: Maximum time spent in hrtimer_interrupt + * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are + * expired + * @timer_waiters: A hrtimer_cancel() invocation waits for the timer + * callback to finish. 
* @expires_next: absolute time of the next event, is required for remote * hrtimer enqueue; it is the total first expiry time (hard * and soft hrtimer are taken into account) @@ -210,6 +225,10 @@ struct hrtimer_cpu_base { unsigned short nr_hangs; unsigned int max_hang_time; #endif +#ifdef CONFIG_PREEMPT_RT + spinlock_t softirq_expiry_lock; + atomic_t timer_waiters; +#endif ktime_t expires_next; struct hrtimer *next_timer; ktime_t softirq_expires_next; @@ -298,26 +317,12 @@ struct clock_event_device; extern void hrtimer_interrupt(struct clock_event_device *dev); -/* - * The resolution of the clocks. The resolution value is returned in - * the clock_getres() system call to give application programmers an - * idea of the (in)accuracy of timers. Timer values are rounded up to - * this resolution values. - */ -# define HIGH_RES_NSEC 1 -# define KTIME_HIGH_RES (HIGH_RES_NSEC) -# define MONOTONIC_RES_NSEC HIGH_RES_NSEC -# define KTIME_MONOTONIC_RES KTIME_HIGH_RES - extern void clock_was_set_delayed(void); extern unsigned int hrtimer_resolution; #else -# define MONOTONIC_RES_NSEC LOW_RES_NSEC -# define KTIME_MONOTONIC_RES KTIME_LOW_RES - #define hrtimer_resolution (unsigned int)LOW_RES_NSEC static inline void clock_was_set_delayed(void) { } @@ -355,16 +360,29 @@ extern void hrtimers_resume(void); DECLARE_PER_CPU(struct tick_device, tick_cpu_device); +#ifdef CONFIG_PREEMPT_RT +void hrtimer_cancel_wait_running(const struct hrtimer *timer); +#else +static inline void hrtimer_cancel_wait_running(struct hrtimer *timer) +{ + cpu_relax(); +} +#endif /* Exported timer functions: */ /* Initialize timers: */ extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); +extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, + enum hrtimer_mode mode); #ifdef CONFIG_DEBUG_OBJECTS_TIMERS extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); +extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, + clockid_t clock_id, + enum hrtimer_mode mode); extern void destroy_hrtimer_on_stack(struct hrtimer *timer); #else @@ -374,6 +392,14 @@ static inline void hrtimer_init_on_stack(struct hrtimer *timer, { hrtimer_init(timer, which_clock, mode); } + +static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, + clockid_t clock_id, + enum hrtimer_mode mode) +{ + hrtimer_init_sleeper(sl, clock_id, mode); +} + static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } #endif @@ -409,6 +435,9 @@ static inline void hrtimer_start_expires(struct hrtimer *timer, hrtimer_start_range_ns(timer, soft, delta, mode); } +void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl, + enum hrtimer_mode mode); + static inline void hrtimer_restart(struct hrtimer *timer) { hrtimer_start_expires(timer, HRTIMER_MODE_ABS); @@ -477,11 +506,8 @@ extern long hrtimer_nanosleep(const struct timespec64 *rqtp, const enum hrtimer_mode mode, const clockid_t clockid); -extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, - struct task_struct *tsk); - extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, - const enum hrtimer_mode mode); + const enum hrtimer_mode mode); extern int schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, const enum hrtimer_mode mode, diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h new file mode 100644 index 000000000000..2d3e3c5fb946 --- /dev/null +++ b/include/linux/hrtimer_defs.h @@ -0,0 +1,27 @@ +/* 
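As a usage note (not part of this patch), arming a timer whose callback must run in hard interrupt context even on PREEMPT_RT could look like the sketch below; the names and the 10 ms delay are placeholders.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
	/* Runs in hard interrupt context even on PREEMPT_RT. */
	return HRTIMER_NORESTART;
}

static void example_timer_setup(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	example_timer.function = example_timer_fn;
	hrtimer_start(&example_timer, ms_to_ktime(10), HRTIMER_MODE_REL_HARD);
}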
SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_HRTIMER_DEFS_H +#define _LINUX_HRTIMER_DEFS_H + +#include <linux/ktime.h> + +#ifdef CONFIG_HIGH_RES_TIMERS + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. + */ +# define HIGH_RES_NSEC 1 +# define KTIME_HIGH_RES (HIGH_RES_NSEC) +# define MONOTONIC_RES_NSEC HIGH_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_HIGH_RES + +#else + +# define MONOTONIC_RES_NSEC LOW_RES_NSEC +# define KTIME_MONOTONIC_RES KTIME_LOW_RES + +#endif + +#endif diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 7cd5c150c21d..93d5cf0bc716 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -108,7 +108,12 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) return true; - + /* + * For dax vmas, try to always use hugepage mappings. If the kernel does + * not support hugepages, fsdax mappings will fallback to PAGE_SIZE + * mappings, and device-dax namespaces, that try to guarantee a given + * mapping size, will fail to enable + */ if (vma_is_dax(vma)) return true; @@ -121,6 +126,23 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) bool transparent_hugepage_enabled(struct vm_area_struct *vma); +#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1) + +static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, + unsigned long haddr) +{ + /* Don't have to check pgoff for anonymous vma */ + if (!vma_is_anonymous(vma)) { + if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) != + (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK)) + return false; + } + + if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) + return false; + return true; +} + #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) @@ -250,6 +272,15 @@ static inline bool thp_migration_supported(void) return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION); } +static inline struct list_head *page_deferred_list(struct page *page) +{ + /* + * Global or memcg deferred list in the second tail pages is + * occupied by compound_head. + */ + return &page[2].deferred_list; +} + #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) @@ -271,6 +302,12 @@ static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) return false; } +static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, + unsigned long haddr) +{ + return false; +} + static inline void prep_transhuge_page(struct page *page) {} #define transparent_hugepage_flags 0UL diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index edf476c8cfb9..53fc34f930d0 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -16,29 +16,11 @@ struct user_struct; struct mmu_gather; #ifndef is_hugepd -/* - * Some architectures requires a hugepage directory format that is - * required to support multiple hugepage sizes. For example - * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables" - * introduced the same on powerpc. This allows for a more flexible hugepage - * pagetable layout. 
- */ typedef struct { unsigned long pd; } hugepd_t; #define is_hugepd(hugepd) (0) #define __hugepd(x) ((hugepd_t) { (x) }) -static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, - unsigned pdshift, unsigned long end, - int write, struct page **pages, int *nr) -{ - return 0; -} -#else -extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr, - unsigned pdshift, unsigned long end, - int write, struct page **pages, int *nr); #endif - #ifdef CONFIG_HUGETLB_PAGE #include <linux/mempolicy.h> @@ -472,7 +454,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, static inline struct hstate *page_hstate(struct page *page) { VM_BUG_ON_PAGE(!PageHuge(page), page); - return size_to_hstate(PAGE_SIZE << compound_order(page)); + return size_to_hstate(page_size(page)); } static inline unsigned hstate_index_to_shift(unsigned index) @@ -608,22 +590,92 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; -#define alloc_huge_page(v, a, r) NULL -#define alloc_huge_page_node(h, nid) NULL -#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL -#define alloc_huge_page_vma(h, vma, address) NULL -#define alloc_bootmem_huge_page(h) NULL -#define hstate_file(f) NULL -#define hstate_sizelog(s) NULL -#define hstate_vma(v) NULL -#define hstate_inode(i) NULL -#define page_hstate(page) NULL -#define huge_page_size(h) PAGE_SIZE -#define huge_page_mask(h) PAGE_MASK -#define vma_kernel_pagesize(v) PAGE_SIZE -#define vma_mmu_pagesize(v) PAGE_SIZE -#define huge_page_order(h) 0 -#define huge_page_shift(h) PAGE_SHIFT + +static inline struct page *alloc_huge_page(struct vm_area_struct *vma, + unsigned long addr, + int avoid_reserve) +{ + return NULL; +} + +static inline struct page *alloc_huge_page_node(struct hstate *h, int nid) +{ + return NULL; +} + +static inline struct page * +alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask) +{ + return NULL; +} + +static inline struct page *alloc_huge_page_vma(struct hstate *h, + struct vm_area_struct *vma, + unsigned long address) +{ + return NULL; +} + +static inline int __alloc_bootmem_huge_page(struct hstate *h) +{ + return 0; +} + +static inline struct hstate *hstate_file(struct file *f) +{ + return NULL; +} + +static inline struct hstate *hstate_sizelog(int page_size_log) +{ + return NULL; +} + +static inline struct hstate *hstate_vma(struct vm_area_struct *vma) +{ + return NULL; +} + +static inline struct hstate *hstate_inode(struct inode *i) +{ + return NULL; +} + +static inline struct hstate *page_hstate(struct page *page) +{ + return NULL; +} + +static inline unsigned long huge_page_size(struct hstate *h) +{ + return PAGE_SIZE; +} + +static inline unsigned long huge_page_mask(struct hstate *h) +{ + return PAGE_MASK; +} + +static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) +{ + return PAGE_SIZE; +} + +static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) +{ + return PAGE_SIZE; +} + +static inline unsigned int huge_page_order(struct hstate *h) +{ + return 0; +} + +static inline unsigned int huge_page_shift(struct hstate *h) +{ + return PAGE_SHIFT; +} + static inline bool hstate_is_gigantic(struct hstate *h) { return false; diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index c0b93e0ff0c0..8e6dd908da21 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -1,7 +1,7 @@ /* Hardware Random Number Generator - Please read 
Documentation/hw_random.txt for details on use. + Please read Documentation/admin-guide/hw_random.rst for details on use. ---------------------------------------------------------- This software may be used and distributed according to the terms diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 04c36b7a61dd..72579168189d 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -235,7 +235,7 @@ enum hwmon_power_attributes { #define HWMON_P_LABEL BIT(hwmon_power_label) #define HWMON_P_ALARM BIT(hwmon_power_alarm) #define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm) -#define HWMON_P_MIN_ALARM BIT(hwmon_power_max_alarm) +#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm) #define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm) #define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm) #define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm) diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h index 0afe693be5f4..bfe7c1f1ac6d 100644 --- a/include/linux/hwspinlock.h +++ b/include/linux/hwspinlock.h @@ -14,9 +14,10 @@ #include <linux/sched.h> /* hwspinlock mode argument */ -#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ -#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ -#define HWLOCK_RAW 0x03 +#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ +#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ +#define HWLOCK_RAW 0x03 +#define HWLOCK_IN_ATOMIC 0x04 /* Called while in atomic context */ struct device; struct device_node; @@ -223,6 +224,23 @@ static inline int hwspin_trylock_raw(struct hwspinlock *hwlock) } /** + * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock + * @hwlock: an hwspinlock which we want to trylock + * + * This function attempts to lock an hwspinlock, and will immediately fail + * if the hwspinlock is already taken. + * + * This function shall be called only from an atomic context. + * + * Returns 0 if we successfully locked the hwspinlock, -EBUSY if + * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. + */ +static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock) +{ + return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL); +} + +/** * hwspin_trylock() - attempt to lock a specific hwspinlock * @hwlock: an hwspinlock which we want to trylock * @@ -313,6 +331,28 @@ int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to) } /** + * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit + * @hwlock: the hwspinlock to be locked + * @to: timeout value in msecs + * + * This function locks the underlying @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up when @timeout msecs have elapsed. + * + * This function shall be called only from an atomic context and the timeout + * value shall not exceed a few msecs. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs). The function will never sleep. 
+ */ +static inline +int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to) +{ + return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL); +} + +/** * hwspin_lock_timeout() - lock an hwspinlock with timeout limit * @hwlock: the hwspinlock to be locked * @to: timeout value in msecs @@ -387,6 +427,21 @@ static inline void hwspin_unlock_raw(struct hwspinlock *hwlock) } /** + * hwspin_unlock_in_atomic() - unlock hwspinlock + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * + * This function will unlock a specific hwspinlock. + * + * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling + * this function: it is a bug to call unlock on a @hwlock that is already + * unlocked. + */ +static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock) +{ + __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL); +} + +/** * hwspin_unlock() - unlock hwspinlock * @hwlock: a previously-acquired hwspinlock which we want to unlock * diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6256cc34c4a6..b4a017093b69 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -245,7 +245,10 @@ struct vmbus_channel_offer { } pipe; } u; /* - * The sub_channel_index is defined in win8. + * The sub_channel_index is defined in Win8: a value of zero means a + * primary channel and a value of non-zero means a sub-channel. + * + * Before Win8, the field is reserved, meaning it's always zero. */ u16 sub_channel_index; u16 reserved3; @@ -423,6 +426,9 @@ enum vmbus_channel_message_type { CHANNELMSG_COUNT }; +/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */ +#define INVALID_RELID U32_MAX + struct vmbus_channel_message_header { enum vmbus_channel_message_type msgtype; u32 padding; @@ -934,6 +940,11 @@ static inline bool is_hvsock_channel(const struct vmbus_channel *c) VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); } +static inline bool is_sub_channel(const struct vmbus_channel *c) +{ + return c->offermsg.offer.sub_channel_index != 0; +} + static inline void set_channel_affinity_state(struct vmbus_channel *c, enum hv_numa_policy policy) { @@ -1149,6 +1160,9 @@ struct hv_driver { int (*remove)(struct hv_device *); void (*shutdown)(struct hv_device *); + int (*suspend)(struct hv_device *); + int (*resume)(struct hv_device *); + }; /* Base device object */ @@ -1578,4 +1592,33 @@ hv_pkt_iter_next(struct vmbus_channel *channel, for (pkt = hv_pkt_iter_first(channel); pkt; \ pkt = hv_pkt_iter_next(channel, pkt)) +/* + * Interface for passing data between SR-IOV PF and VF drivers. The VF driver + * sends requests to read and write blocks. Each block must be 128 bytes or + * smaller. Optionally, the VF driver can register a callback function which + * will be invoked when the host says that one or more of the first 64 block + * IDs is "invalid" which means that the VF driver should reread them. 
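For illustration (not part of this patch), the new *_in_atomic() variants might be used as sketched below. The 5 ms timeout is arbitrary, and the lock handle is assumed to have been obtained elsewhere (for example via hwspin_lock_request_specific()).

#include <linux/hwspinlock.h>
#include <linux/errno.h>

static int example_update_shared_reg(struct hwspinlock *hwlock)
{
	int ret;

	/*
	 * Safe to call from atomic context: it neither disables interrupts
	 * nor sleeps, it just busy-waits for up to 5 ms.
	 */
	ret = hwspin_lock_timeout_in_atomic(hwlock, 5);
	if (ret)
		return ret;

	/* ...touch the hardware resource shared with the remote processor... */

	hwspin_unlock_in_atomic(hwlock);
	return 0;
}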
+ */ +#define HV_CONFIG_BLOCK_SIZE_MAX 128 + +int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len, + unsigned int block_id, unsigned int *bytes_returned); +int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len, + unsigned int block_id); +int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)); + +struct hyperv_pci_block_ops { + int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len, + unsigned int block_id, unsigned int *bytes_returned); + int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len, + unsigned int block_id); + int (*reg_blk_invalidate)(struct pci_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)); +}; + +extern struct hyperv_pci_block_ops hvpci_block_ops; + #endif /* _HYPERV_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 1308126fc384..1361637c369d 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -1,19 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* ------------------------------------------------------------------------- */ -/* */ -/* i2c.h - definitions for the i2c-bus interface */ -/* */ -/* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-2000 Simon G. Vogl - +/* + * i2c.h - definitions for the Linux i2c bus interface + * Copyright (C) 1995-2000 Simon G. Vogl + * Copyright (C) 2013-2019 Wolfram Sang <wsa@the-dreams.de> + * + * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and + * Frodo Looijaard <frodol@dds.nl> */ -/* ------------------------------------------------------------------------- */ - -/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and - Frodo Looijaard <frodol@dds.nl> */ #ifndef _LINUX_I2C_H #define _LINUX_I2C_H +#include <linux/acpi.h> /* for acpi_handle */ #include <linux/mod_devicetable.h> #include <linux/device.h> /* for struct device */ #include <linux/sched.h> /* for completion */ @@ -39,7 +36,8 @@ struct i2c_device_identity; union i2c_smbus_data; struct i2c_board_info; enum i2c_slave_event; -typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); +typedef int (*i2c_slave_cb_t)(struct i2c_client *client, + enum i2c_slave_event event, u8 *val); struct module; struct property_entry; @@ -256,16 +254,16 @@ struct i2c_driver { unsigned int class; /* Standard driver model interfaces */ - int (*probe)(struct i2c_client *, const struct i2c_device_id *); - int (*remove)(struct i2c_client *); + int (*probe)(struct i2c_client *client, const struct i2c_device_id *id); + int (*remove)(struct i2c_client *client); /* New driver model interface to aid the seamless removal of the * current probe()'s, more commonly unused than used second parameter. */ - int (*probe_new)(struct i2c_client *); + int (*probe_new)(struct i2c_client *client); /* driver model interfaces that don't relate to enumeration */ - void (*shutdown)(struct i2c_client *); + void (*shutdown)(struct i2c_client *client); /* Alert callback, for example for the SMBus alert protocol. * The format and meaning of the data value depends on the protocol. @@ -274,7 +272,7 @@ struct i2c_driver { * For the SMBus Host Notify protocol, the data corresponds to the * 16-bit payload data reported by the slave device acting as master. 
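A minimal sketch (not part of this patch) of a VF driver consuming this config-block interface; the callback body, the block ID handling, and the function names are illustrative only.

#include <linux/hyperv.h>
#include <linux/pci.h>

static void example_block_invalidate(void *context, u64 block_mask)
{
	/* One or more of block IDs 0..63 changed; re-read the marked blocks. */
}

static int example_read_block(struct pci_dev *pdev, unsigned int block_id)
{
	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
	unsigned int bytes = 0;
	int ret;

	ret = hyperv_reg_block_invalidate(pdev, pdev, example_block_invalidate);
	if (ret)
		return ret;

	ret = hyperv_read_cfg_blk(pdev, buf, sizeof(buf), block_id, &bytes);
	if (ret)
		return ret;

	/* 'bytes' now holds how much of the (at most 128-byte) block was returned. */
	return 0;
}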
*/ - void (*alert)(struct i2c_client *, enum i2c_alert_protocol protocol, + void (*alert)(struct i2c_client *client, enum i2c_alert_protocol protocol, unsigned int data); /* a ioctl like command that can be used to perform specific functions @@ -286,7 +284,7 @@ struct i2c_driver { const struct i2c_device_id *id_table; /* Device detection callback for automatic device creation */ - int (*detect)(struct i2c_client *, struct i2c_board_info *); + int (*detect)(struct i2c_client *client, struct i2c_board_info *info); const unsigned short *address_list; struct list_head clients; @@ -296,8 +294,7 @@ struct i2c_driver { /** * struct i2c_client - represent an I2C slave device - * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address; - * I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking + * @flags: see I2C_CLIENT_* for possible flags * @addr: Address used on the I2C bus connected to the parent adapter. * @name: Indicates the type of the device, usually a chip name that's * generic enough to hide second-sourcing and compatible revisions. @@ -315,6 +312,15 @@ struct i2c_driver { */ struct i2c_client { unsigned short flags; /* div., see below */ +#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ +#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ + /* Must equal I2C_M_TEN below */ +#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ +#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */ +#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ +#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ + /* Must match I2C_M_STOP|IGNORE_NAK */ + unsigned short addr; /* chip address - NOTE: 7bit */ /* addresses are stored in the */ /* _LOWER_ 7 bits */ @@ -436,6 +442,9 @@ struct i2c_board_info { extern struct i2c_client * i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); +extern struct i2c_client * +i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info); + /* If you don't know the exact address of an I2C device, use this variant * instead, which can probe for device presence in a list of possible * addresses. The "probe" callback function is optional. If it is provided, @@ -446,10 +455,10 @@ extern struct i2c_client * i2c_new_probed_device(struct i2c_adapter *adap, struct i2c_board_info *info, unsigned short const *addr_list, - int (*probe)(struct i2c_adapter *, unsigned short addr)); + int (*probe)(struct i2c_adapter *adap, unsigned short addr)); /* Common custom probe functions */ -extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr); +extern int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr); /* For devices that use several addresses, use i2c_new_dummy() to make * client handles for the extra addresses. @@ -458,14 +467,17 @@ extern struct i2c_client * i2c_new_dummy(struct i2c_adapter *adap, u16 address); extern struct i2c_client * +i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address); + +extern struct i2c_client * devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address); extern struct i2c_client * -i2c_new_secondary_device(struct i2c_client *client, +i2c_new_ancillary_device(struct i2c_client *client, const char *name, u16 default_addr); -extern void i2c_unregister_device(struct i2c_client *); +extern void i2c_unregister_device(struct i2c_client *client); #endif /* I2C */ /* Mainboard arch_initcall() code should register all its I2C devices. 
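A hedged sketch of a probe path requesting a second address with the renamed i2c_new_ancillary_device(), assuming the ERR_PTR-style error reporting used by the new *_device creation helpers; the "rtc" name and the 0x51 default address are illustrative:

static int my_probe(struct i2c_client *client)
{
        struct i2c_client *rtc;

        /* second address of a multi-address chip; 0x51 used as a default */
        rtc = i2c_new_ancillary_device(client, "rtc", 0x51);
        if (IS_ERR(rtc))
                return PTR_ERR(rtc);

        i2c_set_clientdata(client, rtc);
        return 0;
}

static int my_remove(struct i2c_client *client)
{
        i2c_unregister_device(i2c_get_clientdata(client));
        return 0;
}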
@@ -509,7 +521,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * * The return codes from the @master_xfer{_atomic} fields should indicate the * type of error code that occurred during the transfer, as documented in the - * Kernel Documentation file Documentation/i2c/fault-codes. + * Kernel Documentation file Documentation/i2c/fault-codes.rst. */ struct i2c_algorithm { /* @@ -550,9 +562,9 @@ struct i2c_algorithm { * The main operations are wrapped by i2c_lock_bus and i2c_unlock_bus. */ struct i2c_lock_operations { - void (*lock_bus)(struct i2c_adapter *, unsigned int flags); - int (*trylock_bus)(struct i2c_adapter *, unsigned int flags); - void (*unlock_bus)(struct i2c_adapter *, unsigned int flags); + void (*lock_bus)(struct i2c_adapter *adapter, unsigned int flags); + int (*trylock_bus)(struct i2c_adapter *adapter, unsigned int flags); + void (*unlock_bus)(struct i2c_adapter *adapter, unsigned int flags); }; /** @@ -702,14 +714,14 @@ struct i2c_adapter { }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) -static inline void *i2c_get_adapdata(const struct i2c_adapter *dev) +static inline void *i2c_get_adapdata(const struct i2c_adapter *adap) { - return dev_get_drvdata(&dev->dev); + return dev_get_drvdata(&adap->dev); } -static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) +static inline void i2c_set_adapdata(struct i2c_adapter *adap, void *data) { - dev_set_drvdata(&dev->dev, data); + dev_set_drvdata(&adap->dev, data); } static inline struct i2c_adapter * @@ -725,7 +737,7 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) return NULL; } -int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *)); +int i2c_for_each_dev(void *data, int (*fn)(struct device *dev, void *data)); /* Adapter locking functions, exported for shared pin cases */ #define I2C_LOCK_ROOT_ADAPTER BIT(0) @@ -801,16 +813,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER); } -/*flags for the client struct: */ -#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ -#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ - /* Must equal I2C_M_TEN below */ -#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ -#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */ -#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ -#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ - /* Must match I2C_M_STOP|IGNORE_NAK */ - /* i2c adapter classes (bitmask) */ #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ @@ -831,12 +833,12 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) /* administration... 
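The adapter drvdata helpers above only had their parameter renamed from dev to adap, but a short sketch of their intended pairing in a bus driver may still help; struct my_i2c_dev is a made-up private structure:

struct my_i2c_dev {
        struct i2c_adapter adap;
        void __iomem *regs;
};

static int my_adap_register(struct my_i2c_dev *priv)
{
        /* make the private data reachable from the adapter... */
        i2c_set_adapdata(&priv->adap, priv);
        return i2c_add_adapter(&priv->adap);
}

static struct my_i2c_dev *to_my_i2c_dev(struct i2c_adapter *adap)
{
        /* ...and back again, e.g. from the master_xfer callback */
        return i2c_get_adapdata(adap);
}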
*/ #if IS_ENABLED(CONFIG_I2C) -extern int i2c_add_adapter(struct i2c_adapter *); -extern void i2c_del_adapter(struct i2c_adapter *); -extern int i2c_add_numbered_adapter(struct i2c_adapter *); +extern int i2c_add_adapter(struct i2c_adapter *adap); +extern void i2c_del_adapter(struct i2c_adapter *adap); +extern int i2c_add_numbered_adapter(struct i2c_adapter *adap); -extern int i2c_register_driver(struct module *, struct i2c_driver *); -extern void i2c_del_driver(struct i2c_driver *); +extern int i2c_register_driver(struct module *owner, struct i2c_driver *driver); +extern void i2c_del_driver(struct i2c_driver *driver); /* use a define to avoid include chaining to get THIS_MODULE */ #define i2c_add_driver(driver) \ @@ -981,6 +983,7 @@ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, u32 i2c_acpi_find_bus_speed(struct device *dev); struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct i2c_board_info *info); +struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle); #else static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, struct acpi_resource_i2c_serialbus **i2c) @@ -996,6 +999,10 @@ static inline struct i2c_client *i2c_acpi_new_device(struct device *dev, { return NULL; } +static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) +{ + return NULL; +} #endif /* CONFIG_ACPI */ #endif /* _LINUX_I2C_H */ diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h index 5ecb055fd375..de102e4418ab 100644 --- a/include/linux/i3c/device.h +++ b/include/linux/i3c/device.h @@ -188,6 +188,10 @@ static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv) struct device *i3cdev_to_dev(struct i3c_device *i3cdev); struct i3c_device *dev_to_i3cdev(struct device *dev); +const struct i3c_device_id * +i3c_device_match_id(struct i3c_device *i3cdev, + const struct i3c_device_id *id_table); + static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev, void *data) { diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index f13fd8b1dd79..9cb39d901cd5 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -48,7 +48,7 @@ struct i3c_i2c_dev_desc { #define I3C_LVR_I2C_INDEX(x) ((x) << 5) #define I3C_LVR_I2C_FM_MODE BIT(4) -#define I2C_MAX_ADDR GENMASK(9, 0) +#define I2C_MAX_ADDR GENMASK(6, 0) /** * struct i2c_dev_boardinfo - I2C device board information @@ -71,6 +71,9 @@ struct i2c_dev_boardinfo { * @common: common part of the I2C device descriptor * @boardinfo: pointer to the boardinfo attached to this I2C device * @dev: I2C device object registered to the I2C framework + * @addr: I2C device address + * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about + * the I2C device limitations * * Each I2C device connected on the bus will have an i2c_dev_desc. * This object is created by the core and later attached to the controller @@ -84,6 +87,8 @@ struct i2c_dev_desc { struct i3c_i2c_dev_desc common; const struct i2c_dev_boardinfo *boardinfo; struct i2c_client *dev; + u16 addr; + u8 lvr; }; /** @@ -250,12 +255,17 @@ struct i3c_device { * the bus. The only impact in this mode is that the * high SCL pulse has to stay below 50ns to trick I2C * devices when transmitting I3C frames + * @I3C_BUS_MODE_MIXED_LIMITED: I2C devices without 50ns spike filter are + * present on the bus. However they allow + * compliance up to the maximum SDR SCL clock + * frequency. 
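A hedged sketch of a consumer of the new i2c_acpi_find_adapter_by_handle() helper above (made possible by the <linux/acpi.h> include added at the top of the file); note that the !CONFIG_ACPI stub simply returns NULL, so a single error path suffices:

static struct i2c_adapter *my_get_adapter(acpi_handle handle)
{
        struct i2c_adapter *adap;

        adap = i2c_acpi_find_adapter_by_handle(handle);
        if (!adap)
                pr_warn("no I2C adapter behind this ACPI handle\n");

        return adap;
}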
* @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present * on the bus */ enum i3c_bus_mode { I3C_BUS_MODE_PURE, I3C_BUS_MODE_MIXED_FAST, + I3C_BUS_MODE_MIXED_LIMITED, I3C_BUS_MODE_MIXED_SLOW, }; @@ -390,8 +400,6 @@ struct i3c_bus { * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C * framework. * This method is mandatory. - * @i2c_funcs: expose the supported I2C functionalities. - * This method is mandatory. * @request_ibi: attach an IBI handler to an I3C device. This implies defining * an IBI handler and the constraints of the IBI (maximum payload * length and number of pre-allocated slots). @@ -437,7 +445,6 @@ struct i3c_master_controller_ops { void (*detach_i2c_dev)(struct i2c_dev_desc *dev); int (*i2c_xfers)(struct i2c_dev_desc *dev, const struct i2c_msg *xfers, int nxfers); - u32 (*i2c_funcs)(struct i3c_master_controller *master); int (*request_ibi)(struct i3c_dev_desc *dev, const struct i3c_ibi_setup *req); void (*free_ibi)(struct i3c_dev_desc *dev); diff --git a/include/linux/ide.h b/include/linux/ide.h index 971cf76a78a0..46b771d6999e 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -253,9 +253,9 @@ static inline void ide_std_init_ports(struct ide_hw *hw, * Special Driver Flags */ enum { - IDE_SFLAG_SET_GEOMETRY = (1 << 0), - IDE_SFLAG_RECALIBRATE = (1 << 1), - IDE_SFLAG_SET_MULTMODE = (1 << 2), + IDE_SFLAG_SET_GEOMETRY = BIT(0), + IDE_SFLAG_RECALIBRATE = BIT(1), + IDE_SFLAG_SET_MULTMODE = BIT(2), }; /* @@ -267,13 +267,13 @@ typedef enum { } ide_startstop_t; enum { - IDE_VALID_ERROR = (1 << 1), + IDE_VALID_ERROR = BIT(1), IDE_VALID_FEATURE = IDE_VALID_ERROR, - IDE_VALID_NSECT = (1 << 2), - IDE_VALID_LBAL = (1 << 3), - IDE_VALID_LBAM = (1 << 4), - IDE_VALID_LBAH = (1 << 5), - IDE_VALID_DEVICE = (1 << 6), + IDE_VALID_NSECT = BIT(2), + IDE_VALID_LBAL = BIT(3), + IDE_VALID_LBAM = BIT(4), + IDE_VALID_LBAH = BIT(5), + IDE_VALID_DEVICE = BIT(6), IDE_VALID_LBA = IDE_VALID_LBAL | IDE_VALID_LBAM | IDE_VALID_LBAH, @@ -289,24 +289,24 @@ enum { }; enum { - IDE_TFLAG_LBA48 = (1 << 0), - IDE_TFLAG_WRITE = (1 << 1), - IDE_TFLAG_CUSTOM_HANDLER = (1 << 2), - IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3), + IDE_TFLAG_LBA48 = BIT(0), + IDE_TFLAG_WRITE = BIT(1), + IDE_TFLAG_CUSTOM_HANDLER = BIT(2), + IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3), /* force 16-bit I/O operations */ - IDE_TFLAG_IO_16BIT = (1 << 4), + IDE_TFLAG_IO_16BIT = BIT(4), /* struct ide_cmd was allocated using kmalloc() */ - IDE_TFLAG_DYN = (1 << 5), - IDE_TFLAG_FS = (1 << 6), - IDE_TFLAG_MULTI_PIO = (1 << 7), - IDE_TFLAG_SET_XFER = (1 << 8), + IDE_TFLAG_DYN = BIT(5), + IDE_TFLAG_FS = BIT(6), + IDE_TFLAG_MULTI_PIO = BIT(7), + IDE_TFLAG_SET_XFER = BIT(8), }; enum { - IDE_FTFLAG_FLAGGED = (1 << 0), - IDE_FTFLAG_SET_IN_FLAGS = (1 << 1), - IDE_FTFLAG_OUT_DATA = (1 << 2), - IDE_FTFLAG_IN_DATA = (1 << 3), + IDE_FTFLAG_FLAGGED = BIT(0), + IDE_FTFLAG_SET_IN_FLAGS = BIT(1), + IDE_FTFLAG_OUT_DATA = BIT(2), + IDE_FTFLAG_IN_DATA = BIT(3), }; struct ide_taskfile { @@ -357,13 +357,13 @@ struct ide_cmd { /* ATAPI packet command flags */ enum { /* set when an error is considered normal - no retry (ide-tape) */ - PC_FLAG_ABORT = (1 << 0), - PC_FLAG_SUPPRESS_ERROR = (1 << 1), - PC_FLAG_WAIT_FOR_DSC = (1 << 2), - PC_FLAG_DMA_OK = (1 << 3), - PC_FLAG_DMA_IN_PROGRESS = (1 << 4), - PC_FLAG_DMA_ERROR = (1 << 5), - PC_FLAG_WRITING = (1 << 6), + PC_FLAG_ABORT = BIT(0), + PC_FLAG_SUPPRESS_ERROR = BIT(1), + PC_FLAG_WAIT_FOR_DSC = BIT(2), + PC_FLAG_DMA_OK = BIT(3), + PC_FLAG_DMA_IN_PROGRESS = BIT(4), + PC_FLAG_DMA_ERROR = BIT(5), + 
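The ide.h churn in this and the following hunks is purely mechanical: open-coded (1 << n) flags become BIT(n) from <linux/bits.h>. Besides readability, BIT(31) expands to an unsigned long constant and so avoids shifting a 1 into the sign bit of a plain int, which (1 << 31) does. A two-line illustration with a made-up flag name:

#include <linux/bits.h>

#define MY_FLAG_LEGACY  (1 << 31)       /* shifts into the sign bit of an int */
#define MY_FLAG         BIT(31)         /* 1UL << 31: unsigned, no such issue */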
PC_FLAG_WRITING = BIT(6), }; #define ATAPI_WAIT_PC (60 * HZ) @@ -417,111 +417,111 @@ struct ide_disk_ops { /* ATAPI device flags */ enum { - IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), + IDE_AFLAG_DRQ_INTERRUPT = BIT(0), /* ide-cd */ /* Drive cannot eject the disc. */ - IDE_AFLAG_NO_EJECT = (1 << 1), + IDE_AFLAG_NO_EJECT = BIT(1), /* Drive is a pre ATAPI 1.2 drive. */ - IDE_AFLAG_PRE_ATAPI12 = (1 << 2), + IDE_AFLAG_PRE_ATAPI12 = BIT(2), /* TOC addresses are in BCD. */ - IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), + IDE_AFLAG_TOCADDR_AS_BCD = BIT(3), /* TOC track numbers are in BCD. */ - IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), + IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4), /* Saved TOC information is current. */ - IDE_AFLAG_TOC_VALID = (1 << 6), + IDE_AFLAG_TOC_VALID = BIT(6), /* We think that the drive door is locked. */ - IDE_AFLAG_DOOR_LOCKED = (1 << 7), + IDE_AFLAG_DOOR_LOCKED = BIT(7), /* SET_CD_SPEED command is unsupported. */ - IDE_AFLAG_NO_SPEED_SELECT = (1 << 8), - IDE_AFLAG_VERTOS_300_SSD = (1 << 9), - IDE_AFLAG_VERTOS_600_ESD = (1 << 10), - IDE_AFLAG_SANYO_3CD = (1 << 11), - IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12), - IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13), - IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14), + IDE_AFLAG_NO_SPEED_SELECT = BIT(8), + IDE_AFLAG_VERTOS_300_SSD = BIT(9), + IDE_AFLAG_VERTOS_600_ESD = BIT(10), + IDE_AFLAG_SANYO_3CD = BIT(11), + IDE_AFLAG_FULL_CAPS_PAGE = BIT(12), + IDE_AFLAG_PLAY_AUDIO_OK = BIT(13), + IDE_AFLAG_LE_SPEED_FIELDS = BIT(14), /* ide-floppy */ /* Avoid commands not supported in Clik drive */ - IDE_AFLAG_CLIK_DRIVE = (1 << 15), + IDE_AFLAG_CLIK_DRIVE = BIT(15), /* Requires BH algorithm for packets */ - IDE_AFLAG_ZIP_DRIVE = (1 << 16), + IDE_AFLAG_ZIP_DRIVE = BIT(16), /* Supports format progress report */ - IDE_AFLAG_SRFP = (1 << 17), + IDE_AFLAG_SRFP = BIT(17), /* ide-tape */ - IDE_AFLAG_IGNORE_DSC = (1 << 18), + IDE_AFLAG_IGNORE_DSC = BIT(18), /* 0 When the tape position is unknown */ - IDE_AFLAG_ADDRESS_VALID = (1 << 19), + IDE_AFLAG_ADDRESS_VALID = BIT(19), /* Device already opened */ - IDE_AFLAG_BUSY = (1 << 20), + IDE_AFLAG_BUSY = BIT(20), /* Attempt to auto-detect the current user block size */ - IDE_AFLAG_DETECT_BS = (1 << 21), + IDE_AFLAG_DETECT_BS = BIT(21), /* Currently on a filemark */ - IDE_AFLAG_FILEMARK = (1 << 22), + IDE_AFLAG_FILEMARK = BIT(22), /* 0 = no tape is loaded, so we don't rewind after ejecting */ - IDE_AFLAG_MEDIUM_PRESENT = (1 << 23), + IDE_AFLAG_MEDIUM_PRESENT = BIT(23), - IDE_AFLAG_NO_AUTOCLOSE = (1 << 24), + IDE_AFLAG_NO_AUTOCLOSE = BIT(24), }; /* device flags */ enum { /* restore settings after device reset */ - IDE_DFLAG_KEEP_SETTINGS = (1 << 0), + IDE_DFLAG_KEEP_SETTINGS = BIT(0), /* device is using DMA for read/write */ - IDE_DFLAG_USING_DMA = (1 << 1), + IDE_DFLAG_USING_DMA = BIT(1), /* okay to unmask other IRQs */ - IDE_DFLAG_UNMASK = (1 << 2), + IDE_DFLAG_UNMASK = BIT(2), /* don't attempt flushes */ - IDE_DFLAG_NOFLUSH = (1 << 3), + IDE_DFLAG_NOFLUSH = BIT(3), /* DSC overlap */ - IDE_DFLAG_DSC_OVERLAP = (1 << 4), + IDE_DFLAG_DSC_OVERLAP = BIT(4), /* give potential excess bandwidth */ - IDE_DFLAG_NICE1 = (1 << 5), + IDE_DFLAG_NICE1 = BIT(5), /* device is physically present */ - IDE_DFLAG_PRESENT = (1 << 6), + IDE_DFLAG_PRESENT = BIT(6), /* disable Host Protected Area */ - IDE_DFLAG_NOHPA = (1 << 7), + IDE_DFLAG_NOHPA = BIT(7), /* id read from device (synthetic if not set) */ - IDE_DFLAG_ID_READ = (1 << 8), - IDE_DFLAG_NOPROBE = (1 << 9), + IDE_DFLAG_ID_READ = BIT(8), + IDE_DFLAG_NOPROBE = BIT(9), /* need to do check_media_change() */ - 
IDE_DFLAG_REMOVABLE = (1 << 10), + IDE_DFLAG_REMOVABLE = BIT(10), /* needed for removable devices */ - IDE_DFLAG_ATTACH = (1 << 11), - IDE_DFLAG_FORCED_GEOM = (1 << 12), + IDE_DFLAG_ATTACH = BIT(11), + IDE_DFLAG_FORCED_GEOM = BIT(12), /* disallow setting unmask bit */ - IDE_DFLAG_NO_UNMASK = (1 << 13), + IDE_DFLAG_NO_UNMASK = BIT(13), /* disallow enabling 32-bit I/O */ - IDE_DFLAG_NO_IO_32BIT = (1 << 14), + IDE_DFLAG_NO_IO_32BIT = BIT(14), /* for removable only: door lock/unlock works */ - IDE_DFLAG_DOORLOCKING = (1 << 15), + IDE_DFLAG_DOORLOCKING = BIT(15), /* disallow DMA */ - IDE_DFLAG_NODMA = (1 << 16), + IDE_DFLAG_NODMA = BIT(16), /* powermanagement told us not to do anything, so sleep nicely */ - IDE_DFLAG_BLOCKED = (1 << 17), + IDE_DFLAG_BLOCKED = BIT(17), /* sleeping & sleep field valid */ - IDE_DFLAG_SLEEPING = (1 << 18), - IDE_DFLAG_POST_RESET = (1 << 19), - IDE_DFLAG_UDMA33_WARNED = (1 << 20), - IDE_DFLAG_LBA48 = (1 << 21), + IDE_DFLAG_SLEEPING = BIT(18), + IDE_DFLAG_POST_RESET = BIT(19), + IDE_DFLAG_UDMA33_WARNED = BIT(20), + IDE_DFLAG_LBA48 = BIT(21), /* status of write cache */ - IDE_DFLAG_WCACHE = (1 << 22), + IDE_DFLAG_WCACHE = BIT(22), /* used for ignoring ATA_DF */ - IDE_DFLAG_NOWERR = (1 << 23), + IDE_DFLAG_NOWERR = BIT(23), /* retrying in PIO */ - IDE_DFLAG_DMA_PIO_RETRY = (1 << 24), - IDE_DFLAG_LBA = (1 << 25), + IDE_DFLAG_DMA_PIO_RETRY = BIT(24), + IDE_DFLAG_LBA = BIT(25), /* don't unload heads */ - IDE_DFLAG_NO_UNLOAD = (1 << 26), + IDE_DFLAG_NO_UNLOAD = BIT(26), /* heads unloaded, please don't reset port */ - IDE_DFLAG_PARKED = (1 << 27), - IDE_DFLAG_MEDIA_CHANGED = (1 << 28), + IDE_DFLAG_PARKED = BIT(27), + IDE_DFLAG_MEDIA_CHANGED = BIT(28), /* write protect */ - IDE_DFLAG_WP = (1 << 29), - IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), - IDE_DFLAG_NIEN_QUIRK = (1 << 31), + IDE_DFLAG_WP = BIT(29), + IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30), + IDE_DFLAG_NIEN_QUIRK = BIT(31), }; struct ide_drive_s { @@ -709,7 +709,7 @@ struct ide_dma_ops { }; enum { - IDE_PFLAG_PROBING = (1 << 0), + IDE_PFLAG_PROBING = BIT(0), }; struct ide_host; @@ -862,7 +862,7 @@ extern struct mutex ide_setting_mtx; * configurable drive settings */ -#define DS_SYNC (1 << 0) +#define DS_SYNC BIT(0) struct ide_devset { int (*get)(ide_drive_t *); @@ -1000,15 +1000,15 @@ static inline void ide_proc_unregister_driver(ide_drive_t *drive, enum { /* enter/exit functions */ - IDE_DBG_FUNC = (1 << 0), + IDE_DBG_FUNC = BIT(0), /* sense key/asc handling */ - IDE_DBG_SENSE = (1 << 1), + IDE_DBG_SENSE = BIT(1), /* packet commands handling */ - IDE_DBG_PC = (1 << 2), + IDE_DBG_PC = BIT(2), /* request handling */ - IDE_DBG_RQ = (1 << 3), + IDE_DBG_RQ = BIT(3), /* driver probing/setup */ - IDE_DBG_PROBE = (1 << 4), + IDE_DBG_PROBE = BIT(4), }; /* DRV_NAME has to be defined in the driver before using the macro below */ @@ -1171,10 +1171,10 @@ ssize_t ide_park_store(struct device *dev, struct device_attribute *attr, * the tail of our block device request queue and wait for their completion. 
*/ enum { - REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */ - REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */ - REQ_IDETAPE_READ = (1 << 2), - REQ_IDETAPE_WRITE = (1 << 3), + REQ_IDETAPE_PC1 = BIT(0), /* packet command (first stage) */ + REQ_IDETAPE_PC2 = BIT(1), /* packet command (second stage) */ + REQ_IDETAPE_READ = BIT(2), + REQ_IDETAPE_WRITE = BIT(3), }; int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *, @@ -1264,71 +1264,71 @@ struct ide_pci_enablebit { enum { /* Uses ISA control ports not PCI ones. */ - IDE_HFLAG_ISA_PORTS = (1 << 0), + IDE_HFLAG_ISA_PORTS = BIT(0), /* single port device */ - IDE_HFLAG_SINGLE = (1 << 1), + IDE_HFLAG_SINGLE = BIT(1), /* don't use legacy PIO blacklist */ - IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2), + IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2), /* set for the second port of QD65xx */ - IDE_HFLAG_QD_2ND_PORT = (1 << 3), + IDE_HFLAG_QD_2ND_PORT = BIT(3), /* use PIO8/9 for prefetch off/on */ - IDE_HFLAG_ABUSE_PREFETCH = (1 << 4), + IDE_HFLAG_ABUSE_PREFETCH = BIT(4), /* use PIO6/7 for fast-devsel off/on */ - IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5), + IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5), /* use 100-102 and 200-202 PIO values to set DMA modes */ - IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6), + IDE_HFLAG_ABUSE_DMA_MODES = BIT(6), /* * keep DMA setting when programming PIO mode, may be used only * for hosts which have separate PIO and DMA timings (ie. PMAC) */ - IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7), + IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7), /* program host for the transfer mode after programming device */ - IDE_HFLAG_POST_SET_MODE = (1 << 8), + IDE_HFLAG_POST_SET_MODE = BIT(8), /* don't program host/device for the transfer mode ("smart" hosts) */ - IDE_HFLAG_NO_SET_MODE = (1 << 9), + IDE_HFLAG_NO_SET_MODE = BIT(9), /* trust BIOS for programming chipset/device for DMA */ - IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10), + IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10), /* host is CS5510/CS5520 */ - IDE_HFLAG_CS5520 = (1 << 11), + IDE_HFLAG_CS5520 = BIT(11), /* ATAPI DMA is unsupported */ - IDE_HFLAG_NO_ATAPI_DMA = (1 << 12), + IDE_HFLAG_NO_ATAPI_DMA = BIT(12), /* set if host is a "non-bootable" controller */ - IDE_HFLAG_NON_BOOTABLE = (1 << 13), + IDE_HFLAG_NON_BOOTABLE = BIT(13), /* host doesn't support DMA */ - IDE_HFLAG_NO_DMA = (1 << 14), + IDE_HFLAG_NO_DMA = BIT(14), /* check if host is PCI IDE device before allowing DMA */ - IDE_HFLAG_NO_AUTODMA = (1 << 15), + IDE_HFLAG_NO_AUTODMA = BIT(15), /* host uses MMIO */ - IDE_HFLAG_MMIO = (1 << 16), + IDE_HFLAG_MMIO = BIT(16), /* no LBA48 */ - IDE_HFLAG_NO_LBA48 = (1 << 17), + IDE_HFLAG_NO_LBA48 = BIT(17), /* no LBA48 DMA */ - IDE_HFLAG_NO_LBA48_DMA = (1 << 18), + IDE_HFLAG_NO_LBA48_DMA = BIT(18), /* data FIFO is cleared by an error */ - IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19), + IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19), /* serialize ports */ - IDE_HFLAG_SERIALIZE = (1 << 20), + IDE_HFLAG_SERIALIZE = BIT(20), /* host is DTC2278 */ - IDE_HFLAG_DTC2278 = (1 << 21), + IDE_HFLAG_DTC2278 = BIT(21), /* 4 devices on a single set of I/O ports */ - IDE_HFLAG_4DRIVES = (1 << 22), + IDE_HFLAG_4DRIVES = BIT(22), /* host is TRM290 */ - IDE_HFLAG_TRM290 = (1 << 23), + IDE_HFLAG_TRM290 = BIT(23), /* use 32-bit I/O ops */ - IDE_HFLAG_IO_32BIT = (1 << 24), + IDE_HFLAG_IO_32BIT = BIT(24), /* unmask IRQs */ - IDE_HFLAG_UNMASK_IRQS = (1 << 25), - IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26), + IDE_HFLAG_UNMASK_IRQS = BIT(25), + IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26), /* serialize ports if DMA is possible (for 
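The idle_inject prototypes above switch the duration parameters from milliseconds to microseconds, so callers are now expected to pass values in µs. A hedged usage sketch with illustrative numbers:

static int my_start_injection(struct idle_inject_device *ii_dev)
{
        /* 40 ms of run time, 10 ms of forced idle, expressed in microseconds */
        idle_inject_set_duration(ii_dev, 40000, 10000);

        return idle_inject_start(ii_dev);
}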
sl82c105) */ - IDE_HFLAG_SERIALIZE_DMA = (1 << 27), + IDE_HFLAG_SERIALIZE_DMA = BIT(27), /* force host out of "simplex" mode */ - IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28), + IDE_HFLAG_CLEAR_SIMPLEX = BIT(28), /* DSC overlap is unsupported */ - IDE_HFLAG_NO_DSC = (1 << 29), + IDE_HFLAG_NO_DSC = BIT(29), /* never use 32-bit I/O ops */ - IDE_HFLAG_NO_IO_32BIT = (1 << 30), + IDE_HFLAG_NO_IO_32BIT = BIT(30), /* never unmask IRQs */ - IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), + IDE_HFLAG_NO_UNMASK_IRQS = BIT(31), }; #ifdef CONFIG_BLK_DEV_OFFBOARD @@ -1536,16 +1536,16 @@ struct ide_timing { }; enum { - IDE_TIMING_SETUP = (1 << 0), - IDE_TIMING_ACT8B = (1 << 1), - IDE_TIMING_REC8B = (1 << 2), - IDE_TIMING_CYC8B = (1 << 3), + IDE_TIMING_SETUP = BIT(0), + IDE_TIMING_ACT8B = BIT(1), + IDE_TIMING_REC8B = BIT(2), + IDE_TIMING_CYC8B = BIT(3), IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B | IDE_TIMING_CYC8B, - IDE_TIMING_ACTIVE = (1 << 4), - IDE_TIMING_RECOVER = (1 << 5), - IDE_TIMING_CYCLE = (1 << 6), - IDE_TIMING_UDMA = (1 << 7), + IDE_TIMING_ACTIVE = BIT(4), + IDE_TIMING_RECOVER = BIT(5), + IDE_TIMING_CYCLE = BIT(6), + IDE_TIMING_UDMA = BIT(7), IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT | IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER | IDE_TIMING_CYCLE | IDE_TIMING_UDMA, diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h index bdc0293fb6cb..a445cd1a36c5 100644 --- a/include/linux/idle_inject.h +++ b/include/linux/idle_inject.h @@ -20,10 +20,10 @@ int idle_inject_start(struct idle_inject_device *ii_dev); void idle_inject_stop(struct idle_inject_device *ii_dev); void idle_inject_set_duration(struct idle_inject_device *ii_dev, - unsigned int run_duration_ms, - unsigned int idle_duration_ms); + unsigned int run_duration_us, + unsigned int idle_duration_us); void idle_inject_get_duration(struct idle_inject_device *ii_dev, - unsigned int *run_duration_ms, - unsigned int *idle_duration_ms); + unsigned int *run_duration_us, + unsigned int *idle_duration_us); #endif /* __IDLE_INJECT_H__ */ diff --git a/include/linux/idr.h b/include/linux/idr.h index dc09bd646bcb..ac6e946b6767 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -191,14 +191,17 @@ static inline void idr_preload_end(void) * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type. * @idr: IDR handle. * @entry: The type * to use as cursor. + * @tmp: A temporary placeholder for ID. * @id: Entry ID. * * @entry and @id do not need to be initialized before the loop, and * after normal termination @entry is left with the value NULL. This * is convenient for a "not found" value. */ -#define idr_for_each_entry_ul(idr, entry, id) \ - for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id) +#define idr_for_each_entry_ul(idr, entry, tmp, id) \ + for (tmp = 0, id = 0; \ + tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ + tmp = id, ++id) /** * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type @@ -213,6 +216,20 @@ static inline void idr_preload_end(void) entry; \ ++id, (entry) = idr_get_next((idr), &(id))) +/** + * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type + * @idr: IDR handle. + * @entry: The type * to use as a cursor. + * @tmp: A temporary placeholder for ID. + * @id: Entry ID. + * + * Continue to iterate over entries, continuing after the current position. 
+ */ +#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \ + for (tmp = id; \ + tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ + tmp = id, ++id) + /* * IDA - ID Allocator, use when translation from id to pointer isn't necessary. */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 42690007d612..7d3f2ced92d1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -881,6 +881,14 @@ struct ieee80211_tpc_report_ie { u8 link_margin; } __packed; +#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1) +#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1 +#define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0) + +struct ieee80211_addba_ext_ie { + u8 data; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -973,6 +981,8 @@ struct ieee80211_mgmt { __le16 capab; __le16 timeout; __le16 start_seq_num; + /* followed by BA Extension */ + u8 variable[0]; } __packed addba_req; struct{ u8 action_code; @@ -1626,6 +1636,18 @@ struct ieee80211_he_operation { } __packed; /** + * struct ieee80211_he_spr - HE spatial reuse element + * + * This structure is the "HE spatial reuse element" element as + * described in P802.11ax_D4.0 section 9.4.2.241 + */ +struct ieee80211_he_spr { + u8 he_sr_control; + /* Optional 0 to 19 bytes: depends on @he_sr_control */ + u8 optional[0]; +} __packed; + +/** * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field * * This structure is the "MU AC Parameter Record" fields as @@ -2033,8 +2055,8 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the He Operations IE, stating from the the byte * after the ext ID byte. It is assumed that he_oper_ie has at least - * sizeof(struct ieee80211_he_operation) bytes, checked already in - * ieee802_11_parse_elems_crc() + * sizeof(struct ieee80211_he_operation) bytes, the caller must have + * validated this. * @return the actual size of the IE data (not including header), or 0 on error */ static inline u8 @@ -2063,6 +2085,42 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) return oper_len; } +/* HE Spatial Reuse defines */ +#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT 0x4 +#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT 0x8 + +/* + * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size + * @he_spr_ie: byte data of the He Spatial Reuse IE, stating from the the byte + * after the ext ID byte. 
It is assumed that he_spr_ie has at least + * sizeof(struct ieee80211_he_spr) bytes, the caller must have validated + * this + * @return the actual size of the IE data (not including header), or 0 on error + */ +static inline u8 +ieee80211_he_spr_size(const u8 *he_spr_ie) +{ + struct ieee80211_he_spr *he_spr = (void *)he_spr_ie; + u8 spr_len = sizeof(struct ieee80211_he_spr); + u32 he_spr_params; + + /* Make sure the input is not NULL */ + if (!he_spr_ie) + return 0; + + /* Calc required length */ + he_spr_params = le32_to_cpu(he_spr->he_sr_control); + if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) + spr_len++; + if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) + spr_len += 18; + + /* Add the first byte (extension ID) to the total length */ + spr_len++; + + return spr_len; +} + /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 @@ -2485,6 +2543,7 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_HE_OPERATION = 36, WLAN_EID_EXT_UORA = 37, WLAN_EID_EXT_HE_MU_EDCA = 38, + WLAN_EID_EXT_HE_SPR = 39, WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52, WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55, WLAN_EID_EXT_NON_INHERITANCE = 56, @@ -2609,6 +2668,7 @@ enum ieee80211_key_len { #define FILS_ERP_MAX_RRK_LEN 64 #define PMK_MAX_LEN 64 +#define SAE_PASSWORD_MAX_LEN 128 /* Public action codes (IEEE Std 802.11-2016, 9.6.8.1, Table 9-307) */ enum ieee80211_pub_actioncode { @@ -2709,6 +2769,13 @@ enum ieee80211_tdls_actioncode { #define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5) #define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6) +/* + * When set, indicates that the AP is able to tolerate 26-tone RU UL + * OFDMA transmissions using HE TB PPDU from OBSS (not falsely classify the + * 26-tone RU UL OFDMA transmissions as radar pulses). 
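A hedged sketch of how a parser might consume ieee80211_he_spr_size(); data is assumed to point at the byte after the Element ID Extension byte and len is assumed to count that extension byte as well, mirroring the helper's own accounting:

static bool my_he_spr_elem_fits(const u8 *data, u8 len)
{
        u8 needed = ieee80211_he_spr_size(data);

        /* 0 means the helper rejected the element */
        return needed && needed <= len;
}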
+ */ +#define WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT BIT(7) + /* Defines support for enhanced multi-bssid advertisement*/ #define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(1) diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index f3fab5d0ea97..9e57c4411734 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -88,6 +88,8 @@ static inline bool br_multicast_router(const struct net_device *dev) #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) bool br_vlan_enabled(const struct net_device *dev); int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid); +int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid); +int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto); int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); #else @@ -101,6 +103,16 @@ static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) return -EINVAL; } +static inline int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto) +{ + return -EINVAL; +} + +static inline int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid) +{ + return -EINVAL; +} + static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo) { diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 2e55e4cdbd8a..a367ead4bf4b 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -29,7 +29,6 @@ struct macvlan_dev { netdev_features_t set_features; enum macvlan_mode mode; u16 flags; - int nest_level; unsigned int macaddr_count; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 8b728750a625..69e813bcb947 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -80,6 +80,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp); extern void unregister_pppox_proto(int proto_num); extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); +extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); + +#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t) /* PPPoX socket states */ enum { diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h new file mode 100644 index 000000000000..9661416a9bb4 --- /dev/null +++ b/include/linux/if_rmnet.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. 
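A hedged sketch of a caller of the new br_vlan_get_proto() helper above; the !CONFIG_BRIDGE_VLAN_FILTERING stub returns -EINVAL, and falling back to 802.1Q on error is an illustrative assumption:

static u16 my_bridge_vlan_proto(const struct net_device *br_dev)
{
        u16 proto;

        if (br_vlan_get_proto(br_dev, &proto))
                return ETH_P_8021Q;     /* assumed default when unavailable */

        return proto;
}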
+ */ + +#ifndef _LINUX_IF_RMNET_H_ +#define _LINUX_IF_RMNET_H_ + +struct rmnet_map_header { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 pad_len:6; + u8 reserved_bit:1; + u8 cd_bit:1; +#elif defined (__BIG_ENDIAN_BITFIELD) + u8 cd_bit:1; + u8 reserved_bit:1; + u8 pad_len:6; +#else +#error "Please fix <asm/byteorder.h>" +#endif + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +struct rmnet_map_dl_csum_trailer { + u8 reserved1; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 valid:1; + u8 reserved2:7; +#elif defined (__BIG_ENDIAN_BITFIELD) + u8 reserved2:7; + u8 valid:1; +#else +#error "Please fix <asm/byteorder.h>" +#endif + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_map_ul_csum_header { + __be16 csum_start_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u16 csum_insert_offset:14; + u16 udp_ind:1; + u16 csum_enabled:1; +#elif defined (__BIG_ENDIAN_BITFIELD) + u16 csum_enabled:1; + u16 udp_ind:1; + u16 csum_insert_offset:14; +#else +#error "Please fix <asm/byteorder.h>" +#endif +} __aligned(1); + +#endif /* !(_LINUX_IF_RMNET_H_) */ diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 8e66866c11be..915a187cfabd 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -62,7 +62,6 @@ struct tap_dev { struct tap_queue { struct sock sk; struct socket sock; - struct socket_wq wq; int vnet_hdr_sz; struct tap_dev __rcu *tap; struct file *file; diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 06faa066496f..ec7e4bd07f82 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -223,6 +223,7 @@ struct team { atomic_t count_pending; struct delayed_work dw; } mcast_rejoin; + struct lock_class_key team_lock_key; long mode_priv[TEAM_MODE_PRIV_LONGS]; }; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 244278d5c222..b05e855f1ddd 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -182,7 +182,6 @@ struct vlan_dev_priv { #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif - unsigned int nest_level; }; static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) @@ -221,11 +220,6 @@ extern void vlan_vids_del_by_dev(struct net_device *dev, extern bool vlan_uses_dev(const struct net_device *dev); -static inline int vlan_get_encap_level(struct net_device *dev) -{ - BUG_ON(!is_vlan_dev(dev)); - return vlan_dev_priv(dev)->nest_level; -} #else static inline struct net_device * __vlan_find_dev_deep_rcu(struct net_device *real_dev, @@ -295,11 +289,6 @@ static inline bool vlan_uses_dev(const struct net_device *dev) { return false; } -static inline int vlan_get_encap_level(struct net_device *dev) -{ - BUG(); - return 0; -} #endif /** diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 9cbbd1baaf85..463047d0190b 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -60,8 +60,8 @@ struct ip_mc_socklist { struct ip_sf_list { struct ip_sf_list *sf_next; - __be32 sf_inaddr; unsigned long sf_count[2]; /* include/exclude counts */ + __be32 sf_inaddr; unsigned char sf_gsresp; /* include in g & s response? */ unsigned char sf_oldin; /* change state */ unsigned char sf_crcount; /* retrans. 
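A hedged sketch of filling the MAP header defined in the new if_rmnet.h above; treating cd_bit == 0 as "data frame" and counting the trailing padding in pkt_len are assumptions about the MAP framing, and the helper name is made up:

static void my_fill_map_header(struct rmnet_map_header *map, u8 mux_id,
                               u16 payload_len, u8 pad)
{
        map->cd_bit = 0;                /* assumed: 0 = data, 1 = command */
        map->reserved_bit = 0;
        map->pad_len = pad;             /* 6-bit field, trailing pad bytes */
        map->mux_id = mux_id;
        map->pkt_len = htons(payload_len + pad);
}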
left to send */ diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 0c636b9fe8d7..bb331e6356a9 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -10,7 +10,8 @@ #include <linux/iio/iio.h> #include <linux/irqreturn.h> -#include <linux/mfd/cros_ec.h> +#include <linux/platform_data/cros_ec_commands.h> +#include <linux/platform_data/cros_ec_proto.h> enum { CROS_EC_SENSOR_X, @@ -62,14 +63,20 @@ struct cros_ec_sensors_core_state { enum motionsensor_type type; enum motionsensor_location loc; - s16 calib[CROS_EC_SENSOR_MAX_AXIS]; - + struct calib_data { + s16 offset; + u16 scale; + } calib[CROS_EC_SENSOR_MAX_AXIS]; + s8 sign[CROS_EC_SENSOR_MAX_AXIS]; u8 samples[CROS_EC_SAMPLE_SIZE]; int (*read_ec_sensors_data)(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data); int curr_sampl_freq; + + /* Table of known available frequencies : 0, Min and Max in mHz */ + int frequencies[3]; }; /** @@ -151,6 +158,24 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, int *val, int *val2, long mask); /** + * cros_ec_sensors_core_read_avail() - get available values + * @indio_dev: pointer to state information for device + * @chan: channel specification structure table + * @vals: list of available values + * @type: type of data returned + * @length: number of data returned in the array + * @mask: specifies which values to be requested + * + * Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST + */ +int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, + int *type, + int *length, + long mask); + +/** * cros_ec_sensors_core_write() - function to write a value to the sensor * @st: pointer to state information for device * @chan: channel specification structure table diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 2948ac99e2da..686be532f4cb 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -16,11 +16,16 @@ #include <linux/iio/trigger.h> #include <linux/bitops.h> #include <linux/regulator/consumer.h> +#include <linux/regmap.h> #include <linux/platform_data/st_sensors_pdata.h> -#define ST_SENSORS_TX_MAX_LENGTH 2 -#define ST_SENSORS_RX_MAX_LENGTH 6 +/* + * Buffer size max case: 2bytes per channel, 3 channels in total + + * 8bytes timestamp channel (s64) + */ +#define ST_SENSORS_MAX_BUFFER_SIZE (ALIGN(2 * 3, sizeof(s64)) + \ + sizeof(s64)) #define ST_SENSORS_ODR_LIST_MAX 10 #define ST_SENSORS_FULLSCALE_AVL_MAX 10 @@ -170,36 +175,6 @@ struct st_sensor_data_ready_irq { }; /** - * struct st_sensor_transfer_buffer - ST sensor device I/O buffer - * @buf_lock: Mutex to protect rx and tx buffers. - * @tx_buf: Buffer used by SPI transfer function to send data to the sensors. - * This buffer is used to avoid DMA not-aligned issue. - * @rx_buf: Buffer used by SPI transfer to receive data from sensors. - * This buffer is used to avoid DMA not-aligned issue. - */ -struct st_sensor_transfer_buffer { - struct mutex buf_lock; - u8 rx_buf[ST_SENSORS_RX_MAX_LENGTH]; - u8 tx_buf[ST_SENSORS_TX_MAX_LENGTH] ____cacheline_aligned; -}; - -/** - * struct st_sensor_transfer_function - ST sensor device I/O function - * @read_byte: Function used to read one byte. - * @write_byte: Function used to write one byte. - * @read_multiple_byte: Function used to read multiple byte. 
- */ -struct st_sensor_transfer_function { - int (*read_byte) (struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, u8 *res_byte); - int (*write_byte) (struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, u8 data); - int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb, - struct device *dev, u8 reg_addr, int len, u8 *data, - bool multiread_bit); -}; - -/** * struct st_sensor_settings - ST specific sensor settings * @wai: Contents of WhoAmI register. * @wai_addr: The address of WhoAmI register. @@ -242,19 +217,17 @@ struct st_sensor_settings { * @current_fullscale: Maximum range of measure by the sensor. * @vdd: Pointer to sensor's Vdd power supply * @vdd_io: Pointer to sensor's Vdd-IO power supply + * @regmap: Pointer to specific sensor regmap configuration. * @enabled: Status of the sensor (false->off, true->on). - * @multiread_bit: Use or not particular bit for [I2C/SPI] multiread. - * @buffer_data: Data used by buffer part. * @odr: Output data rate of the sensor [Hz]. * num_data_channels: Number of data channels used in buffer. * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2). * @int_pin_open_drain: Set the interrupt/DRDY to open drain. - * @get_irq_data_ready: Function to get the IRQ used for data ready signal. - * @tf: Transfer function structure used by I/O operations. - * @tb: Transfer buffers and mutex used by I/O operations. + * @irq: the IRQ number. * @edge_irq: the IRQ triggers on edges and need special handling. * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. + * @buffer_data: Data used by buffer part. */ struct st_sensor_data { struct device *dev; @@ -264,26 +237,22 @@ struct st_sensor_data { struct st_sensor_fullscale_avl *current_fullscale; struct regulator *vdd; struct regulator *vdd_io; + struct regmap *regmap; bool enabled; - bool multiread_bit; - - char *buffer_data; unsigned int odr; unsigned int num_data_channels; u8 drdy_int_pin; bool int_pin_open_drain; - - unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev); - - const struct st_sensor_transfer_function *tf; - struct st_sensor_transfer_buffer tb; + int irq; bool edge_irq; bool hw_irq_trigger; s64 hw_timestamp; + + char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned; }; #ifdef CONFIG_IIO_BUFFER @@ -334,8 +303,11 @@ int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale); int st_sensors_read_info_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *ch, int *val); -int st_sensors_check_device_support(struct iio_dev *indio_dev, - int num_sensors_list, const struct st_sensor_settings *sensor_settings); +int st_sensors_get_settings_index(const char *name, + const struct st_sensor_settings *list, + const int list_length); + +int st_sensors_verify_id(struct iio_dev *indio_dev); ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h index 5ada89944698..01e424e2af4f 100644 --- a/include/linux/iio/common/st_sensors_i2c.h +++ b/include/linux/iio/common/st_sensors_i2c.h @@ -14,8 +14,8 @@ #include <linux/iio/common/st_sensors.h> #include <linux/of.h> -void st_sensors_i2c_configure(struct iio_dev *indio_dev, - struct i2c_client *client, struct st_sensor_data *sdata); +int st_sensors_i2c_configure(struct iio_dev *indio_dev, + struct i2c_client *client); #ifdef CONFIG_ACPI 
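With the st_sensor_transfer_function/_buffer machinery deleted above and a regmap pointer added to st_sensor_data, register access collapses to plain regmap calls. A hedged sketch; using 0x0f as the WhoAmI address is an assumption typical of ST parts, not something stated in this diff:

static int my_check_whoami(struct st_sensor_data *sdata, u8 expected)
{
        unsigned int wai;
        int err;

        err = regmap_read(sdata->regmap, 0x0f, &wai);
        if (err < 0)
                return err;

        return wai == expected ? 0 : -ENODEV;
}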
int st_sensors_match_acpi_device(struct device *dev); diff --git a/include/linux/iio/common/st_sensors_spi.h b/include/linux/iio/common/st_sensors_spi.h index 6020f7167859..90b25f087f06 100644 --- a/include/linux/iio/common/st_sensors_spi.h +++ b/include/linux/iio/common/st_sensors_spi.h @@ -13,7 +13,7 @@ #include <linux/spi/spi.h> #include <linux/iio/common/st_sensors.h> -void st_sensors_spi_configure(struct iio_dev *indio_dev, - struct spi_device *spi, struct st_sensor_data *sdata); +int st_sensors_spi_configure(struct iio_dev *indio_dev, + struct spi_device *spi); #endif /* ST_SENSORS_SPI_H */ diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index 3428d06b2f44..4c53815bb729 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -26,6 +26,7 @@ struct adis_burst; * struct adis_data - ADIS chip variant specific data * @read_delay: SPI delay for read operations in us * @write_delay: SPI delay for write operations in us + * @cs_change_delay: SPI delay between CS changes in us * @glob_cmd_reg: Register address of the GLOB_CMD register * @msc_ctrl_reg: Register address of the MSC_CTRL register * @diag_stat_reg: Register address of the DIAG_STAT register @@ -35,6 +36,7 @@ struct adis_burst; struct adis_data { unsigned int read_delay; unsigned int write_delay; + unsigned int cs_change_delay; unsigned int glob_cmd_reg; unsigned int msc_ctrl_reg; diff --git a/include/linux/ima.h b/include/linux/ima.h index 00036d2f57c3..1c37f17f7203 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -23,6 +23,7 @@ extern int ima_read_file(struct file *file, enum kernel_read_file_id id); extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); extern void ima_post_path_mknod(struct dentry *dentry); +extern void ima_kexec_cmdline(const void *buf, int size); #ifdef CONFIG_IMA_KEXEC extern void ima_add_kexec_buffer(struct kimage *image); @@ -89,6 +90,7 @@ static inline void ima_post_path_mknod(struct dentry *dentry) return; } +static inline void ima_kexec_cmdline(const void *buf, int size) {} #endif /* CONFIG_IMA */ #ifndef CONFIG_IMA_KEXEC @@ -129,4 +131,13 @@ static inline int ima_inode_removexattr(struct dentry *dentry, return 0; } #endif /* CONFIG_IMA_APPRAISE */ + +#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING) +extern bool ima_appraise_signature(enum kernel_read_file_id func); +#else +static inline bool ima_appraise_signature(enum kernel_read_file_id func) +{ + return false; +} +#endif /* CONFIG_IMA_APPRAISE && CONFIG_INTEGRITY_TRUSTED_KEYRING */ #endif /* _LINUX_IMA_H */ diff --git a/include/linux/in.h b/include/linux/in.h index 4d2fedfb753a..1873ef642605 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -63,7 +63,7 @@ static inline bool ipv4_is_all_snoopers(__be32 addr) static inline bool ipv4_is_zeronet(__be32 addr) { - return (addr & htonl(0xff000000)) == htonl(0x00000000); + return (addr == 0); } /* Special-Use IPv4 Addresses (RFC3330) */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 367dc2a0f84a..3515ca64e638 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -26,7 +26,7 @@ struct in_device { struct net_device *dev; refcount_t refcnt; int dead; - struct in_ifaddr *ifa_list; /* IP ifaddr chain */ + struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */ struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */ struct ip_mc_list __rcu * __rcu *mc_hash; @@ -136,7 +136,7 @@ static inline void 
ipv4_devconf_setall(struct in_device *in_dev) struct in_ifaddr { struct hlist_node hash; - struct in_ifaddr *ifa_next; + struct in_ifaddr __rcu *ifa_next; struct in_device *ifa_dev; struct rcu_head rcu_head; __be32 ifa_local; @@ -186,7 +186,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); struct in_ifaddr *inet_lookup_ifaddr_rcu(struct net *net, __be32 addr); -static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) +static inline bool inet_ifa_match(__be32 addr, const struct in_ifaddr *ifa) { return !((addr^ifa->ifa_address)&ifa->ifa_mask); } @@ -206,14 +206,13 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr) return false; } -#define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \ - for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next) +#define in_dev_for_each_ifa_rtnl(ifa, in_dev) \ + for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \ + ifa = rtnl_dereference(ifa->ifa_next)) -#define for_ifa(in_dev) { struct in_ifaddr *ifa; \ - for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next) - - -#define endfor_ifa(in_dev) } +#define in_dev_for_each_ifa_rcu(ifa, in_dev) \ + for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \ + ifa = rcu_dereference(ifa->ifa_next)) static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) { diff --git a/include/linux/init.h b/include/linux/init.h index 5255069f5a9f..212fc9e2f691 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -137,6 +137,8 @@ extern initcall_entry_t __con_initcall_start[], __con_initcall_end[]; /* Used for contructor calls. */ typedef void (*ctor_fn_t)(void); +struct file_system_type; + /* Defined in init/main.c */ extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; @@ -146,7 +148,8 @@ extern unsigned int reset_devices; /* used by init/main.c */ void setup_arch(char **); void prepare_namespace(void); -int __init init_rootfs(void); +void __init init_rootfs(void); +extern struct file_system_type rootfs_fs_type; #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX) extern bool rodata_enabled; diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 6049baa5b8bc..2c620d7ac432 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -36,17 +36,6 @@ extern struct cred init_cred; #define INIT_PREV_CPUTIME(x) #endif -#ifdef CONFIG_POSIX_TIMERS -#define INIT_CPU_TIMERS(s) \ - .cpu_timers = { \ - LIST_HEAD_INIT(s.cpu_timers[0]), \ - LIST_HEAD_INIT(s.cpu_timers[1]), \ - LIST_HEAD_INIT(s.cpu_timers[2]), \ - }, -#else -#define INIT_CPU_TIMERS(s) -#endif - #define INIT_TASK_COMM "swapper" /* Attach to the init_task data structure for proper alignment */ diff --git a/include/linux/input.h b/include/linux/input.h index 510e78558c10..94f277cd806a 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -21,6 +21,8 @@ #include <linux/timer.h> #include <linux/mod_devicetable.h> +struct input_dev_poller; + /** * struct input_value - input value representation * @type: type of value (EV_KEY, EV_ABS, etc) @@ -33,6 +35,13 @@ struct input_value { __s32 value; }; +enum input_clock_type { + INPUT_CLK_REAL = 0, + INPUT_CLK_MONO, + INPUT_CLK_BOOT, + INPUT_CLK_MAX +}; + /** * struct input_dev - represents an input device * @name: name of the device @@ -64,6 +73,8 @@ struct input_value { * not sleep * @ff: force feedback structure 
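in_device::ifa_list is now annotated __rcu and the old for_primary_ifa()/for_ifa()/endfor_ifa() macros are replaced by explicit RTNL and RCU iterators. A hedged sketch of the RCU flavour:

static bool my_dev_has_addr(struct in_device *in_dev, __be32 addr)
{
        const struct in_ifaddr *ifa;
        bool found = false;

        rcu_read_lock();
        in_dev_for_each_ifa_rcu(ifa, in_dev) {
                if (ifa->ifa_local == addr) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}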
associated with the device if device * supports force feedback effects + * @poller: poller structure associated with the device if device is + * set up to use polling mode * @repeat_key: stores key code of the last key pressed; used to implement * software autorepeat * @timer: timer for software autorepeat @@ -114,6 +125,8 @@ struct input_value { * @vals: array of values queued in the current frame * @devres_managed: indicates that devices is managed with devres framework * and needs not be explicitly unregistered or freed. + * @timestamp: storage for a timestamp set by input_set_timestamp called + * by a driver */ struct input_dev { const char *name; @@ -147,6 +160,8 @@ struct input_dev { struct ff_device *ff; + struct input_dev_poller *poller; + unsigned int repeat_key; struct timer_list timer; @@ -184,6 +199,8 @@ struct input_dev { struct input_value *vals; bool devres_managed; + + ktime_t timestamp[INPUT_CLK_MAX]; }; #define to_input_dev(d) container_of(d, struct input_dev, dev) @@ -361,6 +378,12 @@ void input_unregister_device(struct input_dev *); void input_reset_device(struct input_dev *); +int input_setup_polling(struct input_dev *dev, + void (*poll_fn)(struct input_dev *dev)); +void input_set_poll_interval(struct input_dev *dev, unsigned int interval); +void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval); +void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval); + int __must_check input_register_handler(struct input_handler *); void input_unregister_handler(struct input_handler *); @@ -382,6 +405,9 @@ void input_close_device(struct input_handle *); int input_flush_device(struct input_handle *handle, struct file *file); +void input_set_timestamp(struct input_dev *dev, ktime_t timestamp); +ktime_t *input_get_timestamp(struct input_dev *dev); + void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h deleted file mode 100644 index 7e5b7e978e8a..000000000000 --- a/include/linux/input/bu21013.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2010 - * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson - */ - -#ifndef _BU21013_H -#define _BU21013_H - -/** - * struct bu21013_platform_device - Handle the platform data - * @touch_x_max: touch x max - * @touch_y_max: touch y max - * @cs_pin: chip select pin - * @touch_pin: touch gpio pin - * @ext_clk: external clock flag - * @x_flip: x flip flag - * @y_flip: y flip flag - * @wakeup: wakeup flag - * - * This is used to handle the platform data - */ -struct bu21013_platform_device { - int touch_x_max; - int touch_y_max; - unsigned int cs_pin; - unsigned int touch_pin; - bool ext_clk; - bool x_flip; - bool y_flip; - bool wakeup; -}; - -#endif diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h new file mode 100644 index 000000000000..1ecb6b45812c --- /dev/null +++ b/include/linux/input/elan-i2c-ids.h @@ -0,0 +1,76 @@ +/* + * Elan I2C/SMBus Touchpad device whitelist + * + * Copyright (c) 2013 ELAN Microelectronics Corp. + * + * Author: æ維 (Duson Lin) <dusonlin@emc.com.tw> + * Author: KT Liao <kt.liao@emc.com.tw> + * Version: 1.6.3 + * + * Based on cyapa driver: + * copyright (c) 2011-2012 Cypress Semiconductor, Inc. + * copyright (c) 2011-2012 Google, Inc. 
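The polling helpers declared above are meant to let a regular input device drive its own polling without a separate polled-device wrapper. A hedged registration sketch; the poll handler and the 50 ms interval are illustrative:

static void my_poll(struct input_dev *dev)
{
        /* sample the hardware and report events here */
        input_sync(dev);
}

static int my_register(struct input_dev *dev)
{
        int error;

        error = input_setup_polling(dev, my_poll);
        if (error)
                return error;

        input_set_poll_interval(dev, 50);       /* milliseconds */

        return input_register_device(dev);
}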
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Trademarks are the property of their respective owners. + */ + +#ifndef __ELAN_I2C_IDS_H +#define __ELAN_I2C_IDS_H + +#include <linux/mod_devicetable.h> + +static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0000", 0 }, + { "ELAN0100", 0 }, + { "ELAN0600", 0 }, + { "ELAN0601", 0 }, + { "ELAN0602", 0 }, + { "ELAN0603", 0 }, + { "ELAN0604", 0 }, + { "ELAN0605", 0 }, + { "ELAN0606", 0 }, + { "ELAN0607", 0 }, + { "ELAN0608", 0 }, + { "ELAN0609", 0 }, + { "ELAN060B", 0 }, + { "ELAN060C", 0 }, + { "ELAN060F", 0 }, + { "ELAN0610", 0 }, + { "ELAN0611", 0 }, + { "ELAN0612", 0 }, + { "ELAN0615", 0 }, + { "ELAN0616", 0 }, + { "ELAN0617", 0 }, + { "ELAN0618", 0 }, + { "ELAN0619", 0 }, + { "ELAN061A", 0 }, +/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */ + { "ELAN061C", 0 }, + { "ELAN061D", 0 }, + { "ELAN061E", 0 }, + { "ELAN061F", 0 }, + { "ELAN0620", 0 }, + { "ELAN0621", 0 }, + { "ELAN0622", 0 }, + { "ELAN0623", 0 }, + { "ELAN0624", 0 }, + { "ELAN0625", 0 }, + { "ELAN0626", 0 }, + { "ELAN0627", 0 }, + { "ELAN0628", 0 }, + { "ELAN0629", 0 }, + { "ELAN062A", 0 }, + { "ELAN062B", 0 }, + { "ELAN062C", 0 }, + { "ELAN062D", 0 }, + { "ELAN0631", 0 }, + { "ELAN0632", 0 }, + { "ELAN1000", 0 }, + { } +}; + +#endif /* __ELAN_I2C_IDS_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 6a8dd4af0147..ed11ef594378 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -272,6 +272,8 @@ #define dma_frcd_type(d) ((d >> 30) & 1) #define dma_frcd_fault_reason(c) (c & 0xff) #define dma_frcd_source_id(c) (c & 0xffff) +#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff) +#define dma_frcd_pasid_present(c) (((c) >> 31) & 1) /* low 64 bit */ #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) @@ -346,7 +348,6 @@ enum { #define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) -#define QI_EIOTLB_GL(gl) (((u64)gl) << 7) #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) #define QI_EIOTLB_AM(am) (((u64)am)) #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) @@ -378,8 +379,6 @@ enum { #define QI_RESP_INVALID 0x1 #define QI_RESP_FAILURE 0xf -#define QI_GRAN_ALL_ALL 0 -#define QI_GRAN_NONG_ALL 1 #define QI_GRAN_NONG_PASID 2 #define QI_GRAN_PSI_PASID 3 @@ -435,6 +434,12 @@ enum { #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) +extern int intel_iommu_sm; + +#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) +#define pasid_supported(iommu) (sm_supported(iommu) && \ + ecap_pasid((iommu)->ecap)) + struct pasid_entry; struct pasid_state_entry; struct page_req_dsc; @@ -642,7 +647,6 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int dmar_ir_support(void); -struct dmar_domain *get_valid_domain_for_dev(struct device *dev); void *alloc_pgtable_page(int node); void free_pgtable_page(void *vaddr); struct intel_iommu *domain_get_iommu(struct dmar_domain *domain); diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 54ffcc6a322e..94f047a8a845 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -49,7 +49,7 @@ struct svm_dev_ops { /** * intel_svm_bind_mm() - Bind the current process to a PASID - * @dev: Device to be granted acccess + * @dev: Device to be granted access 
* @pasid: Address for allocated PASID * @flags: Flags. Later for requesting supervisor mode, etc. * @ops: Callbacks to device driver diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h new file mode 100644 index 000000000000..efb3ce892c20 --- /dev/null +++ b/include/linux/intel_rapl.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Data types and headers for RAPL support + * + * Copyright (C) 2019 Intel Corporation. + * + * Author: Zhang Rui <rui.zhang@intel.com> + */ + +#ifndef __INTEL_RAPL_H__ +#define __INTEL_RAPL_H__ + +#include <linux/types.h> +#include <linux/powercap.h> +#include <linux/cpuhotplug.h> + +enum rapl_domain_type { + RAPL_DOMAIN_PACKAGE, /* entire package/socket */ + RAPL_DOMAIN_PP0, /* core power plane */ + RAPL_DOMAIN_PP1, /* graphics uncore */ + RAPL_DOMAIN_DRAM, /* DRAM control_type */ + RAPL_DOMAIN_PLATFORM, /* PSys control_type */ + RAPL_DOMAIN_MAX, +}; + +enum rapl_domain_reg_id { + RAPL_DOMAIN_REG_LIMIT, + RAPL_DOMAIN_REG_STATUS, + RAPL_DOMAIN_REG_PERF, + RAPL_DOMAIN_REG_POLICY, + RAPL_DOMAIN_REG_INFO, + RAPL_DOMAIN_REG_MAX, +}; + +struct rapl_package; + +enum rapl_primitives { + ENERGY_COUNTER, + POWER_LIMIT1, + POWER_LIMIT2, + FW_LOCK, + + PL1_ENABLE, /* power limit 1, aka long term */ + PL1_CLAMP, /* allow frequency to go below OS request */ + PL2_ENABLE, /* power limit 2, aka short term, instantaneous */ + PL2_CLAMP, + + TIME_WINDOW1, /* long term */ + TIME_WINDOW2, /* short term */ + THERMAL_SPEC_POWER, + MAX_POWER, + + MIN_POWER, + MAX_TIME_WINDOW, + THROTTLED_TIME, + PRIORITY_LEVEL, + + /* below are not raw primitive data */ + AVERAGE_POWER, + NR_RAPL_PRIMITIVES, +}; + +struct rapl_domain_data { + u64 primitives[NR_RAPL_PRIMITIVES]; + unsigned long timestamp; +}; + +#define NR_POWER_LIMITS (2) +struct rapl_power_limit { + struct powercap_zone_constraint *constraint; + int prim_id; /* primitive ID used to enable */ + struct rapl_domain *domain; + const char *name; + u64 last_power_limit; +}; + +struct rapl_package; + +struct rapl_domain { + const char *name; + enum rapl_domain_type id; + u64 regs[RAPL_DOMAIN_REG_MAX]; + struct powercap_zone power_zone; + struct rapl_domain_data rdd; + struct rapl_power_limit rpl[NR_POWER_LIMITS]; + u64 attr_map; /* track capabilities */ + unsigned int state; + unsigned int domain_energy_unit; + struct rapl_package *rp; +}; + +struct reg_action { + u64 reg; + u64 mask; + u64 value; + int err; +}; + +/** + * struct rapl_if_priv: private data for different RAPL interfaces + * @control_type: Each RAPL interface must have its own powercap + * control type. + * @platform_rapl_domain: Optional. Some RAPL interface may have platform + * level RAPL control. + * @pcap_rapl_online: CPU hotplug state for each RAPL interface. + * @reg_unit: Register for getting energy/power/time unit. + * @regs: Register sets for different RAPL Domains. + * @limits: Number of power limits supported by each domain. + * @read_raw: Callback for reading RAPL interface specific + * registers. + * @write_raw: Callback for writing RAPL interface specific + * registers. 
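A minimal sketch (editorial illustration, not part of the patch) of how an interface driver might implement the read_raw/write_raw callbacks documented above. The callback prototypes and struct reg_action layout are taken from this header; my_soc_read_reg(), my_soc_write_reg() and MY_RAPL_UNIT_REG are hypothetical placeholders.

#include <linux/intel_rapl.h>

/* Hypothetical SoC-specific register accessors assumed by this sketch. */
extern int my_soc_read_reg(int cpu, u64 reg, u64 *val);
extern int my_soc_write_reg(int cpu, u64 reg, u64 val);

#define MY_RAPL_UNIT_REG	0x10	/* placeholder register address */

static int my_rapl_read_raw(int cpu, struct reg_action *ra)
{
	/* Fill ra->value with the raw register contents for @cpu. */
	ra->err = my_soc_read_reg(cpu, ra->reg, &ra->value);
	return ra->err;
}

static int my_rapl_write_raw(int cpu, struct reg_action *ra)
{
	u64 val;

	/* Read-modify-write only the bits selected by ra->mask. */
	ra->err = my_soc_read_reg(cpu, ra->reg, &val);
	if (ra->err)
		return ra->err;
	val = (val & ~ra->mask) | (ra->value & ra->mask);
	ra->err = my_soc_write_reg(cpu, ra->reg, val);
	return ra->err;
}

static struct rapl_if_priv my_rapl_priv = {
	.reg_unit  = MY_RAPL_UNIT_REG,
	.read_raw  = my_rapl_read_raw,
	.write_raw = my_rapl_write_raw,
	/* .regs[][], .limits[] and .control_type would be filled at probe time. */
};

A driver would then hand the filled-in structure to rapl_add_package() and rapl_add_platform_domain(), declared further down in this header.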
+ */ +struct rapl_if_priv { + struct powercap_control_type *control_type; + struct rapl_domain *platform_rapl_domain; + enum cpuhp_state pcap_rapl_online; + u64 reg_unit; + u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX]; + int limits[RAPL_DOMAIN_MAX]; + int (*read_raw)(int cpu, struct reg_action *ra); + int (*write_raw)(int cpu, struct reg_action *ra); +}; + +/* maximum rapl package domain name: package-%d-die-%d */ +#define PACKAGE_DOMAIN_NAME_LENGTH 30 + +struct rapl_package { + unsigned int id; /* logical die id, equals physical 1-die systems */ + unsigned int nr_domains; + unsigned long domain_map; /* bit map of active domains */ + unsigned int power_unit; + unsigned int energy_unit; + unsigned int time_unit; + struct rapl_domain *domains; /* array of domains, sized at runtime */ + struct powercap_zone *power_zone; /* keep track of parent zone */ + unsigned long power_limit_irq; /* keep track of package power limit + * notify interrupt enable status. + */ + struct list_head plist; + int lead_cpu; /* one active cpu per package for access */ + /* Track active cpus */ + struct cpumask cpumask; + char name[PACKAGE_DOMAIN_NAME_LENGTH]; + struct rapl_if_priv *priv; +}; + +struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv); +struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv); +void rapl_remove_package(struct rapl_package *rp); + +int rapl_add_platform_domain(struct rapl_if_priv *priv); +void rapl_remove_platform_domain(struct rapl_if_priv *priv); + +#endif /* __INTEL_RAPL_H__ */ diff --git a/include/linux/intel_th.h b/include/linux/intel_th.h new file mode 100644 index 000000000000..9b7f4c22499c --- /dev/null +++ b/include/linux/intel_th.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Intel(R) Trace Hub data structures for implementing buffer sinks. + * + * Copyright (C) 2019 Intel Corporation. + */ + +#ifndef _INTEL_TH_H_ +#define _INTEL_TH_H_ + +#include <linux/scatterlist.h> + +/* MSC operating modes (MSC_MODE) */ +enum { + MSC_MODE_SINGLE = 0, + MSC_MODE_MULTI, + MSC_MODE_EXI, + MSC_MODE_DEBUG, +}; + +struct msu_buffer { + const char *name; + /* + * ->assign() called when buffer 'mode' is set to this driver + * (aka mode_store()) + * @device: struct device * of the msc + * @mode: allows the driver to set HW mode (see the enum above) + * Returns: a pointer to a private structure associated with this + * msc or NULL in case of error. This private structure + * will then be passed into all other callbacks. + */ + void *(*assign)(struct device *dev, int *mode); + /* ->unassign(): some other mode is selected, clean up */ + void (*unassign)(void *priv); + /* + * ->alloc_window(): allocate memory for the window of a given + * size + * @sgt: pointer to sg_table, can be overridden by the buffer + * driver, or kept intact + * Returns: number of sg table entries <= number of pages; + * 0 is treated as an allocation failure. + */ + int (*alloc_window)(void *priv, struct sg_table **sgt, + size_t size); + void (*free_window)(void *priv, struct sg_table *sgt); + /* ->activate(): trace has started */ + void (*activate)(void *priv); + /* ->deactivate(): trace is about to stop */ + void (*deactivate)(void *priv); + /* + * ->ready(): window @sgt is filled up to the last block OR + * tracing is stopped by the user; this window contains + * @bytes data. The window in question transitions into + * the "LOCKED" state, indicating that it can't be used + * by hardware. 
To clear this state and make the window + * available to the hardware again, call + * intel_th_msc_window_unlock(). + */ + int (*ready)(void *priv, struct sg_table *sgt, size_t bytes); +}; + +int intel_th_msu_buffer_register(const struct msu_buffer *mbuf, + struct module *owner); +void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf); +void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt); + +#define module_intel_th_msu_buffer(__buffer) \ +static int __init __buffer##_init(void) \ +{ \ + return intel_th_msu_buffer_register(&(__buffer), THIS_MODULE); \ +} \ +module_init(__buffer##_init); \ +static void __exit __buffer##_exit(void) \ +{ \ + intel_th_msu_buffer_unregister(&(__buffer)); \ +} \ +module_exit(__buffer##_exit); + +#endif /* _INTEL_TH_H_ */ diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index 63caccadc2db..b16f9effa555 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -36,6 +36,8 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec, * @nodes: internal list of the interconnect provider nodes * @set: pointer to device specific set operation function * @aggregate: pointer to device specific aggregate operation function + * @pre_aggregate: pointer to device specific function that is called + * before the aggregation begins (optional) * @xlate: provider-specific callback for mapping nodes from phandle arguments * @dev: the device this interconnect provider belongs to * @users: count of active users @@ -45,8 +47,9 @@ struct icc_provider { struct list_head provider_list; struct list_head nodes; int (*set)(struct icc_node *src, struct icc_node *dst); - int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw, - u32 *agg_avg, u32 *agg_peak); + int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw, + u32 peak_bw, u32 *agg_avg, u32 *agg_peak); + void (*pre_aggregate)(struct icc_node *node); struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data); struct device *dev; int users; diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h index dc25864755ba..d70a914cba11 100644 --- a/include/linux/interconnect.h +++ b/include/linux/interconnect.h @@ -30,6 +30,7 @@ struct icc_path *icc_get(struct device *dev, const int src_id, struct icc_path *of_icc_get(struct device *dev, const char *name); void icc_put(struct icc_path *path); int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); +void icc_set_tag(struct icc_path *path, u32 tag); #else @@ -54,6 +55,10 @@ static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw) return 0; } +static inline void icc_set_tag(struct icc_path *path, u32 tag) +{ +} + #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_H */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c7eef32e7739..89fc59dab57d 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -52,7 +52,7 @@ * irq line disabled until the threaded handler has been run. * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee * that this interrupt will wake the system from a suspended - * state. See Documentation/power/suspend-and-interrupts.txt + * state. 
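As an illustrative aside (not part of the diff), a skeleton buffer sink for the include/linux/intel_th.h interface introduced above could register itself with the module_intel_th_msu_buffer() convenience macro. All my_sink_* names are hypothetical, and a complete sink would likely also supply ->alloc_window()/->free_window().

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/intel_th.h>

struct my_sink {
	struct device *dev;
};

static void *my_sink_assign(struct device *dev, int *mode)
{
	struct my_sink *sink = kzalloc(sizeof(*sink), GFP_KERNEL);

	if (!sink)
		return NULL;

	sink->dev = dev;
	*mode = MSC_MODE_MULTI;	/* pick a hardware mode for the MSC */
	return sink;
}

static void my_sink_unassign(void *priv)
{
	kfree(priv);
}

static int my_sink_ready(void *priv, struct sg_table *sgt, size_t bytes)
{
	struct my_sink *sink = priv;

	/* Consume @bytes of trace data from @sgt, then hand the window back. */
	intel_th_msc_window_unlock(sink->dev, sgt);
	return 0;
}

static const struct msu_buffer my_sink_mbuf = {
	.name     = "my_sink",
	.assign   = my_sink_assign,
	.unassign = my_sink_unassign,
	.ready    = my_sink_ready,
};
module_intel_th_msu_buffer(my_sink_mbuf);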
See Documentation/power/suspend-and-interrupts.rst * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set * IRQF_NO_THREAD - Interrupt cannot be threaded * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device @@ -238,6 +238,7 @@ extern void teardown_percpu_nmi(unsigned int irq); /* The following three functions are for the core kernel use only. */ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); +extern void rearm_wake_irq(unsigned int irq); /** * struct irq_affinity_notify - context for notification of IRQ affinity changes @@ -472,7 +473,11 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool state); #ifdef CONFIG_IRQ_FORCED_THREADING +# ifdef CONFIG_PREEMPT_RT +# define force_irqthreads (true) +# else extern bool force_irqthreads; +# endif #else #define force_irqthreads (0) #endif diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h index 855476145fe1..aaa8a0767aa3 100644 --- a/include/linux/interval_tree_generic.h +++ b/include/linux/interval_tree_generic.h @@ -30,26 +30,8 @@ \ /* Callbacks for augmented rbtree insert and remove */ \ \ -static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \ -{ \ - ITTYPE max = ITLAST(node), subtree_last; \ - if (node->ITRB.rb_left) { \ - subtree_last = rb_entry(node->ITRB.rb_left, \ - ITSTRUCT, ITRB)->ITSUBTREE; \ - if (max < subtree_last) \ - max = subtree_last; \ - } \ - if (node->ITRB.rb_right) { \ - subtree_last = rb_entry(node->ITRB.rb_right, \ - ITSTRUCT, ITRB)->ITSUBTREE; \ - if (max < subtree_last) \ - max = subtree_last; \ - } \ - return max; \ -} \ - \ -RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \ - ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \ +RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \ + ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \ \ /* Insert / remove interval nodes from the tree */ \ \ diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 76969a564831..ec7a13405f10 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -1,7 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IO_PGTABLE_H #define __IO_PGTABLE_H + #include <linux/bitops.h> +#include <linux/iommu.h> /* * Public API for use by IOMMU drivers @@ -17,22 +19,31 @@ enum io_pgtable_fmt { }; /** - * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. + * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management. * - * @tlb_flush_all: Synchronously invalidate the entire TLB context. - * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. - * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and - * any corresponding page table updates are visible to the - * IOMMU. + * @tlb_flush_all: Synchronously invalidate the entire TLB context. + * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state + * (sometimes referred to as the "walk cache") for a virtual + * address range. + * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual + * address range. + * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a + * single page. IOMMUs that cannot batch TLB invalidation + * operations efficiently will typically issue them here, but + * others may decide to update the iommu_iotlb_gather structure + * and defer the invalidation until iommu_tlb_sync() instead. 
* * Note that these can all be called in atomic context and must therefore * not block. */ -struct iommu_gather_ops { +struct iommu_flush_ops { void (*tlb_flush_all)(void *cookie); - void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule, - bool leaf, void *cookie); - void (*tlb_sync)(void *cookie); + void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule, + void *cookie); + void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule, + void *cookie); + void (*tlb_add_page)(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie); }; /** @@ -44,6 +55,8 @@ struct iommu_gather_ops { * tables. * @ias: Input address (iova) size, in bits. * @oas: Output address (paddr) size, in bits. + * @coherent_walk A flag to indicate whether or not page table walks made + * by the IOMMU are coherent with the CPU caches. * @tlb: TLB management callbacks for this set of tables. * @iommu_dev: The device representing the DMA configuration for the * page table walker. @@ -63,15 +76,9 @@ struct io_pgtable_cfg { * (unmapped) entries but the hardware might do so anyway, perform * TLB maintenance when mapping as well as when unmapping. * - * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all - * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit - * when the SoC is in "4GB mode" and they can only access the high - * remap of DRAM (0x1_00000000 to 0x1_ffffffff). - * - * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever - * be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a - * software-emulated IOMMU), such that pagetable updates need not - * be treated as explicit DMA data. + * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend + * to support up to 34 bits PA where the bit32 and bit33 are + * encoded in the bit9 and bit4 of the PTE respectively. 
* * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs * on unmap, for DMA domains using the flush queue mechanism for @@ -80,14 +87,14 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) - #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3) - #define IO_PGTABLE_QUIRK_NO_DMA BIT(4) - #define IO_PGTABLE_QUIRK_NON_STRICT BIT(5) + #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3) + #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; unsigned int oas; - const struct iommu_gather_ops *tlb; + bool coherent_walk; + const struct iommu_flush_ops *tlb; struct device *iommu_dev; /* Low-level data specific to the table format */ @@ -131,7 +138,7 @@ struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int prot); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, - size_t size); + size_t size, struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); }; @@ -187,15 +194,27 @@ static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) iop->cfg.tlb->tlb_flush_all(iop->cookie); } -static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, - unsigned long iova, size_t size, size_t granule, bool leaf) +static inline void +io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova, + size_t size, size_t granule) +{ + iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); +} + +static inline void +io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova, + size_t size, size_t granule) { - iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); + iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie); } -static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) +static inline void +io_pgtable_tlb_add_page(struct io_pgtable *iop, + struct iommu_iotlb_gather * gather, unsigned long iova, + size_t granule) { - iop->cfg.tlb->tlb_sync(iop->cookie); + if (iop->cfg.tlb->tlb_add_page) + iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie); } /** diff --git a/include/linux/io.h b/include/linux/io.h index 9876e5801a9d..accac822336a 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -33,6 +33,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end, #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP void __init ioremap_huge_init(void); +int arch_ioremap_p4d_supported(void); int arch_ioremap_pud_supported(void); int arch_ioremap_pmd_supported(void); #else diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h deleted file mode 100644 index 51e2b9fb6372..000000000000 --- a/include/linux/ioc4.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. 
- */ - -#ifndef _LINUX_IOC4_H -#define _LINUX_IOC4_H - -#include <linux/interrupt.h> - -/*************** - * Definitions * - ***************/ - -/* Miscellaneous values inherent to hardware */ - -#define IOC4_EXTINT_COUNT_DIVISOR 520 /* PCI clocks per COUNT tick */ - -/*********************************** - * Structures needed by subdrivers * - ***********************************/ - -/* This structure fully describes the IOC4 miscellaneous registers which - * appear at bar[0]+0x00000 through bar[0]+0x0005c. The corresponding - * PCI resource is managed by the main IOC4 driver because it contains - * registers of interest to many different IOC4 subdrivers. - */ -struct ioc4_misc_regs { - /* Miscellaneous IOC4 registers */ - union ioc4_pci_err_addr_l { - uint32_t raw; - struct { - uint32_t valid:1; /* Address captured */ - uint32_t master_id:4; /* Unit causing error - * 0/1: Serial port 0 TX/RX - * 2/3: Serial port 1 TX/RX - * 4/5: Serial port 2 TX/RX - * 6/7: Serial port 3 TX/RX - * 8: ATA/ATAPI - * 9-15: Undefined - */ - uint32_t mul_err:1; /* Multiple errors occurred */ - uint32_t addr:26; /* Bits 31-6 of error addr */ - } fields; - } pci_err_addr_l; - uint32_t pci_err_addr_h; /* Bits 63-32 of error addr */ - union ioc4_sio_int { - uint32_t raw; - struct { - uint8_t tx_mt:1; /* TX ring buffer empty */ - uint8_t rx_full:1; /* RX ring buffer full */ - uint8_t rx_high:1; /* RX high-water exceeded */ - uint8_t rx_timer:1; /* RX timer has triggered */ - uint8_t delta_dcd:1; /* DELTA_DCD seen */ - uint8_t delta_cts:1; /* DELTA_CTS seen */ - uint8_t intr_pass:1; /* Interrupt pass-through */ - uint8_t tx_explicit:1; /* TX, MCW, or delay complete */ - } fields[4]; - } sio_ir; /* Serial interrupt state */ - union ioc4_other_int { - uint32_t raw; - struct { - uint32_t ata_int:1; /* ATA port passthru */ - uint32_t ata_memerr:1; /* ATA halted by mem error */ - uint32_t memerr:4; /* Serial halted by mem err */ - uint32_t kbd_int:1; /* kbd/mouse intr asserted */ - uint32_t reserved:16; /* zero */ - uint32_t rt_int:1; /* INT_OUT section latch */ - uint32_t gen_int:8; /* Intr. from generic pins */ - } fields; - } other_ir; /* Other interrupt state */ - union ioc4_sio_int sio_ies; /* Serial interrupt enable set */ - union ioc4_other_int other_ies; /* Other interrupt enable set */ - union ioc4_sio_int sio_iec; /* Serial interrupt enable clear */ - union ioc4_other_int other_iec; /* Other interrupt enable clear */ - union ioc4_sio_cr { - uint32_t raw; - struct { - uint32_t cmd_pulse:4; /* Bytebus strobe width */ - uint32_t arb_diag:3; /* PCI bus requester */ - uint32_t sio_diag_idle:1; /* Active ser req? */ - uint32_t ata_diag_idle:1; /* Active ATA req? 
*/ - uint32_t ata_diag_active:1; /* ATA req is winner */ - uint32_t reserved:22; /* zero */ - } fields; - } sio_cr; - uint32_t unused1; - union ioc4_int_out { - uint32_t raw; - struct { - uint32_t count:16; /* Period control */ - uint32_t mode:3; /* Output signal shape */ - uint32_t reserved:11; /* zero */ - uint32_t diag:1; /* Timebase control */ - uint32_t int_out:1; /* Current value */ - } fields; - } int_out; /* External interrupt output control */ - uint32_t unused2; - union ioc4_gpcr { - uint32_t raw; - struct { - uint32_t dir:8; /* Pin direction */ - uint32_t edge:8; /* Edge/level mode */ - uint32_t reserved1:4; /* zero */ - uint32_t int_out_en:1; /* INT_OUT enable */ - uint32_t reserved2:11; /* zero */ - } fields; - } gpcr_s; /* Generic PIO control set */ - union ioc4_gpcr gpcr_c; /* Generic PIO control clear */ - union ioc4_gpdr { - uint32_t raw; - struct { - uint32_t gen_pin:8; /* State of pins */ - uint32_t reserved:24; - } fields; - } gpdr; /* Generic PIO data */ - uint32_t unused3; - union ioc4_gppr { - uint32_t raw; - struct { - uint32_t gen_pin:1; /* Single pin state */ - uint32_t reserved:31; - } fields; - } gppr[8]; /* Generic PIO pins */ -}; - -/* Masks for GPCR DIR pins */ -#define IOC4_GPCR_DIR_0 0x01 /* External interrupt output */ -#define IOC4_GPCR_DIR_1 0x02 /* External interrupt input */ -#define IOC4_GPCR_DIR_2 0x04 -#define IOC4_GPCR_DIR_3 0x08 /* Keyboard/mouse presence */ -#define IOC4_GPCR_DIR_4 0x10 /* Ser. port 0 xcvr select (0=232, 1=422) */ -#define IOC4_GPCR_DIR_5 0x20 /* Ser. port 1 xcvr select (0=232, 1=422) */ -#define IOC4_GPCR_DIR_6 0x40 /* Ser. port 2 xcvr select (0=232, 1=422) */ -#define IOC4_GPCR_DIR_7 0x80 /* Ser. port 3 xcvr select (0=232, 1=422) */ - -/* Masks for GPCR EDGE pins */ -#define IOC4_GPCR_EDGE_0 0x01 -#define IOC4_GPCR_EDGE_1 0x02 /* External interrupt input */ -#define IOC4_GPCR_EDGE_2 0x04 -#define IOC4_GPCR_EDGE_3 0x08 -#define IOC4_GPCR_EDGE_4 0x10 -#define IOC4_GPCR_EDGE_5 0x20 -#define IOC4_GPCR_EDGE_6 0x40 -#define IOC4_GPCR_EDGE_7 0x80 - -#define IOC4_VARIANT_IO9 0x0900 -#define IOC4_VARIANT_PCI_RT 0x0901 -#define IOC4_VARIANT_IO10 0x1000 - -/* One of these per IOC4 */ -struct ioc4_driver_data { - struct list_head idd_list; - unsigned long idd_bar0; - struct pci_dev *idd_pdev; - const struct pci_device_id *idd_pci_id; - struct ioc4_misc_regs __iomem *idd_misc_regs; - unsigned long count_period; - void *idd_serial_data; - unsigned int idd_variant; -}; - -/* One per submodule */ -struct ioc4_submodule { - struct list_head is_list; - char *is_name; - struct module *is_owner; - int (*is_probe) (struct ioc4_driver_data *); - int (*is_remove) (struct ioc4_driver_data *); -}; - -#define IOC4_NUM_CARDS 8 /* max cards per partition */ - -/********************************** - * Functions needed by submodules * - **********************************/ - -extern int ioc4_register_submodule(struct ioc4_submodule *); -extern void ioc4_unregister_submodule(struct ioc4_submodule *); - -#endif /* _LINUX_IOC4_H */ diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 2103b94cb1bf..7aa5d6117936 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -7,6 +7,7 @@ #include <linux/mm.h> #include <linux/types.h> #include <linux/mm_types.h> +#include <linux/blkdev.h> struct address_space; struct fiemap_extent_info; @@ -35,6 +36,7 @@ struct vm_fault; #define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */ #define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */ #define IOMAP_F_BUFFER_HEAD 0x04 /* file system requires buffer 
heads */ +#define IOMAP_F_SIZE_CHANGED 0x08 /* file size has changed */ /* * Flags that only need to be reported for IOMAP_REPORT requests: @@ -68,6 +70,12 @@ struct iomap { const struct iomap_page_ops *page_ops; }; +static inline sector_t +iomap_sector(struct iomap *iomap, loff_t pos) +{ + return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; +} + /* * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare * and page_done will be called for each page written to. This only applies to @@ -115,6 +123,16 @@ struct iomap_ops { }; /* + * Main iomap iterator function. + */ +typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len, + void *data, struct iomap *iomap); + +loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length, + unsigned flags, const struct iomap_ops *ops, void *data, + iomap_actor_t actor); + +/* * Structure allocate for each page when block size < PAGE_SIZE to track * sub-page uptodate status and I/O completions. */ @@ -170,10 +188,14 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno, */ #define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */ #define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */ -typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret, - unsigned flags); + +struct iomap_dio_ops { + int (*end_io)(struct kiocb *iocb, ssize_t size, int error, + unsigned flags); +}; + ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, - const struct iomap_ops *ops, iomap_dio_end_io_t end_io); + const struct iomap_ops *ops, const struct iomap_dio_ops *dops); int iomap_dio_iopoll(struct kiocb *kiocb, bool spin); #ifdef CONFIG_SWAP diff --git a/include/linux/iommu.h b/include/linux/iommu.h index e552c3b63f6f..29bac5345563 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -13,6 +13,7 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/of.h> +#include <uapi/linux/iommu.h> #define IOMMU_READ (1 << 0) #define IOMMU_WRITE (1 << 1) @@ -29,6 +30,12 @@ * if the IOMMU page table format is equivalent. */ #define IOMMU_PRIV (1 << 5) +/* + * Non-coherent masters on few Qualcomm SoCs can use this page protection flag + * to set correct cacheability attributes to use an outer level of cache - + * last level cache, aka system cache. 
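For illustration (not part of the patch), a filesystem adapting to the new struct iomap_dio_ops from the include/linux/iomap.h hunk above wraps its old completion callback in the ops structure and passes it to iomap_dio_rw(). The myfs_* names are hypothetical.

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/iomap.h>

/* Hypothetical: the filesystem's existing iomap_ops. */
extern const struct iomap_ops myfs_iomap_ops;

static int myfs_dio_end_io(struct kiocb *iocb, ssize_t size, int error,
			   unsigned flags)
{
	if (error)
		return error;

	/* e.g. convert unwritten extents here when IOMAP_DIO_UNWRITTEN is set */
	return 0;
}

static const struct iomap_dio_ops myfs_dio_ops = {
	.end_io = myfs_dio_end_io,
};

static ssize_t myfs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_dio_rw(iocb, from, &myfs_iomap_ops, &myfs_dio_ops);
}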
+ */ +#define IOMMU_QCOM_SYS_CACHE (1 << 6) struct iommu_ops; struct iommu_group; @@ -37,6 +44,7 @@ struct device; struct iommu_domain; struct notifier_block; struct iommu_sva; +struct iommu_fault_event; /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 @@ -46,6 +54,7 @@ typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *); typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *, void *); +typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); struct iommu_domain_geometry { dma_addr_t aperture_start; /* First address that can be mapped */ @@ -123,6 +132,12 @@ enum iommu_attr { enum iommu_resv_type { /* Memory regions which must be mapped 1:1 at all times */ IOMMU_RESV_DIRECT, + /* + * Memory regions which are advertised to be 1:1 but are + * commonly considered relaxable in some conditions, + * for instance in device assignment use case (USB, Graphics) + */ + IOMMU_RESV_DIRECT_RELAXABLE, /* Arbitrary "never map this or give it to a device" address ranges */ IOMMU_RESV_RESERVED, /* Hardware MSI region (untranslated) */ @@ -177,6 +192,23 @@ struct iommu_sva_ops { #ifdef CONFIG_IOMMU_API /** + * struct iommu_iotlb_gather - Range information for a pending IOTLB flush + * + * @start: IOVA representing the start of the range to be flushed + * @end: IOVA representing the end of the range to be flushed (exclusive) + * @pgsize: The interval at which to perform the flush + * + * This structure is intended to be updated by multiple calls to the + * ->unmap() function in struct iommu_ops before eventually being passed + * into ->iotlb_sync(). + */ +struct iommu_iotlb_gather { + unsigned long start; + unsigned long end; + size_t pgsize; +}; + +/** * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @domain_alloc: allocate iommu domain @@ -186,7 +218,6 @@ struct iommu_sva_ops { * @map: map a physically contiguous memory region to an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain - * @iotlb_range_add: Add a given iova range to the flush queue for this domain * @iotlb_sync_map: Sync mappings created recently using @map to the hardware * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush * queue @@ -212,6 +243,7 @@ struct iommu_sva_ops { * @sva_bind: Bind process address space to device * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle + * @page_response: handle page request response * @pgsize_bitmap: bitmap of all possible supported page sizes */ struct iommu_ops { @@ -226,12 +258,11 @@ struct iommu_ops { int (*map)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size); + size_t size, struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); - void (*iotlb_range_add)(struct iommu_domain *domain, - unsigned long iova, size_t size); void (*iotlb_sync_map)(struct iommu_domain *domain); - void (*iotlb_sync)(struct iommu_domain *domain); + void (*iotlb_sync)(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); @@ -272,6 +303,10 @@ struct iommu_ops { void 
(*sva_unbind)(struct iommu_sva *handle); int (*sva_get_pasid)(struct iommu_sva *handle); + int (*page_response)(struct device *dev, + struct iommu_fault_event *evt, + struct iommu_page_response *msg); + unsigned long pgsize_bitmap; }; @@ -289,6 +324,48 @@ struct iommu_device { struct device *dev; }; +/** + * struct iommu_fault_event - Generic fault event + * + * Can represent recoverable faults such as a page requests or + * unrecoverable faults such as DMA or IRQ remapping faults. + * + * @fault: fault descriptor + * @list: pending fault event list, used for tracking responses + */ +struct iommu_fault_event { + struct iommu_fault fault; + struct list_head list; +}; + +/** + * struct iommu_fault_param - per-device IOMMU fault data + * @handler: Callback function to handle IOMMU faults at device level + * @data: handler private data + * @faults: holds the pending faults which needs response + * @lock: protect pending faults list + */ +struct iommu_fault_param { + iommu_dev_fault_handler_t handler; + void *data; + struct list_head faults; + struct mutex lock; +}; + +/** + * struct iommu_param - collection of per-device IOMMU data + * + * @fault_param: IOMMU detected device fault reporting data + * + * TODO: migrate other per device data pointers under iommu_dev_data, e.g. + * struct iommu_group *iommu_group; + * struct iommu_fwspec *iommu_fwspec; + */ +struct iommu_param { + struct mutex lock; + struct iommu_fault_param *fault_param; +}; + int iommu_device_register(struct iommu_device *iommu); void iommu_device_unregister(struct iommu_device *iommu); int iommu_device_sysfs_add(struct iommu_device *iommu, @@ -316,6 +393,13 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev) return (struct iommu_device *)dev_get_drvdata(dev); } +static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) +{ + *gather = (struct iommu_iotlb_gather) { + .start = ULONG_MAX, + }; +} + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ @@ -340,7 +424,8 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size); extern size_t iommu_unmap_fast(struct iommu_domain *domain, - unsigned long iova, size_t size); + unsigned long iova, size_t size, + struct iommu_iotlb_gather *iotlb_gather); extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg,unsigned int nents, int prot); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); @@ -350,6 +435,10 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain, extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern int iommu_request_dm_for_dev(struct device *dev); +extern int iommu_request_dma_domain_for_dev(struct device *dev); +extern void iommu_set_default_passthrough(bool cmd_line); +extern void iommu_set_default_translated(bool cmd_line); +extern bool iommu_default_passthrough(void); extern struct iommu_resv_region * iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, enum iommu_resv_type type); @@ -378,6 +467,17 @@ extern int iommu_group_register_notifier(struct iommu_group *group, struct notifier_block *nb); extern int iommu_group_unregister_notifier(struct iommu_group *group, struct 
notifier_block *nb); +extern int iommu_register_device_fault_handler(struct device *dev, + iommu_dev_fault_handler_t handler, + void *data); + +extern int iommu_unregister_device_fault_handler(struct device *dev); + +extern int iommu_report_device_fault(struct device *dev, + struct iommu_fault_event *evt); +extern int iommu_page_response(struct device *dev, + struct iommu_page_response *msg); + extern int iommu_group_id(struct iommu_group *group); extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); @@ -402,17 +502,38 @@ static inline void iommu_flush_tlb_all(struct iommu_domain *domain) domain->ops->flush_iotlb_all(domain); } -static inline void iommu_tlb_range_add(struct iommu_domain *domain, - unsigned long iova, size_t size) +static inline void iommu_tlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather) { - if (domain->ops->iotlb_range_add) - domain->ops->iotlb_range_add(domain, iova, size); + if (domain->ops->iotlb_sync) + domain->ops->iotlb_sync(domain, iotlb_gather); + + iommu_iotlb_gather_init(iotlb_gather); } -static inline void iommu_tlb_sync(struct iommu_domain *domain) +static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather, + unsigned long iova, size_t size) { - if (domain->ops->iotlb_sync) - domain->ops->iotlb_sync(domain); + unsigned long start = iova, end = start + size; + + /* + * If the new page is disjoint from the current range or is mapped at + * a different granularity, then sync the TLB so that the gather + * structure can be rewritten. + */ + if (gather->pgsize != size || + end < gather->start || start > gather->end) { + if (gather->pgsize) + iommu_tlb_sync(domain, gather); + gather->pgsize = size; + } + + if (gather->end < end) + gather->end = end; + + if (gather->start > start) + gather->start = start; } /* PCI device grouping function */ @@ -492,6 +613,8 @@ struct iommu_ops {}; struct iommu_group {}; struct iommu_fwspec {}; struct iommu_device {}; +struct iommu_fault_param {}; +struct iommu_iotlb_gather {}; static inline bool iommu_present(struct bus_type *bus) { @@ -546,7 +669,8 @@ static inline size_t iommu_unmap(struct iommu_domain *domain, } static inline size_t iommu_unmap_fast(struct iommu_domain *domain, - unsigned long iova, int gfp_order) + unsigned long iova, int gfp_order, + struct iommu_iotlb_gather *iotlb_gather) { return 0; } @@ -562,12 +686,8 @@ static inline void iommu_flush_tlb_all(struct iommu_domain *domain) { } -static inline void iommu_tlb_range_add(struct iommu_domain *domain, - unsigned long iova, size_t size) -{ -} - -static inline void iommu_tlb_sync(struct iommu_domain *domain) +static inline void iommu_tlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather) { } @@ -614,6 +734,24 @@ static inline int iommu_request_dm_for_dev(struct device *dev) return -ENODEV; } +static inline int iommu_request_dma_domain_for_dev(struct device *dev) +{ + return -ENODEV; +} + +static inline void iommu_set_default_passthrough(bool cmd_line) +{ +} + +static inline void iommu_set_default_translated(bool cmd_line) +{ +} + +static inline bool iommu_default_passthrough(void) +{ + return true; +} + static inline int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { @@ -685,6 +823,31 @@ static inline int iommu_group_unregister_notifier(struct iommu_group *group, return 0; } +static inline +int 
iommu_register_device_fault_handler(struct device *dev, + iommu_dev_fault_handler_t handler, + void *data) +{ + return -ENODEV; +} + +static inline int iommu_unregister_device_fault_handler(struct device *dev) +{ + return 0; +} + +static inline +int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) +{ + return -ENODEV; +} + +static inline int iommu_page_response(struct device *dev, + struct iommu_page_response *msg) +{ + return -ENODEV; +} + static inline int iommu_group_id(struct iommu_group *group) { return -ENODEV; @@ -722,6 +885,16 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev) return NULL; } +static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) +{ +} + +static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather, + unsigned long iova, size_t size) +{ +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 3908353deec6..35e15dfd4155 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -21,7 +21,7 @@ * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.txt). + * is used (see Documentation/timers/timers-howto.rst). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout. In either @@ -60,7 +60,7 @@ * @cond: Break condition (usually involving @val) * @delay_us: Time to udelay between reads in us (0 tight-loops). Should * be less than ~10us since udelay is used (see - * Documentation/timers/timers-howto.txt). + * Documentation/timers/timers-howto.rst). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout. In either diff --git a/include/linux/ioport.h b/include/linux/ioport.h index da0ebaec25f0..7bddddfc76d6 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -12,6 +12,7 @@ #ifndef __ASSEMBLY__ #include <linux/compiler.h> #include <linux/types.h> +#include <linux/bits.h> /* * Resources are tree-like, allowing * nesting etc.. @@ -132,7 +133,15 @@ enum { IORES_DESC_PERSISTENT_MEMORY = 4, IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, - IORES_DESC_DEVICE_PUBLIC_MEMORY = 7, + IORES_DESC_RESERVED = 7, +}; + +/* + * Flags controlling ioremap() behavior. 
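A short sketch (illustration only, not part of the patch) of the deferred-invalidation pattern enabled by the include/linux/iommu.h changes above: callers batch invalidations in an iommu_iotlb_gather while unmapping and issue a single sync afterwards.

#include <linux/iommu.h>

static size_t my_unmap_range(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&gather);

	/* Unmap without synchronous per-page flushes... */
	unmapped = iommu_unmap_fast(domain, iova, size, &gather);

	/* ...then issue one TLB sync covering everything gathered. */
	iommu_tlb_sync(domain, &gather);

	return unmapped;
}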
+ */ +enum { + IORES_MAP_SYSTEM_RAM = BIT(0), + IORES_MAP_ENCRYPTED = BIT(1), }; /* helpers to define resources */ @@ -286,6 +295,10 @@ static inline bool resource_overlaps(struct resource *r1, struct resource *r2) return (r1->start <= r2->end && r1->end >= r2->start); } +struct resource *devm_request_free_mem_region(struct device *dev, + struct resource *base, unsigned long size); +struct resource *request_free_mem_region(struct resource *base, + unsigned long size, const char *name); #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/iova.h b/include/linux/iova.h index 781b96ac706f..a0637abffee8 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); +bool has_iova_flush_queue(struct iova_domain *iovad); int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); @@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad, { } +static inline bool has_iova_flush_queue(struct iova_domain *iovad) +{ + return false; +} + static inline int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h index 626283858563..b9850f5f1906 100644 --- a/include/linux/irqchip/arm-gic-common.h +++ b/include/linux/irqchip/arm-gic-common.h @@ -36,4 +36,9 @@ struct gic_kvm_info { const struct gic_kvm_info *gic_get_kvm_info(void); +struct irq_domain; +struct fwnode_handle; +int gicv2m_init(struct fwnode_handle *parent_handle, + struct irq_domain *parent); + #endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 67c4b9806d43..5cc10cf7cb3e 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -30,10 +30,22 @@ #define GICD_ICFGR 0x0C00 #define GICD_IGRPMODR 0x0D00 #define GICD_NSACR 0x0E00 +#define GICD_IGROUPRnE 0x1000 +#define GICD_ISENABLERnE 0x1200 +#define GICD_ICENABLERnE 0x1400 +#define GICD_ISPENDRnE 0x1600 +#define GICD_ICPENDRnE 0x1800 +#define GICD_ISACTIVERnE 0x1A00 +#define GICD_ICACTIVERnE 0x1C00 +#define GICD_IPRIORITYRnE 0x2000 +#define GICD_ICFGRnE 0x3000 #define GICD_IROUTER 0x6000 +#define GICD_IROUTERnE 0x8000 #define GICD_IDREGS 0xFFD0 #define GICD_PIDR2 0xFFE8 +#define ESPI_BASE_INTID 4096 + /* * Those registers are actually from GICv2, but the spec demands that they * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). @@ -69,10 +81,13 @@ #define GICD_TYPER_RSS (1U << 26) #define GICD_TYPER_LPIS (1U << 17) #define GICD_TYPER_MBIS (1U << 16) +#define GICD_TYPER_ESPI (1U << 8) #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) #define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) -#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_ESPIS(typer) \ + (((typer) & GICD_TYPER_ESPI) ? 
GICD_TYPER_SPIS((typer) >> 27) : 0) #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) @@ -109,6 +124,18 @@ #define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) +#define EPPI_BASE_INTID 1056 + +#define GICR_TYPER_NR_PPIS(r) \ + ({ \ + unsigned int __ppinum = ((r) >> 27) & 0x1f; \ + unsigned int __nr_ppis = 16; \ + if (__ppinum == 1 || __ppinum == 2) \ + __nr_ppis += __ppinum * 32; \ + \ + __nr_ppis; \ + }) + #define GICR_WAKER_ProcessorSleep (1U << 1) #define GICR_WAKER_ChildrenAsleep (1U << 2) @@ -469,6 +496,7 @@ #define ICC_CTLR_EL1_A3V_SHIFT 15 #define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) #define ICC_CTLR_EL1_RSS (0x1 << 18) +#define ICC_CTLR_EL1_ExtRange (0x1 << 19) #define ICC_PMR_EL1_SHIFT 0 #define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) #define ICC_BPR0_EL1_SHIFT 0 diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 316087da1d09..5686711b0f40 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -157,9 +157,6 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq); */ void gic_init(void __iomem *dist , void __iomem *cpu); -int gicv2m_init(struct fwnode_handle *parent_handle, - struct irq_domain *parent); - void gic_send_sgi(unsigned int cpu_id, unsigned int irq); int gic_get_cpu_id(unsigned int cpu); void gic_migrate_target(unsigned int new_cpu_id); diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h index a783ddb58444..2f6ae7551748 100644 --- a/include/linux/irqchip/irq-partition-percpu.h +++ b/include/linux/irqchip/irq-partition-percpu.h @@ -4,6 +4,9 @@ * Author: Marc Zyngier <marc.zyngier@arm.com> */ +#ifndef __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H +#define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H + #include <linux/fwnode.h> #include <linux/cpumask.h> #include <linux/irqdomain.h> @@ -46,3 +49,5 @@ struct irq_domain *partition_get_domain(struct partition_desc *dsc) return NULL; } #endif + +#endif /* __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 07ec8b390161..583e7abd07f9 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -220,7 +220,7 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) #ifdef CONFIG_IRQ_DOMAIN struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, - const char *name, void *data); + const char *name, phys_addr_t *pa); enum { IRQCHIP_FWNODE_REAL, @@ -241,9 +241,9 @@ struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id) NULL); } -static inline struct fwnode_handle *irq_domain_alloc_fwnode(void *data) +static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa) { - return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, data); + return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa); } void irq_domain_free_fwnode(struct fwnode_handle *fwnode); diff --git a/include/linux/isdn.h b/include/linux/isdn.h deleted file mode 100644 index df97c8444f5d..000000000000 --- a/include/linux/isdn.h +++ /dev/null @@ -1,473 +0,0 @@ -/* $Id: isdn.h,v 1.125.2.3 2004/02/10 01:07:14 keil Exp $ - * - * Main header for the Linux ISDN subsystem (linklevel). 
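As a hypothetical example (not from the patch) of the include/linux/irqdomain.h change above, callers of irq_domain_alloc_fwnode() now pass the irqchip's physical base address rather than an opaque pointer. The my_irqchip_* names are assumptions for this sketch.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/irqdomain.h>

static struct fwnode_handle *my_irqchip_fwnode;

static int __init my_irqchip_early_init(phys_addr_t base)
{
	/* The fwnode is now named after the irqchip's physical address. */
	my_irqchip_fwnode = irq_domain_alloc_fwnode(&base);
	if (!my_irqchip_fwnode)
		return -ENOMEM;

	/* ... create an irq_domain with my_irqchip_fwnode as its fwnode ... */
	return 0;
}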
- * - * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) - * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg - * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ -#ifndef __ISDN_H__ -#define __ISDN_H__ - - -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/major.h> -#include <asm/io.h> -#include <linux/kernel.h> -#include <linux/signal.h> -#include <linux/slab.h> -#include <linux/timer.h> -#include <linux/wait.h> -#include <linux/tty.h> -#include <linux/tty_flip.h> -#include <linux/serial_reg.h> -#include <linux/fcntl.h> -#include <linux/types.h> -#include <linux/interrupt.h> -#include <linux/ip.h> -#include <linux/in.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/tcp.h> -#include <linux/mutex.h> -#include <uapi/linux/isdn.h> - -#define ISDN_TTY_MAJOR 43 -#define ISDN_TTYAUX_MAJOR 44 -#define ISDN_MAJOR 45 - -/* The minor-devicenumbers for Channel 0 and 1 are used as arguments for - * physical Channel-Mapping, so they MUST NOT be changed without changing - * the correspondent code in isdn.c - */ - -#define ISDN_MINOR_B 0 -#define ISDN_MINOR_BMAX (ISDN_MAX_CHANNELS-1) -#define ISDN_MINOR_CTRL 64 -#define ISDN_MINOR_CTRLMAX (64 + (ISDN_MAX_CHANNELS-1)) -#define ISDN_MINOR_PPP 128 -#define ISDN_MINOR_PPPMAX (128 + (ISDN_MAX_CHANNELS-1)) -#define ISDN_MINOR_STATUS 255 - -#ifdef CONFIG_ISDN_PPP - -#ifdef CONFIG_ISDN_PPP_VJ -# include <net/slhc_vj.h> -#endif - -#include <linux/ppp_defs.h> -#include <linux/ppp-ioctl.h> - -#include <linux/isdn_ppp.h> -#endif - -#ifdef CONFIG_ISDN_X25 -# include <linux/concap.h> -#endif - -#include <linux/isdnif.h> - -#define ISDN_DRVIOCTL_MASK 0x7f /* Mask for Device-ioctl */ - -/* Until now unused */ -#define ISDN_SERVICE_VOICE 1 -#define ISDN_SERVICE_AB 1<<1 -#define ISDN_SERVICE_X21 1<<2 -#define ISDN_SERVICE_G4 1<<3 -#define ISDN_SERVICE_BTX 1<<4 -#define ISDN_SERVICE_DFUE 1<<5 -#define ISDN_SERVICE_X25 1<<6 -#define ISDN_SERVICE_TTX 1<<7 -#define ISDN_SERVICE_MIXED 1<<8 -#define ISDN_SERVICE_FW 1<<9 -#define ISDN_SERVICE_GTEL 1<<10 -#define ISDN_SERVICE_BTXN 1<<11 -#define ISDN_SERVICE_BTEL 1<<12 - -/* Macros checking plain usage */ -#define USG_NONE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NONE) -#define USG_RAW(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_RAW) -#define USG_MODEM(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) -#define USG_VOICE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) -#define USG_NET(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NET) -#define USG_FAX(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_FAX) -#define USG_OUTGOING(x) ((x & ISDN_USAGE_OUTGOING)==ISDN_USAGE_OUTGOING) -#define USG_MODEMORVOICE(x) (((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) || \ - ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) ) - -/* Timer-delays and scheduling-flags */ -#define ISDN_TIMER_RES 4 /* Main Timer-Resolution */ -#define ISDN_TIMER_02SEC (HZ/ISDN_TIMER_RES/5) /* Slow-Timer1 .2 sec */ -#define ISDN_TIMER_1SEC (HZ/ISDN_TIMER_RES) /* Slow-Timer2 1 sec */ -#define ISDN_TIMER_RINGING 5 /* tty RINGs = ISDN_TIMER_1SEC * this factor */ -#define ISDN_TIMER_KEEPINT 10 /* Cisco-Keepalive = ISDN_TIMER_1SEC * this factor */ -#define ISDN_TIMER_MODEMREAD 1 -#define ISDN_TIMER_MODEMPLUS 2 -#define ISDN_TIMER_MODEMRING 4 -#define ISDN_TIMER_MODEMXMIT 8 -#define ISDN_TIMER_NETDIAL 16 -#define ISDN_TIMER_NETHANGUP 32 -#define 
ISDN_TIMER_CARRIER 256 /* Wait for Carrier */ -#define ISDN_TIMER_FAST (ISDN_TIMER_MODEMREAD | ISDN_TIMER_MODEMPLUS | \ - ISDN_TIMER_MODEMXMIT) -#define ISDN_TIMER_SLOW (ISDN_TIMER_MODEMRING | ISDN_TIMER_NETHANGUP | \ - ISDN_TIMER_NETDIAL | ISDN_TIMER_CARRIER) - -/* Timeout-Values for isdn_net_dial() */ -#define ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) -#define ISDN_TIMER_DTIMEOUT15 (15*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) -#define ISDN_TIMER_DTIMEOUT60 (60*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1))) - -/* GLOBAL_FLAGS */ -#define ISDN_GLOBAL_STOPPED 1 - -/*=================== Start of ip-over-ISDN stuff =========================*/ - -/* Feature- and status-flags for a net-interface */ -#define ISDN_NET_CONNECTED 0x01 /* Bound to ISDN-Channel */ -#define ISDN_NET_SECURE 0x02 /* Accept calls from phonelist only */ -#define ISDN_NET_CALLBACK 0x04 /* activate callback */ -#define ISDN_NET_CBHUP 0x08 /* hangup before callback */ -#define ISDN_NET_CBOUT 0x10 /* remote machine does callback */ - -#define ISDN_NET_MAGIC 0x49344C02 /* for paranoia-checking */ - -/* Phone-list-element */ -typedef struct { - void *next; - char num[ISDN_MSNLEN]; -} isdn_net_phone; - -/* - Principles when extending structures for generic encapsulation protocol - ("concap") support: - - Stuff which is hardware specific (here i4l-specific) goes in - the netdev -> local structure (here: isdn_net_local) - - Stuff which is encapsulation protocol specific goes in the structure - which holds the linux device structure (here: isdn_net_device) -*/ - -/* Local interface-data */ -typedef struct isdn_net_local_s { - ulong magic; - struct net_device_stats stats; /* Ethernet Statistics */ - int isdn_device; /* Index to isdn-device */ - int isdn_channel; /* Index to isdn-channel */ - int ppp_slot; /* PPPD device slot number */ - int pre_device; /* Preselected isdn-device */ - int pre_channel; /* Preselected isdn-channel */ - int exclusive; /* If non-zero idx to reserved chan.*/ - int flags; /* Connection-flags */ - int dialretry; /* Counter for Dialout-retries */ - int dialmax; /* Max. 
Number of Dial-retries */ - int cbdelay; /* Delay before Callback starts */ - int dtimer; /* Timeout-counter for dialing */ - char msn[ISDN_MSNLEN]; /* MSNs/EAZs for this interface */ - u_char cbhup; /* Flag: Reject Call before Callback*/ - u_char dialstate; /* State for dialing */ - u_char p_encap; /* Packet encapsulation */ - /* 0 = Ethernet over ISDN */ - /* 1 = RAW-IP */ - /* 2 = IP with type field */ - u_char l2_proto; /* Layer-2-protocol */ - /* See ISDN_PROTO_L2..-constants in */ - /* isdnif.h */ - /* 0 = X75/LAPB with I-Frames */ - /* 1 = X75/LAPB with UI-Frames */ - /* 2 = X75/LAPB with BUI-Frames */ - /* 3 = HDLC */ - u_char l3_proto; /* Layer-3-protocol */ - /* See ISDN_PROTO_L3..-constants in */ - /* isdnif.h */ - /* 0 = Transparent */ - int huptimer; /* Timeout-counter for auto-hangup */ - int charge; /* Counter for charging units */ - ulong chargetime; /* Timer for Charging info */ - int hupflags; /* Flags for charge-unit-hangup: */ - /* bit0: chargeint is invalid */ - /* bit1: Getting charge-interval */ - /* bit2: Do charge-unit-hangup */ - /* bit3: Do hangup even on incoming */ - int outgoing; /* Flag: outgoing call */ - int onhtime; /* Time to keep link up */ - int chargeint; /* Interval between charge-infos */ - int onum; /* Flag: at least 1 outgoing number */ - int cps; /* current speed of this interface */ - int transcount; /* byte-counter for cps-calculation */ - int sqfull; /* Flag: netdev-queue overloaded */ - ulong sqfull_stamp; /* Start-Time of overload */ - ulong slavedelay; /* Dynamic bundling delaytime */ - int triggercps; /* BogoCPS needed for trigger slave */ - isdn_net_phone *phone[2]; /* List of remote-phonenumbers */ - /* phone[0] = Incoming Numbers */ - /* phone[1] = Outgoing Numbers */ - isdn_net_phone *dial; /* Pointer to dialed number */ - struct net_device *master; /* Ptr to Master device for slaves */ - struct net_device *slave; /* Ptr to Slave device for masters */ - struct isdn_net_local_s *next; /* Ptr to next link in bundle */ - struct isdn_net_local_s *last; /* Ptr to last link in bundle */ - struct isdn_net_dev_s *netdev; /* Ptr to netdev */ - struct sk_buff_head super_tx_queue; /* List of supervisory frames to */ - /* be transmitted asap */ - atomic_t frame_cnt; /* number of frames currently */ - /* queued in HL driver */ - /* Ptr to orig. hard_header_cache */ - spinlock_t xmit_lock; /* used to protect the xmit path of */ - /* a particular channel (including */ - /* the frame_cnt */ - - int pppbind; /* ippp device for bindings */ - int dialtimeout; /* How long shall we try on dialing? (jiffies) */ - int dialwait; /* How long shall we wait after failed attempt? (jiffies) */ - ulong dialstarted; /* jiffies of first dialing-attempt */ - ulong dialwait_timer; /* jiffies of earliest next dialing-attempt */ - int huptimeout; /* How long will the connection be up? (seconds) */ -#ifdef CONFIG_ISDN_X25 - struct concap_device_ops *dops; /* callbacks used by encapsulator */ -#endif - /* use an own struct for that in later versions */ - ulong cisco_myseq; /* Local keepalive seq. for Cisco */ - ulong cisco_mineseen; /* returned keepalive seq. from remote */ - ulong cisco_yourseq; /* Remote keepalive seq. 
for Cisco */ - int cisco_keepalive_period; /* keepalive period */ - ulong cisco_last_slarp_in; /* jiffie of last keepalive packet we received */ - char cisco_line_state; /* state of line according to keepalive packets */ - char cisco_debserint; /* debugging flag of cisco hdlc with slarp */ - struct timer_list cisco_timer; - struct work_struct tqueue; -} isdn_net_local; - -/* the interface itself */ -typedef struct isdn_net_dev_s { - isdn_net_local *local; - isdn_net_local *queue; /* circular list of all bundled - channels, which are currently - online */ - spinlock_t queue_lock; /* lock to protect queue */ - void *next; /* Pointer to next isdn-interface */ - struct net_device *dev; /* interface to upper levels */ -#ifdef CONFIG_ISDN_PPP - ippp_bundle * pb; /* pointer to the common bundle structure - * with the per-bundle data */ -#endif -#ifdef CONFIG_ISDN_X25 - struct concap_proto *cprot; /* connection oriented encapsulation protocol */ -#endif - -} isdn_net_dev; - -/*===================== End of ip-over-ISDN stuff ===========================*/ - -/*======================= Start of ISDN-tty stuff ===========================*/ - -#define ISDN_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */ -#define ISDN_SERIAL_XMIT_SIZE 1024 /* Default bufsize for write */ -#define ISDN_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */ - -#ifdef CONFIG_ISDN_AUDIO -/* For using sk_buffs with audio we need some private variables - * within each sk_buff. For this purpose, we declare a struct here, - * and put it always at the private skb->cb data array. A few macros help - * accessing the variables. - */ -typedef struct _isdn_audio_data { - unsigned short dle_count; - unsigned char lock; -} isdn_audio_data_t; - -#define ISDN_AUDIO_SKB_DLECOUNT(skb) (((isdn_audio_data_t *)&skb->cb[0])->dle_count) -#define ISDN_AUDIO_SKB_LOCK(skb) (((isdn_audio_data_t *)&skb->cb[0])->lock) -#endif - -/* Private data of AT-command-interpreter */ -typedef struct atemu { - u_char profile[ISDN_MODEM_NUMREG]; /* Modem-Regs. 
Profile 0 */ - u_char mdmreg[ISDN_MODEM_NUMREG]; /* Modem-Registers */ - char pmsn[ISDN_MSNLEN]; /* EAZ/MSNs Profile 0 */ - char msn[ISDN_MSNLEN]; /* EAZ/MSN */ - char plmsn[ISDN_LMSNLEN]; /* Listening MSNs Profile 0 */ - char lmsn[ISDN_LMSNLEN]; /* Listening MSNs */ - char cpn[ISDN_MSNLEN]; /* CalledPartyNumber on incoming call */ - char connmsg[ISDN_CMSGLEN]; /* CONNECT-Msg from HL-Driver */ -#ifdef CONFIG_ISDN_AUDIO - u_char vpar[10]; /* Voice-parameters */ - int lastDLE; /* Flag for voice-coding: DLE seen */ -#endif - int mdmcmdl; /* Length of Modem-Commandbuffer */ - int pluscount; /* Counter for +++ sequence */ - u_long lastplus; /* Timestamp of last + */ - int carrierwait; /* Seconds of carrier waiting */ - char mdmcmd[255]; /* Modem-Commandbuffer */ - unsigned int charge; /* Charge units of current connection */ -} atemu; - -/* Private data (similar to async_struct in <linux/serial.h>) */ -typedef struct modem_info { - int magic; - struct tty_port port; - int x_char; /* xon/xoff character */ - int mcr; /* Modem control register */ - int msr; /* Modem status register */ - int lsr; /* Line status register */ - int line; - int online; /* 1 = B-Channel is up, drop data */ - /* 2 = B-Channel is up, deliver d.*/ - int dialing; /* Dial in progress or ATA */ - int closing; - int rcvsched; /* Receive needs schedule */ - int isdn_driver; /* Index to isdn-driver */ - int isdn_channel; /* Index to isdn-channel */ - int drv_index; /* Index to dev->usage */ - int ncarrier; /* Flag: schedule NO CARRIER */ - unsigned char last_cause[8]; /* Last cause message */ - unsigned char last_num[ISDN_MSNLEN]; - /* Last phone-number */ - unsigned char last_l2; /* Last layer-2 protocol */ - unsigned char last_si; /* Last service */ - unsigned char last_lhup; /* Last hangup local? */ - unsigned char last_dir; /* Last direction (in or out) */ - struct timer_list nc_timer; /* Timer for delayed NO CARRIER */ - int send_outstanding;/* # of outstanding send-requests */ - int xmit_size; /* max. # of chars in xmit_buf */ - int xmit_count; /* # of chars in xmit_buf */ - struct sk_buff_head xmit_queue; /* transmit queue */ - atomic_t xmit_lock; /* Semaphore for isdn_tty_write */ -#ifdef CONFIG_ISDN_AUDIO - int vonline; /* Voice-channel status */ - /* Bit 0 = recording */ - /* Bit 1 = playback */ - /* Bit 2 = playback, DLE-ETX seen */ - struct sk_buff_head dtmf_queue; /* queue for dtmf results */ - void *adpcms; /* state for adpcm decompression */ - void *adpcmr; /* state for adpcm compression */ - void *dtmf_state; /* state for dtmf decoder */ - void *silence_state; /* state for silence detection */ -#endif -#ifdef CONFIG_ISDN_TTY_FAX - struct T30_s *fax; /* T30 Fax Group 3 data/interface */ - int faxonline; /* Fax-channel status */ -#endif - atemu emu; /* AT-emulator data */ - spinlock_t readlock; -} modem_info; - -#define ISDN_MODEM_WINSIZE 8 - -/* Description of one ISDN-tty */ -typedef struct _isdn_modem { - int refcount; /* Number of opens */ - struct tty_driver *tty_modem; /* tty-device */ - struct tty_struct *modem_table[ISDN_MAX_CHANNELS]; /* ?? 
copied from Orig */ - struct ktermios *modem_termios[ISDN_MAX_CHANNELS]; - struct ktermios *modem_termios_locked[ISDN_MAX_CHANNELS]; - modem_info info[ISDN_MAX_CHANNELS]; /* Private data */ -} isdn_modem_t; - -/*======================= End of ISDN-tty stuff ============================*/ - -/*======================== Start of V.110 stuff ============================*/ -#define V110_BUFSIZE 1024 - -typedef struct { - int nbytes; /* 1 Matrixbyte -> nbytes in stream */ - int nbits; /* Number of used bits in streambyte */ - unsigned char key; /* Bitmask in stream eg. 11 (nbits=2) */ - int decodelen; /* Amount of data in decodebuf */ - int SyncInit; /* Number of sync frames to send */ - unsigned char *OnlineFrame; /* Precalculated V110 idle frame */ - unsigned char *OfflineFrame; /* Precalculated V110 sync Frame */ - int framelen; /* Length of frames */ - int skbuser; /* Number of unacked userdata skbs */ - int skbidle; /* Number of unacked idle/sync skbs */ - int introducer; /* Local vars for decoder */ - int dbit; - unsigned char b; - int skbres; /* space to reserve in outgoing skb */ - int maxsize; /* maxbufsize of lowlevel driver */ - unsigned char *encodebuf; /* temporary buffer for encoding */ - unsigned char decodebuf[V110_BUFSIZE]; /* incomplete V110 matrices */ -} isdn_v110_stream; - -/*========================= End of V.110 stuff =============================*/ - -/*======================= Start of general stuff ===========================*/ - -typedef struct { - char *next; - char *private; -} infostruct; - -#define DRV_FLAG_RUNNING 1 -#define DRV_FLAG_REJBUS 2 -#define DRV_FLAG_LOADED 4 - -/* Description of hardware-level-driver */ -typedef struct _isdn_driver { - ulong online; /* Channel-Online flags */ - ulong flags; /* Misc driver Flags */ - int locks; /* Number of locks for this driver */ - int channels; /* Number of channels */ - wait_queue_head_t st_waitq; /* Wait-Queue for status-read's */ - int maxbufsize; /* Maximum Buffersize supported */ - unsigned long pktcount; /* Until now: unused */ - int stavail; /* Chars avail on Status-device */ - isdn_if *interface; /* Interface to driver */ - int *rcverr; /* Error-counters for B-Ch.-receive */ - int *rcvcount; /* Byte-counters for B-Ch.-receive */ -#ifdef CONFIG_ISDN_AUDIO - unsigned long DLEflag; /* Flags: Insert DLE at next read */ -#endif - struct sk_buff_head *rpqueue; /* Pointers to start of Rcv-Queue */ - wait_queue_head_t *rcv_waitq; /* Wait-Queues for B-Channel-Reads */ - wait_queue_head_t *snd_waitq; /* Wait-Queue for B-Channel-Send's */ - char msn2eaz[10][ISDN_MSNLEN]; /* Mapping-Table MSN->EAZ */ -} isdn_driver_t; - -/* Main driver-data */ -typedef struct isdn_devt { - struct module *owner; - spinlock_t lock; - unsigned short flags; /* Bitmapped Flags: */ - int drivers; /* Current number of drivers */ - int channels; /* Current number of channels */ - int net_verbose; /* Verbose-Flag */ - int modempoll; /* Flag: tty-read active */ - spinlock_t timerlock; - int tflags; /* Timer-Flags: */ - /* see ISDN_TIMER_..defines */ - int global_flags; - infostruct *infochain; /* List of open info-devs. 
*/ - wait_queue_head_t info_waitq; /* Wait-Queue for isdninfo */ - struct timer_list timer; /* Misc.-function Timer */ - int chanmap[ISDN_MAX_CHANNELS]; /* Map minor->device-channel */ - int drvmap[ISDN_MAX_CHANNELS]; /* Map minor->driver-index */ - int usage[ISDN_MAX_CHANNELS]; /* Used by tty/ip/voice */ - char num[ISDN_MAX_CHANNELS][ISDN_MSNLEN]; - /* Remote number of active ch.*/ - int m_idx[ISDN_MAX_CHANNELS]; /* Index for mdm.... */ - isdn_driver_t *drv[ISDN_MAX_DRIVERS]; /* Array of drivers */ - isdn_net_dev *netdev; /* Linked list of net-if's */ - char drvid[ISDN_MAX_DRIVERS][20];/* Driver-ID */ - struct task_struct *profd; /* For iprofd */ - isdn_modem_t mdm; /* tty-driver-data */ - isdn_net_dev *rx_netdev[ISDN_MAX_CHANNELS]; /* rx netdev-pointers */ - isdn_net_dev *st_netdev[ISDN_MAX_CHANNELS]; /* stat netdev-pointers */ - ulong ibytes[ISDN_MAX_CHANNELS]; /* Statistics incoming bytes */ - ulong obytes[ISDN_MAX_CHANNELS]; /* Statistics outgoing bytes */ - int v110emu[ISDN_MAX_CHANNELS]; /* V.110 emulator-mode 0=none */ - atomic_t v110use[ISDN_MAX_CHANNELS]; /* Usage-Semaphore for stream */ - isdn_v110_stream *v110[ISDN_MAX_CHANNELS]; /* V.110 private data */ - struct mutex mtx; /* serialize list access*/ - unsigned long global_features; -} isdn_dev; - -extern isdn_dev *dev; - - -#endif /* __ISDN_H__ */ diff --git a/include/linux/isdn/hdlc.h b/include/linux/isdn/hdlc.h deleted file mode 100644 index fe2c1279c139..000000000000 --- a/include/linux/isdn/hdlc.h +++ /dev/null @@ -1,69 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * hdlc.h -- General purpose ISDN HDLC decoder. - * - * Implementation of a HDLC decoder/encoder in software. - * Necessary because some ISDN devices don't have HDLC - * controllers. - * - * Copyright (C) - * 2009 Karsten Keil <keil@b1-systems.de> - * 2002 Wolfgang Mües <wolfgang@iksw-muees.de> - * 2001 Frode Isaksen <fisaksen@bewan.com> - * 2001 Kai Germaschewski <kai.germaschewski@gmx.de> - */ - -#ifndef __ISDNHDLC_H__ -#define __ISDNHDLC_H__ - -struct isdnhdlc_vars { - int bit_shift; - int hdlc_bits1; - int data_bits; - int ffbit_shift; /* encoding only */ - int state; - int dstpos; - - u16 crc; - - u8 cbin; - u8 shift_reg; - u8 ffvalue; - - /* set if transferring data */ - u32 data_received:1; - /* set if D channel (send idle instead of flags) */ - u32 dchannel:1; - /* set if 56K adaptation */ - u32 do_adapt56:1; - /* set if in closing phase (need to send CRC + flag) */ - u32 do_closing:1; - /* set if data is bitreverse */ - u32 do_bitreverse:1; -}; - -/* Feature Flags */ -#define HDLC_56KBIT 0x01 -#define HDLC_DCHANNEL 0x02 -#define HDLC_BITREVERSE 0x04 - -/* - The return value from isdnhdlc_decode is - the frame length, 0 if no complete frame was decoded, - or a negative error number -*/ -#define HDLC_FRAMING_ERROR 1 -#define HDLC_CRC_ERROR 2 -#define HDLC_LENGTH_ERROR 3 - -extern void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features); - -extern int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src, - int slen, int *count, u8 *dst, int dsize); - -extern void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features); - -extern int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, - u16 slen, int *count, u8 *dst, int dsize); - -#endif /* __ISDNHDLC_H__ */ diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h deleted file mode 100644 index 19ab361f9f07..000000000000 --- a/include/linux/isdn_divertif.h +++ /dev/null @@ -1,35 +0,0 @@ -/* $Id: isdn_divertif.h,v 1.4.6.1 2001/09/23 22:25:05 kai Exp 
$ - * - * Header for the diversion supplementary interface for i4l. - * - * Author Werner Cornelius (werner@titro.de) - * Copyright by Werner Cornelius (werner@titro.de) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ -#ifndef _LINUX_ISDN_DIVERTIF_H -#define _LINUX_ISDN_DIVERTIF_H - -#include <linux/isdnif.h> -#include <linux/types.h> -#include <uapi/linux/isdn_divertif.h> - -/***************************************************************/ -/* structure exchanging data between isdn hl and divert module */ -/***************************************************************/ -typedef struct - { ulong if_magic; /* magic info and version */ - int cmd; /* command */ - int (*stat_callback)(isdn_ctrl *); /* supplied by divert module when calling */ - int (*ll_cmd)(isdn_ctrl *); /* supplied by hl on return */ - char * (*drv_to_name)(int); /* map a driver id to name, supplied by hl */ - int (*name_to_drv)(char *); /* map a driver id to name, supplied by hl */ - } isdn_divert_if; - -/*********************/ -/* function register */ -/*********************/ -extern int DIVERT_REG_NAME(isdn_divert_if *); -#endif /* _LINUX_ISDN_DIVERTIF_H */ diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h deleted file mode 100644 index a0070c6dfaf8..000000000000 --- a/include/linux/isdn_ppp.h +++ /dev/null @@ -1,194 +0,0 @@ -/* Linux ISDN subsystem, sync PPP, interface to ipppd - * - * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de) - * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg - * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) - * Copyright 2000-2002 by Kai Germaschewski (kai@germaschewski.name) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ -#ifndef _LINUX_ISDN_PPP_H -#define _LINUX_ISDN_PPP_H - - - - -#ifdef CONFIG_IPPP_FILTER -#include <linux/filter.h> -#endif -#include <uapi/linux/isdn_ppp.h> - -#define DECOMP_ERR_NOMEM (-10) - -#define MP_END_FRAG 0x40 -#define MP_BEGIN_FRAG 0x80 - -#define MP_MAX_QUEUE_LEN 16 - -/* - * We need a way for the decompressor to influence the generation of CCP - * Reset-Requests in a variety of ways. The decompressor is already returning - * a lot of information (generated skb length, error conditions) so we use - * another parameter. This parameter is a pointer to a structure which is - * to be marked valid by the decompressor and only in this case is ever used. - * Furthermore, the only case where this data is used is when the decom- - * pressor returns DECOMP_ERROR. - * - * We use this same struct for the reset entry of the compressor to commu- - * nicate to its caller how to deal with sending of a Reset Ack. In this - * case, expra is not used, but other options still apply (suppressing - * sending with rsend, appending arbitrary data, etc). - */ - -#define IPPP_RESET_MAXDATABYTES 32 - -struct isdn_ppp_resetparams { - unsigned char valid:1; /* rw Is this structure filled at all ? */ - unsigned char rsend:1; /* rw Should we send one at all ? */ - unsigned char idval:1; /* rw Is the id field valid ? */ - unsigned char dtval:1; /* rw Is the data field valid ? */ - unsigned char expra:1; /* rw Is an Ack expected for this Req ? 
*/ - unsigned char id; /* wo Send CCP ResetReq with this id */ - unsigned short maxdlen; /* ro Max bytes to be stored in data field */ - unsigned short dlen; /* rw Bytes stored in data field */ - unsigned char *data; /* wo Data for ResetReq info field */ -}; - -/* - * this is an 'old friend' from ppp-comp.h under a new name - * check the original include for more information - */ -struct isdn_ppp_compressor { - struct isdn_ppp_compressor *next, *prev; - struct module *owner; - int num; /* CCP compression protocol number */ - - void *(*alloc) (struct isdn_ppp_comp_data *); - void (*free) (void *state); - int (*init) (void *state, struct isdn_ppp_comp_data *, - int unit,int debug); - - /* The reset entry needs to get more exact information about the - ResetReq or ResetAck it was called with. The parameters are - obvious. If reset is called without a Req or Ack frame which - could be handed into it, code MUST be set to 0. Using rsparm, - the reset entry can control if and how a ResetAck is returned. */ - - void (*reset) (void *state, unsigned char code, unsigned char id, - unsigned char *data, unsigned len, - struct isdn_ppp_resetparams *rsparm); - - int (*compress) (void *state, struct sk_buff *in, - struct sk_buff *skb_out, int proto); - - int (*decompress) (void *state,struct sk_buff *in, - struct sk_buff *skb_out, - struct isdn_ppp_resetparams *rsparm); - - void (*incomp) (void *state, struct sk_buff *in,int proto); - void (*stat) (void *state, struct compstat *stats); -}; - -extern int isdn_ppp_register_compressor(struct isdn_ppp_compressor *); -extern int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *); -extern int isdn_ppp_dial_slave(char *); -extern int isdn_ppp_hangup_slave(char *); - -typedef struct { - unsigned long seqerrs; - unsigned long frame_drops; - unsigned long overflows; - unsigned long max_queue_len; -} isdn_mppp_stats; - -typedef struct { - int mp_mrru; /* unused */ - struct sk_buff * frags; /* fragments sl list -- use skb->next */ - long frames; /* number of frames in the frame list */ - unsigned int seq; /* last processed packet seq #: any packets - * with smaller seq # will be dropped - * unconditionally */ - spinlock_t lock; - int ref_ct; - /* statistics */ - isdn_mppp_stats stats; -} ippp_bundle; - -#define NUM_RCV_BUFFS 64 - -struct ippp_buf_queue { - struct ippp_buf_queue *next; - struct ippp_buf_queue *last; - char *buf; /* NULL here indicates end of queue */ - int len; -}; - -/* The data structure for one CCP reset transaction */ -enum ippp_ccp_reset_states { - CCPResetIdle, - CCPResetSentReq, - CCPResetRcvdReq, - CCPResetSentAck, - CCPResetRcvdAck -}; - -struct ippp_ccp_reset_state { - enum ippp_ccp_reset_states state; /* State of this transaction */ - struct ippp_struct *is; /* Backlink to device stuff */ - unsigned char id; /* Backlink id index */ - unsigned char ta:1; /* The timer is active (flag) */ - unsigned char expra:1; /* We expect a ResetAck at all */ - int dlen; /* Databytes stored in data */ - struct timer_list timer; /* For timeouts/retries */ - /* This is a hack but seems sufficient for the moment. We do not want - to have this be yet another allocation for some bytes, it is more - memory management overhead than the whole mess is worth. */ - unsigned char data[IPPP_RESET_MAXDATABYTES]; -}; - -/* The data structure keeping track of the currently outstanding CCP Reset - transactions. 
*/ -struct ippp_ccp_reset { - struct ippp_ccp_reset_state *rs[256]; /* One per possible id */ - unsigned char lastid; /* Last id allocated by the engine */ -}; - -struct ippp_struct { - struct ippp_struct *next_link; - int state; - spinlock_t buflock; - struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */ - struct ippp_buf_queue *first; /* pointer to (current) first packet */ - struct ippp_buf_queue *last; /* pointer to (current) last used packet in queue */ - wait_queue_head_t wq; - struct task_struct *tk; - unsigned int mpppcfg; - unsigned int pppcfg; - unsigned int mru; - unsigned int mpmru; - unsigned int mpmtu; - unsigned int maxcid; - struct isdn_net_local_s *lp; - int unit; - int minor; - unsigned int last_link_seqno; - long mp_seqno; -#ifdef CONFIG_ISDN_PPP_VJ - unsigned char *cbuf; - struct slcompress *slcomp; -#endif -#ifdef CONFIG_IPPP_FILTER - struct bpf_prog *pass_filter; /* filter for packets to pass */ - struct bpf_prog *active_filter; /* filter for pkts to reset idle */ -#endif - unsigned long debug; - struct isdn_ppp_compressor *compressor,*decompressor; - struct isdn_ppp_compressor *link_compressor,*link_decompressor; - void *decomp_stat,*comp_stat,*link_decomp_stat,*link_comp_stat; - struct ippp_ccp_reset *reset; /* Allocated on demand, may never be needed */ - unsigned long compflags; -}; - -#endif /* _LINUX_ISDN_PPP_H */ diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h deleted file mode 100644 index 8d80fdc68647..000000000000 --- a/include/linux/isdnif.h +++ /dev/null @@ -1,505 +0,0 @@ -/* $Id: isdnif.h,v 1.43.2.2 2004/01/12 23:08:35 keil Exp $ - * - * Linux ISDN subsystem - * Definition of the interface between the subsystem and its low-level drivers. - * - * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) - * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ -#ifndef __ISDNIF_H__ -#define __ISDNIF_H__ - - -#include <linux/skbuff.h> -#include <uapi/linux/isdnif.h> - -/***************************************************************************/ -/* Extensions made by Werner Cornelius (werner@ikt.de) */ -/* */ -/* The proceed command holds a incoming call in a state to leave processes */ -/* enough time to check whether ist should be accepted. */ -/* The PROT_IO Command extends the interface to make protocol dependent */ -/* features available (call diversion, call waiting...). */ -/* */ -/* The PROT_IO Command is executed with the desired driver id and the arg */ -/* parameter coded as follows: */ -/* The lower 8 bits of arg contain the desired protocol from ISDN_PTYPE */ -/* definitions. The upper 24 bits represent the protocol specific cmd/stat.*/ -/* Any additional data is protocol and command specific. */ -/* This mechanism also applies to the statcallb callback STAT_PROT. */ -/* */ -/* This suggested extension permits an easy expansion of protocol specific */ -/* handling. Extensions may be added at any time without changing the HL */ -/* driver code and not getting conflicts without certifications. */ -/* The well known CAPI 2.0 interface handles such extensions in a similar */ -/* way. Perhaps a protocol specific module may be added and separately */ -/* loaded and linked to the basic isdn module for handling. 
*/ -/***************************************************************************/ - -/*****************/ -/* DSS1 commands */ -/*****************/ -#define DSS1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_EURO) /* invoke a supplementary service */ -#define DSS1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_EURO) /* abort a invoke cmd */ - -/*******************************/ -/* DSS1 Status callback values */ -/*******************************/ -#define DSS1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_EURO) /* Result for invocation */ -#define DSS1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_EURO) /* Error Return for invocation */ -#define DSS1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_EURO) /* Deliver invoke broadcast info */ - - -/*********************************************************************/ -/* structures for DSS1 commands and callback */ -/* */ -/* An action is invoked by sending a DSS1_CMD_INVOKE. The ll_id, proc*/ -/* timeout, datalen and data fields must be set before calling. */ -/* */ -/* The return value is a positive hl_id value also delivered in the */ -/* hl_id field. A value of zero signals no more left hl_id capacitys.*/ -/* A negative return value signals errors in LL. So if the return */ -/* value is <= 0 no action in LL will be taken -> request ignored */ -/* */ -/* The timeout field must be filled with a positive value specifying */ -/* the amount of time the INVOKED process waits for a reaction from */ -/* the network. */ -/* If a response (either error or result) is received during this */ -/* intervall, a reporting callback is initiated and the process will */ -/* be deleted, the hl identifier will be freed. */ -/* If no response is received during the specified intervall, a error*/ -/* callback is initiated with timeout set to -1 and a datalen set */ -/* to 0. */ -/* If timeout is set to a value <= 0 during INVOCATION the process is*/ -/* immediately deleted after sending the data. No callback occurs ! */ -/* */ -/* A currently waiting process may be aborted with INVOKE_ABORT. No */ -/* callback will occur when a process has been aborted. */ -/* */ -/* Broadcast invoke frames from the network are reported via the */ -/* STAT_INVOKE_BRD callback. The ll_id is set to 0, the other fields */ -/* are supplied by the network and not by the HL. 
*/ -/*********************************************************************/ - -/*****************/ -/* NI1 commands */ -/*****************/ -#define NI1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_NI1) /* invoke a supplementary service */ -#define NI1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_NI1) /* abort a invoke cmd */ - -/*******************************/ -/* NI1 Status callback values */ -/*******************************/ -#define NI1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_NI1) /* Result for invocation */ -#define NI1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_NI1) /* Error Return for invocation */ -#define NI1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_NI1) /* Deliver invoke broadcast info */ - -typedef struct - { ulong ll_id; /* ID supplied by LL when executing */ - /* a command and returned by HL for */ - /* INVOKE_RES and INVOKE_ERR */ - int hl_id; /* ID supplied by HL when called */ - /* for executing a cmd and delivered */ - /* for results and errors */ - /* must be supplied by LL when aborting*/ - int proc; /* invoke procedure used by CMD_INVOKE */ - /* returned by callback and broadcast */ - int timeout; /* timeout for INVOKE CMD in ms */ - /* -1 in stat callback when timed out */ - /* error value when error callback */ - int datalen; /* length of cmd or stat data */ - u_char *data;/* pointer to data delivered or send */ - } isdn_cmd_stat; - -/* - * Commands from linklevel to lowlevel - * - */ -#define ISDN_CMD_IOCTL 0 /* Perform ioctl */ -#define ISDN_CMD_DIAL 1 /* Dial out */ -#define ISDN_CMD_ACCEPTD 2 /* Accept an incoming call on D-Chan. */ -#define ISDN_CMD_ACCEPTB 3 /* Request B-Channel connect. */ -#define ISDN_CMD_HANGUP 4 /* Hangup */ -#define ISDN_CMD_CLREAZ 5 /* Clear EAZ(s) of channel */ -#define ISDN_CMD_SETEAZ 6 /* Set EAZ(s) of channel */ -#define ISDN_CMD_GETEAZ 7 /* Get EAZ(s) of channel */ -#define ISDN_CMD_SETSIL 8 /* Set Service-Indicator-List of channel */ -#define ISDN_CMD_GETSIL 9 /* Get Service-Indicator-List of channel */ -#define ISDN_CMD_SETL2 10 /* Set B-Chan. Layer2-Parameter */ -#define ISDN_CMD_GETL2 11 /* Get B-Chan. Layer2-Parameter */ -#define ISDN_CMD_SETL3 12 /* Set B-Chan. Layer3-Parameter */ -#define ISDN_CMD_GETL3 13 /* Get B-Chan. Layer3-Parameter */ -// #define ISDN_CMD_LOCK 14 /* Signal usage by upper levels */ -// #define ISDN_CMD_UNLOCK 15 /* Release usage-lock */ -#define ISDN_CMD_SUSPEND 16 /* Suspend connection */ -#define ISDN_CMD_RESUME 17 /* Resume connection */ -#define ISDN_CMD_PROCEED 18 /* Proceed with call establishment */ -#define ISDN_CMD_ALERT 19 /* Alert after Proceeding */ -#define ISDN_CMD_REDIR 20 /* Redir a incoming call */ -#define ISDN_CMD_PROT_IO 21 /* Protocol specific commands */ -#define CAPI_PUT_MESSAGE 22 /* CAPI message send down or up */ -#define ISDN_CMD_FAXCMD 23 /* FAX commands to HL-driver */ -#define ISDN_CMD_AUDIO 24 /* DSP, DTMF, ... settings */ - -/* - * Status-Values delivered from lowlevel to linklevel via - * statcallb(). 
- * - */ -#define ISDN_STAT_STAVAIL 256 /* Raw status-data available */ -#define ISDN_STAT_ICALL 257 /* Incoming call detected */ -#define ISDN_STAT_RUN 258 /* Signal protocol-code is running */ -#define ISDN_STAT_STOP 259 /* Signal halt of protocol-code */ -#define ISDN_STAT_DCONN 260 /* Signal D-Channel connect */ -#define ISDN_STAT_BCONN 261 /* Signal B-Channel connect */ -#define ISDN_STAT_DHUP 262 /* Signal D-Channel disconnect */ -#define ISDN_STAT_BHUP 263 /* Signal B-Channel disconnect */ -#define ISDN_STAT_CINF 264 /* Charge-Info */ -#define ISDN_STAT_LOAD 265 /* Signal new lowlevel-driver is loaded */ -#define ISDN_STAT_UNLOAD 266 /* Signal unload of lowlevel-driver */ -#define ISDN_STAT_BSENT 267 /* Signal packet sent */ -#define ISDN_STAT_NODCH 268 /* Signal no D-Channel */ -#define ISDN_STAT_ADDCH 269 /* Add more Channels */ -#define ISDN_STAT_CAUSE 270 /* Cause-Message */ -#define ISDN_STAT_ICALLW 271 /* Incoming call without B-chan waiting */ -#define ISDN_STAT_REDIR 272 /* Redir result */ -#define ISDN_STAT_PROT 273 /* protocol IO specific callback */ -#define ISDN_STAT_DISPLAY 274 /* deliver a received display message */ -#define ISDN_STAT_L1ERR 275 /* Signal Layer-1 Error */ -#define ISDN_STAT_FAXIND 276 /* FAX indications from HL-driver */ -#define ISDN_STAT_AUDIO 277 /* DTMF, DSP indications */ -#define ISDN_STAT_DISCH 278 /* Disable/Enable channel usage */ - -/* - * Audio commands - */ -#define ISDN_AUDIO_SETDD 0 /* Set DTMF detection */ -#define ISDN_AUDIO_DTMF 1 /* Rx/Tx DTMF */ - -/* - * Values for errcode field - */ -#define ISDN_STAT_L1ERR_SEND 1 -#define ISDN_STAT_L1ERR_RECV 2 - -/* - * Values for feature-field of interface-struct. - */ -/* Layer 2 */ -#define ISDN_FEATURE_L2_X75I (0x0001 << ISDN_PROTO_L2_X75I) -#define ISDN_FEATURE_L2_X75UI (0x0001 << ISDN_PROTO_L2_X75UI) -#define ISDN_FEATURE_L2_X75BUI (0x0001 << ISDN_PROTO_L2_X75BUI) -#define ISDN_FEATURE_L2_HDLC (0x0001 << ISDN_PROTO_L2_HDLC) -#define ISDN_FEATURE_L2_TRANS (0x0001 << ISDN_PROTO_L2_TRANS) -#define ISDN_FEATURE_L2_X25DTE (0x0001 << ISDN_PROTO_L2_X25DTE) -#define ISDN_FEATURE_L2_X25DCE (0x0001 << ISDN_PROTO_L2_X25DCE) -#define ISDN_FEATURE_L2_V11096 (0x0001 << ISDN_PROTO_L2_V11096) -#define ISDN_FEATURE_L2_V11019 (0x0001 << ISDN_PROTO_L2_V11019) -#define ISDN_FEATURE_L2_V11038 (0x0001 << ISDN_PROTO_L2_V11038) -#define ISDN_FEATURE_L2_MODEM (0x0001 << ISDN_PROTO_L2_MODEM) -#define ISDN_FEATURE_L2_FAX (0x0001 << ISDN_PROTO_L2_FAX) -#define ISDN_FEATURE_L2_HDLC_56K (0x0001 << ISDN_PROTO_L2_HDLC_56K) - -#define ISDN_FEATURE_L2_MASK (0x0FFFF) /* Max. 16 protocols */ -#define ISDN_FEATURE_L2_SHIFT (0) - -/* Layer 3 */ -#define ISDN_FEATURE_L3_TRANS (0x10000 << ISDN_PROTO_L3_TRANS) -#define ISDN_FEATURE_L3_TRANSDSP (0x10000 << ISDN_PROTO_L3_TRANSDSP) -#define ISDN_FEATURE_L3_FCLASS2 (0x10000 << ISDN_PROTO_L3_FCLASS2) -#define ISDN_FEATURE_L3_FCLASS1 (0x10000 << ISDN_PROTO_L3_FCLASS1) - -#define ISDN_FEATURE_L3_MASK (0x0FF0000) /* Max. 8 Protocols */ -#define ISDN_FEATURE_L3_SHIFT (16) - -/* Signaling */ -#define ISDN_FEATURE_P_UNKNOWN (0x1000000 << ISDN_PTYPE_UNKNOWN) -#define ISDN_FEATURE_P_1TR6 (0x1000000 << ISDN_PTYPE_1TR6) -#define ISDN_FEATURE_P_EURO (0x1000000 << ISDN_PTYPE_EURO) -#define ISDN_FEATURE_P_NI1 (0x1000000 << ISDN_PTYPE_NI1) - -#define ISDN_FEATURE_P_MASK (0x0FF000000) /* Max. 
8 Protocols */ -#define ISDN_FEATURE_P_SHIFT (24) - -typedef struct setup_parm { - unsigned char phone[32]; /* Remote Phone-Number */ - unsigned char eazmsn[32]; /* Local EAZ or MSN */ - unsigned char si1; /* Service Indicator 1 */ - unsigned char si2; /* Service Indicator 2 */ - unsigned char plan; /* Numbering plan */ - unsigned char screen; /* Screening info */ -} setup_parm; - - -#ifdef CONFIG_ISDN_TTY_FAX -/* T.30 Fax G3 */ - -#define FAXIDLEN 21 - -typedef struct T30_s { - /* session parameters */ - __u8 resolution; - __u8 rate; - __u8 width; - __u8 length; - __u8 compression; - __u8 ecm; - __u8 binary; - __u8 scantime; - __u8 id[FAXIDLEN]; - /* additional parameters */ - __u8 phase; - __u8 direction; - __u8 code; - __u8 badlin; - __u8 badmul; - __u8 bor; - __u8 fet; - __u8 pollid[FAXIDLEN]; - __u8 cq; - __u8 cr; - __u8 ctcrty; - __u8 minsp; - __u8 phcto; - __u8 rel; - __u8 nbc; - /* remote station parameters */ - __u8 r_resolution; - __u8 r_rate; - __u8 r_width; - __u8 r_length; - __u8 r_compression; - __u8 r_ecm; - __u8 r_binary; - __u8 r_scantime; - __u8 r_id[FAXIDLEN]; - __u8 r_code; -} __packed T30_s; - -#define ISDN_TTY_FAX_CONN_IN 0 -#define ISDN_TTY_FAX_CONN_OUT 1 - -#define ISDN_TTY_FAX_FCON 0 -#define ISDN_TTY_FAX_DIS 1 -#define ISDN_TTY_FAX_FTT 2 -#define ISDN_TTY_FAX_MCF 3 -#define ISDN_TTY_FAX_DCS 4 -#define ISDN_TTY_FAX_TRAIN_OK 5 -#define ISDN_TTY_FAX_EOP 6 -#define ISDN_TTY_FAX_EOM 7 -#define ISDN_TTY_FAX_MPS 8 -#define ISDN_TTY_FAX_DTC 9 -#define ISDN_TTY_FAX_RID 10 -#define ISDN_TTY_FAX_HNG 11 -#define ISDN_TTY_FAX_DT 12 -#define ISDN_TTY_FAX_FCON_I 13 -#define ISDN_TTY_FAX_DR 14 -#define ISDN_TTY_FAX_ET 15 -#define ISDN_TTY_FAX_CFR 16 -#define ISDN_TTY_FAX_PTS 17 -#define ISDN_TTY_FAX_SENT 18 - -#define ISDN_FAX_PHASE_IDLE 0 -#define ISDN_FAX_PHASE_A 1 -#define ISDN_FAX_PHASE_B 2 -#define ISDN_FAX_PHASE_C 3 -#define ISDN_FAX_PHASE_D 4 -#define ISDN_FAX_PHASE_E 5 - -#endif /* TTY_FAX */ - -#define ISDN_FAX_CLASS1_FAE 0 -#define ISDN_FAX_CLASS1_FTS 1 -#define ISDN_FAX_CLASS1_FRS 2 -#define ISDN_FAX_CLASS1_FTM 3 -#define ISDN_FAX_CLASS1_FRM 4 -#define ISDN_FAX_CLASS1_FTH 5 -#define ISDN_FAX_CLASS1_FRH 6 -#define ISDN_FAX_CLASS1_CTRL 7 - -#define ISDN_FAX_CLASS1_OK 0 -#define ISDN_FAX_CLASS1_CONNECT 1 -#define ISDN_FAX_CLASS1_NOCARR 2 -#define ISDN_FAX_CLASS1_ERROR 3 -#define ISDN_FAX_CLASS1_FCERROR 4 -#define ISDN_FAX_CLASS1_QUERY 5 - -typedef struct { - __u8 cmd; - __u8 subcmd; - __u8 para[50]; -} aux_s; - -#define AT_COMMAND 0 -#define AT_EQ_VALUE 1 -#define AT_QUERY 2 -#define AT_EQ_QUERY 3 - -/* CAPI structs */ - -/* this is compatible to the old union size */ -#define MAX_CAPI_PARA_LEN 50 - -typedef struct { - /* Header */ - __u16 Length; - __u16 ApplId; - __u8 Command; - __u8 Subcommand; - __u16 Messagenumber; - - /* Parameter */ - union { - __u32 Controller; - __u32 PLCI; - __u32 NCCI; - } adr; - __u8 para[MAX_CAPI_PARA_LEN]; -} capi_msg; - -/* - * Structure for exchanging above infos - * - */ -typedef struct { - int driver; /* Lowlevel-Driver-ID */ - int command; /* Command or Status (see above) */ - ulong arg; /* Additional Data */ - union { - ulong errcode; /* Type of error with STAT_L1ERR */ - int length; /* Amount of bytes sent with STAT_BSENT */ - u_char num[50]; /* Additional Data */ - setup_parm setup;/* For SETUP msg */ - capi_msg cmsg; /* For CAPI like messages */ - char display[85];/* display message data */ - isdn_cmd_stat isdn_io; /* ISDN IO-parameter/result */ - aux_s aux; /* for modem commands/indications */ -#ifdef CONFIG_ISDN_TTY_FAX - T30_s 
*fax; /* Pointer to ttys fax struct */ -#endif - ulong userdata; /* User Data */ - } parm; -} isdn_ctrl; - -#define dss1_io isdn_io -#define ni1_io isdn_io - -/* - * The interface-struct itself (initialized at load-time of lowlevel-driver) - * - * See Documentation/isdn/INTERFACE for a description, how the communication - * between the ISDN subsystem and its drivers is done. - * - */ -typedef struct { - struct module *owner; - - /* Number of channels supported by this driver - */ - int channels; - - /* - * Maximum Size of transmit/receive-buffer this driver supports. - */ - int maxbufsize; - - /* Feature-Flags for this driver. - * See defines ISDN_FEATURE_... for Values - */ - unsigned long features; - - /* - * Needed for calculating - * dev->hard_header_len = linklayer header + hl_hdrlen; - * Drivers, not supporting sk_buff's should set this to 0. - */ - unsigned short hl_hdrlen; - - /* - * Receive-Callback using sk_buff's - * Parameters: - * int Driver-ID - * int local channel-number (0 ...) - * struct sk_buff *skb received Data - */ - void (*rcvcallb_skb)(int, int, struct sk_buff *); - - /* Status-Callback - * Parameters: - * isdn_ctrl* - * driver = Driver ID. - * command = One of above ISDN_STAT_... constants. - * arg = depending on status-type. - * num = depending on status-type. - */ - int (*statcallb)(isdn_ctrl*); - - /* Send command - * Parameters: - * isdn_ctrl* - * driver = Driver ID. - * command = One of above ISDN_CMD_... constants. - * arg = depending on command. - * num = depending on command. - */ - int (*command)(isdn_ctrl*); - - /* - * Send data using sk_buff's - * Parameters: - * int driverId - * int local channel-number (0...) - * int Flag: Need ACK for this packet. - * struct sk_buff *skb Data to send - */ - int (*writebuf_skb) (int, int, int, struct sk_buff *); - - /* Send raw D-Channel-Commands - * Parameters: - * u_char pointer data - * int length of data - * int driverId - * int local channel-number (0 ...) - */ - int (*writecmd)(const u_char __user *, int, int, int); - - /* Read raw Status replies - * u_char pointer data (volatile) - * int length of buffer - * int driverId - * int local channel-number (0 ...) - */ - int (*readstat)(u_char __user *, int, int, int); - - char id[20]; -} isdn_if; - -/* - * Function which must be called by lowlevel-driver at loadtime with - * the following fields of above struct set: - * - * channels Number of channels that will be supported. - * hl_hdrlen Space to preserve in sk_buff's when sending. Drivers, not - * supporting sk_buff's should set this to 0. - * command Address of Command-Handler. - * features Bitwise coded Features of this driver. (use ISDN_FEATURE_...) - * writebuf_skb Address of Skbuff-Send-Handler. - * writecmd " " D-Channel " which accepts raw D-Ch-Commands. - * readstat " " D-Channel " which delivers raw Status-Data. - * - * The linklevel-driver fills the following fields: - * - * channels Driver-ID assigned to this driver. (Must be used on all - * subsequent callbacks. - * rcvcallb_skb Address of handler for received Skbuff's. - * statcallb " " " for status-changes. 
- * - */ -extern int register_isdn(isdn_if*); -#include <linux/uaccess.h> - -#endif /* __ISDNIF_H__ */ diff --git a/include/linux/iversion.h b/include/linux/iversion.h index be50ef7cedab..2917ef990d43 100644 --- a/include/linux/iversion.h +++ b/include/linux/iversion.h @@ -113,6 +113,30 @@ inode_peek_iversion_raw(const struct inode *inode) } /** + * inode_set_max_iversion_raw - update i_version new value is larger + * @inode: inode to set + * @val: new i_version to set + * + * Some self-managed filesystems (e.g Ceph) will only update the i_version + * value if the new value is larger than the one we already have. + */ +static inline void +inode_set_max_iversion_raw(struct inode *inode, u64 val) +{ + u64 cur, old; + + cur = inode_peek_iversion_raw(inode); + for (;;) { + if (cur > val) + break; + old = atomic64_cmpxchg(&inode->i_version, cur, val); + if (likely(old == cur)) + break; + cur = old; + } +} + +/** * inode_set_iversion - set i_version to a particular value * @inode: inode to set * @val: new i_version value to set diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 5c04181b7c6d..603fbc4e2f70 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -451,6 +451,22 @@ struct jbd2_inode { * @i_flags: Flags of inode [j_list_lock] */ unsigned long i_flags; + + /** + * @i_dirty_start: + * + * Offset in bytes where the dirty range for this inode starts. + * [j_list_lock] + */ + loff_t i_dirty_start; + + /** + * @i_dirty_end: + * + * Inclusive offset in bytes where the dirty range for this inode + * ends. [j_list_lock] + */ + loff_t i_dirty_end; }; struct jbd2_revoke_table_s; @@ -1357,7 +1373,6 @@ void jbd2_journal_set_triggers(struct buffer_head *, struct jbd2_buffer_trigger_type *type); extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); -extern void journal_sync_buffer (struct buffer_head *); extern int jbd2_journal_invalidatepage(journal_t *, struct page *, unsigned int, unsigned int); extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); @@ -1395,8 +1410,12 @@ extern int jbd2_journal_clear_err (journal_t *); extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); extern int jbd2_journal_force_commit(journal_t *); extern int jbd2_journal_force_commit_nested(journal_t *); -extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode); -extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode); +extern int jbd2_journal_inode_ranged_write(handle_t *handle, + struct jbd2_inode *inode, loff_t start_byte, + loff_t length); +extern int jbd2_journal_inode_ranged_wait(handle_t *handle, + struct jbd2_inode *inode, loff_t start_byte, + loff_t length); extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, struct jbd2_inode *inode, loff_t new_size); extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); diff --git a/include/linux/jhash.h b/include/linux/jhash.h index 8037850f3104..ba2f6a9776b6 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -17,7 +17,7 @@ * if SELF_TEST is defined. You can use this free for any purpose. It's in * the public domain. It has no warranty. * - * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu) + * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@netfilter.org) * * I've modified Bob's hash to be useful in the Linux kernel, and * any bugs present are my fault. 
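[Editor's note] The inode_set_max_iversion_raw() helper added in the iversion.h hunk above only ever moves i_version forward: it re-reads the current value and retries the compare-and-exchange until either the stored value is already larger or the swap succeeds. Below is a minimal userspace sketch of that same "store only if larger" pattern, using plain C11 atomics rather than the kernel's atomic64_t API; the names version and set_max_version are made up for illustration and are not part of the patch.

/* Standalone illustration of the cmpxchg loop used by
 * inode_set_max_iversion_raw(): concurrent callers can only
 * raise the counter, never lower it.  Compiles as ordinary C11.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t version = 5;	/* stand-in for inode->i_version */

static void set_max_version(uint64_t val)
{
	uint64_t cur = atomic_load(&version);

	for (;;) {
		if (cur > val)		/* stored value already newer: nothing to do */
			break;
		/* Try to install val; on failure cur is refreshed with the
		 * value some other updater installed, and we retry. */
		if (atomic_compare_exchange_weak(&version, &cur, val))
			break;
	}
}

int main(void)
{
	set_max_version(3);	/* ignored: 3 < 5 */
	set_max_version(9);	/* installed */
	printf("version = %llu\n", (unsigned long long)atomic_load(&version));
	return 0;
}

A failed exchange simply refreshes cur with the latest stored value, so the loop converges the same way the kernel version does when atomic64_cmpxchg() returns an old value different from the one it expected.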
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 3e113a1fa0f1..3526c0aee954 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -215,6 +215,9 @@ extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); extern void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type); +extern bool arch_jump_label_transform_queue(struct jump_entry *entry, + enum jump_label_type type); +extern void arch_jump_label_transform_apply(void); extern int jump_label_text_reserved(void *start, void *end); extern void static_key_slow_inc(struct static_key *key); extern void static_key_slow_dec(struct static_key *key); diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index 42710d5949ba..8c3ee291b2d8 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -60,8 +60,6 @@ extern void jump_label_update_timeout(struct work_struct *work); 0), \ } -#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key) - #else /* !CONFIG_JUMP_LABEL */ struct static_key_deferred { struct static_key key; @@ -95,4 +93,7 @@ jump_label_rate_limit(struct static_key_deferred *key, STATIC_KEY_CHECK_USE(key); } #endif /* CONFIG_JUMP_LABEL */ + +#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key) + #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h index a61dc075e2ce..ac6aba632f2d 100644 --- a/include/linux/kasan-checks.h +++ b/include/linux/kasan-checks.h @@ -2,14 +2,43 @@ #ifndef _LINUX_KASAN_CHECKS_H #define _LINUX_KASAN_CHECKS_H -#if defined(__SANITIZE_ADDRESS__) || defined(__KASAN_INTERNAL) -void kasan_check_read(const volatile void *p, unsigned int size); -void kasan_check_write(const volatile void *p, unsigned int size); +#include <linux/types.h> + +/* + * __kasan_check_*: Always available when KASAN is enabled. This may be used + * even in compilation units that selectively disable KASAN, but must use KASAN + * to validate access to an address. Never use these in header files! + */ +#ifdef CONFIG_KASAN +bool __kasan_check_read(const volatile void *p, unsigned int size); +bool __kasan_check_write(const volatile void *p, unsigned int size); +#else +static inline bool __kasan_check_read(const volatile void *p, unsigned int size) +{ + return true; +} +static inline bool __kasan_check_write(const volatile void *p, unsigned int size) +{ + return true; +} +#endif + +/* + * kasan_check_*: Only available when the particular compilation unit has KASAN + * instrumentation enabled. May be used in header files. 
+ */ +#ifdef __SANITIZE_ADDRESS__ +#define kasan_check_read __kasan_check_read +#define kasan_check_write __kasan_check_write #else -static inline void kasan_check_read(const volatile void *p, unsigned int size) -{ } -static inline void kasan_check_write(const volatile void *p, unsigned int size) -{ } +static inline bool kasan_check_read(const volatile void *p, unsigned int size) +{ + return true; +} +static inline bool kasan_check_write(const volatile void *p, unsigned int size) +{ + return true; +} #endif #endif diff --git a/include/linux/kasan.h b/include/linux/kasan.h index b40ea104dd36..cc8a03cc9674 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -76,8 +76,11 @@ void kasan_free_shadow(const struct vm_struct *vm); int kasan_add_zero_shadow(void *start, unsigned long size); void kasan_remove_zero_shadow(void *start, unsigned long size); -size_t ksize(const void *); -static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } +size_t __ksize(const void *); +static inline void kasan_unpoison_slab(const void *ptr) +{ + kasan_unpoison_shadow(ptr, __ksize(ptr)); +} size_t kasan_metadata_size(struct kmem_cache *cache); bool kasan_save_enable_multi_shot(void); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 0c9bc231107f..d83d403dac2e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -88,6 +88,8 @@ */ #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) +#define typeof_member(T, m) typeof(((T*)0)->m) + #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define DIV_ROUND_DOWN_ULL(ll, d) \ @@ -215,7 +217,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); * might_sleep - annotation for functions that can sleep * * this macro will print a stack trace if it is executed in an atomic - * context (spinlock, irq-handler, ...). + * context (spinlock, irq-handler, ...). Additional sections where blocking is + * not allowed can be annotated with non_block_start() and non_block_end() + * pairs. * * This is a useful debugging help to be able to catch problems early and not * be bitten later when the calling function happens to sleep when it is not @@ -231,6 +235,23 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); # define cant_sleep() \ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) +/** + * non_block_start - annotate the start of section where sleeping is prohibited + * + * This is on behalf of the oom reaper, specifically when it is calling the mmu + * notifiers. The problem is that if the notifier were to block on, for example, + * mutex_lock() and if the process which holds that mutex were to perform a + * sleeping memory allocation, the oom reaper is now blocked on completion of + * that memory allocation. Other blocking calls like wait_event() pose similar + * issues. + */ +# define non_block_start() (current->non_block_count++) +/** + * non_block_end - annotate the end of section where sleeping is prohibited + * + * Closes a section opened by non_block_start(). 
+ */ +# define non_block_end() WARN_ON(current->non_block_count-- == 0) #else static inline void ___might_sleep(const char *file, int line, int preempt_offset) { } @@ -239,6 +260,8 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); # define might_sleep() do { might_resched(); } while (0) # define cant_sleep() do { } while (0) # define sched_annotate_sleep() do { } while (0) +# define non_block_start() do { } while (0) +# define non_block_end() do { } while (0) #endif #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) diff --git a/include/linux/kexec.h b/include/linux/kexec.h index b9b1bc5f9669..1776eb2e43a4 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -125,7 +125,7 @@ typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf, unsigned long cmdline_len); typedef int (kexec_cleanup_t)(void *loader_data); -#ifdef CONFIG_KEXEC_VERIFY_SIG +#ifdef CONFIG_KEXEC_SIG typedef int (kexec_verify_sig_t)(const char *kernel_buf, unsigned long kernel_len); #endif @@ -134,7 +134,7 @@ struct kexec_file_ops { kexec_probe_t *probe; kexec_load_t *load; kexec_cleanup_t *cleanup; -#ifdef CONFIG_KEXEC_VERIFY_SIG +#ifdef CONFIG_KEXEC_SIG kexec_verify_sig_t *verify_sig; #endif }; @@ -183,6 +183,8 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); +int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len); void * __weak arch_kexec_kernel_image_load(struct kimage *image); int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, @@ -216,6 +218,29 @@ extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map, void **addr, unsigned long *sz); #endif /* CONFIG_KEXEC_FILE */ +#ifdef CONFIG_KEXEC_ELF +struct kexec_elf_info { + /* + * Where the ELF binary contents are kept. + * Memory managed by the user of the struct. + */ + const char *buffer; + + const struct elfhdr *ehdr; + const struct elf_phdr *proghdrs; +}; + +int kexec_build_elf_info(const char *buf, size_t len, struct elfhdr *ehdr, + struct kexec_elf_info *elf_info); + +int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, + struct kexec_elf_info *elf_info, + struct kexec_buf *kbuf, + unsigned long *lowest_load_addr); + +void kexec_free_elf_info(struct kexec_elf_info *elf_info); +int kexec_elf_probe(const char *buf, unsigned long len); +#endif struct kimage { kimage_entry_t head; kimage_entry_t *entry; diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 331cab70db09..4ded94bcf274 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -70,6 +70,9 @@ struct key_type { */ size_t def_datalen; + unsigned int flags; +#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */ + /* vet a description */ int (*vet_description)(const char *description); diff --git a/include/linux/key.h b/include/linux/key.h index 1c8b88b455ef..6cf8e71cf8b7 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -31,6 +31,7 @@ typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; +struct net; #ifdef CONFIG_KEYS @@ -77,13 +78,34 @@ struct cred; struct key_type; struct key_owner; +struct key_tag; struct keyring_list; struct keyring_name; +struct key_tag { + struct rcu_head rcu; + refcount_t usage; + bool removed; /* T when subject removed */ +}; + struct keyring_index_key { + /* [!] 
If this structure is altered, the union in struct key must change too! */ + unsigned long hash; /* Hash value */ + union { + struct { +#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */ + u16 desc_len; + char desc[sizeof(long) - 2]; /* First few chars of description */ +#else + char desc[sizeof(long) - 2]; /* First few chars of description */ + u16 desc_len; +#endif + }; + unsigned long x; + }; struct key_type *type; + struct key_tag *domain_tag; /* Domain of operation */ const char *description; - size_t desc_len; }; union key_payload { @@ -197,7 +219,10 @@ struct key { union { struct keyring_index_key index_key; struct { + unsigned long hash; + unsigned long len_desc; struct key_type *type; /* type of key */ + struct key_tag *domain_tag; /* Domain of operation */ char *description; }; }; @@ -248,6 +273,8 @@ extern struct key *key_alloc(struct key_type *type, extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); extern void key_put(struct key *key); +extern bool key_put_tag(struct key_tag *tag); +extern void key_remove_domain(struct key_tag *domain_tag); static inline struct key *__key_get(struct key *key) { @@ -265,26 +292,68 @@ static inline void key_ref_put(key_ref_t key_ref) key_put(key_ref_to_ptr(key_ref)); } -extern struct key *request_key(struct key_type *type, - const char *description, - const char *callout_info); +extern struct key *request_key_tag(struct key_type *type, + const char *description, + struct key_tag *domain_tag, + const char *callout_info); + +extern struct key *request_key_rcu(struct key_type *type, + const char *description, + struct key_tag *domain_tag); extern struct key *request_key_with_auxdata(struct key_type *type, const char *description, + struct key_tag *domain_tag, const void *callout_info, size_t callout_len, void *aux); -extern struct key *request_key_async(struct key_type *type, - const char *description, - const void *callout_info, - size_t callout_len); +/** + * request_key - Request a key and wait for construction + * @type: Type of key. + * @description: The searchable description of the key. + * @callout_info: The data to pass to the instantiation upcall (or NULL). + * + * As for request_key_tag(), but with the default global domain tag. + */ +static inline struct key *request_key(struct key_type *type, + const char *description, + const char *callout_info) +{ + return request_key_tag(type, description, NULL, callout_info); +} -extern struct key *request_key_async_with_auxdata(struct key_type *type, - const char *description, - const void *callout_info, - size_t callout_len, - void *aux); +#ifdef CONFIG_NET +/** + * request_key_net - Request a key for a net namespace and wait for construction + * @type: Type of key. + * @description: The searchable description of the key. + * @net: The network namespace that is the key's domain of operation. + * @callout_info: The data to pass to the instantiation upcall (or NULL). + * + * As for request_key() except that it does not add the returned key to a + * keyring if found, new keys are always allocated in the user's quota, the + * callout_info must be a NUL-terminated string and no auxiliary data can be + * passed. Only keys that operate the specified network namespace are used. + * + * Furthermore, it then works as wait_for_key_construction() to wait for the + * completion of keys undergoing construction with a non-interruptible wait. 
+ */ +#define request_key_net(type, description, net, callout_info) \ + request_key_tag(type, description, net->key_domain, callout_info); + +/** + * request_key_net_rcu - Request a key for a net namespace under RCU conditions + * @type: Type of key. + * @description: The searchable description of the key. + * @net: The network namespace that is the key's domain of operation. + * + * As for request_key_rcu() except that only keys that operate the specified + * network namespace are used. + */ +#define request_key_net_rcu(type, description, net) \ + request_key_rcu(type, description, net->key_domain); +#endif /* CONFIG_NET */ extern int wait_for_key_construction(struct key *key, bool intr); @@ -305,6 +374,11 @@ extern int key_update(key_ref_t key, extern int key_link(struct key *keyring, struct key *key); +extern int key_move(struct key *key, + struct key *from_keyring, + struct key *to_keyring, + unsigned int flags); + extern int key_unlink(struct key *keyring, struct key *key); @@ -324,7 +398,8 @@ extern int keyring_clear(struct key *keyring); extern key_ref_t keyring_search(key_ref_t keyring, struct key_type *type, - const char *description); + const char *description, + bool recurse); extern int keyring_add_key(struct key *keyring, struct key *key); @@ -343,6 +418,7 @@ extern void key_set_timeout(struct key *, unsigned); extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, key_perm_t perm); +extern void key_free_user_ns(struct user_namespace *); /* * The permissions required on a key that we're looking up. @@ -397,8 +473,8 @@ extern struct ctl_table key_sysctls[]; * the userspace interface */ extern int install_thread_keyring_to_cred(struct cred *cred); -extern void key_fsuid_changed(struct task_struct *tsk); -extern void key_fsgid_changed(struct task_struct *tsk); +extern void key_fsuid_changed(struct cred *new_cred); +extern void key_fsgid_changed(struct cred *new_cred); extern void key_init(void); #else /* CONFIG_KEYS */ @@ -413,9 +489,11 @@ extern void key_init(void); #define make_key_ref(k, p) NULL #define key_ref_to_ptr(k) NULL #define is_key_possessed(k) 0 -#define key_fsuid_changed(t) do { } while(0) -#define key_fsgid_changed(t) do { } while(0) +#define key_fsuid_changed(c) do { } while(0) +#define key_fsgid_changed(c) do { } while(0) #define key_init() do { } while(0) +#define key_free_user_ns(ns) do { } while(0) +#define key_remove_domain(d) do { } while(0) #endif /* CONFIG_KEYS */ #endif /* __KERNEL__ */ diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index fbf144aaa749..b072aeb1fd78 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -326,8 +326,10 @@ extern atomic_t kgdb_active; (raw_smp_processor_id() == atomic_read(&kgdb_active)) extern bool dbg_is_early; extern void __init dbg_late_init(void); +extern void kgdb_panic(const char *msg); #else /* ! CONFIG_KGDB */ #define in_dbg_master() (0) #define dbg_late_init() +static inline void kgdb_panic(const char *msg) {} #endif /* ! 
CONFIG_KGDB */ #endif /* _KGDB_H_ */ diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 082d1d2a5216..bc45ea1efbf7 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -15,6 +15,14 @@ extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags); +#ifdef CONFIG_SHMEM +extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr); +#else +static inline void collapse_pte_mapped_thp(struct mm_struct *mm, + unsigned long addr) +{ +} +#endif #define khugepaged_enabled() \ (transparent_hugepage_flags & \ @@ -73,6 +81,10 @@ static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, { return 0; } +static inline void collapse_pte_mapped_thp(struct mm_struct *mm, + unsigned long addr) +{ +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_KHUGEPAGED_H */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 443d9800ca3f..04bdaf01112c 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -458,4 +458,23 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr) } #endif +/* Returns true if kprobes handled the fault */ +static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs, + unsigned int trap) +{ + if (!kprobes_built_in()) + return false; + if (user_mode(regs)) + return false; + /* + * To be potentially processing a kprobe fault and to be allowed + * to call kprobe_running(), we have to be non-preemptible. + */ + if (preemptible()) + return false; + if (!kprobe_running()) + return false; + return kprobe_fault_handler(regs, trap); +} + #endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d1ad38a3f048..719fc3e15ea4 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -159,7 +159,7 @@ static inline bool is_error_page(struct page *page) extern struct kmem_cache *kvm_vcpu_cache; -extern spinlock_t kvm_lock; +extern struct mutex kvm_lock; extern struct list_head vm_list; struct kvm_io_range { @@ -318,6 +318,7 @@ struct kvm_vcpu { } spin_loop; #endif bool preempted; + bool ready; struct kvm_vcpu_arch arch; struct dentry *debugfs_dentry; }; @@ -860,17 +861,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); -bool kvm_arch_has_vcpu_debugfs(void); -int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); +#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS +void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); +#endif int kvm_arch_hardware_enable(void); void kvm_arch_hardware_disable(void); int kvm_arch_hardware_setup(void); void kvm_arch_hardware_unsetup(void); -void kvm_arch_check_processor_compat(void *rtn); +int kvm_arch_check_processor_compat(void); int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); +bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); #ifndef __KVM_HAVE_ARCH_VM_ALLOC /* @@ -990,6 +993,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); +bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); /* * search_memslots() and __gfn_to_memslot() are here because they are @@ -1086,6 +1090,7 @@ enum 
kvm_stat_kind { struct kvm_stat_data { int offset; + int mode; struct kvm *kvm; }; @@ -1093,6 +1098,7 @@ struct kvm_stats_debugfs_item { const char *name; int offset; enum kvm_stat_kind kind; + int mode; }; extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; diff --git a/include/linux/lcd.h b/include/linux/lcd.h index 851eee8fff25..238fb1dfed98 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h @@ -41,16 +41,6 @@ struct lcd_ops { /* Get the LCD panel power status (0: full on, 1..3: controller power on, flat panel power off, 4: full off), see FB_BLANK_XXX */ int (*get_power)(struct lcd_device *); - /* - * Enable or disable power to the LCD(0: on; 4: off, see FB_BLANK_XXX) - * and this callback would be called proir to fb driver's callback. - * - * P.S. note that if early_set_power is not NULL then early fb notifier - * would be registered. - */ - int (*early_set_power)(struct lcd_device *, int power); - /* revert the effects of the early blank event. */ - int (*r_early_set_power)(struct lcd_device *, int power); /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */ int (*set_power)(struct lcd_device *, int power); /* Get the current contrast setting (0-max_contrast) */ diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h index f52713f0a269..1e824963af17 100644 --- a/include/linux/led-class-flash.h +++ b/include/linux/led-class-flash.h @@ -86,15 +86,20 @@ static inline struct led_classdev_flash *lcdev_to_flcdev( } /** - * led_classdev_flash_register - register a new object of led_classdev class - * with support for flash LEDs - * @parent: the flash LED to register + * led_classdev_flash_register_ext - register a new object of LED class with + * init data and with support for flash LEDs + * @parent: LED flash controller device this flash LED is driven by * @fled_cdev: the led_classdev_flash structure for this device + * @init_data: the LED class flash device initialization data * * Returns: 0 on success or negative error value on failure */ -extern int led_classdev_flash_register(struct device *parent, - struct led_classdev_flash *fled_cdev); +extern int led_classdev_flash_register_ext(struct device *parent, + struct led_classdev_flash *fled_cdev, + struct led_init_data *init_data); + +#define led_classdev_flash_register(parent, fled_cdev) \ + led_classdev_flash_register_ext(parent, fled_cdev, NULL) /** * led_classdev_flash_unregister - unregisters an object of led_classdev class diff --git a/include/linux/leds-ti-lmu-common.h b/include/linux/leds-ti-lmu-common.h new file mode 100644 index 000000000000..5eb111f38803 --- /dev/null +++ b/include/linux/leds-ti-lmu-common.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// TI LMU Common Core +// Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + +#ifndef _TI_LMU_COMMON_H_ +#define _TI_LMU_COMMON_H_ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <uapi/linux/uleds.h> + +#define LMU_11BIT_LSB_MASK (BIT(0) | BIT(1) | BIT(2)) +#define LMU_11BIT_MSB_SHIFT 3 + +#define MAX_BRIGHTNESS_8BIT 255 +#define MAX_BRIGHTNESS_11BIT 2047 + +struct ti_lmu_bank { + struct regmap *regmap; + + int max_brightness; + + u8 lsb_brightness_reg; + u8 msb_brightness_reg; + + u8 runtime_ramp_reg; + u32 ramp_up_usec; + u32 ramp_down_usec; +}; + +int ti_lmu_common_set_brightness(struct ti_lmu_bank 
*lmu_bank, int brightness); + +int ti_lmu_common_set_ramp(struct ti_lmu_bank *lmu_bank); + +int ti_lmu_common_get_ramp_params(struct device *dev, + struct fwnode_handle *child, + struct ti_lmu_bank *lmu_data); + +int ti_lmu_common_get_brt_res(struct device *dev, struct fwnode_handle *child, + struct ti_lmu_bank *lmu_data); + +#endif /* _TI_LMU_COMMON_H_ */ diff --git a/include/linux/leds.h b/include/linux/leds.h index 9b2bf574a17a..efb309dba914 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -8,6 +8,7 @@ #ifndef __LINUX_LEDS_H_INCLUDED #define __LINUX_LEDS_H_INCLUDED +#include <dt-bindings/leds/common.h> #include <linux/device.h> #include <linux/kernfs.h> #include <linux/list.h> @@ -30,6 +31,30 @@ enum led_brightness { LED_FULL = 255, }; +struct led_init_data { + /* device fwnode handle */ + struct fwnode_handle *fwnode; + /* + * default <color:function> tuple, for backward compatibility + * with in-driver hard-coded LED names used as a fallback when + * DT "label" property is absent; it should be set to NULL + * in new LED class drivers. + */ + const char *default_label; + /* + * string to be used for devicename section of LED class device + * either for label based LED name composition path or for fwnode + * based when devname_mandatory is true + */ + const char *devicename; + /* + * indicates if LED name should always comprise devicename section; + * only LEDs exposed by drivers of hot-pluggable devices should + * set it to true + */ + bool devname_mandatory; +}; + struct led_classdev { const char *name; enum led_brightness brightness; @@ -125,16 +150,46 @@ struct led_classdev { struct mutex led_access; }; -extern int of_led_classdev_register(struct device *parent, - struct device_node *np, - struct led_classdev *led_cdev); -#define led_classdev_register(parent, led_cdev) \ - of_led_classdev_register(parent, NULL, led_cdev) -extern int devm_of_led_classdev_register(struct device *parent, - struct device_node *np, - struct led_classdev *led_cdev); -#define devm_led_classdev_register(parent, led_cdev) \ - devm_of_led_classdev_register(parent, NULL, led_cdev) +/** + * led_classdev_register_ext - register a new object of LED class with + * init data + * @parent: LED controller device this LED is driven by + * @led_cdev: the led_classdev structure for this device + * @init_data: the LED class device initialization data + * + * Register a new object of LED class, with name derived from init_data. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_classdev_register_ext(struct device *parent, + struct led_classdev *led_cdev, + struct led_init_data *init_data); + +/** + * led_classdev_register - register a new object of LED class + * @parent: LED controller device this LED is driven by + * @led_cdev: the led_classdev structure for this device + * + * Register a new object of LED class, with name derived from the name property + * of passed led_cdev argument. 
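To make the new registration path concrete, here is a rough probe-time sketch using devm_led_classdev_register_ext() with fwnode-based naming; the platform driver shape, the child-node iteration and the omission of brightness ops are assumptions made for brevity.

#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int example_leds_probe(struct platform_device *pdev)
{
	struct fwnode_handle *child;
	int ret;

	device_for_each_child_node(&pdev->dev, child) {
		struct led_classdev *cdev;
		struct led_init_data init_data = {};

		cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
		if (!cdev) {
			fwnode_handle_put(child);
			return -ENOMEM;
		}
		/* brightness_set() etc. omitted from the sketch */

		init_data.fwnode = child;	/* name comes from firmware properties */
		init_data.default_label = NULL;	/* no legacy fallback name */

		ret = devm_led_classdev_register_ext(&pdev->dev, cdev, &init_data);
		if (ret) {
			fwnode_handle_put(child);
			return ret;
		}
	}
	return 0;
}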
+ * + * Returns: 0 on success or negative error value on failure + */ +static inline int led_classdev_register(struct device *parent, + struct led_classdev *led_cdev) +{ + return led_classdev_register_ext(parent, led_cdev, NULL); +} + +extern int devm_led_classdev_register_ext(struct device *parent, + struct led_classdev *led_cdev, + struct led_init_data *init_data); + +static inline int devm_led_classdev_register(struct device *parent, + struct led_classdev *led_cdev) +{ + return devm_led_classdev_register_ext(parent, led_cdev, NULL); +} extern void led_classdev_unregister(struct led_classdev *led_cdev); extern void devm_led_classdev_unregister(struct device *parent, struct led_classdev *led_cdev); @@ -192,7 +247,7 @@ extern void led_set_brightness(struct led_classdev *led_cdev, /** * led_set_brightness_sync - set LED brightness synchronously * @led_cdev: the LED to set - * @brightness: the brightness to set it to + * @value: the brightness to set it to * * Set an LED's brightness immediately. This function will block * the caller for the time required for accessing device registers, @@ -244,6 +299,21 @@ extern void led_sysfs_disable(struct led_classdev *led_cdev); extern void led_sysfs_enable(struct led_classdev *led_cdev); /** + * led_compose_name - compose LED class device name + * @dev: LED controller device object + * @init_data: the LED class device initialization data + * @led_classdev_name: composed LED class device name + * + * Create LED class device name basing on the provided init_data argument. + * The name can have <devicename:color:function> or <color:function>. + * form, depending on the init_data configuration. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_compose_name(struct device *dev, struct led_init_data *init_data, + char *led_classdev_name); + +/** * led_sysfs_is_disabled - check if LED sysfs interface is disabled * @led_cdev: the LED to query * @@ -420,6 +490,15 @@ struct led_platform_data { struct led_info *leds; }; +struct led_properties { + u32 color; + bool color_present; + const char *function; + u32 func_enum; + bool func_enum_present; + const char *label; +}; + struct gpio_desc; typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state, unsigned long *delay_on, diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 03d5c3aece9d..b6eddf912568 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/uuid.h> #include <linux/spinlock.h> +#include <linux/bio.h> struct badrange_entry { u64 start; @@ -57,6 +58,9 @@ enum { */ ND_REGION_PERSIST_MEMCTRL = 2, + /* Platform provides asynchronous flush mechanism */ + ND_REGION_ASYNC = 3, + /* mark newly adjusted resources as requiring a label update */ DPA_RESOURCE_ADJUSTED = 1 << 0, }; @@ -113,6 +117,7 @@ struct nd_mapping_desc { int position; }; +struct nd_region; struct nd_region_desc { struct resource *res; struct nd_mapping_desc *mapping; @@ -125,6 +130,7 @@ struct nd_region_desc { int target_node; unsigned long flags; struct device_node *of_node; + int (*flush)(struct nd_region *nd_region, struct bio *bio); }; struct device; @@ -154,8 +160,11 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( } -enum nvdimm_security_state { - NVDIMM_SECURITY_ERROR = -1, +/* + * Note that separate bits for locked + unlocked are defined so that + * 'flags == 0' corresponds to an error / not-supported state. 
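A short sketch of what the move from a single state value to a bit mask implies for callers; the helper name, the NVDIMM_USER passphrase slot and having the ops pointer at hand are assumptions for illustration, not taken from this diff.

#include <linux/bitops.h>
#include <linux/libnvdimm.h>

/* Several NVDIMM_SECURITY_* bits may be set at once; flags == 0 still
 * means error / not supported, as described above. */
static bool example_nvdimm_is_locked(struct nvdimm *nvdimm,
				     const struct nvdimm_security_ops *ops)
{
	unsigned long flags = ops->get_flags(nvdimm, NVDIMM_USER);

	if (!flags)
		return false;

	return test_bit(NVDIMM_SECURITY_LOCKED, &flags);
}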
+ */ +enum nvdimm_security_bits { NVDIMM_SECURITY_DISABLED, NVDIMM_SECURITY_UNLOCKED, NVDIMM_SECURITY_LOCKED, @@ -176,7 +185,7 @@ enum nvdimm_passphrase_type { }; struct nvdimm_security_ops { - enum nvdimm_security_state (*state)(struct nvdimm *nvdimm, + unsigned long (*get_flags)(struct nvdimm *nvdimm, enum nvdimm_passphrase_type pass_type); int (*freeze)(struct nvdimm *nvdimm); int (*change_key)(struct nvdimm *nvdimm, @@ -252,10 +261,12 @@ unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr); unsigned int nd_region_acquire_lane(struct nd_region *nd_region); void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane); u64 nd_fletcher64(void *addr, size_t len, bool le); -void nvdimm_flush(struct nd_region *nd_region); +int nvdimm_flush(struct nd_region *nd_region, struct bio *bio); +int generic_nvdimm_flush(struct nd_region *nd_region); int nvdimm_has_flush(struct nd_region *nd_region); int nvdimm_has_cache(struct nd_region *nd_region); int nvdimm_in_overwrite(struct nvdimm *nvdimm); +bool is_nvdimm_sync(struct nd_region *nd_region); static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 4d0d5655c7b2..ee8ec2e68055 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -88,8 +88,7 @@ typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int, struct nvm_chk_meta *); -typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); -typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *); +typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *); typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int); typedef void (nvm_destroy_dma_pool_fn)(void *); typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, @@ -104,7 +103,6 @@ struct nvm_dev_ops { nvm_get_chk_meta_fn *get_chk_meta; nvm_submit_io_fn *submit_io; - nvm_submit_io_sync_fn *submit_io_sync; nvm_create_dma_pool_fn *create_dma_pool; nvm_destroy_dma_pool_fn *destroy_dma_pool; @@ -682,8 +680,8 @@ extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr, int, struct nvm_chk_meta *); extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *, int, int); -extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); -extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); +extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *); +extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *); extern void nvm_end_io(struct nvm_rq *); #else /* CONFIG_NVM */ diff --git a/include/linux/list.h b/include/linux/list.h index e951228db4b2..85c92555e31f 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -106,6 +106,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) WRITE_ONCE(prev->next, next); } +/* + * Delete a list entry and clear the 'prev' pointer. + * + * This is a special-purpose list clearing method used in the networking code + * for lists allocated as per-cpu, where we don't want to incur the extra + * WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this + * needs to check the node 'prev' pointer instead of calling list_empty(). 
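The pattern the comment above describes might look roughly like this; the structure and helper names are invented for the example, and the definition of __list_del_clearprev() itself follows just below.

#include <linux/list.h>

struct example_pcpu_node {
	struct list_head list;
};

/* Unlink and clear ->prev, avoiding the extra WRITE_ONCE() of
 * list_del_init(). */
static void example_detach(struct example_pcpu_node *node)
{
	__list_del_clearprev(&node->list);
}

/* Emptiness is detected via ->prev instead of list_empty(). */
static bool example_is_detached(const struct example_pcpu_node *node)
{
	return node->list.prev == NULL;
}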
+ */ +static inline void __list_del_clearprev(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->prev = NULL; +} + /** * list_del - deletes entry from list. * @entry: the element to delete from the list. diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index eeba421cc671..273400814020 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -35,7 +35,6 @@ * @stack_node: list node for klp_ops func_stack list * @old_size: size of the old function * @new_size: size of the new function - * @kobj_added: @kobj has been added and needs freeing * @nop: temporary patch to use the original code again; dyn. allocated * @patched: the func has been added to the klp_ops list * @transition: the func is currently being applied or reverted @@ -113,7 +112,6 @@ struct klp_callbacks { * @node: list node for klp_patch obj_list * @mod: kernel module associated with the patched object * (NULL for vmlinux) - * @kobj_added: @kobj has been added and needs freeing * @dynamic: temporary object for nop functions; dynamically allocated * @patched: the object's funcs have been added to the klp_ops list */ @@ -140,7 +138,6 @@ struct klp_object { * @list: list node for global list of actively used patches * @kobj: kobject for sysfs resources * @obj_list: dynamic list of the object entries - * @kobj_added: @kobj has been added and needs freeing * @enabled: the patch is enabled (but operation may be incomplete) * @forced: was involved in a forced transition * @free_work: patch cleanup from workqueue-context diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index c9b422dde542..d294dde9e546 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -282,6 +282,7 @@ void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, nlm_host_match_fn_t match); void nlmsvc_grant_reply(struct nlm_cookie *, __be32); void nlmsvc_release_call(struct nlm_rqst *); +void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t); /* * File handling for the server personality @@ -289,6 +290,7 @@ void nlmsvc_release_call(struct nlm_rqst *); __be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, struct nfs_fh *); void nlm_release_file(struct nlm_file *); +void nlmsvc_release_lockowner(struct nlm_lock *); void nlmsvc_mark_resources(struct net *); void nlmsvc_free_host_resources(struct nlm_host *); void nlmsvc_invalidate_all(void); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 6e2377e6c1d6..b8a835fd611b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -5,7 +5,7 @@ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * - * see Documentation/locking/lockdep-design.txt for more details. + * see Documentation/locking/lockdep-design.rst for more details. 
*/ #ifndef __LINUX_LOCKDEP_H #define __LINUX_LOCKDEP_H @@ -66,10 +66,7 @@ struct lock_class_key { extern struct lock_class_key __lockdep_no_validate__; -struct lock_trace { - unsigned int nr_entries; - unsigned int offset; -}; +struct lock_trace; #define LOCKSTAT_POINTS 4 @@ -97,7 +94,7 @@ struct lock_class { */ struct list_head locks_after, locks_before; - struct lockdep_subclass_key *key; + const struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; @@ -105,7 +102,7 @@ struct lock_class { * IRQ/softirq usage tracking bits: */ unsigned long usage_mask; - struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES]; + const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES]; /* * Generation counter, when doing certain classes of graph walking, @@ -193,7 +190,7 @@ struct lock_list { struct list_head entry; struct lock_class *class; struct lock_class *links_to; - struct lock_trace trace; + const struct lock_trace *trace; int distance; /* @@ -203,11 +200,17 @@ struct lock_list { struct lock_list *parent; }; -/* - * We record lock dependency chains, so that we can cache them: +/** + * struct lock_chain - lock dependency chain record + * + * @irq_context: the same as irq_context in held_lock below + * @depth: the number of held locks in this chain + * @base: the index in chain_hlocks for this chain + * @entry: the collided lock chains in lock_chain hash list + * @chain_key: the hash key of this lock_chain */ struct lock_chain { - /* see BUILD_BUG_ON()s in lookup_chain_cache() */ + /* see BUILD_BUG_ON()s in add_chain_cache() */ unsigned int irq_context : 2, depth : 6, base : 24; @@ -217,12 +220,8 @@ struct lock_chain { }; #define MAX_LOCKDEP_KEYS_BITS 13 -/* - * Subtract one because we offset hlock->class_idx by 1 in order - * to make 0 mean no class. This avoids overflowing the class_idx - * bitfield and hitting the BUG in hlock_class(). - */ -#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) +#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) +#define INITIAL_CHAIN_KEY -1 struct held_lock { /* @@ -247,6 +246,11 @@ struct held_lock { u64 waittime_stamp; u64 holdtime_stamp; #endif + /* + * class_idx is zero-indexed; it points to the element in + * lock_classes this held lock instance belongs to. class_idx is in + * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive. 
+ */ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt @@ -281,6 +285,8 @@ extern void lockdep_free_key_range(void *start, unsigned long size); extern asmlinkage void lockdep_sys_exit(void); extern void lockdep_set_selftest_task(struct task_struct *task); +extern void lockdep_init_task(struct task_struct *task); + extern void lockdep_off(void); extern void lockdep_on(void); @@ -385,7 +391,7 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); WARN_ON(debug_locks && !lockdep_is_held(l)); \ } while (0) -#define lockdep_assert_held_exclusive(l) do { \ +#define lockdep_assert_held_write(l) do { \ WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ } while (0) @@ -405,6 +411,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #else /* !CONFIG_LOCKDEP */ +static inline void lockdep_init_task(struct task_struct *task) +{ +} + static inline void lockdep_off(void) { } @@ -466,7 +476,7 @@ struct lockdep_map { }; #define lockdep_is_held_type(l, r) (1) #define lockdep_assert_held(l) do { (void)(l); } while (0) -#define lockdep_assert_held_exclusive(l) do { (void)(l); } while (0) +#define lockdep_assert_held_write(l) do { (void)(l); } while (0) #define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) @@ -497,7 +507,6 @@ enum xhlock_context_t { { .name = (_name), .key = (void *)(_key), } static inline void lockdep_invariant_state(bool force) {} -static inline void lockdep_init_task(struct task_struct *task) {} static inline void lockdep_free_task(struct task_struct *task) {} #ifdef CONFIG_LOCK_STAT @@ -632,11 +641,18 @@ do { \ "IRQs not disabled as expected\n"); \ } while (0) +#define lockdep_assert_in_irq() do { \ + WARN_ONCE(debug_locks && !current->lockdep_recursion && \ + !current->hardirq_context, \ + "Not in hardirq as expected\n"); \ + } while (0) + #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) # define lockdep_assert_irqs_enabled() do { } while (0) # define lockdep_assert_irqs_disabled() do { } while (0) +# define lockdep_assert_in_irq() do { } while (0) #endif #ifdef CONFIG_LOCKDEP diff --git a/include/linux/log2.h b/include/linux/log2.h index 1aec01365ed4..83a4a3ca3e8a 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -220,4 +220,38 @@ int __order_base_2(unsigned long n) ilog2((n) - 1) + 1) : \ __order_base_2(n) \ ) + +static inline __attribute__((const)) +int __bits_per(unsigned long n) +{ + if (n < 2) + return 1; + if (is_power_of_2(n)) + return order_base_2(n) + 1; + return order_base_2(n); +} + +/** + * bits_per - calculate the number of bits required for the argument + * @n: parameter + * + * This is constant-capable and can be used for compile time + * initializations, e.g bitfields. + * + * The first few values calculated by this routine: + * bf(0) = 1 + * bf(1) = 1 + * bf(2) = 2 + * bf(3) = 2 + * bf(4) = 3 + * ... and so on. + */ +#define bits_per(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0 || (n) == 1) \ + ? 
1 : ilog2(n) + 1 \ + ) : \ + __bits_per(n) \ +) #endif /* _LINUX_LOG2_H */ diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h index cbd9d8495690..88e1e6304a71 100644 --- a/include/linux/logic_pio.h +++ b/include/linux/logic_pio.h @@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode); unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, resource_size_t hw_addr, resource_size_t size); int logic_pio_register_range(struct logic_pio_hwaddr *newrange); +void logic_pio_unregister_range(struct logic_pio_hwaddr *range); resource_size_t logic_pio_to_hwaddr(unsigned long pio); unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr); diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 47f58cfb6a19..a3763247547c 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -77,7 +77,7 @@ * state. This is called immediately after commit_creds(). * * Security hooks for mount using fs_context. - * [See also Documentation/filesystems/mounting.txt] + * [See also Documentation/filesystems/mount_api.txt] * * @fs_context_dup: * Allocate and attach a security structure to sc->security. This pointer @@ -339,6 +339,9 @@ * Check for permission to change root directory. * @path contains the path structure. * Return 0 if permission is granted. + * @path_notify: + * Check permissions before setting a watch on events as defined by @mask, + * on an object at @path, whose type is defined by @obj_type. * @inode_readlink: * Check the permission to read the symbolic link. * @dentry contains the dentry structure for the file link. @@ -1446,6 +1449,11 @@ * @bpf_prog_free_security: * Clean up the security information stored inside bpf prog. * + * @locked_down + * Determine whether a kernel feature that potentially enables arbitrary + * code execution in kernel space should be permitted. 
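For reference, a small sketch of the kind of compile-time use the new bits_per() helper in log2.h above enables, namely sizing bitfields from a maximum value; the structure and limit are made up for the example.

#include <linux/log2.h>

#define EXAMPLE_MAX_ID	100	/* hypothetical upper bound, needs 7 bits */

struct example_desc {
	/* bits_per() folds to a constant for constant arguments */
	unsigned int id    : bits_per(EXAMPLE_MAX_ID);
	unsigned int flags : 32 - bits_per(EXAMPLE_MAX_ID);
};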
+ * + * @what: kernel feature being accessed */ union security_list_options { int (*binder_set_context_mgr)(struct task_struct *mgr); @@ -1535,7 +1543,9 @@ union security_list_options { int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid); int (*path_chroot)(const struct path *path); #endif - + /* Needed for inode based security check */ + int (*path_notify)(const struct path *path, u64 mask, + unsigned int obj_type); int (*inode_alloc_security)(struct inode *inode); void (*inode_free_security)(struct inode *inode); int (*inode_init_security)(struct inode *inode, struct inode *dir, @@ -1807,6 +1817,7 @@ union security_list_options { int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux); void (*bpf_prog_free_security)(struct bpf_prog_aux *aux); #endif /* CONFIG_BPF_SYSCALL */ + int (*locked_down)(enum lockdown_reason what); }; struct security_hook_heads { @@ -1860,6 +1871,8 @@ struct security_hook_heads { struct hlist_head path_chown; struct hlist_head path_chroot; #endif + /* Needed for inode based modules as well */ + struct hlist_head path_notify; struct hlist_head inode_alloc_security; struct hlist_head inode_free_security; struct hlist_head inode_init_security; @@ -2046,6 +2059,7 @@ struct security_hook_heads { struct hlist_head bpf_prog_alloc_security; struct hlist_head bpf_prog_free_security; #endif /* CONFIG_BPF_SYSCALL */ + struct hlist_head locked_down; } __randomize_layout; /* @@ -2104,12 +2118,18 @@ struct lsm_info { }; extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; +extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[]; #define DEFINE_LSM(lsm) \ static struct lsm_info __lsm_##lsm \ __used __section(.lsm_info.init) \ __aligned(sizeof(unsigned long)) +#define DEFINE_EARLY_LSM(lsm) \ + static struct lsm_info __early_lsm_##lsm \ + __used __section(.early_lsm_info.init) \ + __aligned(sizeof(unsigned long)) + #ifdef CONFIG_SECURITY_SELINUX_DISABLE /* * Assuring the safety of deleting a security module is up to diff --git a/include/linux/lz4.h b/include/linux/lz4.h index 394e3d9213b8..b16e15b9587a 100644 --- a/include/linux/lz4.h +++ b/include/linux/lz4.h @@ -278,7 +278,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize); * @compressedSize: is the precise full size of the compressed block * @maxDecompressedSize: is the size of 'dest' buffer * - * Decompresses data fom 'source' into 'dest'. + * Decompresses data from 'source' into 'dest'. * If the source stream is detected malformed, the function will * stop decoding and return a negative result. * This function is protected against buffer overflow exploits, @@ -522,7 +522,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, const char *dictionary, int dictSize); /** - * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode + * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure * @source: source address of the compressed data * @dest: output buffer address of the uncompressed data @@ -530,7 +530,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, * @compressedSize: is the precise full size of the compressed block * @maxDecompressedSize: is the size of 'dest' buffer * - * These decoding function allows decompression of multiple blocks + * This decoding function allows decompression of multiple blocks * in "streaming" mode. 
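To illustrate the streaming decode interface documented here, a hedged sketch of decoding two consecutive blocks into one buffer; the buffer handling and error mapping are assumptions, and the already decoded data must stay in place between calls as the comment below notes.

#include <linux/errno.h>
#include <linux/lz4.h>

static int example_decode_two_blocks(const char *blk1, int blk1_len,
				     const char *blk2, int blk2_len,
				     char *out, int out_max)
{
	LZ4_streamDecode_t stream;
	int n1, n2;

	/* LZ4_setStreamDecode() reports success with a non-zero return */
	if (!LZ4_setStreamDecode(&stream, NULL, 0))
		return -EINVAL;

	n1 = LZ4_decompress_safe_continue(&stream, blk1, out,
					  blk1_len, out_max);
	if (n1 < 0)
		return -EINVAL;

	n2 = LZ4_decompress_safe_continue(&stream, blk2, out + n1,
					  blk2_len, out_max - n1);
	if (n2 < 0)
		return -EINVAL;

	return n1 + n2;
}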
* Previously decoded blocks *must* remain available at the memory position * where they were decoded (up to 64 KB) @@ -569,7 +569,7 @@ int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, * which must be already allocated with 'originalSize' bytes * @originalSize: is the original and therefore uncompressed size * - * These decoding function allows decompression of multiple blocks + * This decoding function allows decompression of multiple blocks * in "streaming" mode. * Previously decoded blocks *must* remain available at the memory position * where they were decoded (up to 64 KB) @@ -610,10 +610,10 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, * @dictStart: pointer to the start of the dictionary in memory * @dictSize: size of dictionary * - * These decoding function works the same as + * This decoding function works the same as * a combination of LZ4_setStreamDecode() followed by * LZ4_decompress_safe_continue() - * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure. + * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. * * Return: number of bytes decompressed into destination buffer * (necessarily <= maxDecompressedSize) @@ -633,10 +633,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest, * @dictStart: pointer to the start of the dictionary in memory * @dictSize: size of dictionary * - * These decoding function works the same as + * This decoding function works the same as * a combination of LZ4_setStreamDecode() followed by - * LZ4_decompress_safe_continue() - * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure. + * LZ4_decompress_fast_continue() + * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure. * * Return: number of bytes decompressed into destination buffer * (necessarily <= maxDecompressedSize) diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index ccb73422c2fa..e6f54ef6698b 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -20,6 +20,9 @@ #define CMDQ_WFE_WAIT BIT(15) #define CMDQ_WFE_WAIT_VALUE 0x1 +/** cmdq event maximum */ +#define CMDQ_MAX_EVENT 0x3ff + /* * CMDQ_CODE_MASK: * set write mask diff --git a/include/linux/mdio.h b/include/linux/mdio.h index e8242ad88c81..a7604248777b 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -68,6 +68,17 @@ struct mdio_driver { #define to_mdio_driver(d) \ container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv) +/* device driver data */ +static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data) +{ + dev_set_drvdata(&mdio->dev, data); +} + +static inline void *mdiodev_get_drvdata(struct mdio_device *mdio) +{ + return dev_get_drvdata(&mdio->dev); +} + void mdio_device_free(struct mdio_device *mdiodev); struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); int mdio_device_register(struct mdio_device *mdiodev); diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h index 470bd53a89df..5c4a18a91f89 100644 --- a/include/linux/mem_encrypt.h +++ b/include/linux/mem_encrypt.h @@ -18,23 +18,10 @@ #else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */ -#define sme_me_mask 0ULL - -static inline bool sme_active(void) { return false; } -static inline bool sev_active(void) { return false; } +static inline bool mem_encrypt_active(void) { return false; } #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ -static inline bool mem_encrypt_active(void) -{ - return sme_me_mask; 
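The mdiodev_set_drvdata()/mdiodev_get_drvdata() helpers added to mdio.h above mirror the usual driver-data pattern; a minimal sketch, with the private structure and function names invented for the example.

#include <linux/device.h>
#include <linux/mdio.h>

struct example_mdio_priv {
	int state;
};

static int example_mdio_probe(struct mdio_device *mdiodev)
{
	struct example_mdio_priv *priv;

	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	mdiodev_set_drvdata(mdiodev, priv);
	return 0;
}

static void example_mdio_remove(struct mdio_device *mdiodev)
{
	struct example_mdio_priv *priv = mdiodev_get_drvdata(mdiodev);

	/* tear-down would use priv here */
	(void)priv;
}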
-} - -static inline u64 sme_get_me_mask(void) -{ - return sme_me_mask; -} - #ifdef CONFIG_AMD_MEM_ENCRYPT /* * The __sme_set() and __sme_clr() macros are useful for adding or removing diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1dcb763bb610..ae703ea3ef48 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -128,9 +128,8 @@ struct mem_cgroup_per_node { struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; -#ifdef CONFIG_MEMCG_KMEM struct memcg_shrinker_map __rcu *shrinker_map; -#endif + struct rb_node tree_node; /* RB tree node */ unsigned long usage_in_excess;/* Set to the value by which */ /* the soft limit is exceeded*/ @@ -184,6 +183,23 @@ struct memcg_padding { #endif /* + * Remember four most recent foreign writebacks with dirty pages in this + * cgroup. Inode sharing is expected to be uncommon and, even if we miss + * one in a given round, we're likely to catch it later if it keeps + * foreign-dirtying, so a fairly low count should be enough. + * + * See mem_cgroup_track_foreign_dirty_slowpath() for details. + */ +#define MEMCG_CGWB_FRN_CNT 4 + +struct memcg_cgwb_frn { + u64 bdi_id; /* bdi->id of the foreign inode */ + int memcg_id; /* memcg->css.id of foreign inode */ + u64 at; /* jiffies_64 at the time of dirtying */ + struct wb_completion done; /* tracks in-flight foreign writebacks */ +}; + +/* * The memory controller data structure. The memory controller controls both * page cache and RSS per cgroup. We would eventually like to provide * statistics based on the statistics developed by Rik Van Riel for clock-pro, @@ -233,8 +249,9 @@ struct mem_cgroup { /* OOM-Killer disable */ int oom_kill_disable; - /* memory.events */ + /* memory.events and memory.events.local */ struct cgroup_file events_file; + struct cgroup_file events_local_file; /* handle for "memory.swap.events" */ struct cgroup_file swap_events_file; @@ -281,6 +298,7 @@ struct mem_cgroup { /* memory.events */ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; + atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS]; unsigned long socket_pressure; @@ -305,12 +323,17 @@ struct mem_cgroup { #ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; struct wb_domain cgwb_domain; + struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT]; #endif /* List of events which userspace want to receive */ struct list_head event_list; spinlock_t event_list_lock; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + struct deferred_split deferred_split_queue; +#endif + struct mem_cgroup_per_node *nodeinfo[0]; /* WARNING: nodeinfo must be the last member here */ }; @@ -333,6 +356,19 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } +static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, + bool in_low_reclaim) +{ + if (mem_cgroup_disabled()) + return 0; + + if (in_low_reclaim) + return READ_ONCE(memcg->memory.emin); + + return max(READ_ONCE(memcg->memory.emin), + READ_ONCE(memcg->memory.elow)); +} + enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, struct mem_cgroup *memcg); @@ -392,7 +428,6 @@ out: struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *); -bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); @@ -515,6 +550,8 @@ void mem_cgroup_handle_over_high(void); unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); +unsigned 
long mem_cgroup_size(struct mem_cgroup *memcg); + void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p); @@ -667,6 +704,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); +void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) @@ -747,6 +785,9 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, static inline void memcg_memory_event(struct mem_cgroup *memcg, enum memcg_memory_event event) { + atomic_long_inc(&memcg->memory_events_local[event]); + cgroup_file_notify(&memcg->events_local_file); + do { atomic_long_inc(&memcg->memory_events[event]); cgroup_file_notify(&memcg->events_file); @@ -803,6 +844,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, { } +static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, + bool in_low_reclaim) +{ + return 0; +} + static inline enum mem_cgroup_protection mem_cgroup_protected( struct mem_cgroup *root, struct mem_cgroup *memcg) { @@ -870,12 +917,6 @@ static inline bool mm_match_cgroup(struct mm_struct *mm, return true; } -static inline bool task_in_mem_cgroup(struct task_struct *task, - const struct mem_cgroup *memcg) -{ - return true; -} - static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) { return NULL; @@ -948,6 +989,11 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) return 0; } +static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg) +{ + return 0; +} + static inline void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) { @@ -1074,6 +1120,14 @@ static inline void mod_lruvec_page_state(struct page *page, mod_node_page_state(page_pgdat(page), idx, val); } +static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, + int val) +{ + struct page *page = virt_to_head_page(p); + + __mod_node_page_state(page_pgdat(page), idx, val); +} + static inline unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, @@ -1161,6 +1215,16 @@ static inline void __dec_lruvec_page_state(struct page *page, __mod_lruvec_page_state(page, idx, -1); } +static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx) +{ + __mod_lruvec_slab_state(p, idx, 1); +} + +static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx) +{ + __mod_lruvec_slab_state(p, idx, -1); +} + /* idx can be of type enum memcg_stat_item or node_stat_item */ static inline void inc_memcg_state(struct mem_cgroup *memcg, int idx) @@ -1220,6 +1284,21 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, unsigned long *pheadroom, unsigned long *pdirty, unsigned long *pwriteback); +void mem_cgroup_track_foreign_dirty_slowpath(struct page *page, + struct bdi_writeback *wb); + +static inline void mem_cgroup_track_foreign_dirty(struct page *page, + struct bdi_writeback *wb) +{ + if (mem_cgroup_disabled()) + return; + + if (unlikely(&page->mem_cgroup->css != wb->memcg_css)) + mem_cgroup_track_foreign_dirty_slowpath(page, wb); +} + +void mem_cgroup_flush_foreign(struct bdi_writeback *wb); + #else /* CONFIG_CGROUP_WRITEBACK */ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) @@ -1235,6 +1314,15 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, { } +static inline void 
mem_cgroup_track_foreign_dirty(struct page *page, + struct bdi_writeback *wb) +{ +} + +static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb) +{ +} + #endif /* CONFIG_CGROUP_WRITEBACK */ struct sock; @@ -1255,6 +1343,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) } while ((memcg = parent_mem_cgroup(memcg))); return false; } + +extern int memcg_expand_shrinker_maps(int new_id); + +extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, + int nid, int shrinker_id); #else #define mem_cgroup_sockets_enabled 0 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }; @@ -1263,6 +1356,11 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { return false; } + +static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, + int nid, int shrinker_id) +{ +} #endif struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); @@ -1273,6 +1371,8 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge(struct page *page, int order); int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, struct mem_cgroup *memcg); +void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg, + unsigned int nr_pages); extern struct static_key_false memcg_kmem_enabled_key; extern struct workqueue_struct *memcg_kmem_cache_wq; @@ -1314,6 +1414,14 @@ static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, return __memcg_kmem_charge_memcg(page, gfp, order, memcg); return 0; } + +static inline void memcg_kmem_uncharge_memcg(struct page *page, int order, + struct mem_cgroup *memcg) +{ + if (memcg_kmem_enabled()) + __memcg_kmem_uncharge_memcg(memcg, 1 << order); +} + /* * helper for accessing a memcg's index. It will be used as an index in the * child cache array in kmem_cache, and also to derive its name. This function @@ -1324,10 +1432,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? 
memcg->kmemcg_id : -1; } -extern int memcg_expand_shrinker_maps(int new_id); - -extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id); #else static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) @@ -1369,8 +1473,6 @@ static inline void memcg_put_cache_ids(void) { } -static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id) { } #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory.h b/include/linux/memory.h index e1dc1bb2b787..0ebb105eb261 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -25,7 +25,6 @@ struct memory_block { unsigned long start_section_nr; - unsigned long end_section_nr; unsigned long state; /* serialized by the dev->lock */ int section_count; /* serialized by mem_sysfs_mutex */ int online_type; /* for passing data to online routine */ @@ -80,9 +79,9 @@ struct mem_section; #define IPC_CALLBACK_PRI 10 #ifndef CONFIG_MEMORY_HOTPLUG_SPARSE -static inline int memory_dev_init(void) +static inline void memory_dev_init(void) { - return 0; + return; } static inline int register_memory_notifier(struct notifier_block *nb) { @@ -111,16 +110,15 @@ extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); extern int register_memory_isolate_notifier(struct notifier_block *nb); extern void unregister_memory_isolate_notifier(struct notifier_block *nb); -int hotplug_memory_register(int nid, struct mem_section *section); -#ifdef CONFIG_MEMORY_HOTREMOVE -extern void unregister_memory_section(struct mem_section *); -#endif -extern int memory_dev_init(void); +int create_memory_block_devices(unsigned long start, unsigned long size); +void remove_memory_block_devices(unsigned long start, unsigned long size); +extern void memory_dev_init(void); extern int memory_notify(unsigned long val, void *v); extern int memory_isolate_notify(unsigned long val, void *v); -extern struct memory_block *find_memory_block_hinted(struct mem_section *, - struct memory_block *); extern struct memory_block *find_memory_block(struct mem_section *); +typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); +extern int walk_memory_blocks(unsigned long start, unsigned long size, + void *arg, walk_memory_blocks_func_t func); #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ae892eef8b82..f46ea71b4ffd 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -123,20 +123,10 @@ static inline bool movable_node_is_enabled(void) return movable_node_enabled; } -#ifdef CONFIG_MEMORY_HOTREMOVE extern void arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap); extern void __remove_pages(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); -#endif /* CONFIG_MEMORY_HOTREMOVE */ - -/* - * Do we want sysfs memblock files created. This will allow userspace to online - * and offline memory explicitly. Lack of this bit means that the caller has to - * call move_pfn_range_to_zone to finish the initialization. 
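A brief sketch of the new walk_memory_blocks() iterator declared in memory.h above; the callback policy (counting online blocks) and the assumption that the caller already serializes against hotplug are illustrative only.

#include <linux/memory.h>

static int example_count_online(struct memory_block *mem, void *arg)
{
	unsigned int *count = arg;

	if (mem->state == MEM_ONLINE)
		(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned int example_online_blocks(u64 start, u64 size)
{
	unsigned int count = 0;

	walk_memory_blocks(start, size, &count, example_count_online);
	return count;
}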
- */ - -#define MHP_MEMBLOCK_API (1<<0) /* reasonably generic interface to expand the physical pages */ extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, @@ -324,7 +314,7 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {} extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); -extern void remove_memory(int nid, u64 start, u64 size); +extern int remove_memory(int nid, u64 start, u64 size); extern void __remove_memory(int nid, u64 start, u64 size); #else @@ -341,22 +331,25 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) return -EINVAL; } -static inline void remove_memory(int nid, u64 start, u64 size) {} +static inline int remove_memory(int nid, u64 start, u64 size) +{ + return -EBUSY; +} + static inline void __remove_memory(int nid, u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ extern void __ref free_area_init_core_hotplug(int nid); -extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, - void *arg, int (*func)(struct memory_block *, void *)); extern int __add_memory(int nid, u64 start, u64 size); extern int add_memory(int nid, u64 start, u64 size); extern int add_memory_resource(int nid, struct resource *resource); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); extern bool is_memblock_offlined(struct memory_block *mem); -extern int sparse_add_one_section(int nid, unsigned long start_pfn, - struct vmem_altmap *altmap); -extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, +extern int sparse_add_section(int nid, unsigned long pfn, + unsigned long nr_pages, struct vmem_altmap *altmap); +extern void sparse_remove_section(struct mem_section *ms, + unsigned long pfn, unsigned long nr_pages, unsigned long map_offset, struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 1732dea030b2..6fefb09af7c3 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -17,6 +17,7 @@ struct device; */ struct vmem_altmap { const unsigned long base_pfn; + const unsigned long end_pfn; const unsigned long reserve; unsigned long free; unsigned long align; @@ -37,13 +38,6 @@ struct vmem_altmap { * A more complete discussion of unaddressable memory may be found in * include/linux/hmm.h and Documentation/vm/hmm.rst. * - * MEMORY_DEVICE_PUBLIC: - * Device memory that is cache coherent from device and CPU point of view. This - * is use on platform that have an advance system bus (like CAPI or CCIX). A - * driver can hotplug the device memory using ZONE_DEVICE and with that memory - * type. Any page of a process can be migrated to such memory. However no one - * should be allow to pin such memory so that it can always be evicted. - * * MEMORY_DEVICE_FS_DAX: * Host memory that has similar access semantics as System RAM i.e. DMA * coherent and supports page pinning. In support of coordinating page @@ -52,55 +46,85 @@ struct vmem_altmap { * wakeup is used to coordinate physical address space management (ex: * fs truncate/hole punch) vs pinned pages (ex: device dma). * + * MEMORY_DEVICE_DEVDAX: + * Host memory that has similar access semantics as System RAM i.e. DMA + * coherent and supports page pinning. 
In contrast to + * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax + * character device. + * * MEMORY_DEVICE_PCI_P2PDMA: * Device memory residing in a PCI BAR intended for use with Peer-to-Peer * transactions. */ enum memory_type { + /* 0 is reserved to catch uninitialized type fields */ MEMORY_DEVICE_PRIVATE = 1, - MEMORY_DEVICE_PUBLIC, MEMORY_DEVICE_FS_DAX, + MEMORY_DEVICE_DEVDAX, MEMORY_DEVICE_PCI_P2PDMA, }; -/* - * Additional notes about MEMORY_DEVICE_PRIVATE may be found in - * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief - * explanation in include/linux/memory_hotplug.h. - * - * The page_free() callback is called once the page refcount reaches 1 - * (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug. - * This allows the device driver to implement its own memory management.) - */ -typedef void (*dev_page_free_t)(struct page *page, void *data); +struct dev_pagemap_ops { + /* + * Called once the page refcount reaches 1. (ZONE_DEVICE pages never + * reach 0 refcount unless there is a refcount bug. This allows the + * device driver to implement its own memory management.) + */ + void (*page_free)(struct page *page); + + /* + * Transition the refcount in struct dev_pagemap to the dead state. + */ + void (*kill)(struct dev_pagemap *pgmap); + + /* + * Wait for refcount in struct dev_pagemap to be idle and reap it. + */ + void (*cleanup)(struct dev_pagemap *pgmap); + + /* + * Used for private (un-addressable) device memory only. Must migrate + * the page back to a CPU accessible page. + */ + vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf); +}; + +#define PGMAP_ALTMAP_VALID (1 << 0) /** * struct dev_pagemap - metadata for ZONE_DEVICE mappings - * @page_free: free page callback when page refcount reaches 1 * @altmap: pre-allocated/reserved memory for vmemmap allocations * @res: physical address range covered by @ref * @ref: reference count that pins the devm_memremap_pages() mapping - * @kill: callback to transition @ref to the dead state - * @cleanup: callback to wait for @ref to be idle and reap it + * @internal_ref: internal reference if @ref is not provided by the caller + * @done: completion for @internal_ref * @dev: host device of the mapping for debug * @data: private data pointer for page_free() * @type: memory type: see MEMORY_* in memory_hotplug.h + * @flags: PGMAP_* flags to specify defailed behavior + * @ops: method table */ struct dev_pagemap { - dev_page_free_t page_free; struct vmem_altmap altmap; - bool altmap_valid; struct resource res; struct percpu_ref *ref; - void (*kill)(struct percpu_ref *ref); - void (*cleanup)(struct percpu_ref *ref); - struct device *dev; - void *data; + struct percpu_ref internal_ref; + struct completion done; enum memory_type type; - u64 pci_p2pdma_bus_offset; + unsigned int flags; + const struct dev_pagemap_ops *ops; }; +static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap) +{ + if (pgmap->flags & PGMAP_ALTMAP_VALID) + return &pgmap->altmap; + return NULL; +} + #ifdef CONFIG_ZONE_DEVICE +void *memremap_pages(struct dev_pagemap *pgmap, int nid); +void memunmap_pages(struct dev_pagemap *pgmap); void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); struct dev_pagemap *get_dev_pagemap(unsigned long pfn, diff --git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h index af7267c480ee..2445842d482d 100644 --- a/include/linux/mfd/aat2870.h +++ b/include/linux/mfd/aat2870.h 
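Pulling the reworked memremap.h pieces above together, a hedged sketch of a minimal device-dax style registration: no explicit percpu ref is supplied, so the core falls back to the new @internal_ref, and no dev_pagemap_ops callbacks are needed for this memory type (private device memory would instead supply an ops table with page_free() and migrate_to_ram()). The resource handling and function name are assumptions.

#include <linux/device.h>
#include <linux/memremap.h>

static void *example_map_devdax(struct device *dev, struct resource *res)
{
	struct dev_pagemap *pgmap;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->res = *res;			/* physical range to map */
	pgmap->type = MEMORY_DEVICE_DEVDAX;
	/* pgmap->ref and pgmap->ops left NULL: internal_ref/done are used */

	return devm_memremap_pages(dev, pgmap);
}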
@@ -136,7 +136,6 @@ struct aat2870_data { /* for debugfs */ struct dentry *dentry_root; - struct dentry *dentry_reg; }; struct aat2870_subdev_info { diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 5ddca44be06d..61c2875c2a40 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -8,181 +8,11 @@ #ifndef __LINUX_MFD_CROS_EC_H #define __LINUX_MFD_CROS_EC_H -#include <linux/cdev.h> #include <linux/device.h> -#include <linux/notifier.h> -#include <linux/mfd/cros_ec_commands.h> -#include <linux/mutex.h> - -#define CROS_EC_DEV_NAME "cros_ec" -#define CROS_EC_DEV_FP_NAME "cros_fp" -#define CROS_EC_DEV_PD_NAME "cros_pd" -#define CROS_EC_DEV_TP_NAME "cros_tp" -#define CROS_EC_DEV_ISH_NAME "cros_ish" - -/* - * The EC is unresponsive for a time after a reboot command. Add a - * simple delay to make sure that the bus stays locked. - */ -#define EC_REBOOT_DELAY_MS 50 - -/* - * Max bus-specific overhead incurred by request/responses. - * I2C requires 1 additional byte for requests. - * I2C requires 2 additional bytes for responses. - * SPI requires up to 32 additional bytes for responses. - */ -#define EC_PROTO_VERSION_UNKNOWN 0 -#define EC_MAX_REQUEST_OVERHEAD 1 -#define EC_MAX_RESPONSE_OVERHEAD 32 - -/* - * Command interface between EC and AP, for LPC, I2C and SPI interfaces. - */ -enum { - EC_MSG_TX_HEADER_BYTES = 3, - EC_MSG_TX_TRAILER_BYTES = 1, - EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES + - EC_MSG_TX_TRAILER_BYTES, - EC_MSG_RX_PROTO_BYTES = 3, - - /* Max length of messages for proto 2*/ - EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + - EC_MSG_TX_PROTO_BYTES, - - EC_MAX_MSG_BYTES = 64 * 1024, -}; - -/** - * struct cros_ec_command - Information about a ChromeOS EC command. - * @version: Command version number (often 0). - * @command: Command to send (EC_CMD_...). - * @outsize: Outgoing length in bytes. - * @insize: Max number of bytes to accept from the EC. - * @result: EC's response to the command (separate from communication failure). - * @data: Where to put the incoming data from EC and outgoing data to EC. - */ -struct cros_ec_command { - uint32_t version; - uint32_t command; - uint32_t outsize; - uint32_t insize; - uint32_t result; - uint8_t data[0]; -}; - -/** - * struct cros_ec_device - Information about a ChromeOS EC device. - * @phys_name: Name of physical comms layer (e.g. 'i2c-4'). - * @dev: Device pointer for physical comms device - * @was_wake_device: True if this device was set to wake the system from - * sleep at the last suspend. - * @cros_class: The class structure for this device. - * @cmd_readmem: Direct read of the EC memory-mapped region, if supported. - * @offset: Is within EC_LPC_ADDR_MEMMAP region. - * @bytes: Number of bytes to read. zero means "read a string" (including - * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be - * read. Caller must ensure that the buffer is large enough for the - * result when reading a string. - * @max_request: Max size of message requested. - * @max_response: Max size of message response. - * @max_passthru: Max sice of passthru message. - * @proto_version: The protocol version used for this device. - * @priv: Private data. - * @irq: Interrupt to use. - * @id: Device id. - * @din: Input buffer (for data from EC). This buffer will always be - * dword-aligned and include enough space for up to 7 word-alignment - * bytes also, so we can ensure that the body of the message is always - * dword-aligned (64-bit). We use this alignment to keep ARM and x86 - * happy. 
Probably word alignment would be OK, there might be a small - * performance advantage to using dword. - * @dout: Output buffer (for data to EC). This buffer will always be - * dword-aligned and include enough space for up to 7 word-alignment - * bytes also, so we can ensure that the body of the message is always - * dword-aligned (64-bit). We use this alignment to keep ARM and x86 - * happy. Probably word alignment would be OK, there might be a small - * performance advantage to using dword. - * @din_size: Size of din buffer to allocate (zero to use static din). - * @dout_size: Size of dout buffer to allocate (zero to use static dout). - * @wake_enabled: True if this device can wake the system from sleep. - * @suspended: True if this device had been suspended. - * @cmd_xfer: Send command to EC and get response. - * Returns the number of bytes received if the communication - * succeeded, but that doesn't mean the EC was happy with the - * command. The caller should check msg.result for the EC's result - * code. - * @pkt_xfer: Send packet to EC and get response. - * @lock: One transaction at a time. - * @mkbp_event_supported: True if this EC supports the MKBP event protocol. - * @host_sleep_v1: True if this EC supports the sleep v1 command. - * @event_notifier: Interrupt event notifier for transport devices. - * @event_data: Raw payload transferred with the MKBP event. - * @event_size: Size in bytes of the event data. - * @host_event_wake_mask: Mask of host events that cause wake from suspend. - */ -struct cros_ec_device { - /* These are used by other drivers that want to talk to the EC */ - const char *phys_name; - struct device *dev; - bool was_wake_device; - struct class *cros_class; - int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, - unsigned int bytes, void *dest); - - /* These are used to implement the platform-specific interface */ - u16 max_request; - u16 max_response; - u16 max_passthru; - u16 proto_version; - void *priv; - int irq; - u8 *din; - u8 *dout; - int din_size; - int dout_size; - bool wake_enabled; - bool suspended; - int (*cmd_xfer)(struct cros_ec_device *ec, - struct cros_ec_command *msg); - int (*pkt_xfer)(struct cros_ec_device *ec, - struct cros_ec_command *msg); - struct mutex lock; - bool mkbp_event_supported; - bool host_sleep_v1; - struct blocking_notifier_head event_notifier; - - struct ec_response_get_next_event_v1 event_data; - int event_size; - u32 host_event_wake_mask; -}; - -/** - * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information. - * @sensor_num: Id of the sensor, as reported by the EC. - */ -struct cros_ec_sensor_platform { - u8 sensor_num; -}; - -/** - * struct cros_ec_platform - ChromeOS EC platform information. - * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...) - * used in /dev/ and sysfs. - * @cmd_offset: Offset to apply for each command. Set when - * registering a device behind another one. - */ -struct cros_ec_platform { - const char *ec_name; - u16 cmd_offset; -}; - -struct cros_ec_debugfs; /** * struct cros_ec_dev - ChromeOS EC device entry point. * @class_dev: Device structure used in sysfs. - * @cdev: Character device structure in /dev. * @ec_dev: cros_ec_device structure to talk to the physical device. * @dev: Pointer to the platform device. * @debug_info: cros_ec_debugfs structure for debugging information. 
@@ -192,7 +22,6 @@ struct cros_ec_debugfs; */ struct cros_ec_dev { struct device class_dev; - struct cdev cdev; struct cros_ec_device *ec_dev; struct device *dev; struct cros_ec_debugfs *debug_info; @@ -203,123 +32,4 @@ struct cros_ec_dev { #define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev) -/** - * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device. - * @ec_dev: Device to suspend. - * - * This can be called by drivers to handle a suspend event. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_suspend(struct cros_ec_device *ec_dev); - -/** - * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device. - * @ec_dev: Device to resume. - * - * This can be called by drivers to handle a resume event. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_resume(struct cros_ec_device *ec_dev); - -/** - * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer. - * @ec_dev: Device to register. - * @msg: Message to write. - * - * This is intended to be used by all ChromeOS EC drivers, but at present - * only SPI uses it. Once LPC uses the same protocol it can start using it. - * I2C could use it now, with a refactor of the existing code. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - -/** - * cros_ec_check_result() - Check ec_msg->result. - * @ec_dev: EC device. - * @msg: Message to check. - * - * This is used by ChromeOS EC drivers to check the ec_msg->result for - * errors and to warn about them. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_check_result(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - -/** - * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC. - * @ec_dev: EC device. - * @msg: Message to write. - * - * Call this to send a command to the ChromeOS EC. This should be used - * instead of calling the EC's cmd_xfer() callback directly. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - -/** - * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC. - * @ec_dev: EC device. - * @msg: Message to write. - * - * This function is identical to cros_ec_cmd_xfer, except it returns success - * status only if both the command was transmitted successfully and the EC - * replied with success status. It's not necessary to check msg->result when - * using this function. - * - * Return: The number of bytes transferred on success or negative error code. - */ -int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - -/** - * cros_ec_register() - Register a new ChromeOS EC, using the provided info. - * @ec_dev: Device to register. - * - * Before calling this, allocate a pointer to a new device and then fill - * in all the fields up to the --private-- marker. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_register(struct cros_ec_device *ec_dev); - -/** - * cros_ec_query_all() - Query the protocol version supported by the - * ChromeOS EC. - * @ec_dev: Device to register. - * - * Return: 0 on success or negative error code. - */ -int cros_ec_query_all(struct cros_ec_device *ec_dev); - -/** - * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC. - * @ec_dev: Device to fetch event from. 
- * @wake_event: Pointer to a bool set to true upon return if the event might be - * treated as a wake event. Ignored if null. - * - * Return: negative error code on errors; 0 for no data; or else number of - * bytes received (i.e., an event was retrieved successfully). Event types are - * written out to @ec_dev->event_data.event_type on success. - */ -int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); - -/** - * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC. - * @ec_dev: Device to fetch event from. - * - * When MKBP is supported, when the EC raises an interrupt, we collect the - * events raised and call the functions in the ec notifier. This function - * is a helper to know which events are raised. - * - * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*. - */ -u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); - #endif /* __LINUX_MFD_CROS_EC_H */ diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h index fe04b708742b..2906bf6160fb 100644 --- a/include/linux/mfd/da9062/registers.h +++ b/include/linux/mfd/da9062/registers.h @@ -797,6 +797,9 @@ #define DA9062AA_BUCK3_SL_A_SHIFT 7 #define DA9062AA_BUCK3_SL_A_MASK BIT(7) +/* DA9062AA_VLDO[1-4]_A common */ +#define DA9062AA_VLDO_A_MIN_SEL 2 + /* DA9062AA_VLDO1_A = 0x0A9 */ #define DA9062AA_VLDO1_A_SHIFT 0 #define DA9062AA_VLDO1_A_MASK 0x3f diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h deleted file mode 100644 index 77c566ab96ab..000000000000 --- a/include/linux/mfd/da9063/pdata.h +++ /dev/null @@ -1,109 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Platform configuration options for DA9063 - * - * Copyright 2012 Dialog Semiconductor Ltd. - * - * Author: Michal Hajduk, Dialog Semiconductor - * Author: Krystian Garbaciak, Dialog Semiconductor - */ - -#ifndef __MFD_DA9063_PDATA_H__ -#define __MFD_DA9063_PDATA_H__ - -#include <linux/regulator/machine.h> - -/* - * Regulator configuration - */ -/* DA9063 and DA9063L regulator IDs */ -enum { - /* BUCKs */ - DA9063_ID_BCORE1, - DA9063_ID_BCORE2, - DA9063_ID_BPRO, - DA9063_ID_BMEM, - DA9063_ID_BIO, - DA9063_ID_BPERI, - - /* BCORE1 and BCORE2 in merged mode */ - DA9063_ID_BCORES_MERGED, - /* BMEM and BIO in merged mode */ - DA9063_ID_BMEM_BIO_MERGED, - /* When two BUCKs are merged, they cannot be reused separately */ - - /* LDOs on both DA9063 and DA9063L */ - DA9063_ID_LDO3, - DA9063_ID_LDO7, - DA9063_ID_LDO8, - DA9063_ID_LDO9, - DA9063_ID_LDO11, - - /* DA9063-only LDOs */ - DA9063_ID_LDO1, - DA9063_ID_LDO2, - DA9063_ID_LDO4, - DA9063_ID_LDO5, - DA9063_ID_LDO6, - DA9063_ID_LDO10, -}; - -/* Regulators platform data */ -struct da9063_regulator_data { - int id; - struct regulator_init_data *initdata; -}; - -struct da9063_regulators_pdata { - unsigned n_regulators; - struct da9063_regulator_data *regulator_data; -}; - - -/* - * RGB LED configuration - */ -/* LED IDs for flags in struct led_info. */ -enum { - DA9063_GPIO11_LED, - DA9063_GPIO14_LED, - DA9063_GPIO15_LED, - - DA9063_LED_NUM -}; -#define DA9063_LED_ID_MASK 0x3 - -/* LED polarity for flags in struct led_info. 
*/ -#define DA9063_LED_HIGH_LEVEL_ACTIVE 0x0 -#define DA9063_LED_LOW_LEVEL_ACTIVE 0x4 - - -/* - * General PMIC configuration - */ -/* HWMON ADC channels configuration */ -#define DA9063_FLG_FORCE_IN0_MANUAL_MODE 0x0010 -#define DA9063_FLG_FORCE_IN0_AUTO_MODE 0x0020 -#define DA9063_FLG_FORCE_IN1_MANUAL_MODE 0x0040 -#define DA9063_FLG_FORCE_IN1_AUTO_MODE 0x0080 -#define DA9063_FLG_FORCE_IN2_MANUAL_MODE 0x0100 -#define DA9063_FLG_FORCE_IN2_AUTO_MODE 0x0200 -#define DA9063_FLG_FORCE_IN3_MANUAL_MODE 0x0400 -#define DA9063_FLG_FORCE_IN3_AUTO_MODE 0x0800 - -/* Disable register caching. */ -#define DA9063_FLG_NO_CACHE 0x0008 - -struct da9063; - -/* DA9063 platform data */ -struct da9063_pdata { - int (*init)(struct da9063 *da9063); - int irq_base; - bool key_power; - unsigned flags; - struct da9063_regulators_pdata *regulators_pdata; - struct led_platform_data *leds_pdata; -}; - -#endif /* __MFD_DA9063_PDATA_H__ */ diff --git a/include/linux/mfd/intel_soc_pmic_mrfld.h b/include/linux/mfd/intel_soc_pmic_mrfld.h new file mode 100644 index 000000000000..4daecd682275 --- /dev/null +++ b/include/linux/mfd/intel_soc_pmic_mrfld.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header file for Intel Merrifield Basin Cove PMIC + * + * Copyright (C) 2019 Intel Corporation. All rights reserved. + */ + +#ifndef __INTEL_SOC_PMIC_MRFLD_H__ +#define __INTEL_SOC_PMIC_MRFLD_H__ + +#include <linux/bits.h> + +#define BCOVE_ID 0x00 + +#define BCOVE_ID_MINREV0 GENMASK(2, 0) +#define BCOVE_ID_MAJREV0 GENMASK(5, 3) +#define BCOVE_ID_VENDID0 GENMASK(7, 6) + +#define BCOVE_MINOR(x) (unsigned int)(((x) & BCOVE_ID_MINREV0) >> 0) +#define BCOVE_MAJOR(x) (unsigned int)(((x) & BCOVE_ID_MAJREV0) >> 3) +#define BCOVE_VENDOR(x) (unsigned int)(((x) & BCOVE_ID_VENDID0) >> 6) + +#define BCOVE_IRQLVL1 0x01 + +#define BCOVE_PBIRQ 0x02 +#define BCOVE_TMUIRQ 0x03 +#define BCOVE_THRMIRQ 0x04 +#define BCOVE_BCUIRQ 0x05 +#define BCOVE_ADCIRQ 0x06 +#define BCOVE_CHGRIRQ0 0x07 +#define BCOVE_CHGRIRQ1 0x08 +#define BCOVE_GPIOIRQ 0x09 +#define BCOVE_CRITIRQ 0x0B + +#define BCOVE_MIRQLVL1 0x0C + +#define BCOVE_MPBIRQ 0x0D +#define BCOVE_MTMUIRQ 0x0E +#define BCOVE_MTHRMIRQ 0x0F +#define BCOVE_MBCUIRQ 0x10 +#define BCOVE_MADCIRQ 0x11 +#define BCOVE_MCHGRIRQ0 0x12 +#define BCOVE_MCHGRIRQ1 0x13 +#define BCOVE_MGPIOIRQ 0x14 +#define BCOVE_MCRITIRQ 0x16 + +#define BCOVE_SCHGRIRQ0 0x4E +#define BCOVE_SCHGRIRQ1 0x4F + +/* Level 1 IRQs */ +#define BCOVE_LVL1_PWRBTN BIT(0) /* power button */ +#define BCOVE_LVL1_TMU BIT(1) /* time management unit */ +#define BCOVE_LVL1_THRM BIT(2) /* thermal */ +#define BCOVE_LVL1_BCU BIT(3) /* burst control unit */ +#define BCOVE_LVL1_ADC BIT(4) /* ADC */ +#define BCOVE_LVL1_CHGR BIT(5) /* charger */ +#define BCOVE_LVL1_GPIO BIT(6) /* GPIO */ +#define BCOVE_LVL1_CRIT BIT(7) /* critical event */ + +/* Level 2 IRQs: power button */ +#define BCOVE_PBIRQ_PBTN BIT(0) +#define BCOVE_PBIRQ_UBTN BIT(1) + +/* Level 2 IRQs: ADC */ +#define BCOVE_ADCIRQ_BATTEMP BIT(2) +#define BCOVE_ADCIRQ_SYSTEMP BIT(3) +#define BCOVE_ADCIRQ_BATTID BIT(4) +#define BCOVE_ADCIRQ_VIBATT BIT(5) +#define BCOVE_ADCIRQ_CCTICK BIT(7) + +/* Level 2 IRQs: charger */ +#define BCOVE_CHGRIRQ_BAT0ALRT BIT(4) +#define BCOVE_CHGRIRQ_BAT1ALRT BIT(5) +#define BCOVE_CHGRIRQ_BATCRIT BIT(6) + +#define BCOVE_CHGRIRQ_VBUSDET BIT(0) +#define BCOVE_CHGRIRQ_DCDET BIT(1) +#define BCOVE_CHGRIRQ_BATTDET BIT(2) +#define BCOVE_CHGRIRQ_USBIDDET BIT(3) + +#endif /* __INTEL_SOC_PMIC_MRFLD_H__ */ diff --git a/include/linux/mfd/lp87565.h 
b/include/linux/mfd/lp87565.h index e619def115b4..ce965354bbad 100644 --- a/include/linux/mfd/lp87565.h +++ b/include/linux/mfd/lp87565.h @@ -14,6 +14,7 @@ enum lp87565_device_type { LP87565_DEVICE_TYPE_UNKNOWN = 0, + LP87565_DEVICE_TYPE_LP87561_Q1, LP87565_DEVICE_TYPE_LP87565_Q1, }; @@ -246,6 +247,7 @@ enum LP87565_regulator_id { LP87565_BUCK_3, LP87565_BUCK_10, LP87565_BUCK_23, + LP87565_BUCK_3210, }; /** diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h index 4d5d51a9c8a6..7ffa696cce7c 100644 --- a/include/linux/mfd/madera/core.h +++ b/include/linux/mfd/madera/core.h @@ -1,12 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-only */ /* * MFD internals for Cirrus Logic Madera codecs * * Copyright (C) 2015-2018 Cirrus Logic - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2. */ #ifndef MADERA_CORE_H @@ -26,15 +22,21 @@ enum madera_type { CS47L85 = 2, CS47L90 = 3, CS47L91 = 4, + CS47L92 = 5, + CS47L93 = 6, WM1840 = 7, + CS47L15 = 8, + CS42L92 = 9, }; #define MADERA_MAX_CORE_SUPPLIES 2 #define MADERA_MAX_GPIOS 40 +#define CS47L15_NUM_GPIOS 15 #define CS47L35_NUM_GPIOS 16 #define CS47L85_NUM_GPIOS 40 #define CS47L90_NUM_GPIOS 38 +#define CS47L92_NUM_GPIOS 16 #define MADERA_MAX_MICBIAS 4 diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h index 8dc852402dbb..fa9595dd42ba 100644 --- a/include/linux/mfd/madera/pdata.h +++ b/include/linux/mfd/madera/pdata.h @@ -1,12 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Platform data for Cirrus Logic Madera codecs * * Copyright (C) 2015-2018 Cirrus Logic - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2. */ #ifndef MADERA_PDATA_H @@ -16,6 +12,7 @@ #include <linux/regulator/arizona-ldo1.h> #include <linux/regulator/arizona-micsupp.h> #include <linux/regulator/machine.h> +#include <sound/madera-pdata.h> #define MADERA_MAX_MICBIAS 4 #define MADERA_MAX_CHILD_MICBIAS 4 @@ -34,11 +31,13 @@ struct madera_codec_pdata; * @micvdd: Substruct of pdata for the MICVDD regulator * @irq_flags: Mode for primary IRQ (defaults to active low) * @gpio_base: Base GPIO number - * @gpio_configs: Array of GPIO configurations (See Documentation/pinctrl.txt) + * @gpio_configs: Array of GPIO configurations (See + * Documentation/driver-api/pinctl.rst) * @n_gpio_configs: Number of entries in gpio_configs * @gpsw: General purpose switch mode setting. Depends on the external * hardware connected to the switch. 
(See the SW1_MODE field * in the datasheet for the available values for your codec) + * @codec: Substruct of pdata for the ASoC codec driver */ struct madera_pdata { struct gpio_desc *reset; @@ -53,6 +52,8 @@ struct madera_pdata { int n_gpio_configs; u32 gpsw[MADERA_MAX_GPSW]; + + struct madera_codec_pdata codec; }; #endif diff --git a/include/linux/mfd/madera/registers.h b/include/linux/mfd/madera/registers.h index 977e06101711..fe909d177762 100644 --- a/include/linux/mfd/madera/registers.h +++ b/include/linux/mfd/madera/registers.h @@ -1,12 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Madera register definitions * * Copyright (C) 2015-2018 Cirrus Logic - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2. */ #ifndef MADERA_REGISTERS_H @@ -76,10 +72,14 @@ #define MADERA_FLL1_CONTROL_4 0x174 #define MADERA_FLL1_CONTROL_5 0x175 #define MADERA_FLL1_CONTROL_6 0x176 -#define MADERA_FLL1_LOOP_FILTER_TEST_1 0x177 -#define MADERA_FLL1_NCO_TEST_0 0x178 +#define CS47L92_FLL1_CONTROL_7 0x177 +#define CS47L92_FLL1_CONTROL_8 0x178 #define MADERA_FLL1_CONTROL_7 0x179 +#define CS47L92_FLL1_CONTROL_9 0x179 #define MADERA_FLL1_EFS_2 0x17A +#define CS47L92_FLL1_CONTROL_10 0x17A +#define MADERA_FLL1_CONTROL_11 0x17B +#define MADERA_FLL1_DIGITAL_TEST_1 0x17D #define CS47L35_FLL1_SYNCHRONISER_1 0x17F #define CS47L35_FLL1_SYNCHRONISER_2 0x180 #define CS47L35_FLL1_SYNCHRONISER_3 0x181 @@ -98,16 +98,21 @@ #define MADERA_FLL1_SYNCHRONISER_7 0x187 #define MADERA_FLL1_SPREAD_SPECTRUM 0x189 #define MADERA_FLL1_GPIO_CLOCK 0x18A +#define CS47L92_FLL1_GPIO_CLOCK 0x18E #define MADERA_FLL2_CONTROL_1 0x191 #define MADERA_FLL2_CONTROL_2 0x192 #define MADERA_FLL2_CONTROL_3 0x193 #define MADERA_FLL2_CONTROL_4 0x194 #define MADERA_FLL2_CONTROL_5 0x195 #define MADERA_FLL2_CONTROL_6 0x196 -#define MADERA_FLL2_LOOP_FILTER_TEST_1 0x197 -#define MADERA_FLL2_NCO_TEST_0 0x198 +#define CS47L92_FLL2_CONTROL_7 0x197 +#define CS47L92_FLL2_CONTROL_8 0x198 #define MADERA_FLL2_CONTROL_7 0x199 +#define CS47L92_FLL2_CONTROL_9 0x199 #define MADERA_FLL2_EFS_2 0x19A +#define CS47L92_FLL2_CONTROL_10 0x19A +#define MADERA_FLL2_CONTROL_11 0x19B +#define MADERA_FLL2_DIGITAL_TEST_1 0x19D #define MADERA_FLL2_SYNCHRONISER_1 0x1A1 #define MADERA_FLL2_SYNCHRONISER_2 0x1A2 #define MADERA_FLL2_SYNCHRONISER_3 0x1A3 @@ -117,14 +122,13 @@ #define MADERA_FLL2_SYNCHRONISER_7 0x1A7 #define MADERA_FLL2_SPREAD_SPECTRUM 0x1A9 #define MADERA_FLL2_GPIO_CLOCK 0x1AA +#define CS47L92_FLL2_GPIO_CLOCK 0x1AE #define MADERA_FLL3_CONTROL_1 0x1B1 #define MADERA_FLL3_CONTROL_2 0x1B2 #define MADERA_FLL3_CONTROL_3 0x1B3 #define MADERA_FLL3_CONTROL_4 0x1B4 #define MADERA_FLL3_CONTROL_5 0x1B5 #define MADERA_FLL3_CONTROL_6 0x1B6 -#define MADERA_FLL3_LOOP_FILTER_TEST_1 0x1B7 -#define MADERA_FLL3_NCO_TEST_0 0x1B8 #define MADERA_FLL3_CONTROL_7 0x1B9 #define MADERA_FLL3_SYNCHRONISER_1 0x1C1 #define MADERA_FLL3_SYNCHRONISER_2 0x1C2 @@ -244,6 +248,8 @@ #define MADERA_IN6R_CONTROL 0x33C #define MADERA_ADC_DIGITAL_VOLUME_6R 0x33D #define MADERA_DMIC6R_CONTROL 0x33E +#define CS47L15_ADC_INT_BIAS 0x3A8 +#define CS47L15_PGA_BIAS_SEL 0x3C4 #define MADERA_OUTPUT_ENABLES_1 0x400 #define MADERA_OUTPUT_STATUS_1 0x401 #define MADERA_RAW_OUTPUT_STATUS_1 0x406 @@ -265,6 +271,7 @@ #define MADERA_NOISE_GATE_SELECT_2R 0x41F #define MADERA_OUTPUT_PATH_CONFIG_3L 0x420 #define MADERA_DAC_DIGITAL_VOLUME_3L 0x421 
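The Madera register map keeps its existing convention of a *_MASK/*_SHIFT/*_WIDTH triplet per bitfield, so fields are normally programmed through regmap rather than by raw register writes; the snippet below is only a sketch of that idiom, using the FLL1 gain field (whose macros appear further down in this file) as an arbitrary example rather than anything this patch changes.

/*
 * Sketch of the usual *_MASK/*_SHIFT usage (assumed, not part of this
 * patch): update a single bitfield of a Madera register via regmap.
 */
static int example_set_fll1_gain(struct regmap *regmap, unsigned int gain)
{
	return regmap_update_bits(regmap, MADERA_FLL1_CONTROL_7,
				  MADERA_FLL1_GAIN_MASK,
				  gain << MADERA_FLL1_GAIN_SHIFT);
}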
+#define MADERA_OUTPUT_PATH_CONFIG_3 0x422 #define MADERA_NOISE_GATE_SELECT_3L 0x423 #define MADERA_OUTPUT_PATH_CONFIG_3R 0x424 #define MADERA_DAC_DIGITAL_VOLUME_3R 0x425 @@ -287,9 +294,6 @@ #define MADERA_OUTPUT_PATH_CONFIG_6R 0x43C #define MADERA_DAC_DIGITAL_VOLUME_6R 0x43D #define MADERA_NOISE_GATE_SELECT_6R 0x43F -#define MADERA_DRE_ENABLE 0x440 -#define MADERA_EDRE_ENABLE 0x448 -#define MADERA_EDRE_MANUAL 0x44A #define MADERA_DAC_AEC_CONTROL_1 0x450 #define MADERA_DAC_AEC_CONTROL_2 0x451 #define MADERA_NOISE_GATE_CONTROL 0x458 @@ -367,8 +371,20 @@ #define MADERA_AIF3_FRAME_CTRL_2 0x588 #define MADERA_AIF3_FRAME_CTRL_3 0x589 #define MADERA_AIF3_FRAME_CTRL_4 0x58A +#define MADERA_AIF3_FRAME_CTRL_5 0x58B +#define MADERA_AIF3_FRAME_CTRL_6 0x58C +#define MADERA_AIF3_FRAME_CTRL_7 0x58D +#define MADERA_AIF3_FRAME_CTRL_8 0x58E +#define MADERA_AIF3_FRAME_CTRL_9 0x58F +#define MADERA_AIF3_FRAME_CTRL_10 0x590 #define MADERA_AIF3_FRAME_CTRL_11 0x591 #define MADERA_AIF3_FRAME_CTRL_12 0x592 +#define MADERA_AIF3_FRAME_CTRL_13 0x593 +#define MADERA_AIF3_FRAME_CTRL_14 0x594 +#define MADERA_AIF3_FRAME_CTRL_15 0x595 +#define MADERA_AIF3_FRAME_CTRL_16 0x596 +#define MADERA_AIF3_FRAME_CTRL_17 0x597 +#define MADERA_AIF3_FRAME_CTRL_18 0x598 #define MADERA_AIF3_TX_ENABLES 0x599 #define MADERA_AIF3_RX_ENABLES 0x59A #define MADERA_AIF3_FORCE_WRITE 0x59B @@ -660,6 +676,54 @@ #define MADERA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D #define MADERA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E #define MADERA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F +#define MADERA_AIF3TX3MIX_INPUT_1_SOURCE 0x790 +#define MADERA_AIF3TX3MIX_INPUT_1_VOLUME 0x791 +#define MADERA_AIF3TX3MIX_INPUT_2_SOURCE 0x792 +#define MADERA_AIF3TX3MIX_INPUT_2_VOLUME 0x793 +#define MADERA_AIF3TX3MIX_INPUT_3_SOURCE 0x794 +#define MADERA_AIF3TX3MIX_INPUT_3_VOLUME 0x795 +#define MADERA_AIF3TX3MIX_INPUT_4_SOURCE 0x796 +#define MADERA_AIF3TX3MIX_INPUT_4_VOLUME 0x797 +#define MADERA_AIF3TX4MIX_INPUT_1_SOURCE 0x798 +#define MADERA_AIF3TX4MIX_INPUT_1_VOLUME 0x799 +#define MADERA_AIF3TX4MIX_INPUT_2_SOURCE 0x79A +#define MADERA_AIF3TX4MIX_INPUT_2_VOLUME 0x79B +#define MADERA_AIF3TX4MIX_INPUT_3_SOURCE 0x79C +#define MADERA_AIF3TX4MIX_INPUT_3_VOLUME 0x79D +#define MADERA_AIF3TX4MIX_INPUT_4_SOURCE 0x79E +#define MADERA_AIF3TX4MIX_INPUT_4_VOLUME 0x79F +#define CS47L92_AIF3TX5MIX_INPUT_1_SOURCE 0x7A0 +#define CS47L92_AIF3TX5MIX_INPUT_1_VOLUME 0x7A1 +#define CS47L92_AIF3TX5MIX_INPUT_2_SOURCE 0x7A2 +#define CS47L92_AIF3TX5MIX_INPUT_2_VOLUME 0x7A3 +#define CS47L92_AIF3TX5MIX_INPUT_3_SOURCE 0x7A4 +#define CS47L92_AIF3TX5MIX_INPUT_3_VOLUME 0x7A5 +#define CS47L92_AIF3TX5MIX_INPUT_4_SOURCE 0x7A6 +#define CS47L92_AIF3TX5MIX_INPUT_4_VOLUME 0x7A7 +#define CS47L92_AIF3TX6MIX_INPUT_1_SOURCE 0x7A8 +#define CS47L92_AIF3TX6MIX_INPUT_1_VOLUME 0x7A9 +#define CS47L92_AIF3TX6MIX_INPUT_2_SOURCE 0x7AA +#define CS47L92_AIF3TX6MIX_INPUT_2_VOLUME 0x7AB +#define CS47L92_AIF3TX6MIX_INPUT_3_SOURCE 0x7AC +#define CS47L92_AIF3TX6MIX_INPUT_3_VOLUME 0x7AD +#define CS47L92_AIF3TX6MIX_INPUT_4_SOURCE 0x7AE +#define CS47L92_AIF3TX6MIX_INPUT_4_VOLUME 0x7AF +#define CS47L92_AIF3TX7MIX_INPUT_1_SOURCE 0x7B0 +#define CS47L92_AIF3TX7MIX_INPUT_1_VOLUME 0x7B1 +#define CS47L92_AIF3TX7MIX_INPUT_2_SOURCE 0x7B2 +#define CS47L92_AIF3TX7MIX_INPUT_2_VOLUME 0x7B3 +#define CS47L92_AIF3TX7MIX_INPUT_3_SOURCE 0x7B4 +#define CS47L92_AIF3TX7MIX_INPUT_3_VOLUME 0x7B5 +#define CS47L92_AIF3TX7MIX_INPUT_4_SOURCE 0x7B6 +#define CS47L92_AIF3TX7MIX_INPUT_4_VOLUME 0x7B7 +#define CS47L92_AIF3TX8MIX_INPUT_1_SOURCE 0x7B8 +#define CS47L92_AIF3TX8MIX_INPUT_1_VOLUME 
0x7B9 +#define CS47L92_AIF3TX8MIX_INPUT_2_SOURCE 0x7BA +#define CS47L92_AIF3TX8MIX_INPUT_2_VOLUME 0x7BB +#define CS47L92_AIF3TX8MIX_INPUT_3_SOURCE 0x7BC +#define CS47L92_AIF3TX8MIX_INPUT_3_VOLUME 0x7BD +#define CS47L92_AIF3TX8MIX_INPUT_4_SOURCE 0x7BE +#define CS47L92_AIF3TX8MIX_INPUT_4_VOLUME 0x7BF #define MADERA_AIF4TX1MIX_INPUT_1_SOURCE 0x7A0 #define MADERA_AIF4TX1MIX_INPUT_1_VOLUME 0x7A1 #define MADERA_AIF4TX1MIX_INPUT_2_SOURCE 0x7A2 @@ -1103,68 +1167,8 @@ #define MADERA_FCR_ADC_REFORMATTER_CONTROL 0xF73 #define MADERA_FCR_COEFF_START 0xF74 #define MADERA_FCR_COEFF_END 0xFC5 -#define MADERA_DAC_COMP_1 0x1300 -#define MADERA_DAC_COMP_2 0x1302 -#define MADERA_FRF_COEFFICIENT_1L_1 0x1380 -#define MADERA_FRF_COEFFICIENT_1L_2 0x1381 -#define MADERA_FRF_COEFFICIENT_1L_3 0x1382 -#define MADERA_FRF_COEFFICIENT_1L_4 0x1383 -#define MADERA_FRF_COEFFICIENT_1R_1 0x1390 -#define MADERA_FRF_COEFFICIENT_1R_2 0x1391 -#define MADERA_FRF_COEFFICIENT_1R_3 0x1392 -#define MADERA_FRF_COEFFICIENT_1R_4 0x1393 -#define MADERA_FRF_COEFFICIENT_2L_1 0x13A0 -#define MADERA_FRF_COEFFICIENT_2L_2 0x13A1 -#define MADERA_FRF_COEFFICIENT_2L_3 0x13A2 -#define MADERA_FRF_COEFFICIENT_2L_4 0x13A3 -#define MADERA_FRF_COEFFICIENT_2R_1 0x13B0 -#define MADERA_FRF_COEFFICIENT_2R_2 0x13B1 -#define MADERA_FRF_COEFFICIENT_2R_3 0x13B2 -#define MADERA_FRF_COEFFICIENT_2R_4 0x13B3 -#define MADERA_FRF_COEFFICIENT_3L_1 0x13C0 -#define MADERA_FRF_COEFFICIENT_3L_2 0x13C1 -#define MADERA_FRF_COEFFICIENT_3L_3 0x13C2 -#define MADERA_FRF_COEFFICIENT_3L_4 0x13C3 -#define MADERA_FRF_COEFFICIENT_3R_1 0x13D0 -#define MADERA_FRF_COEFFICIENT_3R_2 0x13D1 -#define MADERA_FRF_COEFFICIENT_3R_3 0x13D2 -#define MADERA_FRF_COEFFICIENT_3R_4 0x13D3 -#define MADERA_FRF_COEFFICIENT_4L_1 0x13E0 -#define MADERA_FRF_COEFFICIENT_4L_2 0x13E1 -#define MADERA_FRF_COEFFICIENT_4L_3 0x13E2 -#define MADERA_FRF_COEFFICIENT_4L_4 0x13E3 -#define MADERA_FRF_COEFFICIENT_4R_1 0x13F0 -#define MADERA_FRF_COEFFICIENT_4R_2 0x13F1 -#define MADERA_FRF_COEFFICIENT_4R_3 0x13F2 -#define MADERA_FRF_COEFFICIENT_4R_4 0x13F3 -#define CS47L35_FRF_COEFFICIENT_4L_1 0x13A0 -#define CS47L35_FRF_COEFFICIENT_4L_2 0x13A1 -#define CS47L35_FRF_COEFFICIENT_4L_3 0x13A2 -#define CS47L35_FRF_COEFFICIENT_4L_4 0x13A3 -#define CS47L35_FRF_COEFFICIENT_5L_1 0x13B0 -#define CS47L35_FRF_COEFFICIENT_5L_2 0x13B1 -#define CS47L35_FRF_COEFFICIENT_5L_3 0x13B2 -#define CS47L35_FRF_COEFFICIENT_5L_4 0x13B3 -#define CS47L35_FRF_COEFFICIENT_5R_1 0x13C0 -#define CS47L35_FRF_COEFFICIENT_5R_2 0x13C1 -#define CS47L35_FRF_COEFFICIENT_5R_3 0x13C2 -#define CS47L35_FRF_COEFFICIENT_5R_4 0x13C3 -#define MADERA_FRF_COEFFICIENT_5L_1 0x1400 -#define MADERA_FRF_COEFFICIENT_5L_2 0x1401 -#define MADERA_FRF_COEFFICIENT_5L_3 0x1402 -#define MADERA_FRF_COEFFICIENT_5L_4 0x1403 -#define MADERA_FRF_COEFFICIENT_5R_1 0x1410 -#define MADERA_FRF_COEFFICIENT_5R_2 0x1411 -#define MADERA_FRF_COEFFICIENT_5R_3 0x1412 -#define MADERA_FRF_COEFFICIENT_5R_4 0x1413 -#define MADERA_FRF_COEFFICIENT_6L_1 0x1420 -#define MADERA_FRF_COEFFICIENT_6L_2 0x1421 -#define MADERA_FRF_COEFFICIENT_6L_3 0x1422 -#define MADERA_FRF_COEFFICIENT_6L_4 0x1423 -#define MADERA_FRF_COEFFICIENT_6R_1 0x1430 -#define MADERA_FRF_COEFFICIENT_6R_2 0x1431 -#define MADERA_FRF_COEFFICIENT_6R_3 0x1432 -#define MADERA_FRF_COEFFICIENT_6R_4 0x1433 +#define MADERA_AUXPDM1_CTRL_0 0x10C0 +#define MADERA_AUXPDM1_CTRL_1 0x10C1 #define MADERA_DFC1_CTRL 0x1480 #define MADERA_DFC1_RX 0x1482 #define MADERA_DFC1_TX 0x1484 @@ -1202,6 +1206,8 @@ #define MADERA_GPIO1_CTRL_2 0x1701 #define 
MADERA_GPIO2_CTRL_1 0x1702 #define MADERA_GPIO2_CTRL_2 0x1703 +#define MADERA_GPIO15_CTRL_1 0x171C +#define MADERA_GPIO15_CTRL_2 0x171D #define MADERA_GPIO16_CTRL_1 0x171E #define MADERA_GPIO16_CTRL_2 0x171F #define MADERA_GPIO38_CTRL_1 0x174A @@ -1232,6 +1238,7 @@ #define MADERA_IRQ2_CTRL 0x1A82 #define MADERA_INTERRUPT_RAW_STATUS_1 0x1AA0 #define MADERA_WSEQ_SEQUENCE_1 0x3000 +#define MADERA_WSEQ_SEQUENCE_225 0x31C0 #define MADERA_WSEQ_SEQUENCE_252 0x31F6 #define CS47L35_OTP_HPDET_CAL_1 0x31F8 #define CS47L35_OTP_HPDET_CAL_2 0x31FA @@ -1441,6 +1448,12 @@ #define MADERA_OPCLK_ASYNC_SEL_WIDTH 3 /* (0x0171) FLL1_Control_1 */ +#define CS47L92_FLL1_REFCLK_SRC_MASK 0xF000 +#define CS47L92_FLL1_REFCLK_SRC_SHIFT 12 +#define CS47L92_FLL1_REFCLK_SRC_WIDTH 4 +#define MADERA_FLL1_HOLD_MASK 0x0004 +#define MADERA_FLL1_HOLD_SHIFT 2 +#define MADERA_FLL1_HOLD_WIDTH 1 #define MADERA_FLL1_FREERUN 0x0002 #define MADERA_FLL1_FREERUN_MASK 0x0002 #define MADERA_FLL1_FREERUN_SHIFT 1 @@ -1473,6 +1486,9 @@ #define MADERA_FLL1_FRATIO_MASK 0x0F00 #define MADERA_FLL1_FRATIO_SHIFT 8 #define MADERA_FLL1_FRATIO_WIDTH 4 +#define MADERA_FLL1_FB_DIV_MASK 0x03FF +#define MADERA_FLL1_FB_DIV_SHIFT 0 +#define MADERA_FLL1_FB_DIV_WIDTH 10 /* (0x0176) FLL1_Control_6 */ #define MADERA_FLL1_REFCLK_DIV_MASK 0x00C0 @@ -1482,15 +1498,6 @@ #define MADERA_FLL1_REFCLK_SRC_SHIFT 0 #define MADERA_FLL1_REFCLK_SRC_WIDTH 4 -/* (0x0177) FLL1_Loop_Filter_Test_1 */ -#define MADERA_FLL1_FRC_INTEG_UPD 0x8000 -#define MADERA_FLL1_FRC_INTEG_UPD_MASK 0x8000 -#define MADERA_FLL1_FRC_INTEG_UPD_SHIFT 15 -#define MADERA_FLL1_FRC_INTEG_UPD_WIDTH 1 -#define MADERA_FLL1_FRC_INTEG_VAL_MASK 0x0FFF -#define MADERA_FLL1_FRC_INTEG_VAL_SHIFT 0 -#define MADERA_FLL1_FRC_INTEG_VAL_WIDTH 12 - /* (0x0179) FLL1_Control_7 */ #define MADERA_FLL1_GAIN_MASK 0x003c #define MADERA_FLL1_GAIN_SHIFT 2 @@ -1504,6 +1511,30 @@ #define MADERA_FLL1_PHASE_ENA_SHIFT 11 #define MADERA_FLL1_PHASE_ENA_WIDTH 1 +/* (0x017A) FLL1_Control_10 */ +#define MADERA_FLL1_HP_MASK 0xC000 +#define MADERA_FLL1_HP_SHIFT 14 +#define MADERA_FLL1_HP_WIDTH 2 +#define MADERA_FLL1_PHASEDET_ENA_MASK 0x1000 +#define MADERA_FLL1_PHASEDET_ENA_SHIFT 12 +#define MADERA_FLL1_PHASEDET_ENA_WIDTH 1 + +/* (0x017B) FLL1_Control_11 */ +#define MADERA_FLL1_LOCKDET_THR_MASK 0x001E +#define MADERA_FLL1_LOCKDET_THR_SHIFT 1 +#define MADERA_FLL1_LOCKDET_THR_WIDTH 4 +#define MADERA_FLL1_LOCKDET_MASK 0x0001 +#define MADERA_FLL1_LOCKDET_SHIFT 0 +#define MADERA_FLL1_LOCKDET_WIDTH 1 + +/* (0x017D) FLL1_Digital_Test_1 */ +#define MADERA_FLL1_SYNC_EFS_ENA_MASK 0x0100 +#define MADERA_FLL1_SYNC_EFS_ENA_SHIFT 8 +#define MADERA_FLL1_SYNC_EFS_ENA_WIDTH 1 +#define MADERA_FLL1_CLK_VCO_FAST_SRC_MASK 0x0003 +#define MADERA_FLL1_CLK_VCO_FAST_SRC_SHIFT 0 +#define MADERA_FLL1_CLK_VCO_FAST_SRC_WIDTH 2 + /* (0x0181) FLL1_Synchroniser_1 */ #define MADERA_FLL1_SYNC_ENA 0x0001 #define MADERA_FLL1_SYNC_ENA_MASK 0x0001 @@ -1625,6 +1656,13 @@ #define MADERA_LDO2_ENA_WIDTH 1 /* (0x0218) Mic_Bias_Ctrl_1 */ +#define MADERA_MICB1_EXT_CAP 0x8000 +#define MADERA_MICB1_EXT_CAP_MASK 0x8000 +#define MADERA_MICB1_EXT_CAP_SHIFT 15 +#define MADERA_MICB1_EXT_CAP_WIDTH 1 +#define MADERA_MICB1_LVL_MASK 0x01E0 +#define MADERA_MICB1_LVL_SHIFT 5 +#define MADERA_MICB1_LVL_WIDTH 4 #define MADERA_MICB1_ENA 0x0001 #define MADERA_MICB1_ENA_MASK 0x0001 #define MADERA_MICB1_ENA_SHIFT 0 @@ -2308,6 +2346,17 @@ #define MADERA_OUT1R_ENA_SHIFT 0 #define MADERA_OUT1R_ENA_WIDTH 1 +/* (0x0408) Output_Rate_1 */ +#define MADERA_CP_DAC_MODE_MASK 0x0040 +#define MADERA_CP_DAC_MODE_SHIFT 
6 +#define MADERA_CP_DAC_MODE_WIDTH 1 +#define MADERA_OUT_EXT_CLK_DIV_MASK 0x0030 +#define MADERA_OUT_EXT_CLK_DIV_SHIFT 4 +#define MADERA_OUT_EXT_CLK_DIV_WIDTH 2 +#define MADERA_OUT_CLK_SRC_MASK 0x0007 +#define MADERA_OUT_CLK_SRC_SHIFT 0 +#define MADERA_OUT_CLK_SRC_WIDTH 3 + /* (0x0409) Output_Volume_Ramp */ #define MADERA_OUT_VD_RAMP_MASK 0x0070 #define MADERA_OUT_VD_RAMP_SHIFT 4 @@ -2829,6 +2878,30 @@ #define MADERA_AIF2RX1_ENA_WIDTH 1 /* (0x0599) AIF3_Tx_Enables */ +#define MADERA_AIF3TX8_ENA 0x0080 +#define MADERA_AIF3TX8_ENA_MASK 0x0080 +#define MADERA_AIF3TX8_ENA_SHIFT 7 +#define MADERA_AIF3TX8_ENA_WIDTH 1 +#define MADERA_AIF3TX7_ENA 0x0040 +#define MADERA_AIF3TX7_ENA_MASK 0x0040 +#define MADERA_AIF3TX7_ENA_SHIFT 6 +#define MADERA_AIF3TX7_ENA_WIDTH 1 +#define MADERA_AIF3TX6_ENA 0x0020 +#define MADERA_AIF3TX6_ENA_MASK 0x0020 +#define MADERA_AIF3TX6_ENA_SHIFT 5 +#define MADERA_AIF3TX6_ENA_WIDTH 1 +#define MADERA_AIF3TX5_ENA 0x0010 +#define MADERA_AIF3TX5_ENA_MASK 0x0010 +#define MADERA_AIF3TX5_ENA_SHIFT 4 +#define MADERA_AIF3TX5_ENA_WIDTH 1 +#define MADERA_AIF3TX4_ENA 0x0008 +#define MADERA_AIF3TX4_ENA_MASK 0x0008 +#define MADERA_AIF3TX4_ENA_SHIFT 3 +#define MADERA_AIF3TX4_ENA_WIDTH 1 +#define MADERA_AIF3TX3_ENA 0x0004 +#define MADERA_AIF3TX3_ENA_MASK 0x0004 +#define MADERA_AIF3TX3_ENA_SHIFT 2 +#define MADERA_AIF3TX3_ENA_WIDTH 1 #define MADERA_AIF3TX2_ENA 0x0002 #define MADERA_AIF3TX2_ENA_MASK 0x0002 #define MADERA_AIF3TX2_ENA_SHIFT 1 @@ -2839,6 +2912,30 @@ #define MADERA_AIF3TX1_ENA_WIDTH 1 /* (0x059A) AIF3_Rx_Enables */ +#define MADERA_AIF3RX8_ENA 0x0080 +#define MADERA_AIF3RX8_ENA_MASK 0x0080 +#define MADERA_AIF3RX8_ENA_SHIFT 7 +#define MADERA_AIF3RX8_ENA_WIDTH 1 +#define MADERA_AIF3RX7_ENA 0x0040 +#define MADERA_AIF3RX7_ENA_MASK 0x0040 +#define MADERA_AIF3RX7_ENA_SHIFT 6 +#define MADERA_AIF3RX7_ENA_WIDTH 1 +#define MADERA_AIF3RX6_ENA 0x0020 +#define MADERA_AIF3RX6_ENA_MASK 0x0020 +#define MADERA_AIF3RX6_ENA_SHIFT 5 +#define MADERA_AIF3RX6_ENA_WIDTH 1 +#define MADERA_AIF3RX5_ENA 0x0010 +#define MADERA_AIF3RX5_ENA_MASK 0x0010 +#define MADERA_AIF3RX5_ENA_SHIFT 4 +#define MADERA_AIF3RX5_ENA_WIDTH 1 +#define MADERA_AIF3RX4_ENA 0x0008 +#define MADERA_AIF3RX4_ENA_MASK 0x0008 +#define MADERA_AIF3RX4_ENA_SHIFT 3 +#define MADERA_AIF3RX4_ENA_WIDTH 1 +#define MADERA_AIF3RX3_ENA 0x0004 +#define MADERA_AIF3RX3_ENA_MASK 0x0004 +#define MADERA_AIF3RX3_ENA_SHIFT 2 +#define MADERA_AIF3RX3_ENA_WIDTH 1 #define MADERA_AIF3RX2_ENA 0x0002 #define MADERA_AIF3RX2_ENA_MASK 0x0002 #define MADERA_AIF3RX2_ENA_SHIFT 1 @@ -3453,6 +3550,25 @@ #define MADERA_FCR_MIC_MODE_SEL_SHIFT 2 #define MADERA_FCR_MIC_MODE_SEL_WIDTH 2 +/* (0x10C0) AUXPDM1_CTRL_0 */ +#define MADERA_AUXPDM1_SRC_MASK 0x0F00 +#define MADERA_AUXPDM1_SRC_SHIFT 8 +#define MADERA_AUXPDM1_SRC_WIDTH 4 +#define MADERA_AUXPDM1_TXEDGE_MASK 0x0010 +#define MADERA_AUXPDM1_TXEDGE_SHIFT 4 +#define MADERA_AUXPDM1_TXEDGE_WIDTH 1 +#define MADERA_AUXPDM1_MSTR_MASK 0x0008 +#define MADERA_AUXPDM1_MSTR_SHIFT 3 +#define MADERA_AUXPDM1_MSTR_WIDTH 1 +#define MADERA_AUXPDM1_ENABLE_MASK 0x0001 +#define MADERA_AUXPDM1_ENABLE_SHIFT 0 +#define MADERA_AUXPDM1_ENABLE_WIDTH 1 + +/* (0x10C1) AUXPDM1_CTRL_1 */ +#define MADERA_AUXPDM1_CLK_FREQ_MASK 0xC000 +#define MADERA_AUXPDM1_CLK_FREQ_SHIFT 14 +#define MADERA_AUXPDM1_CLK_FREQ_WIDTH 2 + /* (0x1480) DFC1_CTRL_W0 */ #define MADERA_DFC1_RATE_MASK 0x007C #define MADERA_DFC1_RATE_SHIFT 2 diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 25a95e72179b..fc88d315bdde 100644 --- 
a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -7,6 +7,14 @@ #ifndef __MFD_MT6397_CORE_H__ #define __MFD_MT6397_CORE_H__ +#include <linux/mutex.h> + +enum chip_id { + MT6323_CHIP_ID = 0x23, + MT6391_CHIP_ID = 0x91, + MT6397_CHIP_ID = 0x97, +}; + enum mt6397_irq_numbers { MT6397_IRQ_SPKL_AB = 0, MT6397_IRQ_SPKR_AB, @@ -54,6 +62,9 @@ struct mt6397_chip { u16 irq_masks_cache[2]; u16 int_con[2]; u16 int_status[2]; + u16 chip_id; }; +int mt6397_irq_init(struct mt6397_chip *chip); + #endif /* __MFD_MT6397_CORE_H__ */ diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index 1d831c7222b9..7cfd2b0504df 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -374,6 +374,7 @@ enum rk805_reg { #define SWITCH1_EN BIT(5) #define DEV_OFF_RST BIT(3) #define DEV_OFF BIT(0) +#define RTC_STOP BIT(0) #define VB_LO_ACT BIT(4) #define VB_LO_SEL_3500MV (7 << 0) @@ -387,7 +388,179 @@ enum rk805_reg { #define SHUTDOWN_FUN (0x2 << 2) #define SLEEP_FUN (0x1 << 2) #define RK8XX_ID_MSK 0xfff0 +#define PWM_MODE_MSK BIT(7) #define FPWM_MODE BIT(7) +#define AUTO_PWM_MODE 0 + +enum rk817_reg_id { + RK817_ID_DCDC1 = 0, + RK817_ID_DCDC2, + RK817_ID_DCDC3, + RK817_ID_DCDC4, + RK817_ID_LDO1, + RK817_ID_LDO2, + RK817_ID_LDO3, + RK817_ID_LDO4, + RK817_ID_LDO5, + RK817_ID_LDO6, + RK817_ID_LDO7, + RK817_ID_LDO8, + RK817_ID_LDO9, + RK817_ID_BOOST, + RK817_ID_BOOST_OTG_SW, + RK817_NUM_REGULATORS +}; + +enum rk809_reg_id { + RK809_ID_DCDC5 = RK817_ID_BOOST, + RK809_ID_SW1, + RK809_ID_SW2, + RK809_NUM_REGULATORS +}; + +#define RK817_SECONDS_REG 0x00 +#define RK817_MINUTES_REG 0x01 +#define RK817_HOURS_REG 0x02 +#define RK817_DAYS_REG 0x03 +#define RK817_MONTHS_REG 0x04 +#define RK817_YEARS_REG 0x05 +#define RK817_WEEKS_REG 0x06 +#define RK817_ALARM_SECONDS_REG 0x07 +#define RK817_ALARM_MINUTES_REG 0x08 +#define RK817_ALARM_HOURS_REG 0x09 +#define RK817_ALARM_DAYS_REG 0x0a +#define RK817_ALARM_MONTHS_REG 0x0b +#define RK817_ALARM_YEARS_REG 0x0c +#define RK817_RTC_CTRL_REG 0xd +#define RK817_RTC_STATUS_REG 0xe +#define RK817_RTC_INT_REG 0xf +#define RK817_RTC_COMP_LSB_REG 0x10 +#define RK817_RTC_COMP_MSB_REG 0x11 + +#define RK817_POWER_EN_REG(i) (0xb1 + (i)) +#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i)) + +#define RK817_POWER_CONFIG (0xb9) + +#define RK817_BUCK_CONFIG_REG(i) (0xba + (i) * 3) + +#define RK817_BUCK1_ON_VSEL_REG 0xBB +#define RK817_BUCK1_SLP_VSEL_REG 0xBC + +#define RK817_BUCK2_CONFIG_REG 0xBD +#define RK817_BUCK2_ON_VSEL_REG 0xBE +#define RK817_BUCK2_SLP_VSEL_REG 0xBF + +#define RK817_BUCK3_CONFIG_REG 0xC0 +#define RK817_BUCK3_ON_VSEL_REG 0xC1 +#define RK817_BUCK3_SLP_VSEL_REG 0xC2 + +#define RK817_BUCK4_CONFIG_REG 0xC3 +#define RK817_BUCK4_ON_VSEL_REG 0xC4 +#define RK817_BUCK4_SLP_VSEL_REG 0xC5 + +#define RK817_LDO_ON_VSEL_REG(idx) (0xcc + (idx) * 2) +#define RK817_BOOST_OTG_CFG (0xde) + +#define RK817_ID_MSB 0xed +#define RK817_ID_LSB 0xee + +#define RK817_SYS_STS 0xf0 +#define RK817_SYS_CFG(i) (0xf1 + (i)) + +#define RK817_ON_SOURCE_REG 0xf5 +#define RK817_OFF_SOURCE_REG 0xf6 + +/* INTERRUPT REGISTER */ +#define RK817_INT_STS_REG0 0xf8 +#define RK817_INT_STS_MSK_REG0 0xf9 +#define RK817_INT_STS_REG1 0xfa +#define RK817_INT_STS_MSK_REG1 0xfb +#define RK817_INT_STS_REG2 0xfc +#define RK817_INT_STS_MSK_REG2 0xfd +#define RK817_GPIO_INT_CFG 0xfe + +/* IRQ Definitions */ +#define RK817_IRQ_PWRON_FALL 0 +#define RK817_IRQ_PWRON_RISE 1 +#define RK817_IRQ_PWRON 2 +#define RK817_IRQ_PWMON_LP 3 +#define RK817_IRQ_HOTDIE 4 +#define RK817_IRQ_RTC_ALARM 5 +#define 
RK817_IRQ_RTC_PERIOD 6 +#define RK817_IRQ_VB_LO 7 +#define RK817_IRQ_PLUG_IN 8 +#define RK817_IRQ_PLUG_OUT 9 +#define RK817_IRQ_CHRG_TERM 10 +#define RK817_IRQ_CHRG_TIME 11 +#define RK817_IRQ_CHRG_TS 12 +#define RK817_IRQ_USB_OV 13 +#define RK817_IRQ_CHRG_IN_CLMP 14 +#define RK817_IRQ_BAT_DIS_ILIM 15 +#define RK817_IRQ_GATE_GPIO 16 +#define RK817_IRQ_TS_GPIO 17 +#define RK817_IRQ_CODEC_PD 18 +#define RK817_IRQ_CODEC_PO 19 +#define RK817_IRQ_CLASSD_MUTE_DONE 20 +#define RK817_IRQ_CLASSD_OCP 21 +#define RK817_IRQ_BAT_OVP 22 +#define RK817_IRQ_CHRG_BAT_HI 23 +#define RK817_IRQ_END (RK817_IRQ_CHRG_BAT_HI + 1) + +/* + * rtc_ctrl 0xd + * same as 808, except bit4 + */ +#define RK817_RTC_CTRL_RSV4 BIT(4) + +/* power config 0xb9 */ +#define RK817_BUCK3_FB_RES_MSK BIT(6) +#define RK817_BUCK3_FB_RES_INTER BIT(6) +#define RK817_BUCK3_FB_RES_EXT 0 + +/* buck config 0xba */ +#define RK817_RAMP_RATE_OFFSET 6 +#define RK817_RAMP_RATE_MASK (0x3 << RK817_RAMP_RATE_OFFSET) +#define RK817_RAMP_RATE_3MV_PER_US (0x0 << RK817_RAMP_RATE_OFFSET) +#define RK817_RAMP_RATE_6_3MV_PER_US (0x1 << RK817_RAMP_RATE_OFFSET) +#define RK817_RAMP_RATE_12_5MV_PER_US (0x2 << RK817_RAMP_RATE_OFFSET) +#define RK817_RAMP_RATE_25MV_PER_US (0x3 << RK817_RAMP_RATE_OFFSET) + +/* sys_cfg1 0xf2 */ +#define RK817_HOTDIE_TEMP_MSK (0x3 << 4) +#define RK817_HOTDIE_85 (0x0 << 4) +#define RK817_HOTDIE_95 (0x1 << 4) +#define RK817_HOTDIE_105 (0x2 << 4) +#define RK817_HOTDIE_115 (0x3 << 4) + +#define RK817_TSD_TEMP_MSK BIT(6) +#define RK817_TSD_140 0 +#define RK817_TSD_160 BIT(6) + +#define RK817_CLK32KOUT2_EN BIT(7) + +/* sys_cfg3 0xf4 */ +#define RK817_SLPPIN_FUNC_MSK (0x3 << 3) +#define SLPPIN_NULL_FUN (0x0 << 3) +#define SLPPIN_SLP_FUN (0x1 << 3) +#define SLPPIN_DN_FUN (0x2 << 3) +#define SLPPIN_RST_FUN (0x3 << 3) + +#define RK817_RST_FUNC_MSK (0x3 << 6) +#define RK817_RST_FUNC_SFT (6) +#define RK817_RST_FUNC_CNT (3) +#define RK817_RST_FUNC_DEV (0) /* reset the dev */ +#define RK817_RST_FUNC_REG (0x1 << 6) /* reset the reg only */ + +#define RK817_SLPPOL_MSK BIT(5) +#define RK817_SLPPOL_H BIT(5) +#define RK817_SLPPOL_L (0) + +/* gpio&int 0xfe */ +#define RK817_INT_POL_MSK BIT(1) +#define RK817_INT_POL_H BIT(1) +#define RK817_INT_POL_L 0 +#define RK809_BUCK5_CONFIG(i) (RK817_BOOST_OTG_CFG + (i) * 1) enum { BUCK_ILMIN_50MA, @@ -435,6 +608,8 @@ enum { enum { RK805_ID = 0x8050, RK808_ID = 0x0000, + RK809_ID = 0x8090, + RK817_ID = 0x8170, RK818_ID = 0x8181, }; @@ -445,5 +620,7 @@ struct rk808 { long variant; const struct regmap_config *regmap_cfg; const struct regmap_irq_chip *regmap_irq_chip; + void (*pm_pwroff_fn)(void); + void (*pm_pwroff_prep_fn)(void); }; #endif /* __LINUX_REGULATOR_RK808_H */ diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h new file mode 100644 index 000000000000..1013e60c5b25 --- /dev/null +++ b/include/linux/mfd/rohm-bd70528.h @@ -0,0 +1,408 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 2018 ROHM Semiconductors */ + +#ifndef __LINUX_MFD_BD70528_H__ +#define __LINUX_MFD_BD70528_H__ + +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/mfd/rohm-generic.h> +#include <linux/regmap.h> + +enum { + BD70528_BUCK1, + BD70528_BUCK2, + BD70528_BUCK3, + BD70528_LDO1, + BD70528_LDO2, + BD70528_LDO3, + BD70528_LED1, + BD70528_LED2, +}; + +struct bd70528_data { + struct rohm_regmap_dev chip; + struct mutex rtc_timer_lock; +}; + +#define BD70528_BUCK_VOLTS 17 +#define BD70528_BUCK_VOLTS 17 +#define BD70528_BUCK_VOLTS 17 +#define BD70528_LDO_VOLTS 0x20 + +#define 
BD70528_REG_BUCK1_EN 0x0F +#define BD70528_REG_BUCK1_VOLT 0x15 +#define BD70528_REG_BUCK2_EN 0x10 +#define BD70528_REG_BUCK2_VOLT 0x16 +#define BD70528_REG_BUCK3_EN 0x11 +#define BD70528_REG_BUCK3_VOLT 0x17 +#define BD70528_REG_LDO1_EN 0x1b +#define BD70528_REG_LDO1_VOLT 0x1e +#define BD70528_REG_LDO2_EN 0x1c +#define BD70528_REG_LDO2_VOLT 0x1f +#define BD70528_REG_LDO3_EN 0x1d +#define BD70528_REG_LDO3_VOLT 0x20 +#define BD70528_REG_LED_CTRL 0x2b +#define BD70528_REG_LED_VOLT 0x29 +#define BD70528_REG_LED_EN 0x2a + +/* main irq registers */ +#define BD70528_REG_INT_MAIN 0x7E +#define BD70528_REG_INT_MAIN_MASK 0x74 + +/* 'sub irq' registers */ +#define BD70528_REG_INT_SHDN 0x7F +#define BD70528_REG_INT_PWR_FLT 0x80 +#define BD70528_REG_INT_VR_FLT 0x81 +#define BD70528_REG_INT_MISC 0x82 +#define BD70528_REG_INT_BAT1 0x83 +#define BD70528_REG_INT_BAT2 0x84 +#define BD70528_REG_INT_RTC 0x85 +#define BD70528_REG_INT_GPIO 0x86 +#define BD70528_REG_INT_OP_FAIL 0x87 + +#define BD70528_REG_INT_SHDN_MASK 0x75 +#define BD70528_REG_INT_PWR_FLT_MASK 0x76 +#define BD70528_REG_INT_VR_FLT_MASK 0x77 +#define BD70528_REG_INT_MISC_MASK 0x78 +#define BD70528_REG_INT_BAT1_MASK 0x79 +#define BD70528_REG_INT_BAT2_MASK 0x7a +#define BD70528_REG_INT_RTC_MASK 0x7b +#define BD70528_REG_INT_GPIO_MASK 0x7c +#define BD70528_REG_INT_OP_FAIL_MASK 0x7d + +/* Reset related 'magic' registers */ +#define BD70528_REG_SHIPMODE 0x03 +#define BD70528_REG_HWRESET 0x04 +#define BD70528_REG_WARMRESET 0x05 +#define BD70528_REG_STANDBY 0x06 + +/* GPIO registers */ +#define BD70528_REG_GPIO_STATE 0x8F + +#define BD70528_REG_GPIO1_IN 0x4d +#define BD70528_REG_GPIO2_IN 0x4f +#define BD70528_REG_GPIO3_IN 0x51 +#define BD70528_REG_GPIO4_IN 0x53 +#define BD70528_REG_GPIO1_OUT 0x4e +#define BD70528_REG_GPIO2_OUT 0x50 +#define BD70528_REG_GPIO3_OUT 0x52 +#define BD70528_REG_GPIO4_OUT 0x54 + +/* clk control */ + +#define BD70528_REG_CLK_OUT 0x2c + +/* RTC */ + +#define BD70528_REG_RTC_COUNT_H 0x2d +#define BD70528_REG_RTC_COUNT_L 0x2e +#define BD70528_REG_RTC_SEC 0x2f +#define BD70528_REG_RTC_MINUTE 0x30 +#define BD70528_REG_RTC_HOUR 0x31 +#define BD70528_REG_RTC_WEEK 0x32 +#define BD70528_REG_RTC_DAY 0x33 +#define BD70528_REG_RTC_MONTH 0x34 +#define BD70528_REG_RTC_YEAR 0x35 + +#define BD70528_REG_RTC_ALM_SEC 0x36 +#define BD70528_REG_RTC_ALM_START BD70528_REG_RTC_ALM_SEC +#define BD70528_REG_RTC_ALM_MINUTE 0x37 +#define BD70528_REG_RTC_ALM_HOUR 0x38 +#define BD70528_REG_RTC_ALM_WEEK 0x39 +#define BD70528_REG_RTC_ALM_DAY 0x3a +#define BD70528_REG_RTC_ALM_MONTH 0x3b +#define BD70528_REG_RTC_ALM_YEAR 0x3c +#define BD70528_REG_RTC_ALM_MASK 0x3d +#define BD70528_REG_RTC_ALM_REPEAT 0x3e +#define BD70528_REG_RTC_START BD70528_REG_RTC_SEC + +#define BD70528_REG_RTC_WAKE_SEC 0x43 +#define BD70528_REG_RTC_WAKE_START BD70528_REG_RTC_WAKE_SEC +#define BD70528_REG_RTC_WAKE_MIN 0x44 +#define BD70528_REG_RTC_WAKE_HOUR 0x45 +#define BD70528_REG_RTC_WAKE_CTRL 0x46 + +#define BD70528_REG_ELAPSED_TIMER_EN 0x42 +#define BD70528_REG_WAKE_EN 0x46 + +/* WDT registers */ +#define BD70528_REG_WDT_CTRL 0x4A +#define BD70528_REG_WDT_HOUR 0x49 +#define BD70528_REG_WDT_MINUTE 0x48 +#define BD70528_REG_WDT_SEC 0x47 + +/* Charger / Battery */ +#define BD70528_REG_CHG_CURR_STAT 0x59 +#define BD70528_REG_CHG_BAT_STAT 0x57 +#define BD70528_REG_CHG_BAT_TEMP 0x58 +#define BD70528_REG_CHG_IN_STAT 0x56 +#define BD70528_REG_CHG_DCIN_ILIM 0x5d +#define BD70528_REG_CHG_CHG_CURR_WARM 0x61 +#define BD70528_REG_CHG_CHG_CURR_COLD 0x62 + +/* Masks for main IRQ register bits */ +enum { 
+ BD70528_INT_SHDN, +#define BD70528_INT_SHDN_MASK BIT(BD70528_INT_SHDN) + BD70528_INT_PWR_FLT, +#define BD70528_INT_PWR_FLT_MASK BIT(BD70528_INT_PWR_FLT) + BD70528_INT_VR_FLT, +#define BD70528_INT_VR_FLT_MASK BIT(BD70528_INT_VR_FLT) + BD70528_INT_MISC, +#define BD70528_INT_MISC_MASK BIT(BD70528_INT_MISC) + BD70528_INT_BAT1, +#define BD70528_INT_BAT1_MASK BIT(BD70528_INT_BAT1) + BD70528_INT_RTC, +#define BD70528_INT_RTC_MASK BIT(BD70528_INT_RTC) + BD70528_INT_GPIO, +#define BD70528_INT_GPIO_MASK BIT(BD70528_INT_GPIO) + BD70528_INT_OP_FAIL, +#define BD70528_INT_OP_FAIL_MASK BIT(BD70528_INT_OP_FAIL) +}; + +/* IRQs */ +enum { + /* Shutdown register IRQs */ + BD70528_INT_LONGPUSH, + BD70528_INT_WDT, + BD70528_INT_HWRESET, + BD70528_INT_RSTB_FAULT, + BD70528_INT_VBAT_UVLO, + BD70528_INT_TSD, + BD70528_INT_RSTIN, + /* Power failure register IRQs */ + BD70528_INT_BUCK1_FAULT, + BD70528_INT_BUCK2_FAULT, + BD70528_INT_BUCK3_FAULT, + BD70528_INT_LDO1_FAULT, + BD70528_INT_LDO2_FAULT, + BD70528_INT_LDO3_FAULT, + BD70528_INT_LED1_FAULT, + BD70528_INT_LED2_FAULT, + /* VR FAULT register IRQs */ + BD70528_INT_BUCK1_OCP, + BD70528_INT_BUCK2_OCP, + BD70528_INT_BUCK3_OCP, + BD70528_INT_LED1_OCP, + BD70528_INT_LED2_OCP, + BD70528_INT_BUCK1_FULLON, + BD70528_INT_BUCK2_FULLON, + /* PMU register interrupts */ + BD70528_INT_SHORTPUSH, + BD70528_INT_AUTO_WAKEUP, + BD70528_INT_STATE_CHANGE, + /* Charger 1 register IRQs */ + BD70528_INT_BAT_OV_RES, + BD70528_INT_BAT_OV_DET, + BD70528_INT_DBAT_DET, + BD70528_INT_BATTSD_COLD_RES, + BD70528_INT_BATTSD_COLD_DET, + BD70528_INT_BATTSD_HOT_RES, + BD70528_INT_BATTSD_HOT_DET, + BD70528_INT_CHG_TSD, + /* Charger 2 register IRQs */ + BD70528_INT_BAT_RMV, + BD70528_INT_BAT_DET, + BD70528_INT_DCIN2_OV_RES, + BD70528_INT_DCIN2_OV_DET, + BD70528_INT_DCIN2_RMV, + BD70528_INT_DCIN2_DET, + BD70528_INT_DCIN1_RMV, + BD70528_INT_DCIN1_DET, + /* RTC register IRQs */ + BD70528_INT_RTC_ALARM, + BD70528_INT_ELPS_TIM, + /* GPIO register IRQs */ + BD70528_INT_GPIO0, + BD70528_INT_GPIO1, + BD70528_INT_GPIO2, + BD70528_INT_GPIO3, + /* Invalid operation register IRQs */ + BD70528_INT_BUCK1_DVS_OPFAIL, + BD70528_INT_BUCK2_DVS_OPFAIL, + BD70528_INT_BUCK3_DVS_OPFAIL, + BD70528_INT_LED1_VOLT_OPFAIL, + BD70528_INT_LED2_VOLT_OPFAIL, +}; + +/* Masks */ +#define BD70528_INT_LONGPUSH_MASK 0x1 +#define BD70528_INT_WDT_MASK 0x2 +#define BD70528_INT_HWRESET_MASK 0x4 +#define BD70528_INT_RSTB_FAULT_MASK 0x8 +#define BD70528_INT_VBAT_UVLO_MASK 0x10 +#define BD70528_INT_TSD_MASK 0x20 +#define BD70528_INT_RSTIN_MASK 0x40 + +#define BD70528_INT_BUCK1_FAULT_MASK 0x1 +#define BD70528_INT_BUCK2_FAULT_MASK 0x2 +#define BD70528_INT_BUCK3_FAULT_MASK 0x4 +#define BD70528_INT_LDO1_FAULT_MASK 0x8 +#define BD70528_INT_LDO2_FAULT_MASK 0x10 +#define BD70528_INT_LDO3_FAULT_MASK 0x20 +#define BD70528_INT_LED1_FAULT_MASK 0x40 +#define BD70528_INT_LED2_FAULT_MASK 0x80 + +#define BD70528_INT_BUCK1_OCP_MASK 0x1 +#define BD70528_INT_BUCK2_OCP_MASK 0x2 +#define BD70528_INT_BUCK3_OCP_MASK 0x4 +#define BD70528_INT_LED1_OCP_MASK 0x8 +#define BD70528_INT_LED2_OCP_MASK 0x10 +#define BD70528_INT_BUCK1_FULLON_MASK 0x20 +#define BD70528_INT_BUCK2_FULLON_MASK 0x40 + +#define BD70528_INT_SHORTPUSH_MASK 0x1 +#define BD70528_INT_AUTO_WAKEUP_MASK 0x2 +#define BD70528_INT_STATE_CHANGE_MASK 0x10 + +#define BD70528_INT_BAT_OV_RES_MASK 0x1 +#define BD70528_INT_BAT_OV_DET_MASK 0x2 +#define BD70528_INT_DBAT_DET_MASK 0x4 +#define BD70528_INT_BATTSD_COLD_RES_MASK 0x8 +#define BD70528_INT_BATTSD_COLD_DET_MASK 0x10 +#define 
BD70528_INT_BATTSD_HOT_RES_MASK 0x20 +#define BD70528_INT_BATTSD_HOT_DET_MASK 0x40 +#define BD70528_INT_CHG_TSD_MASK 0x80 + +#define BD70528_INT_BAT_RMV_MASK 0x1 +#define BD70528_INT_BAT_DET_MASK 0x2 +#define BD70528_INT_DCIN2_OV_RES_MASK 0x4 +#define BD70528_INT_DCIN2_OV_DET_MASK 0x8 +#define BD70528_INT_DCIN2_RMV_MASK 0x10 +#define BD70528_INT_DCIN2_DET_MASK 0x20 +#define BD70528_INT_DCIN1_RMV_MASK 0x40 +#define BD70528_INT_DCIN1_DET_MASK 0x80 + +#define BD70528_INT_RTC_ALARM_MASK 0x1 +#define BD70528_INT_ELPS_TIM_MASK 0x2 + +#define BD70528_INT_GPIO0_MASK 0x1 +#define BD70528_INT_GPIO1_MASK 0x2 +#define BD70528_INT_GPIO2_MASK 0x4 +#define BD70528_INT_GPIO3_MASK 0x8 + +#define BD70528_INT_BUCK1_DVS_OPFAIL_MASK 0x1 +#define BD70528_INT_BUCK2_DVS_OPFAIL_MASK 0x2 +#define BD70528_INT_BUCK3_DVS_OPFAIL_MASK 0x4 +#define BD70528_INT_LED1_VOLT_OPFAIL_MASK 0x10 +#define BD70528_INT_LED2_VOLT_OPFAIL_MASK 0x20 + +#define BD70528_DEBOUNCE_MASK 0x3 + +#define BD70528_DEBOUNCE_DISABLE 0 +#define BD70528_DEBOUNCE_15MS 1 +#define BD70528_DEBOUNCE_30MS 2 +#define BD70528_DEBOUNCE_50MS 3 + +#define BD70528_GPIO_DRIVE_MASK 0x2 +#define BD70528_GPIO_PUSH_PULL 0x0 +#define BD70528_GPIO_OPEN_DRAIN 0x2 + +#define BD70528_GPIO_OUT_EN_MASK 0x80 +#define BD70528_GPIO_OUT_ENABLE 0x80 +#define BD70528_GPIO_OUT_DISABLE 0x0 + +#define BD70528_GPIO_OUT_HI 0x1 +#define BD70528_GPIO_OUT_LO 0x0 +#define BD70528_GPIO_OUT_MASK 0x1 + +#define BD70528_GPIO_IN_STATE_BASE 1 + +#define BD70528_CLK_OUT_EN_MASK 0x1 + +/* RTC masks to mask out reserved bits */ + +#define BD70528_MASK_RTC_SEC 0x7f +#define BD70528_MASK_RTC_MINUTE 0x7f +#define BD70528_MASK_RTC_HOUR_24H 0x80 +#define BD70528_MASK_RTC_HOUR_PM 0x20 +#define BD70528_MASK_RTC_HOUR 0x1f +#define BD70528_MASK_RTC_DAY 0x3f +#define BD70528_MASK_RTC_WEEK 0x07 +#define BD70528_MASK_RTC_MONTH 0x1f +#define BD70528_MASK_RTC_YEAR 0xff +#define BD70528_MASK_RTC_COUNT_L 0x7f + +#define BD70528_MASK_ELAPSED_TIMER_EN 0x1 +/* Mask second, min and hour fields + * HW would support ALM irq for over 24h + * (by setting day, month and year too) + * but as we wish to keep this same as for + * wake-up we limit ALM to 24H and only + * unmask sec, min and hour + */ +#define BD70528_MASK_ALM_EN 0x7 +#define BD70528_MASK_WAKE_EN 0x1 + +/* WDT masks */ +#define BD70528_MASK_WDT_EN 0x1 +#define BD70528_MASK_WDT_HOUR 0x1 +#define BD70528_MASK_WDT_MINUTE 0x7f +#define BD70528_MASK_WDT_SEC 0x7f + +#define BD70528_WDT_STATE_BIT 0x1 +#define BD70528_ELAPSED_STATE_BIT 0x2 +#define BD70528_WAKE_STATE_BIT 0x4 + +/* Charger masks */ +#define BD70528_MASK_CHG_STAT 0x7f +#define BD70528_MASK_CHG_BAT_TIMER 0x20 +#define BD70528_MASK_CHG_BAT_OVERVOLT 0x10 +#define BD70528_MASK_CHG_BAT_DETECT 0x1 +#define BD70528_MASK_CHG_DCIN1_UVLO 0x1 +#define BD70528_MASK_CHG_DCIN_ILIM 0x3f +#define BD70528_MASK_CHG_CHG_CURR 0x1f +#define BD70528_MASK_CHG_TRICKLE_CURR 0x10 + +/* + * Note, external battery register is the lonely rider at + * address 0xc5. 
See how to stuff that in the regmap + */ +#define BD70528_MAX_REGISTER 0x94 + +/* Buck control masks */ +#define BD70528_MASK_RUN_EN 0x4 +#define BD70528_MASK_STBY_EN 0x2 +#define BD70528_MASK_IDLE_EN 0x1 +#define BD70528_MASK_LED1_EN 0x1 +#define BD70528_MASK_LED2_EN 0x10 + +#define BD70528_MASK_BUCK_VOLT 0xf +#define BD70528_MASK_LDO_VOLT 0x1f +#define BD70528_MASK_LED1_VOLT 0x1 +#define BD70528_MASK_LED2_VOLT 0x10 + +/* Misc irq masks */ +#define BD70528_INT_MASK_SHORT_PUSH 1 +#define BD70528_INT_MASK_AUTO_WAKE 2 +#define BD70528_INT_MASK_POWER_STATE 4 + +#define BD70528_MASK_BUCK_RAMP 0x10 +#define BD70528_SIFT_BUCK_RAMP 4 + +#if IS_ENABLED(CONFIG_BD70528_WATCHDOG) + +int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state); +void bd70528_wdt_lock(struct rohm_regmap_dev *data); +void bd70528_wdt_unlock(struct rohm_regmap_dev *data); + +#else /* CONFIG_BD70528_WATCHDOG */ + +static inline int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, + int *old_state) +{ + return 0; +} + +static inline void bd70528_wdt_lock(struct rohm_regmap_dev *data) +{ +} + +static inline void bd70528_wdt_unlock(struct rohm_regmap_dev *data) +{ +} + +#endif /* CONFIG_BD70528_WATCHDOG */ + +#endif /* __LINUX_MFD_BD70528_H__ */ diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h index fd194bfc836f..7f2dbde402a1 100644 --- a/include/linux/mfd/rohm-bd718x7.h +++ b/include/linux/mfd/rohm-bd718x7.h @@ -4,15 +4,10 @@ #ifndef __LINUX_MFD_BD718XX_H__ #define __LINUX_MFD_BD718XX_H__ +#include <linux/mfd/rohm-generic.h> #include <linux/regmap.h> enum { - BD718XX_TYPE_BD71837 = 0, - BD718XX_TYPE_BD71847, - BD718XX_TYPE_AMOUNT -}; - -enum { BD718XX_BUCK1 = 0, BD718XX_BUCK2, BD718XX_BUCK3, @@ -321,18 +316,17 @@ enum { BD718XX_PWRBTN_LONG_PRESS_15S }; -struct bd718xx_clk; - struct bd718xx { - unsigned int chip_type; - struct device *dev; - struct regmap *regmap; - unsigned long int id; + /* + * Please keep this as the first member here as some + * drivers (clk) supporting more than one chip may only know this + * generic struct 'struct rohm_regmap_dev' and assume it is + * the first chunk of parent device's private data. 
+ */ + struct rohm_regmap_dev chip; int chip_irq; struct regmap_irq_chip_data *irq_data; - - struct bd718xx_clk *clk; }; #endif /* __LINUX_MFD_BD718XX_H__ */ diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h new file mode 100644 index 000000000000..bff15ac26f2c --- /dev/null +++ b/include/linux/mfd/rohm-generic.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 2018 ROHM Semiconductors */ + +#ifndef __LINUX_MFD_ROHM_H__ +#define __LINUX_MFD_ROHM_H__ + +enum { + ROHM_CHIP_TYPE_BD71837 = 0, + ROHM_CHIP_TYPE_BD71847, + ROHM_CHIP_TYPE_BD70528, + ROHM_CHIP_TYPE_AMOUNT +}; + +struct rohm_regmap_dev { + unsigned int chip_type; + struct device *dev; + struct regmap *regmap; +}; + +#endif diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 3ca17eb89aa2..f1631a39acfc 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -20,6 +20,7 @@ #define MIN_850_MV 850000 #define MIN_800_MV 800000 #define MIN_750_MV 750000 +#define MIN_650_MV 650000 #define MIN_600_MV 600000 #define MIN_500_MV 500000 diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h index 6e7668a389a1..4805c90609c4 100644 --- a/include/linux/mfd/samsung/s2mps11.h +++ b/include/linux/mfd/samsung/s2mps11.h @@ -170,7 +170,9 @@ enum s2mps11_regulators { #define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT) #define S2MPS11_ENABLE_SHIFT 0x06 #define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1) -#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) +#define S2MPS11_BUCK12346_N_VOLTAGES 153 +#define S2MPS11_BUCK5_N_VOLTAGES 216 +#define S2MPS11_BUCK7810_N_VOLTAGES 225 #define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1) #define S2MPS11_RAMP_DELAY 25000 /* uV/us */ @@ -188,4 +190,9 @@ enum s2mps11_regulators { #define S2MPS11_BUCK6_RAMP_EN_SHIFT 0 #define S2MPS11_PMIC_EN_SHIFT 6 +/* + * Bits for "enable suspend" (On/Off controlled by PWREN) + * are the same as in S2MPS14: S2MPS14_ENABLE_SUSPEND + */ + #endif /* __LINUX_MFD_S2MPS11_H */ diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h index d890595b89b6..3c67983678ec 100644 --- a/include/linux/mfd/stmfx.h +++ b/include/linux/mfd/stmfx.h @@ -5,7 +5,7 @@ */ #ifndef MFD_STMFX_H -#define MFX_STMFX_H +#define MFD_STMFX_H #include <linux/regmap.h> diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h index f0273c9e972b..112dc66262cc 100644 --- a/include/linux/mfd/syscon.h +++ b/include/linux/mfd/syscon.h @@ -17,24 +17,24 @@ struct device_node; #ifdef CONFIG_MFD_SYSCON +extern struct regmap *device_node_to_regmap(struct device_node *np); extern struct regmap *syscon_node_to_regmap(struct device_node *np); extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s); -extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s); extern struct regmap *syscon_regmap_lookup_by_phandle( struct device_node *np, const char *property); #else -static inline struct regmap *syscon_node_to_regmap(struct device_node *np) +static inline struct regmap *device_node_to_regmap(struct device_node *np) { return ERR_PTR(-ENOTSUPP); } -static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s) +static inline struct regmap *syscon_node_to_regmap(struct device_node *np) { return ERR_PTR(-ENOTSUPP); } -static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s) +static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s) { return 
ERR_PTR(-ENOTSUPP); } diff --git a/include/linux/mfd/ti-lmu-register.h b/include/linux/mfd/ti-lmu-register.h index 222cb14c5b0f..116a749e0302 100644 --- a/include/linux/mfd/ti-lmu-register.h +++ b/include/linux/mfd/ti-lmu-register.h @@ -187,47 +187,26 @@ #define LM3695_MAX_REG 0x14 -/* LM3697 */ -#define LM3697_REG_HVLED_OUTPUT_CFG 0x10 -#define LM3697_HVLED1_CFG_MASK BIT(0) -#define LM3697_HVLED2_CFG_MASK BIT(1) -#define LM3697_HVLED3_CFG_MASK BIT(2) -#define LM3697_HVLED1_CFG_SHIFT 0 -#define LM3697_HVLED2_CFG_SHIFT 1 -#define LM3697_HVLED3_CFG_SHIFT 2 +/* LM36274 */ +#define LM36274_REG_REV 0x01 +#define LM36274_REG_BL_CFG_1 0x02 +#define LM36274_REG_BL_CFG_2 0x03 +#define LM36274_REG_BRT_LSB 0x04 +#define LM36274_REG_BRT_MSB 0x05 +#define LM36274_REG_BL_EN 0x08 + +#define LM36274_REG_BIAS_CONFIG_1 0x09 +#define LM36274_EXT_EN_MASK BIT(0) +#define LM36274_EN_VNEG_MASK BIT(1) +#define LM36274_EN_VPOS_MASK BIT(2) + +#define LM36274_REG_BIAS_CONFIG_2 0x0a +#define LM36274_REG_BIAS_CONFIG_3 0x0b +#define LM36274_REG_VOUT_BOOST 0x0c +#define LM36274_REG_VOUT_POS 0x0d +#define LM36274_REG_VOUT_NEG 0x0e +#define LM36274_VOUT_MASK 0x3F + +#define LM36274_MAX_REG 0x13 -#define LM3697_REG_BL0_RAMP 0x11 -#define LM3697_REG_BL1_RAMP 0x12 -#define LM3697_RAMPUP_MASK 0xF0 -#define LM3697_RAMPUP_SHIFT 4 -#define LM3697_RAMPDN_MASK 0x0F -#define LM3697_RAMPDN_SHIFT 0 - -#define LM3697_REG_RAMP_CONF 0x14 -#define LM3697_RAMP_MASK 0x0F -#define LM3697_RAMP_EACH 0x05 - -#define LM3697_REG_PWM_CFG 0x1C -#define LM3697_PWM_A_MASK BIT(0) -#define LM3697_PWM_B_MASK BIT(1) - -#define LM3697_REG_IMAX_A 0x17 -#define LM3697_REG_IMAX_B 0x18 - -#define LM3697_REG_FEEDBACK_ENABLE 0x19 - -#define LM3697_REG_BRT_A_LSB 0x20 -#define LM3697_REG_BRT_A_MSB 0x21 -#define LM3697_REG_BRT_B_LSB 0x22 -#define LM3697_REG_BRT_B_MSB 0x23 - -#define LM3697_REG_ENABLE 0x24 - -#define LM3697_REG_OPEN_FAULT_STATUS 0xB0 - -#define LM3697_REG_SHORT_FAULT_STATUS 0xB2 - -#define LM3697_REG_MONITOR_ENABLE 0xB4 - -#define LM3697_MAX_REG 0xB4 #endif diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h index 7d1e9c24f818..0bc0e8199798 100644 --- a/include/linux/mfd/ti-lmu.h +++ b/include/linux/mfd/ti-lmu.h @@ -23,7 +23,7 @@ enum ti_lmu_id { LM3632, LM3633, LM3695, - LM3697, + LM36274, LMU_MAX_ID, }; @@ -65,6 +65,9 @@ enum lm363x_regulator_id { LM3632_BOOST, /* Boost output */ LM3632_LDO_POS, /* Positive display bias output */ LM3632_LDO_NEG, /* Negative display bias output */ + LM36274_BOOST, /* Boost output */ + LM36274_LDO_POS, /* Positive display bias output */ + LM36274_LDO_NEG, /* Negative display bias output */ }; /** diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h index 071cdf3e16cf..986986fe4e4e 100644 --- a/include/linux/mfd/wm831x/pdata.h +++ b/include/linux/mfd/wm831x/pdata.h @@ -47,7 +47,6 @@ struct wm831x_battery_pdata { * I2C or SPI buses. 
*/ struct wm831x_buckv_pdata { - int dvs_gpio; /** CPU GPIO to use for DVS switching */ int dvs_control_src; /** Hardware DVS source to use (1 or 2) */ int dvs_init_state; /** DVS state to expect on startup */ int dvs_state_gpio; /** CPU GPIO to use for monitoring status */ diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index ad24554f11f9..75f880c25bb8 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -31,7 +31,7 @@ #define PHY_ID_KSZ886X 0x00221430 #define PHY_ID_KSZ8863 0x00221435 -#define PHY_ID_KSZ8795 0x00221550 +#define PHY_ID_KSZ87XX 0x00221550 #define PHY_ID_KSZ9477 0x00221631 diff --git a/include/linux/migrate.h b/include/linux/migrate.h index e13d9bf2f9a5..72120061b7d4 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -77,8 +77,7 @@ extern void migrate_page_copy(struct page *newpage, struct page *page); extern int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page); extern int migrate_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page, enum migrate_mode mode, - int extra_count); + struct page *newpage, struct page *page, int extra_count); #else static inline void putback_movable_pages(struct list_head *l) {} @@ -167,8 +166,6 @@ static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm, #define MIGRATE_PFN_MIGRATE (1UL << 1) #define MIGRATE_PFN_LOCKED (1UL << 2) #define MIGRATE_PFN_WRITE (1UL << 3) -#define MIGRATE_PFN_DEVICE (1UL << 4) -#define MIGRATE_PFN_ERROR (1UL << 5) #define MIGRATE_PFN_SHIFT 6 static inline struct page *migrate_pfn_to_page(unsigned long mpfn) @@ -183,107 +180,27 @@ static inline unsigned long migrate_pfn(unsigned long pfn) return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID; } -/* - * struct migrate_vma_ops - migrate operation callback - * - * @alloc_and_copy: alloc destination memory and copy source memory to it - * @finalize_and_map: allow caller to map the successfully migrated pages - * - * - * The alloc_and_copy() callback happens once all source pages have been locked, - * unmapped and checked (checked whether pinned or not). All pages that can be - * migrated will have an entry in the src array set with the pfn value of the - * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set (other - * flags might be set but should be ignored by the callback). - * - * The alloc_and_copy() callback can then allocate destination memory and copy - * source memory to it for all those entries (ie with MIGRATE_PFN_VALID and - * MIGRATE_PFN_MIGRATE flag set). Once these are allocated and copied, the - * callback must update each corresponding entry in the dst array with the pfn - * value of the destination page and with the MIGRATE_PFN_VALID and - * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages - * locked, via lock_page()). - * - * At this point the alloc_and_copy() callback is done and returns. - * - * Note that the callback does not have to migrate all the pages that are - * marked with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration - * from device memory to system memory (ie the MIGRATE_PFN_DEVICE flag is also - * set in the src array entry). If the device driver cannot migrate a device - * page back to system memory, then it must set the corresponding dst array - * entry to MIGRATE_PFN_ERROR. This will trigger a SIGBUS if CPU tries to - * access any of the virtual addresses originally backed by this page. 
Because - * a SIGBUS is such a severe result for the userspace process, the device - * driver should avoid setting MIGRATE_PFN_ERROR unless it is really in an - * unrecoverable state. - * - * For empty entry inside CPU page table (pte_none() or pmd_none() is true) we - * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus - * allowing device driver to allocate device memory for those unback virtual - * address. For this the device driver simply have to allocate device memory - * and properly set the destination entry like for regular migration. Note that - * this can still fails and thus inside the device driver must check if the - * migration was successful for those entry inside the finalize_and_map() - * callback just like for regular migration. - * - * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES - * OR BAD THINGS WILL HAPPEN ! - * - * - * The finalize_and_map() callback happens after struct page migration from - * source to destination (destination struct pages are the struct pages for the - * memory allocated by the alloc_and_copy() callback). Migration can fail, and - * thus the finalize_and_map() allows the driver to inspect which pages were - * successfully migrated, and which were not. Successfully migrated pages will - * have the MIGRATE_PFN_MIGRATE flag set for their src array entry. - * - * It is safe to update device page table from within the finalize_and_map() - * callback because both destination and source page are still locked, and the - * mmap_sem is held in read mode (hence no one can unmap the range being - * migrated). - * - * Once callback is done cleaning up things and updating its page table (if it - * chose to do so, this is not an obligation) then it returns. At this point, - * the HMM core will finish up the final steps, and the migration is complete. - * - * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY - * ENTRIES OR BAD THINGS WILL HAPPEN ! - */ -struct migrate_vma_ops { - void (*alloc_and_copy)(struct vm_area_struct *vma, - const unsigned long *src, - unsigned long *dst, - unsigned long start, - unsigned long end, - void *private); - void (*finalize_and_map)(struct vm_area_struct *vma, - const unsigned long *src, - const unsigned long *dst, - unsigned long start, - unsigned long end, - void *private); +struct migrate_vma { + struct vm_area_struct *vma; + /* + * Both src and dst array must be big enough for + * (end - start) >> PAGE_SHIFT entries. + * + * The src array must not be modified by the caller after + * migrate_vma_setup(), and must not change the dst array after + * migrate_vma_pages() returns. 
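The replacement interface that follows drops the callback structure and lets the caller drive migration as an explicit sequence: migrate_vma_setup(), a caller-supplied allocate-and-copy step, migrate_vma_pages(), then migrate_vma_finalize(). A condensed sketch of that flow (illustrative only; it assumes the caller already holds mmap_sem for read and leaves the destination allocation and copy as a comment):

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int example_migrate_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	struct migrate_vma args = {
		.vma   = vma,
		.start = start,
		.end   = end,
	};
	int ret = -ENOMEM;

	args.src = kcalloc(npages, sizeof(*args.src), GFP_KERNEL);
	args.dst = kcalloc(npages, sizeof(*args.dst), GFP_KERNEL);
	if (!args.src || !args.dst)
		goto out;

	ret = migrate_vma_setup(&args);		/* collect, isolate and unmap sources */
	if (ret)
		goto out;

	/*
	 * The caller now allocates destination pages for the args.cpages
	 * collected entries, copies the data, and stores
	 * migrate_pfn(page_to_pfn(newpage)) | MIGRATE_PFN_LOCKED into the
	 * matching args.dst[] slots (leaving a slot 0 skips that entry).
	 */

	migrate_vma_pages(&args);		/* switch page table entries to dst */
	migrate_vma_finalize(&args);		/* unlock and put back the pages */
out:
	kfree(args.dst);
	kfree(args.src);
	return ret;
}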
+ */ + unsigned long *dst; + unsigned long *src; + unsigned long cpages; + unsigned long npages; + unsigned long start; + unsigned long end; }; -#if defined(CONFIG_MIGRATE_VMA_HELPER) -int migrate_vma(const struct migrate_vma_ops *ops, - struct vm_area_struct *vma, - unsigned long start, - unsigned long end, - unsigned long *src, - unsigned long *dst, - void *private); -#else -static inline int migrate_vma(const struct migrate_vma_ops *ops, - struct vm_area_struct *vma, - unsigned long start, - unsigned long end, - unsigned long *src, - unsigned long *dst, - void *private) -{ - return -EINVAL; -} -#endif /* IS_ENABLED(CONFIG_MIGRATE_VMA_HELPER) */ +int migrate_vma_setup(struct migrate_vma *args); +void migrate_vma_pages(struct migrate_vma *migrate); +void migrate_vma_finalize(struct migrate_vma *migrate); #endif /* CONFIG_MIGRATION */ diff --git a/include/linux/mii.h b/include/linux/mii.h index 5cd824c1c0ca..4ce8901a1af6 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -455,6 +455,15 @@ static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising, lp_advertising, lpa & LPA_LPACK); } +static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising, + u32 ctrl1000) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising, + ctrl1000 & ADVERTISE_1000HALF); + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising, + ctrl1000 & ADVERTISE_1000FULL); +} + /** * linkmode_adv_to_lcl_adv_t * @advertising:pointer to linkmode advertising diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 70e7e5673ce9..5613e677a5f9 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -114,7 +114,7 @@ enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, }; -#ifdef CONFIG_MLX5_ACCEL +#ifdef CONFIG_MLX5_FPGA_IPSEC u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 769326ea1d9b..40748fc1b11b 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -47,7 +47,7 @@ struct mlx5_core_cq { struct completion free; unsigned vector; unsigned int irqn; - void (*comp) (struct mlx5_core_cq *); + void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe); void (*event) (struct mlx5_core_cq *, enum mlx5_event); u32 cons_index; unsigned arm_sn; @@ -55,7 +55,7 @@ struct mlx5_core_cq { int pid; struct { struct list_head list; - void (*comp)(struct mlx5_core_cq *); + void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe); void *priv; } tasklet_ctx; int reset_notify_added; @@ -185,7 +185,7 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq) } int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - u32 *in, int inlen); + u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *out, int outlen); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index fc2b6e807f06..cc1c230f10ee 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -328,6 +328,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, + MLX5_EVENT_TYPE_XRQ_ERROR = 0x18, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24, @@ -342,16 +343,17 @@ enum mlx5_event { MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, 
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, - MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE = 0xe, + MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe, MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, + MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d, MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, - MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1, + MLX5_EVENT_TYPE_MAX = 0x100, }; enum { @@ -437,6 +439,7 @@ enum { MLX5_OPCODE_SET_PSV = 0x20, MLX5_OPCODE_GET_PSV = 0x21, MLX5_OPCODE_CHECK_PSV = 0x22, + MLX5_OPCODE_DUMP = 0x23, MLX5_OPCODE_RGET_PSV = 0x26, MLX5_OPCODE_RCHECK_PSV = 0x27, @@ -445,6 +448,14 @@ enum { }; enum { + MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1, +}; + +enum { + MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1, +}; + +enum { MLX5_SET_PORT_RESET_QKEY = 0, MLX5_SET_PORT_GUID0 = 16, MLX5_SET_PORT_NODE_GUID = 17, @@ -510,6 +521,10 @@ struct mlx5_cmd_layout { u8 status_own; }; +enum mlx5_fatal_assert_bit_offsets { + MLX5_RFR_OFFSET = 31, +}; + struct health_buffer { __be32 assert_var[5]; __be32 rsvd0[3]; @@ -518,12 +533,16 @@ struct health_buffer { __be32 rsvd1[2]; __be32 fw_ver; __be32 hw_id; - __be32 rsvd2; + __be32 rfr; u8 irisc_index; u8 synd; __be16 ext_synd; }; +enum mlx5_initializing_bit_offsets { + MLX5_FW_RESET_SUPPORTED_OFFSET = 30, +}; + enum mlx5_cmd_addr_l_sz_offset { MLX5_NIC_IFC_OFFSET = 8, }; @@ -567,6 +586,12 @@ struct mlx5_eqe_cq_err { u8 syndrome; }; +struct mlx5_eqe_xrq_err { + __be32 reserved1[5]; + __be32 type_xrqn; + __be32 reserved2; +}; + struct mlx5_eqe_port_state { u8 reserved0[8]; u8 port; @@ -681,6 +706,7 @@ union ev_data { struct mlx5_eqe_pps pps; struct mlx5_eqe_dct dct; struct mlx5_eqe_temp_warning temp_warning; + struct mlx5_eqe_xrq_err xrq_err; } __packed; struct mlx5_eqe { @@ -1077,6 +1103,9 @@ enum mlx5_cap_type { MLX5_CAP_DEBUG, MLX5_CAP_RESERVED_14, MLX5_CAP_DEV_MEM, + MLX5_CAP_RESERVED_16, + MLX5_CAP_TLS, + MLX5_CAP_DEV_EVENT = 0x14, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1142,6 +1171,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) +#define MLX5_CAP64_FLOWTABLE(mdev, cap) \ + MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) + #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) @@ -1205,6 +1237,10 @@ enum mlx5_qcam_feature_groups { MLX5_GET(e_switch_cap, \ mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) +#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \ + MLX5_GET64(flow_table_eswitch_cap, \ + (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + #define MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap) @@ -1255,6 +1291,12 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP64_DEV_MEM(mdev, cap)\ MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) +#define MLX5_CAP_TLS(mdev, cap) \ + MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap) + +#define MLX5_CAP_DEV_EVENT(mdev, cap)\ + MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, @@ -1283,6 +1325,7 @@ enum { MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, + MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, }; diff --git 
a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 5a27246db883..3e80f03a387f 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -41,18 +41,20 @@ #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <linux/radix-tree.h> +#include <linux/xarray.h> #include <linux/workqueue.h> #include <linux/mempool.h> #include <linux/interrupt.h> #include <linux/idr.h> #include <linux/notifier.h> +#include <linux/refcount.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> #include <linux/mlx5/eq.h> #include <linux/timecounter.h> #include <linux/ptp_clock_kernel.h> +#include <net/devlink.h> enum { MLX5_BOARD_ID_LEN = 64, @@ -107,6 +109,7 @@ enum { MLX5_REG_FPGA_CAP = 0x4022, MLX5_REG_FPGA_CTRL = 0x4023, MLX5_REG_FPGA_ACCESS_REG = 0x4024, + MLX5_REG_CORE_DUMP = 0x402e, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, @@ -137,6 +140,7 @@ enum { MLX5_REG_MTPPS = 0x9053, MLX5_REG_MTPPSE = 0x9054, MLX5_REG_MPEGC = 0x9056, + MLX5_REG_MCQS = 0x9060, MLX5_REG_MCQI = 0x9061, MLX5_REG_MCC = 0x9062, MLX5_REG_MCDA = 0x9063, @@ -180,8 +184,12 @@ enum port_state_policy { MLX5_POLICY_INVALID = 0xffffffff }; +enum mlx5_coredev_type { + MLX5_COREDEV_PF, + MLX5_COREDEV_VF +}; + struct mlx5_field_desc { - struct dentry *dent; int i; }; @@ -234,11 +242,6 @@ struct mlx5_cmd_msg { struct mlx5_cmd_debug { struct dentry *dbg_root; - struct dentry *dbg_in; - struct dentry *dbg_out; - struct dentry *dbg_outlen; - struct dentry *dbg_status; - struct dentry *dbg_run; void *in_msg; void *out_msg; u8 status; @@ -263,8 +266,6 @@ struct mlx5_cmd_stats { u64 sum; u64 n; struct dentry *root; - struct dentry *avg; - struct dentry *count; /* protect command average calculations */ spinlock_t lock; }; @@ -390,7 +391,7 @@ enum mlx5_res_type { struct mlx5_core_rsc_common { enum mlx5_res_type res; - atomic_t refcount; + refcount_t refcount; struct completion free; }; @@ -433,13 +434,18 @@ struct mlx5_core_health { struct timer_list timer; u32 prev; int miss_counter; - bool sick; + u8 synd; + u32 fatal_error; + u32 crdump_size; /* wq spinlock to synchronize draining */ spinlock_t wq_lock; struct workqueue_struct *wq; unsigned long flags; - struct work_struct work; + struct work_struct fatal_report_work; + struct work_struct report_work; struct delayed_work recover_work; + struct devlink_health_reporter *fw_reporter; + struct devlink_health_reporter *fw_fatal_reporter; }; struct mlx5_qp_table { @@ -451,13 +457,6 @@ struct mlx5_qp_table { struct radix_tree_root tree; }; -struct mlx5_mkey_table { - /* protect radix tree - */ - rwlock_t lock; - struct radix_tree_root tree; -}; - struct mlx5_vf_context { int enabled; u64 port_guid; @@ -468,7 +467,18 @@ struct mlx5_vf_context { struct mlx5_core_sriov { struct mlx5_vf_context *vfs_ctx; int num_vfs; - int enabled_vfs; + u16 max_vfs; +}; + +struct mlx5_fc_pool { + struct mlx5_core_dev *dev; + struct mutex pool_lock; /* protects pool lists */ + struct list_head fully_used; + struct list_head partially_used; + struct list_head unused; + int available_fcs; + int used_fcs; + int threshold; }; struct mlx5_fc_stats { @@ -482,6 +492,8 @@ struct mlx5_fc_stats { struct delayed_work work; unsigned long next_query; unsigned long sampling_interval; /* jiffies */ + u32 *bulk_query_out; + struct mlx5_fc_pool fc_pool; }; struct mlx5_events; @@ -490,6 +502,7 @@ struct mlx5_eswitch; struct mlx5_lag; struct mlx5_devcom; struct mlx5_eq_table; +struct mlx5_irq_table; struct mlx5_rate_limit { u32 rate; @@ 
-519,6 +532,8 @@ struct mlx5_core_roce { }; struct mlx5_priv { + /* IRQ table valid only for real pci devices PF or VF */ + struct mlx5_irq_table *irq_table; struct mlx5_eq_table *eq_table; /* pages stuff */ @@ -541,9 +556,7 @@ struct mlx5_priv { struct dentry *cmdif_debugfs; /* end: qp staff */ - /* start: mkey staff */ - struct mlx5_mkey_table mkey_table; - /* end: mkey staff */ + struct xarray mkey_table; /* start: alloc staff */ /* protect buffer alocation according to numa node */ @@ -570,7 +583,6 @@ struct mlx5_priv { struct mlx5_core_sriov sriov; struct mlx5_lag *lag; struct mlx5_devcom *devcom; - unsigned long pci_dev_data; struct mlx5_core_roce roce; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; @@ -580,6 +592,7 @@ struct mlx5_priv { }; enum mlx5_device_state { + MLX5_DEVICE_STATE_UNINITIALIZED, MLX5_DEVICE_STATE_UP, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; @@ -613,6 +626,11 @@ struct mlx5e_resources { struct mlx5_sq_bfreg bfreg; }; +enum mlx5_sw_icm_type { + MLX5_SW_ICM_TYPE_STEERING, + MLX5_SW_ICM_TYPE_HEADER_MODIFY, +}; + #define MLX5_MAX_RESERVED_GIDS 8 struct mlx5_rsvd_gids { @@ -644,11 +662,18 @@ struct mlx5_clock { struct mlx5_pps pps_info; }; +struct mlx5_dm; struct mlx5_fw_tracer; struct mlx5_vxlan; +struct mlx5_geneve; +struct mlx5_hv_vhca; + +#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) +#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) struct mlx5_core_dev { struct device *device; + enum mlx5_coredev_type coredev_type; struct pci_dev *pdev; /* sync pci state */ struct mutex pci_status_mutex; @@ -679,7 +704,9 @@ struct mlx5_core_dev { atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; + struct mlx5_dm *dm; struct mlx5_vxlan *vxlan; + struct mlx5_geneve *geneve; struct { struct mlx5_rsvd_gids reserved_gids; u32 roce_en; @@ -690,6 +717,8 @@ struct mlx5_core_dev { struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; struct mlx5_fw_tracer *tracer; + u32 vsc_addr; + struct mlx5_hv_vhca *hv_vhca; }; struct mlx5_db { @@ -901,7 +930,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); -void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); int mlx5_buf_alloc(struct mlx5_core_dev *dev, @@ -949,7 +977,7 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); -int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, @@ -961,7 +989,7 @@ int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); const char *mlx5_command_str(int command); -int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); @@ -1042,6 +1070,8 
@@ int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); +int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); @@ -1058,6 +1088,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, size_t *offsets); struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); +int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, + u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id); +int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, + u64 length, u16 uid, phys_addr_t addr, u32 obj_id); #ifdef CONFIG_MLX5_CORE_IPOIB struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, @@ -1082,9 +1116,9 @@ enum { MLX5_PCI_DEV_IS_VF = 1 << 0, }; -static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) +static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev) { - return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); + return dev->coredev_type == MLX5_COREDEV_PF; } static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) @@ -1092,23 +1126,20 @@ static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) return dev->caps.embedded_cpu; } -static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev) +static inline bool +mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev) { return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager); } -static inline bool mlx5_ecpf_vport_exists(struct mlx5_core_dev *dev) +static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev) { return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists); } -#define MLX5_HOST_PF_MAX_VFS (127u) -static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev) +static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev) { - if (mlx5_core_is_ecpf_esw_manager(dev)) - return MLX5_HOST_PF_MAX_VFS; - else - return pci_sriov_get_totalvfs(dev->pdev); + return dev->priv.sriov.max_vfs; } static inline int mlx5_get_gid_table_len(u16 param) diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h index 00045cc4ea11..e49d8c0d4f26 100644 --- a/include/linux/mlx5/eq.h +++ b/include/linux/mlx5/eq.h @@ -4,17 +4,7 @@ #ifndef MLX5_CORE_EQ_H #define MLX5_CORE_EQ_H -enum { - MLX5_EQ_PAGEREQ_IDX = 0, - MLX5_EQ_CMD_IDX = 1, - MLX5_EQ_ASYNC_IDX = 2, - /* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */ - MLX5_EQ_PFAULT_IDX = 3, - MLX5_EQ_MAX_ASYNC_EQS, - /* completion eqs vector indices start here */ - MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS, -}; - +#define MLX5_IRQ_VEC_COMP_BASE 1 #define MLX5_NUM_CMD_EQE (32) #define MLX5_NUM_ASYNC_EQE (0x1000) #define MLX5_NUM_SPARE_EQE (0x80) @@ -23,18 +13,19 @@ struct mlx5_eq; struct mlx5_core_dev; struct mlx5_eq_param { - u8 index; + u8 irq_index; int nent; - u64 mask; - void *context; - irq_handler_t handler; + u64 mask[4]; }; struct mlx5_eq * -mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name, - struct mlx5_eq_param *param); +mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param); int 
mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct notifier_block *nb); +void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct notifier_block *nb); struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc); void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm); diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index cf226c190329..98e667b176ef 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -7,13 +7,14 @@ #define _MLX5_ESWITCH_ #include <linux/mlx5/driver.h> +#include <net/devlink.h> #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) enum { - SRIOV_NONE, - SRIOV_LEGACY, - SRIOV_OFFLOADS + MLX5_ESWITCH_NONE, + MLX5_ESWITCH_LEGACY, + MLX5_ESWITCH_OFFLOADS }; enum { @@ -29,25 +30,28 @@ enum { }; struct mlx5_eswitch_rep; -struct mlx5_eswitch_rep_if { - int (*load)(struct mlx5_core_dev *dev, - struct mlx5_eswitch_rep *rep); - void (*unload)(struct mlx5_eswitch_rep *rep); - void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); - void *priv; - atomic_t state; +struct mlx5_eswitch_rep_ops { + int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep); + void (*unload)(struct mlx5_eswitch_rep *rep); + void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); +}; + +struct mlx5_eswitch_rep_data { + void *priv; + atomic_t state; }; struct mlx5_eswitch_rep { - struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES]; + struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES]; u16 vport; - u8 hw_id[ETH_ALEN]; u16 vlan; + /* Only IB rep is using vport_index */ + u16 vport_index; u32 vlan_refcount; }; void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep_if *rep_if, + const struct mlx5_eswitch_rep_ops *ops, u8 rep_type); void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type); void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, @@ -56,8 +60,45 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, u16 vport_num); void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); -u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport_num, u32 sqn); + +u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); + +#ifdef CONFIG_MLX5_ESWITCH +enum devlink_eswitch_encap_mode +mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev); + +bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); +u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, + u16 vport_num); +u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); +#else /* CONFIG_MLX5_ESWITCH */ + +static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) +{ + return MLX5_ESWITCH_NONE; +} + +static inline enum devlink_eswitch_encap_mode +mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) +{ + return DEVLINK_ESWITCH_ENCAP_MODE_NONE; +} + +static inline bool +mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) +{ + return false; +}; + +static inline u32 +mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, + int vport_num) +{ + return 0; +}; +#endif /* CONFIG_MLX5_ESWITCH */ + #endif diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index e690ba0f965c..724d276ea133 100644 --- 
a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -47,6 +47,7 @@ enum { enum { MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0), MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1), + MLX5_FLOW_TABLE_TERMINATION = BIT(2), }; #define LEFTOVERS_RULE_NUM 2 @@ -74,6 +75,7 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_SNIFFER_TX, MLX5_FLOW_NAMESPACE_EGRESS, MLX5_FLOW_NAMESPACE_RDMA_RX, + MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, }; enum { @@ -82,15 +84,28 @@ enum { FDB_SLOW_PATH, }; +struct mlx5_pkt_reformat; +struct mlx5_modify_hdr; struct mlx5_flow_table; struct mlx5_flow_group; struct mlx5_flow_namespace; struct mlx5_flow_handle; +enum { + FLOW_CONTEXT_HAS_TAG = BIT(0), +}; + +struct mlx5_flow_context { + u32 flags; + u32 flow_tag; + u32 flow_source; +}; + struct mlx5_flow_spec { u8 match_criteria_enable; u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; u32 match_value[MLX5_ST_SZ_DW(fte_match_param)]; + struct mlx5_flow_context flow_context; }; enum { @@ -108,12 +123,17 @@ struct mlx5_flow_destination { struct { u16 num; u16 vhca_id; - u32 reformat_id; + struct mlx5_pkt_reformat *pkt_reformat; u8 flags; } vport; }; }; +struct mod_hdr_tbl { + struct mutex lock; /* protects hlist */ + DECLARE_HASHTABLE(hlist, 8); +}; + struct mlx5_flow_namespace * mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n); struct mlx5_flow_namespace * @@ -172,15 +192,13 @@ struct mlx5_fs_vlan { #define MLX5_FS_VLAN_DEPTH 2 enum { - FLOW_ACT_HAS_TAG = BIT(0), - FLOW_ACT_NO_APPEND = BIT(1), + FLOW_ACT_NO_APPEND = BIT(0), }; struct mlx5_flow_act { u32 action; - u32 flow_tag; - u32 reformat_id; - u32 modify_id; + struct mlx5_modify_hdr *modify_hdr; + struct mlx5_pkt_reformat *pkt_reformat; uintptr_t esp_id; u32 flags; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; @@ -189,9 +207,6 @@ struct mlx5_flow_act { #define MLX5_DECLARE_FLOW_ACT(name) \ struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\ - .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, \ - .reformat_id = 0, \ - .modify_id = 0, \ .flags = 0, } /* Single destination per rule. 
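Because packet-reformat and modify-header objects are now returned as opaque pointers rather than u32 ids (the reworked allocator prototypes follow a little further below), a rule that rewrites headers attaches the object directly to its mlx5_flow_act. A rough sketch, in which the namespace choice, the prebuilt action buffer and the surrounding error handling are placeholder assumptions:

#include <linux/err.h>
#include <linux/mlx5/fs.h>

static struct mlx5_flow_handle *
example_add_rewrite_rule(struct mlx5_core_dev *mdev, struct mlx5_flow_table *ft,
			 const struct mlx5_flow_spec *spec,
			 struct mlx5_flow_destination *dest,
			 void *actions, u8 num_actions)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_modify_hdr *mh;
	struct mlx5_flow_handle *rule;

	mh = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
				      num_actions, actions);
	if (IS_ERR(mh))
		return ERR_CAST(mh);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = mh;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule))
		mlx5_modify_header_dealloc(mdev, mh);

	return rule;
}

On success the caller keeps mh alive for the lifetime of the rule and releases it with mlx5_modify_header_dealloc() after deleting the rule.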
@@ -199,7 +214,7 @@ struct mlx5_flow_act { */ struct mlx5_flow_handle * mlx5_add_flow_rules(struct mlx5_flow_table *ft, - struct mlx5_flow_spec *spec, + const struct mlx5_flow_spec *spec, struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest, int num_dest); @@ -211,6 +226,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); +u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse); int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, @@ -220,19 +236,18 @@ u32 mlx5_fc_id(struct mlx5_fc *counter); int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); -int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, - u8 namespace, u8 num_actions, - void *modify_actions, u32 *modify_header_id); +struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev, + u8 ns_type, u8 num_actions, + void *modify_actions); void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, - u32 modify_header_id); - -int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, - int reformat_type, - size_t size, - void *reformat_data, - enum mlx5_flow_namespace_type namespace, - u32 *packet_reformat_id); + struct mlx5_modify_hdr *modify_hdr); + +struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, + int reformat_type, + size_t size, + void *reformat_data, + enum mlx5_flow_namespace_type ns_type); void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, - u32 packet_reformat_id); + struct mlx5_pkt_reformat *reformat); #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 5e74305e2e57..0836fe232f97 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -91,6 +91,20 @@ enum { enum { MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, + MLX5_OBJ_TYPE_MKEY = 0xff01, + MLX5_OBJ_TYPE_QP = 0xff02, + MLX5_OBJ_TYPE_PSV = 0xff03, + MLX5_OBJ_TYPE_RMP = 0xff04, + MLX5_OBJ_TYPE_XRC_SRQ = 0xff05, + MLX5_OBJ_TYPE_RQ = 0xff06, + MLX5_OBJ_TYPE_SQ = 0xff07, + MLX5_OBJ_TYPE_TIR = 0xff08, + MLX5_OBJ_TYPE_TIS = 0xff09, + MLX5_OBJ_TYPE_DCT = 0xff0a, + MLX5_OBJ_TYPE_XRQ = 0xff0b, + MLX5_OBJ_TYPE_RQT = 0xff0e, + MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f, + MLX5_OBJ_TYPE_CQ = 0xff10, }; enum { @@ -106,6 +120,9 @@ enum { MLX5_CMD_OP_QUERY_ISSI = 0x10a, MLX5_CMD_OP_SET_ISSI = 0x10b, MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, + MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111, + MLX5_CMD_OP_ALLOC_SF = 0x113, + MLX5_CMD_OP_DEALLOC_SF = 0x114, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, @@ -155,7 +172,9 @@ enum { MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725, MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726, MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, - MLX5_CMD_OP_QUERY_HOST_PARAMS = 0x740, + MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729, + MLX5_CMD_OP_MODIFY_XRQ = 0x72a, + MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740, MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, @@ -276,6 +295,7 @@ enum { MLX5_CMD_OP_DESTROY_UCTX = 0xa06, MLX5_CMD_OP_CREATE_UMEM = 0xa08, MLX5_CMD_OP_DESTROY_UMEM = 0xa0a, + MLX5_CMD_OP_SYNC_STEERING = 0xb00, MLX5_CMD_OP_MAX }; @@ -382,7 +402,8 @@ struct 
mlx5_ifc_flow_table_prop_layout_bits { u8 reformat_and_modify_action[0x1]; u8 reserved_at_15[0x2]; u8 table_miss_action_domain[0x1]; - u8 reserved_at_18[0x8]; + u8 termination_table[0x1]; + u8 reserved_at_19[0x7]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; u8 log_max_modify_header_context[0x8]; @@ -465,7 +486,11 @@ union mlx5_ifc_gre_key_bits { }; struct mlx5_ifc_fte_match_set_misc_bits { - u8 reserved_at_0[0x8]; + u8 gre_c_present[0x1]; + u8 reserved_at_1[0x1]; + u8 gre_k_present[0x1]; + u8 gre_s_present[0x1]; + u8 source_vhca_port[0x4]; u8 source_sqn[0x18]; u8 source_eswitch_owner_vhca_id[0x10]; @@ -527,16 +552,56 @@ struct mlx5_ifc_fte_match_set_misc2_bits { struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp; - u8 reserved_at_80[0x100]; + u8 metadata_reg_c_7[0x20]; + + u8 metadata_reg_c_6[0x20]; + + u8 metadata_reg_c_5[0x20]; + + u8 metadata_reg_c_4[0x20]; + + u8 metadata_reg_c_3[0x20]; + + u8 metadata_reg_c_2[0x20]; + + u8 metadata_reg_c_1[0x20]; + + u8 metadata_reg_c_0[0x20]; u8 metadata_reg_a[0x20]; - u8 reserved_at_1a0[0x60]; + u8 metadata_reg_b[0x20]; + + u8 reserved_at_1c0[0x40]; }; struct mlx5_ifc_fte_match_set_misc3_bits { - u8 reserved_at_0[0x120]; + u8 inner_tcp_seq_num[0x20]; + + u8 outer_tcp_seq_num[0x20]; + + u8 inner_tcp_ack_num[0x20]; + + u8 outer_tcp_ack_num[0x20]; + + u8 reserved_at_80[0x8]; + u8 outer_vxlan_gpe_vni[0x18]; + + u8 outer_vxlan_gpe_next_protocol[0x8]; + u8 outer_vxlan_gpe_flags[0x8]; + u8 reserved_at_b0[0x10]; + + u8 icmp_header_data[0x20]; + + u8 icmpv6_header_data[0x20]; + + u8 icmp_type[0x8]; + u8 icmp_code[0x8]; + u8 icmpv6_type[0x8]; + u8 icmpv6_code[0x8]; + u8 geneve_tlv_option_0_data[0x20]; + u8 reserved_at_140[0xc0]; }; @@ -632,11 +697,33 @@ struct mlx5_ifc_flow_table_nic_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; - u8 reserved_at_e00[0x7200]; + u8 reserved_at_e00[0x1200]; + + u8 sw_steering_nic_rx_action_drop_icm_address[0x40]; + + u8 sw_steering_nic_tx_action_drop_icm_address[0x40]; + + u8 sw_steering_nic_tx_action_allow_icm_address[0x40]; + + u8 reserved_at_20c0[0x5f40]; +}; + +enum { + MLX5_FDB_TO_VPORT_REG_C_0 = 0x01, + MLX5_FDB_TO_VPORT_REG_C_1 = 0x02, + MLX5_FDB_TO_VPORT_REG_C_2 = 0x04, + MLX5_FDB_TO_VPORT_REG_C_3 = 0x08, + MLX5_FDB_TO_VPORT_REG_C_4 = 0x10, + MLX5_FDB_TO_VPORT_REG_C_5 = 0x20, + MLX5_FDB_TO_VPORT_REG_C_6 = 0x40, + MLX5_FDB_TO_VPORT_REG_C_7 = 0x80, }; struct mlx5_ifc_flow_table_eswitch_cap_bits { - u8 reserved_at_0[0x1a]; + u8 fdb_to_vport_reg_c_id[0x8]; + u8 reserved_at_8[0xf]; + u8 flow_source[0x1]; + u8 reserved_at_18[0x2]; u8 multi_fdb_encap[0x1]; u8 reserved_at_1b[0x1]; u8 fdb_multi_path_to_table[0x1]; @@ -650,7 +737,17 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; - u8 reserved_at_800[0x7800]; + u8 reserved_at_800[0x1000]; + + u8 sw_steering_fdb_action_drop_icm_address_rx[0x40]; + + u8 sw_steering_fdb_action_drop_icm_address_tx[0x40]; + + u8 sw_steering_uplink_icm_address_rx[0x40]; + + u8 sw_steering_uplink_icm_address_tx[0x40]; + + u8 reserved_at_1900[0x6700]; }; enum { @@ -664,7 +761,11 @@ struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; - u8 reserved_at_5[0x16]; + u8 reserved_at_5[0x3]; + u8 esw_uplink_ingress_acl[0x1]; + u8 reserved_at_9[0x10]; + u8 esw_functions_changed[0x1]; + u8 reserved_at_1a[0x1]; u8 ecpf_vport_exists[0x1]; u8 counter_eswitch_affinity[0x1]; u8 
merged_eswitch[0x1]; @@ -680,7 +781,11 @@ struct mlx5_ifc_e_switch_cap_bits { u8 reserved_2b[0x6]; u8 max_encap_header_size[0xa]; - u8 reserved_40[0x7c0]; + u8 reserved_at_40[0xb]; + u8 log_max_esw_sf[0x5]; + u8 esw_sf_base_id[0x10]; + + u8 reserved_at_60[0x7a0]; }; @@ -715,7 +820,9 @@ struct mlx5_ifc_qos_cap_bits { }; struct mlx5_ifc_debug_cap_bits { - u8 reserved_at_0[0x20]; + u8 core_dump_general[0x1]; + u8 core_dump_qp[0x1]; + u8 reserved_at_2[0x1e]; u8 reserved_at_20[0x2]; u8 stall_detect[0x1]; @@ -749,7 +856,10 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 swp[0x1]; u8 swp_csum[0x1]; u8 swp_lso[0x1]; - u8 reserved_at_23[0xd]; + u8 cqe_checksum_full[0x1]; + u8 reserved_at_24[0x5]; + u8 tunnel_stateless_ip_over_ip[0x1]; + u8 reserved_at_2a[0x6]; u8 max_vxlan_udp_ports[0x8]; u8 reserved_at_38[0x6]; u8 max_geneve_opt_len[0x1]; @@ -788,6 +898,25 @@ struct mlx5_ifc_roce_cap_bits { u8 reserved_at_100[0x700]; }; +struct mlx5_ifc_sync_steering_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0xc0]; +}; + +struct mlx5_ifc_sync_steering_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_device_mem_cap_bits { u8 memic[0x1]; u8 reserved_at_1[0x1f]; @@ -818,6 +947,12 @@ struct mlx5_ifc_device_mem_cap_bits { u8 reserved_at_180[0x680]; }; +struct mlx5_ifc_device_event_cap_bits { + u8 user_affiliated_events[4][0x40]; + + u8 user_unaffiliated_events[4][0x40]; +}; + enum { MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, @@ -881,7 +1016,9 @@ struct mlx5_ifc_odp_cap_bits { struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps; - u8 reserved_at_100[0x700]; + struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps; + + u8 reserved_at_120[0x6E0]; }; struct mlx5_ifc_calc_op { @@ -911,6 +1048,16 @@ struct mlx5_ifc_vector_calc_cap_bits { u8 reserved_at_c0[0x720]; }; +struct mlx5_ifc_tls_cap_bits { + u8 tls_1_2_aes_gcm_128[0x1]; + u8 tls_1_3_aes_gcm_128[0x1]; + u8 tls_1_2_aes_gcm_256[0x1]; + u8 tls_1_3_aes_gcm_256[0x1]; + u8 reserved_at_4[0x1c]; + + u8 reserved_at_20[0x7e0]; +}; + enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, @@ -963,10 +1110,31 @@ enum { }; enum { + MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7, + MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8, + MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9, +}; + +enum { MLX5_UCTX_CAP_RAW_TX = 1UL << 0, MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1, }; +#define MLX5_FC_BULK_SIZE_FACTOR 128 + +enum mlx5_fc_bulk_alloc_bitmask { + MLX5_FC_BULK_128 = (1 << 0), + MLX5_FC_BULK_256 = (1 << 1), + MLX5_FC_BULK_512 = (1 << 2), + MLX5_FC_BULK_1024 = (1 << 3), + MLX5_FC_BULK_2048 = (1 << 4), + MLX5_FC_BULK_4096 = (1 << 5), + MLX5_FC_BULK_8192 = (1 << 6), + MLX5_FC_BULK_16384 = (1 << 7), +}; + +#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) + struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x30]; u8 vhca_id[0x10]; @@ -975,7 +1143,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_srq_sz[0x8]; u8 log_max_qp_sz[0x8]; - u8 reserved_at_90[0x8]; + u8 event_cap[0x1]; + u8 reserved_at_91[0x7]; u8 prio_tag_required[0x1]; u8 reserved_at_99[0x2]; u8 log_max_qp[0x5]; @@ -1023,7 +1192,12 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 cc_modify_allowed[0x1]; u8 start_pad[0x1]; u8 cache_line_128byte[0x1]; - u8 reserved_at_165[0xa]; + u8 reserved_at_165[0x4]; + u8 rts2rts_qp_counters_set_id[0x1]; + u8 reserved_at_16a[0x2]; + u8 
vnic_env_int_rq_oob[0x1]; + u8 sbcam_reg[0x1]; + u8 reserved_at_16e[0x1]; u8 qcam_reg[0x1]; u8 gid_table_size[0x10]; @@ -1152,7 +1326,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_263[0x8]; u8 log_bf_reg_size[0x5]; - u8 reserved_at_270[0xb]; + u8 reserved_at_270[0x8]; + u8 lag_tx_port_affinity[0x1]; + u8 reserved_at_279[0x2]; u8 lag_master[0x1]; u8 num_lag_ports[0x4]; @@ -1168,7 +1344,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_2e0[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_at_300[0x18]; + u8 reserved_at_300[0x10]; + u8 flow_counter_bulk_alloc[0x8]; u8 log_max_mcg[0x8]; u8 reserved_at_320[0x3]; @@ -1240,7 +1417,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_440[0x20]; - u8 reserved_at_460[0x3]; + u8 tls[0x1]; + u8 reserved_at_461[0x2]; u8 log_max_uctx[0x5]; u8 reserved_at_468[0x3]; u8 log_max_umem[0x5]; @@ -1265,7 +1443,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_geneve_tlv_option_data_len[0x5]; u8 reserved_at_570[0x10]; - u8 reserved_at_580[0x3c]; + u8 reserved_at_580[0x33]; + u8 log_max_dek[0x5]; + u8 reserved_at_5b8[0x4]; u8 mini_cqe_resp_stride_index[0x1]; u8 cqe_128_always[0x1]; u8 cqe_compression_128[0x1]; @@ -1295,13 +1475,33 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_640[0x10]; u8 num_q_monitor_counters[0x10]; - u8 reserved_at_660[0x40]; + u8 reserved_at_660[0x20]; + + u8 sf[0x1]; + u8 sf_set_partition[0x1]; + u8 reserved_at_682[0x1]; + u8 log_max_sf[0x5]; + u8 reserved_at_688[0x8]; + u8 log_min_sf_size[0x8]; + u8 max_num_sf_partitions[0x8]; u8 uctx_cap[0x20]; u8 reserved_at_6c0[0x4]; u8 flex_parser_id_geneve_tlv_option_0[0x4]; - u8 reserved_at_6c8[0x138]; + u8 flex_parser_id_icmp_dw1[0x4]; + u8 flex_parser_id_icmp_dw0[0x4]; + u8 flex_parser_id_icmpv6_dw1[0x4]; + u8 flex_parser_id_icmpv6_dw0[0x4]; + u8 flex_parser_id_outer_first_mpls_over_gre[0x4]; + u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4]; + + u8 reserved_at_6e0[0x10]; + u8 sf_base_id[0x10]; + + u8 reserved_at_700[0x80]; + u8 vhca_tunnel_commands[0x40]; + u8 reserved_at_7c0[0x40]; }; enum mlx5_flow_destination_type { @@ -1345,9 +1545,8 @@ struct mlx5_ifc_extended_dest_format_bits { }; union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { - struct mlx5_ifc_dest_format_struct_bits dest_format_struct; + struct mlx5_ifc_extended_dest_format_bits extended_dest_format; struct mlx5_ifc_flow_counter_list_bits flow_counter_list; - u8 reserved_at_0[0x40]; }; struct mlx5_ifc_fte_match_param_bits { @@ -1761,12 +1960,28 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits { u8 port_xmit_wait[0x20]; }; -struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { +struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits { u8 transmit_queue_high[0x20]; u8 transmit_queue_low[0x20]; - u8 reserved_at_40[0x780]; + u8 no_buffer_discard_uc_high[0x20]; + + u8 no_buffer_discard_uc_low[0x20]; + + u8 reserved_at_80[0x740]; +}; + +struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits { + u8 wred_discard_high[0x20]; + + u8 wred_discard_low[0x20]; + + u8 ecn_marked_tc_high[0x20]; + + u8 ecn_marked_tc_low[0x20]; + + u8 reserved_at_80[0x740]; }; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { @@ -2531,7 +2746,10 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits qos_cap; + struct mlx5_ifc_debug_cap_bits debug_cap; struct mlx5_ifc_fpga_cap_bits fpga_cap; + struct mlx5_ifc_tls_cap_bits tls_cap; + struct mlx5_ifc_device_mem_cap_bits device_mem_cap; u8 reserved_at_0[0x8000]; }; @@ 
-2549,6 +2767,12 @@ enum { MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, }; +enum { + MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT = 0x0, + MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK = 0x1, + MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2, +}; + struct mlx5_ifc_vlan_bits { u8 ethtype[0x10]; u8 prio[0x3]; @@ -2568,7 +2792,9 @@ struct mlx5_ifc_flow_context_bits { u8 action[0x10]; u8 extended_destination[0x1]; - u8 reserved_at_80[0x7]; + u8 reserved_at_81[0x1]; + u8 flow_source[0x2]; + u8 reserved_at_84[0x4]; u8 destination_list_size[0x18]; u8 reserved_at_a0[0x8]; @@ -2652,7 +2878,11 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits { u8 transmit_discard_vport_down[0x40]; - u8 reserved_at_140[0xec0]; + u8 reserved_at_140[0xa0]; + + u8 internal_rq_out_of_buffer[0x20]; + + u8 reserved_at_200[0xe00]; }; struct mlx5_ifc_traffic_counter_bits { @@ -2663,7 +2893,8 @@ struct mlx5_ifc_traffic_counter_bits { struct mlx5_ifc_tisc_bits { u8 strict_lag_tx_port_affinity[0x1]; - u8 reserved_at_1[0x3]; + u8 tls_en[0x1]; + u8 reserved_at_2[0x2]; u8 lag_tx_port_affinity[0x04]; u8 reserved_at_8[0x4]; @@ -2677,7 +2908,11 @@ struct mlx5_ifc_tisc_bits { u8 reserved_at_140[0x8]; u8 underlay_qpn[0x18]; - u8 reserved_at_160[0x3a0]; + + u8 reserved_at_160[0x8]; + u8 pd[0x18]; + + u8 reserved_at_180[0x380]; }; enum { @@ -2834,6 +3069,13 @@ enum { SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, }; +enum { + ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0, + ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1, + ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, + ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, +}; + struct mlx5_ifc_scheduling_context_bits { u8 element_type[0x8]; u8 reserved_at_8[0x18]; @@ -3093,12 +3335,14 @@ struct mlx5_ifc_hca_vport_context_bits { }; struct mlx5_ifc_esw_vport_context_bits { - u8 reserved_at_0[0x3]; + u8 fdb_to_vport_reg_c[0x1]; + u8 reserved_at_1[0x2]; u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert[0x2]; - u8 reserved_at_8[0x18]; + u8 fdb_to_vport_reg_c_id[0x8]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0x20]; @@ -3109,7 +3353,11 @@ struct mlx5_ifc_esw_vport_context_bits { u8 cvlan_pcp[0x3]; u8 cvlan_id[0xc]; - u8 reserved_at_60[0x7a0]; + u8 reserved_at_60[0x720]; + + u8 sw_steering_vport_icm_address_rx[0x40]; + + u8 sw_steering_vport_icm_address_tx[0x40]; }; enum { @@ -3410,7 +3658,8 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; - struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout; + struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout; struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs; @@ -4795,23 +5044,98 @@ struct mlx5_ifc_query_hca_cap_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; + u8 other_function[0x1]; + u8 reserved_at_41[0xf]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_other_hca_cap_bits { + u8 roce[0x1]; + u8 reserved_at_1[0x27f]; +}; + +struct mlx5_ifc_query_other_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; 
+ + u8 syndrome[0x20]; + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_other_hca_cap_bits other_capability; }; -struct mlx5_ifc_query_flow_table_out_bits { +struct mlx5_ifc_query_other_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_modify_other_hca_cap_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 reserved_at_40[0x80]; + u8 reserved_at_40[0x40]; +}; - u8 reserved_at_c0[0x8]; +struct mlx5_ifc_modify_other_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + u8 field_select[0x20]; + + struct mlx5_ifc_other_hca_cap_bits other_capability; +}; + +struct mlx5_ifc_flow_table_context_bits { + u8 reformat_en[0x1]; + u8 decap_en[0x1]; + u8 sw_owner[0x1]; + u8 termination_table[0x1]; + u8 table_miss_action[0x4]; u8 level[0x8]; - u8 reserved_at_d0[0x8]; + u8 reserved_at_10[0x8]; u8 log_size[0x8]; - u8 reserved_at_e0[0x120]; + u8 reserved_at_20[0x8]; + u8 table_miss_id[0x18]; + + u8 reserved_at_40[0x8]; + u8 lag_master_next_table_id[0x18]; + + u8 reserved_at_60[0x60]; + + u8 sw_owner_icm_root_1[0x40]; + + u8 sw_owner_icm_root_0[0x40]; + +}; + +struct mlx5_ifc_query_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x80]; + + struct mlx5_ifc_flow_table_context_bits flow_table_context; }; struct mlx5_ifc_query_flow_table_in_bits { @@ -4979,7 +5303,8 @@ struct mlx5_ifc_modify_esw_vport_context_out_bits { }; struct mlx5_ifc_esw_vport_context_fields_select_bits { - u8 reserved_at_0[0x1c]; + u8 reserved_at_0[0x1b]; + u8 fdb_to_vport_reg_c_id[0x1]; u8 vport_cvlan_insert[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_strip[0x1]; @@ -5080,7 +5405,7 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits { u8 reserved_at_60[0x20]; }; -enum { +enum mlx5_reformat_ctx_type { MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0, MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1, MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2, @@ -5176,6 +5501,16 @@ enum { MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16, MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17, MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47, + MLX5_ACTION_IN_FIELD_METADATA_REG_A = 0x49, + MLX5_ACTION_IN_FIELD_METADATA_REG_B = 0x50, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_0 = 0x51, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_1 = 0x52, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_2 = 0x53, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56, + MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, + MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, }; struct mlx5_ifc_alloc_modify_header_context_out_bits { @@ -5864,10 +6199,12 @@ struct mlx5_ifc_modify_cq_in_bits { struct mlx5_ifc_cqc_bits cq_context; - u8 reserved_at_280[0x40]; + u8 reserved_at_280[0x60]; u8 cq_umem_valid[0x1]; - u8 reserved_at_2c1[0x5bf]; + u8 reserved_at_2e1[0x1f]; + + u8 reserved_at_300[0x580]; u8 pas[0][0x40]; }; @@ -7221,34 +7558,26 @@ struct mlx5_ifc_create_mkey_in_bits { u8 klm_pas_mtt[0][0x20]; }; +enum { + MLX5_FLOW_TABLE_TYPE_NIC_RX = 0x0, + MLX5_FLOW_TABLE_TYPE_NIC_TX = 0x1, + MLX5_FLOW_TABLE_TYPE_ESW_EGRESS_ACL = 0x2, + MLX5_FLOW_TABLE_TYPE_ESW_INGRESS_ACL = 0x3, + MLX5_FLOW_TABLE_TYPE_FDB = 0X4, + MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 0X5, + MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 0X6, +}; + struct 
mlx5_ifc_create_flow_table_out_bits { u8 status[0x8]; - u8 reserved_at_8[0x18]; + u8 icm_address_63_40[0x18]; u8 syndrome[0x20]; - u8 reserved_at_40[0x8]; + u8 icm_address_39_32[0x8]; u8 table_id[0x18]; - u8 reserved_at_60[0x20]; -}; - -struct mlx5_ifc_flow_table_context_bits { - u8 reformat_en[0x1]; - u8 decap_en[0x1]; - u8 reserved_at_2[0x2]; - u8 table_miss_action[0x4]; - u8 level[0x8]; - u8 reserved_at_10[0x8]; - u8 log_size[0x8]; - - u8 reserved_at_20[0x8]; - u8 table_miss_id[0x18]; - - u8 reserved_at_40[0x8]; - u8 lag_master_next_table_id[0x18]; - - u8 reserved_at_60[0xe0]; + u8 icm_address_31_0[0x20]; }; struct mlx5_ifc_create_flow_table_in_bits { @@ -7355,9 +7684,9 @@ struct mlx5_ifc_create_eq_in_bits { u8 reserved_at_280[0x40]; - u8 event_bitmask[0x40]; + u8 event_bitmask[4][0x40]; - u8 reserved_at_300[0x580]; + u8 reserved_at_3c0[0x4c0]; u8 pas[0][0x40]; }; @@ -7703,7 +8032,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x38]; + u8 flow_counter_bulk[0x8]; }; struct mlx5_ifc_add_vxlan_udp_dport_out_bits { @@ -8475,7 +8805,7 @@ struct mlx5_ifc_mcam_access_reg_bits { u8 mcda[0x1]; u8 mcc[0x1]; u8 mcqi[0x1]; - u8 reserved_at_1f[0x1]; + u8 mcqs[0x1]; u8 regs_95_to_87[0x9]; u8 mpegc[0x1]; @@ -8546,6 +8876,18 @@ struct mlx5_ifc_qcam_reg_bits { u8 reserved_at_1c0[0x80]; }; +struct mlx5_ifc_core_dump_reg_bits { + u8 reserved_at_0[0x18]; + u8 core_dump_type[0x8]; + + u8 reserved_at_20[0x30]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x8]; + u8 qpn[0x18]; + u8 reserved_at_80[0x180]; +}; + struct mlx5_ifc_pcap_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; @@ -8955,6 +9297,24 @@ struct mlx5_ifc_mtppse_reg_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_mcqs_reg_bits { + u8 last_index_flag[0x1]; + u8 reserved_at_1[0x7]; + u8 fw_device[0x8]; + u8 component_index[0x10]; + + u8 reserved_at_20[0x10]; + u8 identifier[0x10]; + + u8 reserved_at_40[0x17]; + u8 component_status[0x5]; + u8 component_update_state[0x4]; + + u8 last_update_state_changer_type[0x4]; + u8 last_update_state_changer_host_id[0x4]; + u8 reserved_at_68[0x18]; +}; + struct mlx5_ifc_mcqi_cap_bits { u8 supported_info_bitmask[0x20]; @@ -8975,6 +9335,43 @@ struct mlx5_ifc_mcqi_cap_bits { u8 reserved_at_86[0x1a]; }; +struct mlx5_ifc_mcqi_version_bits { + u8 reserved_at_0[0x2]; + u8 build_time_valid[0x1]; + u8 user_defined_time_valid[0x1]; + u8 reserved_at_4[0x14]; + u8 version_string_length[0x8]; + + u8 version[0x20]; + + u8 build_time[0x40]; + + u8 user_defined_time[0x40]; + + u8 build_tool_version[0x20]; + + u8 reserved_at_e0[0x20]; + + u8 version_string[92][0x8]; +}; + +struct mlx5_ifc_mcqi_activation_method_bits { + u8 pending_server_ac_power_cycle[0x1]; + u8 pending_server_dc_power_cycle[0x1]; + u8 pending_server_reboot[0x1]; + u8 pending_fw_reset[0x1]; + u8 auto_activate[0x1]; + u8 all_hosts_sync[0x1]; + u8 device_hw_reset[0x1]; + u8 reserved_at_7[0x19]; +}; + +union mlx5_ifc_mcqi_reg_data_bits { + struct mlx5_ifc_mcqi_cap_bits mcqi_caps; + struct mlx5_ifc_mcqi_version_bits mcqi_version; + struct mlx5_ifc_mcqi_activation_method_bits mcqi_activation_mathod; +}; + struct mlx5_ifc_mcqi_reg_bits { u8 read_pending_component[0x1]; u8 reserved_at_1[0xf]; @@ -8992,7 +9389,7 @@ struct mlx5_ifc_mcqi_reg_bits { u8 reserved_at_a0[0x10]; u8 data_size[0x10]; - u8 data[0][0x20]; + union mlx5_ifc_mcqi_reg_data_bits data[0]; }; struct mlx5_ifc_mcc_reg_bits { @@ -9042,7 +9439,8 @@ union mlx5_ifc_ports_control_registers_document_bits { struct 
mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; - struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout; + struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout; struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping; struct mlx5_ifc_pamp_reg_bits pamp_reg; struct mlx5_ifc_paos_reg_bits paos_reg; @@ -9389,8 +9787,6 @@ struct mlx5_ifc_query_lag_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; - struct mlx5_ifc_lagc_bits ctx; }; @@ -9518,7 +9914,7 @@ struct mlx5_ifc_general_obj_in_cmd_hdr_bits { u8 opcode[0x10]; u8 uid[0x10]; - u8 reserved_at_20[0x10]; + u8 vhca_tunnel_id[0x10]; u8 obj_type[0x10]; u8 obj_id[0x20]; @@ -9689,10 +10085,11 @@ struct mlx5_ifc_mtrc_ctrl_bits { struct mlx5_ifc_host_params_context_bits { u8 host_number[0x8]; - u8 reserved_at_8[0x8]; + u8 reserved_at_8[0x7]; + u8 host_pf_disabled[0x1]; u8 host_num_of_vfs[0x10]; - u8 reserved_at_20[0x10]; + u8 host_total_vfs[0x10]; u8 host_pci_bus[0x10]; u8 reserved_at_40[0x10]; @@ -9704,7 +10101,7 @@ struct mlx5_ifc_host_params_context_bits { u8 reserved_at_80[0x180]; }; -struct mlx5_ifc_query_host_params_in_bits { +struct mlx5_ifc_query_esw_functions_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -9714,7 +10111,7 @@ struct mlx5_ifc_query_host_params_in_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_query_host_params_out_bits { +struct mlx5_ifc_query_esw_functions_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -9725,6 +10122,164 @@ struct mlx5_ifc_query_host_params_out_bits { struct mlx5_ifc_host_params_context_bits host_params_context; u8 reserved_at_280[0x180]; + u8 host_sf_enable[0][0x40]; +}; + +struct mlx5_ifc_sf_partition_bits { + u8 reserved_at_0[0x10]; + u8 log_num_sf[0x8]; + u8 log_sf_bar_size[0x8]; +}; + +struct mlx5_ifc_query_sf_partitions_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x18]; + u8 num_sf_partitions[0x8]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_sf_partition_bits sf_partition[0]; +}; + +struct mlx5_ifc_query_sf_partitions_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_sf_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_dealloc_sf_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_alloc_sf_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_alloc_sf_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 function_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_affiliated_event_header_bits { + u8 reserved_at_0[0x10]; + u8 obj_type[0x10]; + + u8 obj_id[0x20]; +}; + +enum { + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), +}; + +enum { + MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, +}; + +struct 
mlx5_ifc_encryption_key_obj_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x14]; + u8 key_size[0x4]; + u8 reserved_at_58[0x4]; + u8 key_type[0x4]; + + u8 reserved_at_60[0x8]; + u8 pd[0x18]; + + u8 reserved_at_80[0x180]; + u8 key[8][0x20]; + + u8 reserved_at_300[0x500]; +}; + +struct mlx5_ifc_create_encryption_key_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + struct mlx5_ifc_encryption_key_obj_bits encryption_key_object; +}; + +enum { + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0, + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1, +}; + +enum { + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1, +}; + +struct mlx5_ifc_tls_static_params_bits { + u8 const_2[0x2]; + u8 tls_version[0x4]; + u8 const_1[0x2]; + u8 reserved_at_8[0x14]; + u8 encryption_standard[0x4]; + + u8 reserved_at_20[0x20]; + + u8 initial_record_number[0x40]; + + u8 resync_tcp_sn[0x20]; + + u8 gcm_iv[0x20]; + + u8 implicit_iv[0x40]; + + u8 reserved_at_100[0x8]; + u8 dek_index[0x18]; + + u8 reserved_at_120[0xe0]; +}; + +struct mlx5_ifc_tls_progress_params_bits { + u8 reserved_at_0[0x8]; + u8 tisn[0x18]; + + u8 next_record_tcp_sn[0x20]; + + u8 hw_resync_tcp_sn[0x20]; + + u8 record_tracker_state[0x2]; + u8 auth_state[0x2]; + u8 reserved_at_64[0x4]; + u8 hw_offset_record_number[0x18]; }; #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 3ba4edbd17a6..ae63b1ae9004 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -37,7 +37,8 @@ #include <linux/mlx5/driver.h> #define MLX5_INVALID_LKEY 0x100 -#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5) +/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */ +#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8) #define MLX5_DIF_SIZE 8 #define MLX5_STRIDE_BLOCK_OP 0x400 #define MLX5_CPY_GRD_MASK 0xc0 @@ -70,6 +71,7 @@ enum mlx5_qp_optpar { MLX5_QP_OPTPAR_CQN_RCV = 1 << 19, MLX5_QP_OPTPAR_DC_HS = 1 << 20, MLX5_QP_OPTPAR_DC_KEY = 1 << 21, + MLX5_QP_OPTPAR_COUNTER_SET_ID = 1 << 25, }; enum mlx5_qp_state { @@ -202,7 +204,12 @@ struct mlx5_wqe_ctrl_seg { u8 signature; u8 rsvd[2]; u8 fm_ce_se; - __be32 imm; + union { + __be32 general_id; + __be32 imm; + __be32 umr_mkey; + __be32 tisn; + }; }; #define MLX5_WQE_CTRL_DS_MASK 0x3f @@ -551,11 +558,6 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); } -static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key) -{ - return radix_tree_lookup(&dev->priv.mkey_table.tree, key); -} - int mlx5_core_create_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *qp, u32 *in, int inlen, diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 3d1c6cdbbba7..16060fb9b5e5 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -44,9 +44,6 @@ MLX5_VPORT_UPLINK_PLACEHOLDER + \ MLX5_VPORT_ECPF_PLACEHOLDER(mdev)) -#define MLX5_TOTAL_VPORTS(mdev) (MLX5_SPECIAL_VPORTS(mdev) + \ - mlx5_core_max_vfs(mdev)) - #define MLX5_VPORT_MANAGER(mdev) \ (MLX5_CAP_GEN(mdev, vport_group_manager) && \ (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ @@ -58,6 +55,7 @@ enum { MLX5_CAP_INLINE_MODE_NOT_REQUIRED, }; +/* Vport number for each function must keep unchanged */ enum { MLX5_VPORT_PF = 0x0, MLX5_VPORT_FIRST_VF = 0x1, @@ -69,7 +67,8 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, 
u8 opmod, u16 vport, u8 other_vport, u8 state); int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, - u16 vport, u8 *addr); + u16 vport, bool other, u8 *addr); +int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr); int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 *min_inline); void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); diff --git a/include/linux/mm.h b/include/linux/mm.h index dd0b5f4e1e45..a2adf95b3f9c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -541,13 +541,30 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma) vma->vm_ops = NULL; } +static inline bool vma_is_anonymous(struct vm_area_struct *vma) +{ + return !vma->vm_ops; +} + +#ifdef CONFIG_SHMEM +/* + * The vma_is_shmem is not inline because it is used only by slow + * paths in userfault. + */ +bool vma_is_shmem(struct vm_area_struct *vma); +#else +static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } +#endif + +int vma_is_stack_for_current(struct vm_area_struct *vma); + /* flush_tlb_range() takes a vma, not a mm, and can care about flags */ #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } struct mmu_gather; struct inode; -#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) +#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) static inline int pmd_devmap(pmd_t pmd) { return 0; @@ -633,6 +650,11 @@ static inline bool is_vmalloc_addr(const void *x) return false; #endif } + +#ifndef is_ioremap_addr +#define is_ioremap_addr(x) is_vmalloc_addr(x) +#endif + #ifdef CONFIG_MMU extern int is_vmalloc_or_module_addr(const void *x); #else @@ -673,11 +695,6 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) extern void kvfree(const void *addr); -static inline atomic_t *compound_mapcount_ptr(struct page *page) -{ - return &page[1].compound_mapcount; -} - static inline int compound_mapcount(struct page *page) { VM_BUG_ON_PAGE(!PageCompound(page), page); @@ -783,6 +800,24 @@ static inline void set_compound_order(struct page *page, unsigned int order) page[1].compound_order = order; } +/* Returns the number of pages in this potentially compound page. */ +static inline unsigned long compound_nr(struct page *page) +{ + return 1UL << compound_order(page); +} + +/* Returns the number of bytes in this potentially compound page. 
*/ +static inline unsigned long page_size(struct page *page) +{ + return PAGE_SIZE << compound_order(page); +} + +/* Returns the number of bits needed for the number of bytes in a page */ +static inline unsigned int page_shift(struct page *page) +{ + return PAGE_SHIFT + compound_order(page); +} + void free_compound_page(struct page *page); #ifdef CONFIG_MMU @@ -932,8 +967,6 @@ static inline bool is_zone_device_page(const struct page *page) #endif #ifdef CONFIG_DEV_PAGEMAP_OPS -void dev_pagemap_get_ops(void); -void dev_pagemap_put_ops(void); void __put_devmap_managed_page(struct page *page); DECLARE_STATIC_KEY_FALSE(devmap_managed_key); static inline bool put_devmap_managed_page(struct page *page) @@ -944,7 +977,6 @@ static inline bool put_devmap_managed_page(struct page *page) return false; switch (page->pgmap->type) { case MEMORY_DEVICE_PRIVATE: - case MEMORY_DEVICE_PUBLIC: case MEMORY_DEVICE_FS_DAX: __put_devmap_managed_page(page); return true; @@ -954,60 +986,28 @@ static inline bool put_devmap_managed_page(struct page *page) return false; } -static inline bool is_device_private_page(const struct page *page) -{ - return is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PRIVATE; -} - -static inline bool is_device_public_page(const struct page *page) -{ - return is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PUBLIC; -} - -#ifdef CONFIG_PCI_P2PDMA -static inline bool is_pci_p2pdma_page(const struct page *page) -{ - return is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; -} -#else /* CONFIG_PCI_P2PDMA */ -static inline bool is_pci_p2pdma_page(const struct page *page) -{ - return false; -} -#endif /* CONFIG_PCI_P2PDMA */ - #else /* CONFIG_DEV_PAGEMAP_OPS */ -static inline void dev_pagemap_get_ops(void) -{ -} - -static inline void dev_pagemap_put_ops(void) -{ -} - static inline bool put_devmap_managed_page(struct page *page) { return false; } +#endif /* CONFIG_DEV_PAGEMAP_OPS */ static inline bool is_device_private_page(const struct page *page) { - return false; -} - -static inline bool is_device_public_page(const struct page *page) -{ - return false; + return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && + IS_ENABLED(CONFIG_DEVICE_PRIVATE) && + is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PRIVATE; } static inline bool is_pci_p2pdma_page(const struct page *page) { - return false; + return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && + IS_ENABLED(CONFIG_PCI_P2PDMA) && + is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; } -#endif /* CONFIG_DEV_PAGEMAP_OPS */ /* 127: arbitrary random number, small enough to assemble well */ #define page_ref_zero_or_close_to_overflow(page) \ @@ -1070,8 +1070,9 @@ static inline void put_user_page(struct page *page) put_page(page); } -void put_user_pages_dirty(struct page **pages, unsigned long npages); -void put_user_pages_dirty_lock(struct page **pages, unsigned long npages); +void put_user_pages_dirty_lock(struct page **pages, unsigned long npages, + bool make_dirty); + void put_user_pages(struct page **pages, unsigned long npages); #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) @@ -1418,7 +1419,11 @@ extern void pagefault_out_of_memory(void); extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); +#ifdef CONFIG_MMU extern bool can_do_mlock(void); +#else +static inline bool can_do_mlock(void) { return false; } +#endif extern int user_shm_lock(size_t, struct user_struct *); extern void user_shm_unlock(size_t, struct user_struct 
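/*
 * Example (sketch): releasing a gup pin through the consolidated helper
 * above; the old put_user_pages_dirty()/put_user_pages_dirty_lock() pair is
 * now one call taking a make_dirty flag. The DMA step is elided.
 */
#include <linux/mm.h>

static long pin_and_use_sketch(unsigned long uaddr, int npages,
			       struct page **pages)
{
	long pinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);

	if (pinned <= 0)
		return pinned;

	/* ... hardware writes into the pinned pages ... */

	/* Dirties each page (taking the page lock) before dropping the ref. */
	put_user_pages_dirty_lock(pages, pinned, true);
	return pinned;
}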
*); @@ -1431,10 +1436,8 @@ struct zap_details { pgoff_t last_index; /* Highest page->index to unmap */ }; -struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, - pte_t pte, bool with_public_device); -#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false) - +struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte); struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd); @@ -1445,54 +1448,8 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long start, unsigned long end); -/** - * mm_walk - callbacks for walk_page_range - * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry - * this handler should only handle pud_trans_huge() puds. - * the pmd_entry or pte_entry callbacks will be used for - * regular PUDs. - * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry - * this handler is required to be able to handle - * pmd_trans_huge() pmds. They may simply choose to - * split_huge_page() instead of handling it explicitly. - * @pte_entry: if set, called for each non-empty PTE (4th-level) entry - * @pte_hole: if set, called for each hole at all levels - * @hugetlb_entry: if set, called for each hugetlb entry - * @test_walk: caller specific callback function to determine whether - * we walk over the current vma or not. Returning 0 - * value means "do page table walk over the current vma," - * and a negative one means "abort current page table walk - * right now." 1 means "skip the current vma." - * @mm: mm_struct representing the target process of page table walk - * @vma: vma currently walked (NULL if walking outside vmas) - * @private: private data for callbacks' usage - * - * (see the comment on walk_page_range() for more details) - */ -struct mm_walk { - int (*pud_entry)(pud_t *pud, unsigned long addr, - unsigned long next, struct mm_walk *walk); - int (*pmd_entry)(pmd_t *pmd, unsigned long addr, - unsigned long next, struct mm_walk *walk); - int (*pte_entry)(pte_t *pte, unsigned long addr, - unsigned long next, struct mm_walk *walk); - int (*pte_hole)(unsigned long addr, unsigned long next, - struct mm_walk *walk); - int (*hugetlb_entry)(pte_t *pte, unsigned long hmask, - unsigned long addr, unsigned long next, - struct mm_walk *walk); - int (*test_walk)(unsigned long addr, unsigned long next, - struct mm_walk *walk); - struct mm_struct *mm; - struct vm_area_struct *vma; - void *private; -}; - struct mmu_notifier_range; -int walk_page_range(unsigned long addr, unsigned long end, - struct mm_walk *walk); -int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk); void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, @@ -1575,6 +1532,10 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); +int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); +int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, + struct task_struct *task, bool bypass_rlim); + /* Container for pinned pfns / pages */ struct frame_vector { unsigned int nr_allocated; /* Number of frames we have space for */ @@ -1648,23 +1609,6 @@ int clear_page_dirty_for_io(struct 
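/*
 * Example (sketch): charging a pin against RLIMIT_MEMLOCK with the new
 * account_locked_vm() helper; the same call with inc=false undoes the charge
 * on teardown or on a later failure. current->mm is assumed to be the mm that
 * owns the pinning context.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static int charge_pinned_sketch(unsigned long npages)
{
	int ret;

	ret = account_locked_vm(current->mm, npages, true);
	if (ret)		/* typically -ENOMEM when over the rlimit */
		return ret;

	/* ... pin npages pages; if that fails, undo with:
	 *	account_locked_vm(current->mm, npages, false);
	 */
	return 0;
}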
page *page); int get_cmdline(struct task_struct *task, char *buffer, int buflen); -static inline bool vma_is_anonymous(struct vm_area_struct *vma) -{ - return !vma->vm_ops; -} - -#ifdef CONFIG_SHMEM -/* - * The vma_is_shmem is not inline because it is used only by slow - * paths in userfault. - */ -bool vma_is_shmem(struct vm_area_struct *vma); -#else -static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } -#endif - -int vma_is_stack_for_current(struct vm_area_struct *vma); - extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, @@ -1782,7 +1726,7 @@ static inline void sync_mm_rss(struct mm_struct *mm) } #endif -#ifndef __HAVE_ARCH_PTE_DEVMAP +#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP static inline int pte_devmap(pte_t pte) { return 0; @@ -2000,7 +1944,7 @@ static inline void pgtable_init(void) pgtable_cache_init(); } -static inline bool pgtable_page_ctor(struct page *page) +static inline bool pgtable_pte_page_ctor(struct page *page) { if (!ptlock_init(page)) return false; @@ -2009,7 +1953,7 @@ static inline bool pgtable_page_ctor(struct page *page) return true; } -static inline void pgtable_page_dtor(struct page *page) +static inline void pgtable_pte_page_dtor(struct page *page) { ptlock_free(page); __ClearPageTable(page); @@ -2379,6 +2323,8 @@ extern int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, struct page **pages); +unsigned long randomize_stack_top(unsigned long stack_top); + extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long mmap_region(struct file *file, unsigned long addr, @@ -2642,6 +2588,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_COW 0x4000 /* internal GUP flag */ #define FOLL_ANON 0x8000 /* don't do file mappings */ #define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */ +#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */ /* * NOTE on FOLL_LONGTERM: @@ -2681,8 +2628,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) return 0; } -typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, - void *data); +typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data); @@ -2696,11 +2642,42 @@ static inline void kernel_poison_pages(struct page *page, int numpages, int enable) { } #endif -extern bool _debug_pagealloc_enabled; +#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON +DECLARE_STATIC_KEY_TRUE(init_on_alloc); +#else +DECLARE_STATIC_KEY_FALSE(init_on_alloc); +#endif +static inline bool want_init_on_alloc(gfp_t flags) +{ + if (static_branch_unlikely(&init_on_alloc) && + !page_poisoning_enabled()) + return true; + return flags & __GFP_ZERO; +} + +#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON +DECLARE_STATIC_KEY_TRUE(init_on_free); +#else +DECLARE_STATIC_KEY_FALSE(init_on_free); +#endif +static inline bool want_init_on_free(void) +{ + return static_branch_unlikely(&init_on_free) && + !page_poisoning_enabled(); +} + +#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT +DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled); +#else +DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); +#endif static inline bool debug_pagealloc_enabled(void) { - return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && 
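/*
 * Example (sketch): a callback matching the new pte_fn_t above (the pgtable_t
 * token parameter is gone). Note that apply_to_page_range() instantiates
 * missing page-table pages for the range, so it is normally used on ranges
 * the caller owns, e.g. a vmalloc area in init_mm; the counter here is only
 * illustrative.
 */
#include <linux/mm.h>

static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* non-zero aborts the walk */
}

static unsigned long count_range_sketch(struct mm_struct *mm,
					unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	apply_to_page_range(mm, addr, size, count_present_pte, &count);
	return count;
}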
_debug_pagealloc_enabled; + if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) + return false; + + return static_branch_unlikely(&_debug_pagealloc_enabled); } #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP) @@ -2756,11 +2733,17 @@ extern int randomize_va_space; #endif const char * arch_vma_name(struct vm_area_struct *vma); +#ifdef CONFIG_MMU void print_vma_addr(char *prefix, unsigned long rip); +#else +static inline void print_vma_addr(char *prefix, unsigned long rip) +{ +} +#endif void *sparse_buffer_alloc(unsigned long size); -struct page *sparse_mem_map_populate(unsigned long pnum, int nid, - struct vmem_altmap *altmap); +struct page * __populate_section_memmap(unsigned long pfn, + unsigned long nr_pages, int nid, struct vmem_altmap *altmap); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); @@ -2850,11 +2833,9 @@ extern long copy_huge_page_from_user(struct page *dst_page, bool allow_pagefault); #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ -extern struct page_ext_operations debug_guardpage_ops; - #ifdef CONFIG_DEBUG_PAGEALLOC extern unsigned int _debug_guardpage_minorder; -extern bool _debug_guardpage_enabled; +DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled); static inline unsigned int debug_guardpage_minorder(void) { @@ -2863,21 +2844,15 @@ static inline unsigned int debug_guardpage_minorder(void) static inline bool debug_guardpage_enabled(void) { - return _debug_guardpage_enabled; + return static_branch_unlikely(&_debug_guardpage_enabled); } static inline bool page_is_guard(struct page *page) { - struct page_ext *page_ext; - if (!debug_guardpage_enabled()) return false; - page_ext = lookup_page_ext(page); - if (unlikely(!page_ext)) - return false; - - return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); + return PageGuard(page); } #else static inline unsigned int debug_guardpage_minorder(void) { return 0; } @@ -2891,5 +2866,12 @@ void __init setup_nr_node_ids(void); static inline void setup_nr_node_ids(void) {} #endif +extern int memcmp_pages(struct page *page1, struct page *page2); + +static inline int pages_identical(struct page *page1, struct page *page2) +{ + return !memcmp_pages(page1, page2); +} + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 8ec38b11b361..270aa8fd2800 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -25,7 +25,6 @@ struct address_space; struct mem_cgroup; -struct hmm; /* * Each physical page in the system has a struct page associated with @@ -139,6 +138,7 @@ struct page { struct { /* Second tail page of compound page */ unsigned long _compound_pad_1; /* compound_head */ unsigned long _compound_pad_2; + /* For both global and memcg */ struct list_head deferred_list; }; struct { /* Page table pages */ @@ -158,8 +158,17 @@ struct page { struct { /* ZONE_DEVICE pages */ /** @pgmap: Points to the hosting device page map. */ struct dev_pagemap *pgmap; - unsigned long hmm_data; - unsigned long _zd_pad_1; /* uses mapping */ + void *zone_device_data; + /* + * ZONE_DEVICE private pages are counted as being + * mapped so the next 3 words hold the mapping, index, + * and private fields from the source anonymous or + * page cache page while the page is migrated to device + * private memory. 
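/*
 * Example (sketch): the static-key pattern these knobs were just converted
 * to; a default-off key compiles the check down to a patched branch and is
 * flipped once at boot (e.g. from an early_param() handler). The names
 * my_feature_* are illustrative.
 */
#include <linux/init.h>
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(my_feature_key);

static int __init my_feature_setup(char *unused)
{
	static_branch_enable(&my_feature_key);
	return 0;
}
early_param("my_feature", my_feature_setup);

static inline bool my_feature_enabled(void)
{
	/* Same shape as debug_pagealloc_enabled()/want_init_on_alloc() above. */
	return static_branch_unlikely(&my_feature_key);
}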
+ * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also + * use the mapping, index, and private fields when + * pmem backed DAX files are mapped. + */ }; /** @rcu_head: You can use this to free a page by RCU. */ @@ -212,6 +221,11 @@ struct page { #endif } _struct_page_alignment; +static inline atomic_t *compound_mapcount_ptr(struct page *page) +{ + return &page[1].compound_mapcount; +} + /* * Used for sizing the vmemmap region on some architectures */ @@ -329,7 +343,9 @@ struct vm_area_struct { struct file * vm_file; /* File we map to (can be NULL). */ void * vm_private_data; /* was vm_pte (shared mem) */ +#ifdef CONFIG_SWAP atomic_long_t swap_readahead_info; +#endif #ifndef CONFIG_MMU struct vm_region *vm_region; /* NOMMU mapping region */ #endif @@ -372,6 +388,16 @@ struct mm_struct { unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; +#ifdef CONFIG_MEMBARRIER + /** + * @membarrier_state: Flags controlling membarrier behavior. + * + * This field is close to @pgd to hopefully fit in the same + * cache-line, which needs to be touched by switch_mm(). + */ + atomic_t membarrier_state; +#endif + /** * @mm_users: The number of users including userspace. * @@ -441,9 +467,7 @@ struct mm_struct { unsigned long flags; /* Must use atomic bitops to access */ struct core_state *core_state; /* coredumping support */ -#ifdef CONFIG_MEMBARRIER - atomic_t membarrier_state; -#endif + #ifdef CONFIG_AIO spinlock_t ioctx_lock; struct kioctx_table __rcu *ioctx_table; @@ -500,11 +524,6 @@ struct mm_struct { atomic_long_t hugetlb_usage; #endif struct work_struct async_put_work; - -#if IS_ENABLED(CONFIG_HMM) - /* HMM needs to track a few things per mm */ - struct hmm *hmm; -#endif } __randomize_layout; /* diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h index d7016dcb245e..c1bc6731125c 100644 --- a/include/linux/mm_types_task.h +++ b/include/linux/mm_types_task.h @@ -36,6 +36,10 @@ struct vmacache { struct vm_area_struct *vmas[VMACACHE_SIZE]; }; +/* + * When updating this, please also update struct resident_page_types[] in + * kernel/fork.c + */ enum { MM_FILEPAGES, /* Resident file mapping pages */ MM_ANONPAGES, /* Resident anonymous pages */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 7ac3755444d3..ba703384bea0 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -128,6 +128,7 @@ struct mmc_host_ops { int (*get_cd)(struct mmc_host *host); void (*enable_sdio_irq)(struct mmc_host *host, int enable); + /* Mandatory callback when using MMC_CAP2_SDIO_IRQ_NOTHREAD. 
*/ void (*ack_sdio_irq)(struct mmc_host *host); /* optional callback for HC quirks */ @@ -367,6 +368,7 @@ struct mmc_host { #define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */ #define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */ #define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */ +#define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */ int fixed_drv_type; /* fixed driver type for non-removable media */ @@ -396,6 +398,7 @@ struct mmc_host { unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ unsigned int use_blk_mq:1; /* use blk-mq */ unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ + unsigned int can_dma_map_merge:1; /* merging can be used */ int rescan_disable; /* disable card detection */ int rescan_entered; /* used with nonremovable devices */ @@ -493,6 +496,15 @@ void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq); void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq); +/* + * May be called from host driver's system/runtime suspend/resume callbacks, + * to know if SDIO IRQs has been claimed. + */ +static inline bool sdio_irq_claimed(struct mmc_host *host) +{ + return host->sdio_irqs > 0; +} + static inline void mmc_signal_sdio_irq(struct mmc_host *host) { host->ops->enable_sdio_irq(host, 0); @@ -501,7 +513,6 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host) wake_up_process(host->sdio_irq_thread); } -void sdio_run_irqs(struct mmc_host *host); void sdio_signal_irq(struct mmc_host *host); #ifdef CONFIG_REGULATOR diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index b6c004bd9f6a..1bd8e6a09a3c 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -42,6 +42,10 @@ enum mmu_notifier_event { #ifdef CONFIG_MMU_NOTIFIER +#ifdef CONFIG_LOCKDEP +extern struct lockdep_map __mmu_notifier_invalidate_range_start_map; +#endif + /* * The mmu notifier_mm structure is allocated and installed in * mm->mmu_notifier_mm inside the mm_take_all_locks() protected @@ -211,6 +215,19 @@ struct mmu_notifier_ops { */ void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end); + + /* + * These callbacks are used with the get/put interface to manage the + * lifetime of the mmu_notifier memory. alloc_notifier() returns a new + * notifier for use with the mm. + * + * free_notifier() is only called after the mmu_notifier has been + * fully put, calls to any ops callback are prevented and no ops + * callbacks are currently running. It is called from a SRCU callback + * and cannot sleep. 
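/*
 * Example (sketch): a host driver's system-suspend path using the new
 * sdio_irq_claimed() helper to decide whether the card must stay powered;
 * struct my_host and the policy are illustrative, only the helper comes from
 * the header above.
 */
#include <linux/mmc/host.h>

struct my_host {
	struct mmc_host *mmc;
	bool keep_power;
};

static int my_host_suspend_sketch(struct my_host *host)
{
	/* A claimed SDIO IRQ means a function driver still expects signalling. */
	host->keep_power = sdio_irq_claimed(host->mmc);
	return 0;
}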
+ */ + struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm); + void (*free_notifier)(struct mmu_notifier *mn); }; /* @@ -227,6 +244,9 @@ struct mmu_notifier_ops { struct mmu_notifier { struct hlist_node hlist; const struct mmu_notifier_ops *ops; + struct mm_struct *mm; + struct rcu_head rcu; + unsigned int users; }; static inline int mm_has_notifiers(struct mm_struct *mm) @@ -234,14 +254,27 @@ static inline int mm_has_notifiers(struct mm_struct *mm) return unlikely(mm->mmu_notifier_mm); } +struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops, + struct mm_struct *mm); +static inline struct mmu_notifier * +mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm) +{ + struct mmu_notifier *ret; + + down_write(&mm->mmap_sem); + ret = mmu_notifier_get_locked(ops, mm); + up_write(&mm->mmap_sem); + return ret; +} +void mmu_notifier_put(struct mmu_notifier *mn); +void mmu_notifier_synchronize(void); + extern int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm); extern int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm); extern void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm); -extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn, - struct mm_struct *mm); extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); extern void __mmu_notifier_release(struct mm_struct *mm); extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, @@ -310,25 +343,36 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, static inline void mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) { + might_sleep(); + + lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); if (mm_has_notifiers(range->mm)) { range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE; __mmu_notifier_invalidate_range_start(range); } + lock_map_release(&__mmu_notifier_invalidate_range_start_map); } static inline int mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range) { + int ret = 0; + + lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); if (mm_has_notifiers(range->mm)) { range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE; - return __mmu_notifier_invalidate_range_start(range); + ret = __mmu_notifier_invalidate_range_start(range); } - return 0; + lock_map_release(&__mmu_notifier_invalidate_range_start_map); + return ret; } static inline void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) { + if (mmu_notifier_range_blockable(range)) + might_sleep(); + if (mm_has_notifiers(range->mm)) __mmu_notifier_invalidate_range_end(range, false); } @@ -482,9 +526,6 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range, set_pte_at(___mm, ___address, __ptep, ___pte); \ }) -extern void mmu_notifier_call_srcu(struct rcu_head *rcu, - void (*func)(struct rcu_head *rcu)); - #else /* CONFIG_MMU_NOTIFIER */ struct mmu_notifier_range { @@ -581,6 +622,10 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define pudp_huge_clear_flush_notify pudp_huge_clear_flush #define set_pte_at_notify set_pte_at +static inline void mmu_notifier_synchronize(void) +{ +} + #endif /* CONFIG_MMU_NOTIFIER */ #endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 70394cabaf4e..bda20282746b 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -215,8 +215,9 @@ enum node_stat_item { NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ NR_UNEVICTABLE, /* " " 
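/*
 * Example (sketch): the get/put lifetime interface added above. One notifier
 * exists per (ops, mm) pair; alloc_notifier() builds it (embedded in a
 * private struct here) and free_notifier() runs from an SRCU callback after
 * the final put, so it must not sleep. struct my_ctx is illustrative.
 */
#include <linux/err.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>

struct my_ctx {
	struct mmu_notifier mn;
};

static struct mmu_notifier *my_alloc_notifier(struct mm_struct *mm)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	return ctx ? &ctx->mn : ERR_PTR(-ENOMEM);
}

static void my_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct my_ctx, mn));	/* SRCU callback context */
}

static const struct mmu_notifier_ops my_mn_ops = {
	.alloc_notifier	= my_alloc_notifier,
	.free_notifier	= my_free_notifier,
};

static struct my_ctx *my_ctx_get(struct mm_struct *mm)
{
	struct mmu_notifier *mn = mmu_notifier_get(&my_mn_ops, mm);

	if (IS_ERR(mn))
		return NULL;
	return container_of(mn, struct my_ctx, mn);
}

/* The matching release is mmu_notifier_put(&ctx->mn). */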
" " " */ - NR_SLAB_RECLAIMABLE, - NR_SLAB_UNRECLAIMABLE, + NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */ + NR_SLAB_UNRECLAIMABLE, /* and this one without looking at + * memcg_flush_percpu_vmstats() first. */ NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, @@ -234,6 +235,8 @@ enum node_stat_item { NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ NR_SHMEM_THPS, NR_SHMEM_PMDMAPPED, + NR_FILE_THPS, + NR_FILE_PMDMAPPED, NR_ANON_THPS, NR_UNSTABLE_NFS, /* NFS unstable pages */ NR_VMSCAN_WRITE, @@ -676,6 +679,14 @@ struct zonelist { extern struct page *mem_map; #endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +struct deferred_split { + spinlock_t split_queue_lock; + struct list_head split_queue; + unsigned long split_queue_len; +}; +#endif + /* * On NUMA machines, each NUMA node would have a pg_data_t to describe * it's memory layout. On UMA machines there is a single pglist_data which @@ -755,9 +766,7 @@ typedef struct pglist_data { #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE - spinlock_t split_queue_lock; - struct list_head split_queue; - unsigned long split_queue_len; + struct deferred_split deferred_split_queue; #endif /* Fields commonly accessed by the page reclaim scanner */ @@ -855,18 +864,6 @@ static inline int local_memory_node(int node_id) { return node_id; }; */ #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) -#ifdef CONFIG_ZONE_DEVICE -static inline bool is_dev_zone(const struct zone *zone) -{ - return zone_idx(zone) == ZONE_DEVICE; -} -#else -static inline bool is_dev_zone(const struct zone *zone) -{ - return false; -} -#endif - /* * Returns true if a zone has pages managed by the buddy allocator. * All the reclaim decisions have to use this function rather than @@ -1160,6 +1157,29 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec) #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) +#define SUBSECTION_SHIFT 21 + +#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT) +#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT) +#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1)) + +#if SUBSECTION_SHIFT > SECTION_SIZE_BITS +#error Subsection size exceeds section size +#else +#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT)) +#endif + +#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION) +#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK) + +struct mem_section_usage { + DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION); + /* See declaration of similar field in struct zone */ + unsigned long pageblock_flags[0]; +}; + +void subsection_map_init(unsigned long pfn, unsigned long nr_pages); + struct page; struct page_ext; struct mem_section { @@ -1177,8 +1197,7 @@ struct mem_section { */ unsigned long section_mem_map; - /* See declaration of similar field in struct zone */ - unsigned long *pageblock_flags; + struct mem_section_usage *usage; #ifdef CONFIG_PAGE_EXTENSION /* * If SPARSEMEM, pgdat doesn't have page_ext pointer. 
We use @@ -1209,6 +1228,11 @@ extern struct mem_section **mem_section; extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]; #endif +static inline unsigned long *section_to_usemap(struct mem_section *ms) +{ + return ms->usage->pageblock_flags; +} + static inline struct mem_section *__nr_to_section(unsigned long nr) { #ifdef CONFIG_SPARSEMEM_EXTREME @@ -1219,8 +1243,8 @@ static inline struct mem_section *__nr_to_section(unsigned long nr) return NULL; return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; } -extern int __section_nr(struct mem_section* ms); -extern unsigned long usemap_size(void); +extern unsigned long __section_nr(struct mem_section *ms); +extern size_t mem_section_usage_size(void); /* * We use the lower bits of the mem_map pointer to store @@ -1238,7 +1262,8 @@ extern unsigned long usemap_size(void); #define SECTION_MARKED_PRESENT (1UL<<0) #define SECTION_HAS_MEM_MAP (1UL<<1) #define SECTION_IS_ONLINE (1UL<<2) -#define SECTION_MAP_LAST_BIT (1UL<<3) +#define SECTION_IS_EARLY (1UL<<3) +#define SECTION_MAP_LAST_BIT (1UL<<4) #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) #define SECTION_NID_SHIFT 3 @@ -1264,6 +1289,11 @@ static inline int valid_section(struct mem_section *section) return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP)); } +static inline int early_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & SECTION_IS_EARLY)); +} + static inline int valid_section_nr(unsigned long nr) { return valid_section(__nr_to_section(nr)); @@ -1291,14 +1321,42 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn) return __nr_to_section(pfn_to_section_nr(pfn)); } -extern int __highest_present_section_nr; +extern unsigned long __highest_present_section_nr; + +static inline int subsection_map_index(unsigned long pfn) +{ + return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION; +} + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) +{ + int idx = subsection_map_index(pfn); + + return test_bit(idx, ms->usage->subsection_map); +} +#else +static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) +{ + return 1; +} +#endif #ifndef CONFIG_HAVE_ARCH_PFN_VALID static inline int pfn_valid(unsigned long pfn) { + struct mem_section *ms; + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; - return valid_section(__nr_to_section(pfn_to_section_nr(pfn))); + ms = __nr_to_section(pfn_to_section_nr(pfn)); + if (!valid_section(ms)) + return 0; + /* + * Traditionally early sections always returned pfn_valid() for + * the entire section-sized span. 
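/*
 * Worked example (sketch): with 4K pages (PAGE_SHIFT = 12) and the 128M
 * sections used on x86-64 (SECTION_SIZE_BITS = 27), SUBSECTION_SHIFT = 21
 * gives 2M subsections:
 *
 *	PAGES_PER_SUBSECTION    = 1 << (21 - 12) = 512 pages (2M)
 *	SUBSECTIONS_PER_SECTION = 1 << (27 - 21) = 64 bits in subsection_map
 *
 * so a non-early section can be populated 2M at a time, which is what lets
 * ZONE_DEVICE map memory at sub-section granularity. A pfn check built from
 * the helpers above:
 */
#include <linux/mmzone.h>

static inline bool pfn_backed_sketch(unsigned long pfn)
{
	struct mem_section *ms;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return false;
	ms = __pfn_to_section(pfn);
	return valid_section(ms) && pfn_section_valid(ms, pfn);
}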
+ */ + return early_section(ms) || pfn_section_valid(ms, pfn); } #endif @@ -1330,6 +1388,7 @@ void sparse_init(void); #define sparse_init() do {} while (0) #define sparse_index_init(_sec, _nid) do {} while (0) #define pfn_present pfn_valid +#define subsection_map_init(_pfn, _nr_pages) do {} while (0) #endif /* CONFIG_SPARSEMEM */ /* diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 448621c32e4d..5714fd35a83c 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -16,6 +16,25 @@ typedef unsigned long kernel_ulong_t; #define PCI_ANY_ID (~0) +/** + * struct pci_device_id - PCI device ID structure + * @vendor: Vendor ID to match (or PCI_ANY_ID) + * @device: Device ID to match (or PCI_ANY_ID) + * @subvendor: Subsystem vendor ID to match (or PCI_ANY_ID) + * @subdevice: Subsystem device ID to match (or PCI_ANY_ID) + * @class: Device class, subclass, and "interface" to match. + * See Appendix D of the PCI Local Bus Spec or + * include/linux/pci_ids.h for a full list of classes. + * Most drivers do not need to specify class/class_mask + * as vendor/device is normally sufficient. + * @class_mask: Limit which sub-fields of the class field are compared. + * See drivers/scsi/sym53c8xx_2/ for example of usage. + * @driver_data: Data private to the driver. + * Most drivers don't need to use driver_data field. + * Best practice is to use driver_data as an index + * into a static list of equivalent device types, + * instead of using it as a pointer. + */ struct pci_device_id { __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ @@ -257,17 +276,17 @@ struct pcmcia_device_id { __u16 match_flags; __u16 manf_id; - __u16 card_id; + __u16 card_id; - __u8 func_id; + __u8 func_id; /* for real multi-function devices */ - __u8 function; + __u8 function; /* for pseudo multi-function devices */ - __u8 device_no; + __u8 device_no; - __u32 prod_id_hash[4]; + __u32 prod_id_hash[4]; /* not matched against in kernelspace */ const char * prod_id[4]; @@ -795,9 +814,11 @@ struct tee_client_device_id { /** * struct wmi_device_id - WMI device identifier * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba + * @context: pointer to driver specific data */ struct wmi_device_id { const char guid_string[UUID_STRING_LEN+1]; + const void *context; }; #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 188998d3dca9..6d20895e7739 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -21,13 +21,11 @@ #include <linux/rbtree_latch.h> #include <linux/error-injection.h> #include <linux/tracepoint-defs.h> +#include <linux/srcu.h> #include <linux/percpu.h> #include <asm/module.h> -/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. 
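/*
 * Example (sketch): the driver_data-as-index convention the pci_device_id
 * kernel-doc above recommends; the vendor/device IDs and board data are made
 * up.
 */
#include <linux/module.h>
#include <linux/pci.h>

enum { board_foo, board_bar };

static const struct my_board_info {
	const char	*name;
	unsigned int	num_ports;
} my_boards[] = {
	[board_foo] = { "foo", 2 },
	[board_bar] = { "bar", 4 },
};

static const struct pci_device_id my_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x0001), .driver_data = board_foo },
	{ PCI_DEVICE(0x1234, 0x0002), .driver_data = board_bar },
	{ }	/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, my_pci_ids);

/* In probe(): const struct my_board_info *bi = &my_boards[id->driver_data]; */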
*/ -#define MODULE_SIG_STRING "~Module signature appended~\n" - /* Not Yet Implemented */ #define MODULE_SUPPORTED_DEVICE(name) @@ -275,6 +273,8 @@ extern typeof(name) __mod_##type##__##name##_device_table \ * files require multiple MODULE_FIRMWARE() specifiers */ #define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware) +#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, #ns) + struct notifier_block; #ifdef CONFIG_MODULES @@ -450,6 +450,10 @@ struct module { unsigned int num_tracepoints; tracepoint_ptr_t *tracepoints_ptrs; #endif +#ifdef CONFIG_TREE_SRCU + unsigned int num_srcu_structs; + struct srcu_struct **srcu_struct_ptrs; +#endif #ifdef CONFIG_BPF_EVENTS unsigned int num_bpf_raw_events; struct bpf_raw_event_map *bpf_raw_events; diff --git a/include/linux/module_signature.h b/include/linux/module_signature.h new file mode 100644 index 000000000000..7eb4b00381ac --- /dev/null +++ b/include/linux/module_signature.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Module signature handling. + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_MODULE_SIGNATURE_H +#define _LINUX_MODULE_SIGNATURE_H + +#include <linux/types.h> + +/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */ +#define MODULE_SIG_STRING "~Module signature appended~\n" + +enum pkey_id_type { + PKEY_ID_PGP, /* OpenPGP generated key ID */ + PKEY_ID_X509, /* X.509 arbitrary subjectKeyIdentifier */ + PKEY_ID_PKCS7, /* Signature in PKCS#7 message */ +}; + +/* + * Module signature information block. + * + * The constituents of the signature section are, in order: + * + * - Signer's name + * - Key identifier + * - Signature data + * - Information block + */ +struct module_signature { + u8 algo; /* Public-key crypto algorithm [0] */ + u8 hash; /* Digest algorithm [0] */ + u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */ + u8 signer_len; /* Length of signer's name [0] */ + u8 key_id_len; /* Length of key identifier [0] */ + u8 __pad[3]; + __be32 sig_len; /* Length of signature data */ +}; + +int mod_check_sig(const struct module_signature *ms, size_t file_len, + const char *name); + +#endif /* _LINUX_MODULE_SIGNATURE_H */ diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index 31013c2effd3..5229c18025e9 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -29,6 +29,11 @@ void *module_alloc(unsigned long size); /* Free memory returned from module_alloc. */ void module_memfree(void *module_region); +/* Determines if the section name is an exit section (that is only used during + * module unloading) + */ +bool module_exit_section(const char *name); + /* * Apply the given relocation to the (simplified) ELF. Return -error * or 0. 
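/*
 * Example (sketch): locating the trailer laid out above at the end of a
 * signed module image. For PKCS#7 signatures signer_len and key_id_len are
 * zero, so the signature blob sits immediately before the info block; this
 * mirrors the checks mod_check_sig() performs rather than replacing them.
 */
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/module_signature.h>
#include <linux/string.h>

static int find_sig_sketch(const void *mod, size_t modlen,
			   const void **sig, size_t *siglen)
{
	const size_t markerlen = sizeof(MODULE_SIG_STRING) - 1;
	const struct module_signature *ms;

	if (modlen < markerlen + sizeof(*ms))
		return -ENODATA;
	if (memcmp(mod + modlen - markerlen, MODULE_SIG_STRING, markerlen))
		return -ENODATA;			/* not signed */

	ms = mod + modlen - markerlen - sizeof(*ms);
	*siglen = be32_to_cpu(ms->sig_len);
	if (*siglen + markerlen + sizeof(*ms) > modlen)
		return -EBADMSG;
	*sig = (const void *)ms - *siglen;		/* PKCS#7 blob */
	return 0;
}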
diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h new file mode 100644 index 000000000000..490db6886dcc --- /dev/null +++ b/include/linux/moxtet.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Turris Mox module configuration bus driver + * + * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> + */ + +#ifndef __LINUX_MOXTET_H +#define __LINUX_MOXTET_H + +#include <linux/device.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/mutex.h> + +#define TURRIS_MOX_MAX_MODULES 10 + +enum turris_mox_cpu_module_id { + TURRIS_MOX_CPU_ID_EMMC = 0x00, + TURRIS_MOX_CPU_ID_SD = 0x10, +}; + +enum turris_mox_module_id { + TURRIS_MOX_MODULE_FIRST = 0x01, + + TURRIS_MOX_MODULE_SFP = 0x01, + TURRIS_MOX_MODULE_PCI = 0x02, + TURRIS_MOX_MODULE_TOPAZ = 0x03, + TURRIS_MOX_MODULE_PERIDOT = 0x04, + TURRIS_MOX_MODULE_USB3 = 0x05, + TURRIS_MOX_MODULE_PCI_BRIDGE = 0x06, + + TURRIS_MOX_MODULE_LAST = 0x06, +}; + +#define MOXTET_NIRQS 16 + +extern struct bus_type moxtet_type; + +struct moxtet { + struct device *dev; + struct mutex lock; + u8 modules[TURRIS_MOX_MAX_MODULES]; + int count; + u8 tx[TURRIS_MOX_MAX_MODULES]; + int dev_irq; + struct { + struct irq_domain *domain; + struct irq_chip chip; + unsigned long masked, exists; + struct moxtet_irqpos { + u8 idx; + u8 bit; + } position[MOXTET_NIRQS]; + } irq; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root; +#endif +}; + +struct moxtet_driver { + const enum turris_mox_module_id *id_table; + struct device_driver driver; +}; + +static inline struct moxtet_driver * +to_moxtet_driver(struct device_driver *drv) +{ + if (!drv) + return NULL; + return container_of(drv, struct moxtet_driver, driver); +} + +extern int __moxtet_register_driver(struct module *owner, + struct moxtet_driver *mdrv); + +static inline void moxtet_unregister_driver(struct moxtet_driver *mdrv) +{ + if (mdrv) + driver_unregister(&mdrv->driver); +} + +#define moxtet_register_driver(driver) \ + __moxtet_register_driver(THIS_MODULE, driver) + +#define module_moxtet_driver(__moxtet_driver) \ + module_driver(__moxtet_driver, moxtet_register_driver, \ + moxtet_unregister_driver) + +struct moxtet_device { + struct device dev; + struct moxtet *moxtet; + enum turris_mox_module_id id; + unsigned int idx; +}; + +extern int moxtet_device_read(struct device *dev); +extern int moxtet_device_write(struct device *dev, u8 val); +extern int moxtet_device_written(struct device *dev); + +static inline struct moxtet_device * +to_moxtet_device(struct device *dev) +{ + if (!dev) + return NULL; + return container_of(dev, struct moxtet_device, dev); +} + +#endif /* __LINUX_MOXTET_H */ diff --git a/include/linux/msi.h b/include/linux/msi.h index d48e919d55ae..8ad679e9d9c0 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -64,6 +64,10 @@ struct ti_sci_inta_msi_desc { * @msg: The last set MSI message cached for reuse * @affinity: Optional pointer to a cpu affinity mask for this descriptor * + * @write_msi_msg: Callback that may be called when the MSI message + * address or data changes + * @write_msi_msg_data: Data parameter for the callback. 
+ * * @masked: [PCI MSI/X] Mask bits * @is_msix: [PCI MSI/X] True if MSI-X * @multiple: [PCI MSI/X] log2 num of messages allocated @@ -90,6 +94,9 @@ struct msi_desc { const void *iommu_cookie; #endif + void (*write_msi_msg)(struct msi_desc *entry, void *data); + void *write_msi_msg_data; + union { /* PCI MSI/X specific data */ struct { @@ -100,6 +107,7 @@ struct msi_desc { u8 multi_cap : 3; u8 maskbit : 1; u8 is_64 : 1; + u8 is_virtual : 1; u16 entry_nr; unsigned default_irq; } msi_attrib; diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 208c87cf2e3e..c98a21108688 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -219,6 +219,13 @@ struct cfi_pri_amdstd { uint8_t VppMin; uint8_t VppMax; uint8_t TopBottom; + /* Below field are added from version 1.5 */ + uint8_t ProgramSuspend; + uint8_t UnlockBypass; + uint8_t SecureSiliconSector; + uint8_t SoftwareFeatures; +#define CFI_POLL_STATUS_REG BIT(0) +#define CFI_POLL_DQ BIT(1) } __packed; /* Vendor-Specific PRI for Atmel chips (command set 0x0002) */ diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h new file mode 100644 index 000000000000..2dfe65964f6e --- /dev/null +++ b/include/linux/mtd/hyperbus.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef __LINUX_MTD_HYPERBUS_H__ +#define __LINUX_MTD_HYPERBUS_H__ + +#include <linux/mtd/map.h> + +enum hyperbus_memtype { + HYPERFLASH, + HYPERRAM, +}; + +/** + * struct hyperbus_device - struct representing HyperBus slave device + * @map: map_info struct for accessing MMIO HyperBus flash memory + * @np: pointer to HyperBus slave device node + * @mtd: pointer to MTD struct + * @ctlr: pointer to HyperBus controller struct + * @memtype: type of memory device: HyperFlash or HyperRAM + */ + +struct hyperbus_device { + struct map_info map; + struct device_node *np; + struct mtd_info *mtd; + struct hyperbus_ctlr *ctlr; + enum hyperbus_memtype memtype; +}; + +/** + * struct hyperbus_ops - struct representing custom HyperBus operations + * @read16: read 16 bit of data from flash in a single burst. Used to read + * from non default address space, such as ID/CFI space + * @write16: write 16 bit of data to flash in a single burst. Used to + * send cmd to flash or write single 16 bit word at a time. + * @copy_from: copy data from flash memory + * @copy_to: copy data to flash memory + * @calibrate: calibrate HyperBus controller + */ + +struct hyperbus_ops { + u16 (*read16)(struct hyperbus_device *hbdev, unsigned long addr); + void (*write16)(struct hyperbus_device *hbdev, + unsigned long addr, u16 val); + void (*copy_from)(struct hyperbus_device *hbdev, void *to, + unsigned long from, ssize_t len); + void (*copy_to)(struct hyperbus_device *dev, unsigned long to, + const void *from, ssize_t len); + int (*calibrate)(struct hyperbus_device *dev); +}; + +/** + * struct hyperbus_ctlr - struct representing HyperBus controller + * @dev: pointer to HyperBus controller device + * @calibrated: flag to indicate ctlr calibration sequence is complete + * @ops: HyperBus controller ops + */ +struct hyperbus_ctlr { + struct device *dev; + bool calibrated; + + const struct hyperbus_ops *ops; +}; + +/** + * hyperbus_register_device - probe and register a HyperBus slave memory device + * @hbdev: hyperbus_device struct with dev, np and ctlr field populated + * + * Return: 0 for success, others for failure. 
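/*
 * Example (sketch): the minimum a HyperBus controller driver wires up before
 * registering a flash; the my_hb_* accessors and platform glue are
 * illustrative, and the core is assumed to map the flash window described by
 * @np when hyperbus_register_device() runs.
 */
#include <linux/io.h>
#include <linux/mtd/hyperbus.h>

struct my_hb {
	struct hyperbus_ctlr	ctlr;
	struct hyperbus_device	hbdev;
};

static u16 my_hb_read16(struct hyperbus_device *hbdev, unsigned long addr)
{
	return readw(hbdev->map.virt + addr);
}

static void my_hb_write16(struct hyperbus_device *hbdev, unsigned long addr,
			  u16 val)
{
	writew(val, hbdev->map.virt + addr);
}

static const struct hyperbus_ops my_hb_ops = {
	.read16		= my_hb_read16,
	.write16	= my_hb_write16,
};

static int my_hb_attach_sketch(struct my_hb *hb, struct device *dev,
			       struct device_node *flash_np)
{
	hb->ctlr.dev	= dev;
	hb->ctlr.ops	= &my_hb_ops;
	hb->hbdev.np	= flash_np;
	hb->hbdev.ctlr	= &hb->ctlr;

	return hyperbus_register_device(&hb->hbdev);
}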
+ */ +int hyperbus_register_device(struct hyperbus_device *hbdev); + +/** + * hyperbus_unregister_device - deregister HyperBus slave memory device + * @hbdev: hyperbus_device to be unregistered + * + * Return: 0 for success, others for failure. + */ +int hyperbus_unregister_device(struct hyperbus_device *hbdev); + +#endif /* __LINUX_MTD_HYPERBUS_H__ */ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 936a3fdb48b5..249e8d9bfbcd 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -189,6 +189,9 @@ struct module; /* only needed for owner field in mtd_info */ */ struct mtd_debug_info { struct dentry *dfs_dir; + + const char *partname; + const char *partid; }; struct mtd_info { @@ -316,6 +319,12 @@ struct mtd_info { int (*_get_device) (struct mtd_info *mtd); void (*_put_device) (struct mtd_info *mtd); + /* + * flag indicates a panic write, low level drivers can take appropriate + * action if required to ensure writes go through + */ + bool oops_panic_write; + struct notifier_block reboot_notifier; /* default mode before reboot */ /* ECC status information */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index cebc38b6d6f5..0c7483843a32 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -346,7 +346,7 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand) } /** - * nanddev_neraseblocks() - Get the total number of erasablocks + * nanddev_neraseblocks() - Get the total number of eraseblocks * @nand: NAND device * * Return: the total number of eraseblocks exposed by @nand. diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index 2d12a1b18742..5f728407a579 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -77,6 +77,7 @@ #define ONENAND_DEVICE_DENSITY_1Gb (0x003) #define ONENAND_DEVICE_DENSITY_2Gb (0x004) #define ONENAND_DEVICE_DENSITY_4Gb (0x005) +#define ONENAND_DEVICE_DENSITY_8Gb (0x006) /* * Version ID Register F002h (R) diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index ac3884a28dea..4ab9bccfcde0 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -874,6 +874,42 @@ int nand_op_parser_exec_op(struct nand_chip *chip, const struct nand_op_parser *parser, const struct nand_operation *op, bool check_only); +static inline void nand_op_trace(const char *prefix, + const struct nand_op_instr *instr) +{ +#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) + switch (instr->type) { + case NAND_OP_CMD_INSTR: + pr_debug("%sCMD [0x%02x]\n", prefix, + instr->ctx.cmd.opcode); + break; + case NAND_OP_ADDR_INSTR: + pr_debug("%sADDR [%d cyc: %*ph]\n", prefix, + instr->ctx.addr.naddrs, + instr->ctx.addr.naddrs < 64 ? + instr->ctx.addr.naddrs : 64, + instr->ctx.addr.addrs); + break; + case NAND_OP_DATA_IN_INSTR: + pr_debug("%sDATA_IN [%d B%s]\n", prefix, + instr->ctx.data.len, + instr->ctx.data.force_8bit ? + ", force 8-bit" : ""); + break; + case NAND_OP_DATA_OUT_INSTR: + pr_debug("%sDATA_OUT [%d B%s]\n", prefix, + instr->ctx.data.len, + instr->ctx.data.force_8bit ? 
+ ", force 8-bit" : ""); + break; + case NAND_OP_WAITRDY_INSTR: + pr_debug("%sWAITRDY [max %d ms]\n", prefix, + instr->ctx.waitrdy.timeout_ms); + break; + } +#endif +} + /** * struct nand_controller_ops - Controller operations * diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h index 01306ebe266d..d2c3cf29e0d1 100644 --- a/include/linux/mtd/sharpsl.h +++ b/include/linux/mtd/sharpsl.h @@ -5,6 +5,9 @@ * Copyright (C) 2008 Dmitry Baryshkov */ +#ifndef _MTD_SHARPSL_H +#define _MTD_SHARPSL_H + #include <linux/mtd/rawnand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> @@ -16,3 +19,5 @@ struct sharpsl_nand_platform_data { unsigned int nr_partitions; const char *const *part_parsers; }; + +#endif /* _MTD_SHARPSL_H */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 9f57cdfcc93d..fc0b4b19c900 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -9,6 +9,7 @@ #include <linux/bitops.h> #include <linux/mtd/cfi.h> #include <linux/mtd/mtd.h> +#include <linux/spi/spi-mem.h> /* * Manufacturer IDs @@ -224,7 +225,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto) return spi_nor_get_protocol_data_nbits(proto); } -#define SPI_NOR_MAX_CMD_SIZE 8 enum spi_nor_ops { SPI_NOR_OPS_READ = 0, SPI_NOR_OPS_WRITE, @@ -237,12 +237,12 @@ enum spi_nor_option_flags { SNOR_F_USE_FSR = BIT(0), SNOR_F_HAS_SR_TB = BIT(1), SNOR_F_NO_OP_CHIP_ERASE = BIT(2), - SNOR_F_S3AN_ADDR_DEFAULT = BIT(3), - SNOR_F_READY_XSR_RDY = BIT(4), - SNOR_F_USE_CLSR = BIT(5), - SNOR_F_BROKEN_RESET = BIT(6), - SNOR_F_4B_OPCODES = BIT(7), - SNOR_F_HAS_4BAIT = BIT(8), + SNOR_F_READY_XSR_RDY = BIT(3), + SNOR_F_USE_CLSR = BIT(4), + SNOR_F_BROKEN_RESET = BIT(5), + SNOR_F_4B_OPCODES = BIT(6), + SNOR_F_HAS_4BAIT = BIT(7), + SNOR_F_HAS_LOCK = BIT(8), }; /** @@ -334,6 +334,195 @@ struct spi_nor_erase_map { }; /** + * struct spi_nor_hwcaps - Structure for describing the hardware capabilies + * supported by the SPI controller (bus master). + * @mask: the bitmask listing all the supported hw capabilies + */ +struct spi_nor_hwcaps { + u32 mask; +}; + +/* + *(Fast) Read capabilities. + * MUST be ordered by priority: the higher bit position, the higher priority. + * As a matter of performances, it is relevant to use Octal SPI protocols first, + * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly + * (Slow) Read. + */ +#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0) +#define SNOR_HWCAPS_READ BIT(0) +#define SNOR_HWCAPS_READ_FAST BIT(1) +#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2) + +#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3) +#define SNOR_HWCAPS_READ_1_1_2 BIT(3) +#define SNOR_HWCAPS_READ_1_2_2 BIT(4) +#define SNOR_HWCAPS_READ_2_2_2 BIT(5) +#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6) + +#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7) +#define SNOR_HWCAPS_READ_1_1_4 BIT(7) +#define SNOR_HWCAPS_READ_1_4_4 BIT(8) +#define SNOR_HWCAPS_READ_4_4_4 BIT(9) +#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) + +#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11) +#define SNOR_HWCAPS_READ_1_1_8 BIT(11) +#define SNOR_HWCAPS_READ_1_8_8 BIT(12) +#define SNOR_HWCAPS_READ_8_8_8 BIT(13) +#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14) + +/* + * Page Program capabilities. + * MUST be ordered by priority: the higher bit position, the higher priority. + * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the + * legacy SPI 1-1-1 protocol. 
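/*
 * Example (sketch): a controller's exec_op() path tracing each instruction
 * with the nand_op_trace() helper above before programming the hardware;
 * my_dispatch_instr() stands in for the controller-specific part.
 */
#include <linux/mtd/rawnand.h>

static int my_dispatch_instr(struct nand_chip *chip,
			     const struct nand_op_instr *instr)
{
	/* program CMD/ADDR/DATA/WAITRDY cycles into the controller here */
	return 0;
}

static int my_exec_op_sketch(struct nand_chip *chip,
			     const struct nand_operation *op, bool check_only)
{
	unsigned int i;
	int ret;

	for (i = 0; i < op->ninstrs; i++) {
		nand_op_trace("  ", &op->instrs[i]);	/* pr_debug() per instr */
		if (check_only)
			continue;
		ret = my_dispatch_instr(chip, &op->instrs[i]);
		if (ret)
			return ret;
	}
	return 0;
}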
+ * Note that Dual Page Programs are not supported because there is no existing + * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory + * implements such commands. + */ +#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16) +#define SNOR_HWCAPS_PP BIT(16) + +#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17) +#define SNOR_HWCAPS_PP_1_1_4 BIT(17) +#define SNOR_HWCAPS_PP_1_4_4 BIT(18) +#define SNOR_HWCAPS_PP_4_4_4 BIT(19) + +#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20) +#define SNOR_HWCAPS_PP_1_1_8 BIT(20) +#define SNOR_HWCAPS_PP_1_8_8 BIT(21) +#define SNOR_HWCAPS_PP_8_8_8 BIT(22) + +#define SNOR_HWCAPS_X_X_X (SNOR_HWCAPS_READ_2_2_2 | \ + SNOR_HWCAPS_READ_4_4_4 | \ + SNOR_HWCAPS_READ_8_8_8 | \ + SNOR_HWCAPS_PP_4_4_4 | \ + SNOR_HWCAPS_PP_8_8_8) + +#define SNOR_HWCAPS_DTR (SNOR_HWCAPS_READ_1_1_1_DTR | \ + SNOR_HWCAPS_READ_1_2_2_DTR | \ + SNOR_HWCAPS_READ_1_4_4_DTR | \ + SNOR_HWCAPS_READ_1_8_8_DTR) + +#define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \ + SNOR_HWCAPS_PP_MASK) + +struct spi_nor_read_command { + u8 num_mode_clocks; + u8 num_wait_states; + u8 opcode; + enum spi_nor_protocol proto; +}; + +struct spi_nor_pp_command { + u8 opcode; + enum spi_nor_protocol proto; +}; + +enum spi_nor_read_command_index { + SNOR_CMD_READ, + SNOR_CMD_READ_FAST, + SNOR_CMD_READ_1_1_1_DTR, + + /* Dual SPI */ + SNOR_CMD_READ_1_1_2, + SNOR_CMD_READ_1_2_2, + SNOR_CMD_READ_2_2_2, + SNOR_CMD_READ_1_2_2_DTR, + + /* Quad SPI */ + SNOR_CMD_READ_1_1_4, + SNOR_CMD_READ_1_4_4, + SNOR_CMD_READ_4_4_4, + SNOR_CMD_READ_1_4_4_DTR, + + /* Octal SPI */ + SNOR_CMD_READ_1_1_8, + SNOR_CMD_READ_1_8_8, + SNOR_CMD_READ_8_8_8, + SNOR_CMD_READ_1_8_8_DTR, + + SNOR_CMD_READ_MAX +}; + +enum spi_nor_pp_command_index { + SNOR_CMD_PP, + + /* Quad SPI */ + SNOR_CMD_PP_1_1_4, + SNOR_CMD_PP_1_4_4, + SNOR_CMD_PP_4_4_4, + + /* Octal SPI */ + SNOR_CMD_PP_1_1_8, + SNOR_CMD_PP_1_8_8, + SNOR_CMD_PP_8_8_8, + + SNOR_CMD_PP_MAX +}; + +/* Forward declaration that will be used in 'struct spi_nor_flash_parameter' */ +struct spi_nor; + +/** + * struct spi_nor_locking_ops - SPI NOR locking methods + * @lock: lock a region of the SPI NOR. + * @unlock: unlock a region of the SPI NOR. + * @is_locked: check if a region of the SPI NOR is completely locked + */ +struct spi_nor_locking_ops { + int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); +}; + +/** + * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings. + * Includes legacy flash parameters and settings that can be overwritten + * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216 + * Serial Flash Discoverable Parameters (SFDP) tables. + * + * @size: the flash memory density in bytes. + * @page_size: the page size of the SPI NOR flash memory. + * @hwcaps: describes the read and page program hardware + * capabilities. + * @reads: read capabilities ordered by priority: the higher index + * in the array, the higher priority. + * @page_programs: page program capabilities ordered by priority: the + * higher index in the array, the higher priority. + * @erase_map: the erase map parsed from the SFDP Sector Map Parameter + * Table. + * @quad_enable: enables SPI NOR quad mode. + * @set_4byte: puts the SPI NOR in 4 byte addressing mode. + * @convert_addr: converts an absolute address into something the flash + * will understand. Particularly useful when pagesize is + * not a power-of-2. + * @setup: configures the SPI NOR memory. 
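/*
 * Example (sketch): how a controller driver advertises what it can drive
 * before calling spi_nor_scan(); the core then picks the highest-priority
 * read/program settings that both this mask and the flash support. The 1-1-4
 * quad read plus plain page program mask is a typical mid-range QSPI setup.
 */
#include <linux/mtd/spi-nor.h>

static int my_nor_attach_sketch(struct spi_nor *nor)
{
	const struct spi_nor_hwcaps hwcaps = {
		.mask = SNOR_HWCAPS_READ |
			SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_READ_1_1_2 |
			SNOR_HWCAPS_READ_1_1_4 |
			SNOR_HWCAPS_PP,
	};

	/* NULL name: identify the part from its JEDEC ID. */
	return spi_nor_scan(nor, NULL, &hwcaps);
}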
Useful for SPI NOR + * flashes that have peculiarities to the SPI NOR standard + * e.g. different opcodes, specific address calculation, + * page size, etc. + * @locking_ops: SPI NOR locking methods. + */ +struct spi_nor_flash_parameter { + u64 size; + u32 page_size; + + struct spi_nor_hwcaps hwcaps; + struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; + struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; + + struct spi_nor_erase_map erase_map; + + int (*quad_enable)(struct spi_nor *nor); + int (*set_4byte)(struct spi_nor *nor, bool enable); + u32 (*convert_addr)(struct spi_nor *nor, u32 addr); + int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); + + const struct spi_nor_locking_ops *locking_ops; +}; + +/** * struct flash_info - Forward declaration of a structure used internally by * spi_nor_scan() */ @@ -344,6 +533,10 @@ struct flash_info; * @mtd: point to a mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: point to a spi device, or a spi nor controller device. + * @spimem: point to the spi mem device + * @bouncebuf: bounce buffer used when the buffer passed by the MTD + * layer is not DMA-able + * @bouncebuf_size: size of the bounce buffer * @info: spi-nor part JDEC MFR id and other info * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes @@ -356,8 +549,6 @@ struct flash_info; * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations * @reg_proto the SPI protocol for read_reg/write_reg/erase operations - * @cmd_buf: used by the write_reg - * @erase_map: the erase map of the SPI NOR * @prepare: [OPTIONAL] do some preparations for the * read/write/erase/lock/unlock operations * @unprepare: [OPTIONAL] do some post work after the @@ -369,19 +560,21 @@ struct flash_info; * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR * at the offset @offs; if not provided by the driver, * spi-nor will send the erase opcode via write_reg() - * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR - * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR - * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is - * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode * @clear_sr_bp: [FLASH-SPECIFIC] clears the Block Protection Bits from * the SPI NOR Status Register. - * completely locked + * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings. + * The structure includes legacy flash parameters and + * settings that can be overwritten by the spi_nor_fixups + * hooks, or dynamically when parsing the SFDP tables. 
* @priv: the private data */ struct spi_nor { struct mtd_info mtd; struct mutex lock; struct device *dev; + struct spi_mem *spimem; + u8 *bouncebuf; + size_t bouncebuf_size; const struct flash_info *info; u32 page_size; u8 addr_width; @@ -394,8 +587,6 @@ struct spi_nor { enum spi_nor_protocol reg_proto; bool sst_write_second; u32 flags; - u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; - struct spi_nor_erase_map erase_map; int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); @@ -408,11 +599,8 @@ struct spi_nor { size_t len, const u_char *write_buf); int (*erase)(struct spi_nor *nor, loff_t offs); - int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*quad_enable)(struct spi_nor *nor); int (*clear_sr_bp)(struct spi_nor *nor); + struct spi_nor_flash_parameter params; void *priv; }; @@ -443,7 +631,7 @@ spi_nor_region_mark_overlay(struct spi_nor_erase_region *region) static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor) { - return !!nor->erase_map.uniform_erase_type; + return !!nor->params.erase_map.uniform_erase_type; } static inline void spi_nor_set_flash_node(struct spi_nor *nor, @@ -458,67 +646,6 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) } /** - * struct spi_nor_hwcaps - Structure for describing the hardware capabilies - * supported by the SPI controller (bus master). - * @mask: the bitmask listing all the supported hw capabilies - */ -struct spi_nor_hwcaps { - u32 mask; -}; - -/* - *(Fast) Read capabilities. - * MUST be ordered by priority: the higher bit position, the higher priority. - * As a matter of performances, it is relevant to use Octal SPI protocols first, - * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly - * (Slow) Read. - */ -#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0) -#define SNOR_HWCAPS_READ BIT(0) -#define SNOR_HWCAPS_READ_FAST BIT(1) -#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2) - -#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3) -#define SNOR_HWCAPS_READ_1_1_2 BIT(3) -#define SNOR_HWCAPS_READ_1_2_2 BIT(4) -#define SNOR_HWCAPS_READ_2_2_2 BIT(5) -#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6) - -#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7) -#define SNOR_HWCAPS_READ_1_1_4 BIT(7) -#define SNOR_HWCAPS_READ_1_4_4 BIT(8) -#define SNOR_HWCAPS_READ_4_4_4 BIT(9) -#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) - -#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11) -#define SNOR_HWCAPS_READ_1_1_8 BIT(11) -#define SNOR_HWCAPS_READ_1_8_8 BIT(12) -#define SNOR_HWCAPS_READ_8_8_8 BIT(13) -#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14) - -/* - * Page Program capabilities. - * MUST be ordered by priority: the higher bit position, the higher priority. - * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the - * legacy SPI 1-1-1 protocol. - * Note that Dual Page Programs are not supported because there is no existing - * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory - * implements such commands. 
- */ -#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16) -#define SNOR_HWCAPS_PP BIT(16) - -#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17) -#define SNOR_HWCAPS_PP_1_1_4 BIT(17) -#define SNOR_HWCAPS_PP_1_4_4 BIT(18) -#define SNOR_HWCAPS_PP_4_4_4 BIT(19) - -#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20) -#define SNOR_HWCAPS_PP_1_1_8 BIT(20) -#define SNOR_HWCAPS_PP_1_8_8 BIT(21) -#define SNOR_HWCAPS_PP_8_8_8 BIT(22) - -/** * spi_nor_scan() - scan the SPI NOR * @nor: the spi_nor structure * @name: the chip type name diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 507f7e289bd1..4ea558bd3c46 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -68,30 +68,60 @@ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1)) +#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 1)) + #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) +#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 2)) + #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) +#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 4)) + #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ SPI_MEM_OP_ADDR(2, addr, 2), \ SPI_MEM_OP_DUMMY(ndummy, 2), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) +#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ + SPI_MEM_OP_ADDR(3, addr, 2), \ + SPI_MEM_OP_DUMMY(ndummy, 2), \ + SPI_MEM_OP_DATA_IN(len, buf, 2)) + #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ SPI_MEM_OP_ADDR(2, addr, 4), \ SPI_MEM_OP_DUMMY(ndummy, 4), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) +#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ + SPI_MEM_OP_ADDR(3, addr, 4), \ + SPI_MEM_OP_DUMMY(ndummy, 4), \ + SPI_MEM_OP_DATA_IN(len, buf, 4)) + #define SPINAND_PROG_EXEC_OP(addr) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ @@ -197,6 +227,7 @@ struct spinand_manufacturer { extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; +extern const struct spinand_manufacturer paragon_spinand_manufacturer; extern const struct spinand_manufacturer toshiba_spinand_manufacturer; extern const struct spinand_manufacturer winbond_spinand_manufacturer; @@ -260,7 +291,7 @@ struct spinand_ecc_info { */ struct spinand_info { const char *model; - u8 devid; + u16 devid; u32 flags; struct nand_memory_organization memorg; struct nand_ecc_req eccreq; @@ -422,7 +453,7 @@ static inline void spinand_set_of_node(struct spinand_device 
*spinand, int spinand_match_and_init(struct spinand_device *dev, const struct spinand_info *table, - unsigned int table_size, u8 devid); + unsigned int table_size, u16 devid); int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); int spinand_select_target(struct spinand_device *spinand, unsigned int target); diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h index 056c9932c723..3608a6c36fac 100644 --- a/include/linux/mtd/super.h +++ b/include/linux/mtd/super.h @@ -14,9 +14,9 @@ #include <linux/fs.h> #include <linux/mount.h> -extern struct dentry *mount_mtd(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data, - int (*fill_super)(struct super_block *, void *, int)); +extern int get_tree_mtd(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc)); extern void kill_mtd_super(struct super_block *sb); diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 3093dd162424..aca8f36dfac9 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -66,16 +66,6 @@ struct mutex { }; /* - * Internal helper function; C doesn't allow us to hide it :/ - * - * DO NOT USE (outside of mutex code). - */ -static inline struct task_struct *__mutex_owner(struct mutex *lock) -{ - return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07); -} - -/* * This is the control structure for tasks blocked on mutex, * which resides on the blocked task's kernel stack: */ @@ -144,14 +134,11 @@ extern void __mutex_init(struct mutex *lock, const char *name, * * Returns true if the mutex is locked, false if unlocked. */ -static inline bool mutex_is_locked(struct mutex *lock) -{ - return __mutex_owner(lock) != NULL; -} +extern bool mutex_is_locked(struct mutex *lock); /* * See kernel/locking/mutex.c for detailed documentation of these APIs. - * Also see Documentation/locking/mutex-design.txt. + * Also see Documentation/locking/mutex-design.rst. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); @@ -220,13 +207,7 @@ enum mutex_trylock_recursive_enum { * - MUTEX_TRYLOCK_SUCCESS - lock acquired, * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. 
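The mount_mtd() to get_tree_mtd() change in linux/mtd/super.h above follows the new mount API: an MTD-backed filesystem now provides a get_tree hook in its fs_context_operations instead of a mount() method. A minimal sketch, assuming a hypothetical "myfs" filesystem (names invented, superblock setup omitted):

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/mtd/super.h>

static int myfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        /* set up sb->s_op, the root inode, etc., as before */
        return 0;
}

static int myfs_get_tree(struct fs_context *fc)
{
        return get_tree_mtd(fc, myfs_fill_super);
}

/* referenced from the filesystem's init_fs_context() method */
static const struct fs_context_operations myfs_context_ops = {
        .get_tree = myfs_get_tree,
};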
*/ -static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum -mutex_trylock_recursive(struct mutex *lock) -{ - if (unlikely(__mutex_owner(lock) == current)) - return MUTEX_TRYLOCK_RECURSIVE; - - return mutex_trylock(lock); -} +extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum +mutex_trylock_recursive(struct mutex *lock); #endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h index 4471cf96ef69..47e5679b48e1 100644 --- a/include/linux/mv643xx.h +++ b/include/linux/mv643xx.h @@ -918,52 +918,6 @@ extern void mv64340_irq_init(unsigned int base); -/* MPSC Platform Device, Driver Data (Shared register regions) */ -#define MPSC_SHARED_NAME "mpsc_shared" - -#define MPSC_ROUTING_BASE_ORDER 0 -#define MPSC_SDMA_INTR_BASE_ORDER 1 - -#define MPSC_ROUTING_REG_BLOCK_SIZE 0x000c -#define MPSC_SDMA_INTR_REG_BLOCK_SIZE 0x0084 - -struct mpsc_shared_pdata { - u32 mrr_val; - u32 rcrr_val; - u32 tcrr_val; - u32 intr_cause_val; - u32 intr_mask_val; -}; - -/* MPSC Platform Device, Driver Data */ -#define MPSC_CTLR_NAME "mpsc" - -#define MPSC_BASE_ORDER 0 -#define MPSC_SDMA_BASE_ORDER 1 -#define MPSC_BRG_BASE_ORDER 2 - -#define MPSC_REG_BLOCK_SIZE 0x0038 -#define MPSC_SDMA_REG_BLOCK_SIZE 0x0c18 -#define MPSC_BRG_REG_BLOCK_SIZE 0x0008 - -struct mpsc_pdata { - u8 mirror_regs; - u8 cache_mgmt; - u8 max_idle; - int default_baud; - int default_bits; - int default_parity; - int default_flow; - u32 chr_1_val; - u32 chr_2_val; - u32 chr_10_val; - u32 mpcr_val; - u32 bcr_val; - u8 brg_can_tune; - u8 brg_clk_src; - u32 brg_clk_freq; -}; - /* Watchdog Platform Device, Driver Data */ #define MV64x60_WDT_NAME "mv64x60_wdt" diff --git a/include/linux/namei.h b/include/linux/namei.h index 9138b4471dbf..397a08ade6a2 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -16,39 +16,28 @@ enum { MAX_NESTED_LINKS = 8 }; */ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; -/* - * The bitmask for a lookup event: - * - follow links at the end - * - require a directory - * - ending slashes ok even for nonexistent files - * - internal "there are more path components" flag - * - dentry cache is untrusted; force a real lookup - * - suppress terminal automount - * - skip revalidation - * - don't fetch xattrs on audit_inode - */ -#define LOOKUP_FOLLOW 0x0001 -#define LOOKUP_DIRECTORY 0x0002 -#define LOOKUP_AUTOMOUNT 0x0004 - +/* pathwalk mode */ +#define LOOKUP_FOLLOW 0x0001 /* follow links at the end */ +#define LOOKUP_DIRECTORY 0x0002 /* require a directory */ +#define LOOKUP_AUTOMOUNT 0x0004 /* force terminal automount */ +#define LOOKUP_EMPTY 0x4000 /* accept empty path [user_... only] */ +#define LOOKUP_DOWN 0x8000 /* follow mounts in the starting point */ + +#define LOOKUP_REVAL 0x0020 /* tell ->d_revalidate() to trust no cache */ +#define LOOKUP_RCU 0x0040 /* RCU pathwalk mode; semi-internal */ + +/* These tell filesystem methods that we are dealing with the final component... */ +#define LOOKUP_OPEN 0x0100 /* ... in open */ +#define LOOKUP_CREATE 0x0200 /* ... in object creation */ +#define LOOKUP_EXCL 0x0400 /* ... in exclusive creation */ +#define LOOKUP_RENAME_TARGET 0x0800 /* ... 
in destination of rename() */ + +/* internal use only */ #define LOOKUP_PARENT 0x0010 -#define LOOKUP_REVAL 0x0020 -#define LOOKUP_RCU 0x0040 #define LOOKUP_NO_REVAL 0x0080 -#define LOOKUP_NO_EVAL 0x0100 - -/* - * Intent data - */ -#define LOOKUP_OPEN 0x0100 -#define LOOKUP_CREATE 0x0200 -#define LOOKUP_EXCL 0x0400 -#define LOOKUP_RENAME_TARGET 0x0800 - #define LOOKUP_JUMPED 0x1000 #define LOOKUP_ROOT 0x2000 -#define LOOKUP_EMPTY 0x4000 -#define LOOKUP_DOWN 0x8000 +#define LOOKUP_ROOT_GRABBED 0x0008 extern int path_pts(struct path *path); @@ -60,22 +49,6 @@ static inline int user_path_at(int dfd, const char __user *name, unsigned flags, return user_path_at_empty(dfd, name, flags, path, NULL); } -static inline int user_path(const char __user *name, struct path *path) -{ - return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL); -} - -static inline int user_lpath(const char __user *name, struct path *path) -{ - return user_path_at_empty(AT_FDCWD, name, 0, path, NULL); -} - -static inline int user_path_dir(const char __user *name, struct path *path) -{ - return user_path_at_empty(AT_FDCWD, name, - LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL); -} - extern int kern_path(const char *, unsigned, struct path *); extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int); diff --git a/include/linux/net.h b/include/linux/net.h index f7d672cf25b5..9cafb5f353a9 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -116,11 +116,11 @@ struct socket { unsigned long flags; - struct socket_wq *wq; - struct file *file; struct sock *sk; const struct proto_ops *ops; + + struct socket_wq wq; }; struct vm_area_struct; diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h deleted file mode 100644 index fd458389f7d1..000000000000 --- a/include/linux/net_dim.h +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright (c) 2016, Mellanox Technologies. All rights reserved. - * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
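With the user_path()/user_lpath()/user_path_dir() wrappers removed from linux/namei.h above, callers spell out the lookup flags directly through user_path_at(). A short sketch of the equivalent calls; the my_* wrappers are only for illustration:

#include <linux/fcntl.h>
#include <linux/namei.h>

/* formerly user_path_dir(name, path) */
static int my_user_path_dir(const char __user *name, struct path *path)
{
        return user_path_at(AT_FDCWD, name,
                            LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path);
}

/* formerly user_lpath(name, path): no LOOKUP_FOLLOW, so a trailing
 * symlink is returned rather than followed */
static int my_user_lpath(const char __user *name, struct path *path)
{
        return user_path_at(AT_FDCWD, name, 0, path);
}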
- */ - -#ifndef NET_DIM_H -#define NET_DIM_H - -#include <linux/module.h> - -struct net_dim_cq_moder { - u16 usec; - u16 pkts; - u8 cq_period_mode; -}; - -struct net_dim_sample { - ktime_t time; - u32 pkt_ctr; - u32 byte_ctr; - u16 event_ctr; -}; - -struct net_dim_stats { - int ppms; /* packets per msec */ - int bpms; /* bytes per msec */ - int epms; /* events per msec */ -}; - -struct net_dim { /* Adaptive Moderation */ - u8 state; - struct net_dim_stats prev_stats; - struct net_dim_sample start_sample; - struct work_struct work; - u8 profile_ix; - u8 mode; - u8 tune_state; - u8 steps_right; - u8 steps_left; - u8 tired; -}; - -enum { - NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, - NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, - NET_DIM_CQ_PERIOD_NUM_MODES -}; - -/* Adaptive moderation logic */ -enum { - NET_DIM_START_MEASURE, - NET_DIM_MEASURE_IN_PROGRESS, - NET_DIM_APPLY_NEW_PROFILE, -}; - -enum { - NET_DIM_PARKING_ON_TOP, - NET_DIM_PARKING_TIRED, - NET_DIM_GOING_RIGHT, - NET_DIM_GOING_LEFT, -}; - -enum { - NET_DIM_STATS_WORSE, - NET_DIM_STATS_SAME, - NET_DIM_STATS_BETTER, -}; - -enum { - NET_DIM_STEPPED, - NET_DIM_TOO_TIRED, - NET_DIM_ON_EDGE, -}; - -#define NET_DIM_PARAMS_NUM_PROFILES 5 -/* Adaptive moderation profiles */ -#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 -#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128 -#define NET_DIM_DEF_PROFILE_CQE 1 -#define NET_DIM_DEF_PROFILE_EQE 1 - -/* All profiles sizes must be NET_PARAMS_DIM_NUM_PROFILES */ -#define NET_DIM_RX_EQE_PROFILES { \ - {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ -} - -#define NET_DIM_RX_CQE_PROFILES { \ - {2, 256}, \ - {8, 128}, \ - {16, 64}, \ - {32, 64}, \ - {64, 64} \ -} - -#define NET_DIM_TX_EQE_PROFILES { \ - {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \ -} - -#define NET_DIM_TX_CQE_PROFILES { \ - {5, 128}, \ - {8, 64}, \ - {16, 32}, \ - {32, 32}, \ - {64, 32} \ -} - -static const struct net_dim_cq_moder -rx_profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { - NET_DIM_RX_EQE_PROFILES, - NET_DIM_RX_CQE_PROFILES, -}; - -static const struct net_dim_cq_moder -tx_profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { - NET_DIM_TX_EQE_PROFILES, - NET_DIM_TX_CQE_PROFILES, -}; - -static inline struct net_dim_cq_moder -net_dim_get_rx_moderation(u8 cq_period_mode, int ix) -{ - struct net_dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix]; - - cq_moder.cq_period_mode = cq_period_mode; - return cq_moder; -} - -static inline struct net_dim_cq_moder -net_dim_get_def_rx_moderation(u8 cq_period_mode) -{ - u8 profile_ix = cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE ? 
- NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE; - - return net_dim_get_rx_moderation(cq_period_mode, profile_ix); -} - -static inline struct net_dim_cq_moder -net_dim_get_tx_moderation(u8 cq_period_mode, int ix) -{ - struct net_dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix]; - - cq_moder.cq_period_mode = cq_period_mode; - return cq_moder; -} - -static inline struct net_dim_cq_moder -net_dim_get_def_tx_moderation(u8 cq_period_mode) -{ - u8 profile_ix = cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE ? - NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE; - - return net_dim_get_tx_moderation(cq_period_mode, profile_ix); -} - -static inline bool net_dim_on_top(struct net_dim *dim) -{ - switch (dim->tune_state) { - case NET_DIM_PARKING_ON_TOP: - case NET_DIM_PARKING_TIRED: - return true; - case NET_DIM_GOING_RIGHT: - return (dim->steps_left > 1) && (dim->steps_right == 1); - default: /* NET_DIM_GOING_LEFT */ - return (dim->steps_right > 1) && (dim->steps_left == 1); - } -} - -static inline void net_dim_turn(struct net_dim *dim) -{ - switch (dim->tune_state) { - case NET_DIM_PARKING_ON_TOP: - case NET_DIM_PARKING_TIRED: - break; - case NET_DIM_GOING_RIGHT: - dim->tune_state = NET_DIM_GOING_LEFT; - dim->steps_left = 0; - break; - case NET_DIM_GOING_LEFT: - dim->tune_state = NET_DIM_GOING_RIGHT; - dim->steps_right = 0; - break; - } -} - -static inline int net_dim_step(struct net_dim *dim) -{ - if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) - return NET_DIM_TOO_TIRED; - - switch (dim->tune_state) { - case NET_DIM_PARKING_ON_TOP: - case NET_DIM_PARKING_TIRED: - break; - case NET_DIM_GOING_RIGHT: - if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) - return NET_DIM_ON_EDGE; - dim->profile_ix++; - dim->steps_right++; - break; - case NET_DIM_GOING_LEFT: - if (dim->profile_ix == 0) - return NET_DIM_ON_EDGE; - dim->profile_ix--; - dim->steps_left++; - break; - } - - dim->tired++; - return NET_DIM_STEPPED; -} - -static inline void net_dim_park_on_top(struct net_dim *dim) -{ - dim->steps_right = 0; - dim->steps_left = 0; - dim->tired = 0; - dim->tune_state = NET_DIM_PARKING_ON_TOP; -} - -static inline void net_dim_park_tired(struct net_dim *dim) -{ - dim->steps_right = 0; - dim->steps_left = 0; - dim->tune_state = NET_DIM_PARKING_TIRED; -} - -static inline void net_dim_exit_parking(struct net_dim *dim) -{ - dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT : - NET_DIM_GOING_RIGHT; - net_dim_step(dim); -} - -#define IS_SIGNIFICANT_DIFF(val, ref) \ - (((100UL * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ - -static inline int net_dim_stats_compare(struct net_dim_stats *curr, - struct net_dim_stats *prev) -{ - if (!prev->bpms) - return curr->bpms ? NET_DIM_STATS_BETTER : - NET_DIM_STATS_SAME; - - if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) - return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER : - NET_DIM_STATS_WORSE; - - if (!prev->ppms) - return curr->ppms ? NET_DIM_STATS_BETTER : - NET_DIM_STATS_SAME; - - if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) - return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER : - NET_DIM_STATS_WORSE; - - if (!prev->epms) - return NET_DIM_STATS_SAME; - - if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) - return (curr->epms < prev->epms) ? 
NET_DIM_STATS_BETTER : - NET_DIM_STATS_WORSE; - - return NET_DIM_STATS_SAME; -} - -static inline bool net_dim_decision(struct net_dim_stats *curr_stats, - struct net_dim *dim) -{ - int prev_state = dim->tune_state; - int prev_ix = dim->profile_ix; - int stats_res; - int step_res; - - switch (dim->tune_state) { - case NET_DIM_PARKING_ON_TOP: - stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); - if (stats_res != NET_DIM_STATS_SAME) - net_dim_exit_parking(dim); - break; - - case NET_DIM_PARKING_TIRED: - dim->tired--; - if (!dim->tired) - net_dim_exit_parking(dim); - break; - - case NET_DIM_GOING_RIGHT: - case NET_DIM_GOING_LEFT: - stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); - if (stats_res != NET_DIM_STATS_BETTER) - net_dim_turn(dim); - - if (net_dim_on_top(dim)) { - net_dim_park_on_top(dim); - break; - } - - step_res = net_dim_step(dim); - switch (step_res) { - case NET_DIM_ON_EDGE: - net_dim_park_on_top(dim); - break; - case NET_DIM_TOO_TIRED: - net_dim_park_tired(dim); - break; - } - - break; - } - - if ((prev_state != NET_DIM_PARKING_ON_TOP) || - (dim->tune_state != NET_DIM_PARKING_ON_TOP)) - dim->prev_stats = *curr_stats; - - return dim->profile_ix != prev_ix; -} - -static inline void net_dim_sample(u16 event_ctr, - u64 packets, - u64 bytes, - struct net_dim_sample *s) -{ - s->time = ktime_get(); - s->pkt_ctr = packets; - s->byte_ctr = bytes; - s->event_ctr = event_ctr; -} - -#define NET_DIM_NEVENTS 64 -#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) - -static inline void net_dim_calc_stats(struct net_dim_sample *start, - struct net_dim_sample *end, - struct net_dim_stats *curr_stats) -{ - /* u32 holds up to 71 minutes, should be enough */ - u32 delta_us = ktime_us_delta(end->time, start->time); - u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); - u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, - start->byte_ctr); - - if (!delta_us) - return; - - curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); - curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); - curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC, - delta_us); -} - -static inline void net_dim(struct net_dim *dim, - struct net_dim_sample end_sample) -{ - struct net_dim_stats curr_stats; - u16 nevents; - - switch (dim->state) { - case NET_DIM_MEASURE_IN_PROGRESS: - nevents = BIT_GAP(BITS_PER_TYPE(u16), - end_sample.event_ctr, - dim->start_sample.event_ctr); - if (nevents < NET_DIM_NEVENTS) - break; - net_dim_calc_stats(&dim->start_sample, &end_sample, - &curr_stats); - if (net_dim_decision(&curr_stats, dim)) { - dim->state = NET_DIM_APPLY_NEW_PROFILE; - schedule_work(&dim->work); - break; - } - /* fall through */ - case NET_DIM_START_MEASURE: - net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr, - &dim->start_sample); - dim->state = NET_DIM_MEASURE_IN_PROGRESS; - break; - case NET_DIM_APPLY_NEW_PROFILE: - break; - } -} - -#endif /* NET_DIM_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index eeacebd7debb..c20f190b4c18 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -332,6 +332,8 @@ struct napi_struct { struct net_device *dev; struct gro_list gro_hash[GRO_HASH_BUCKETS]; struct sk_buff *skb; + struct list_head rx_list; /* Pending GRO_NORMAL skbs */ + int rx_count; /* length of rx_list */ struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; @@ -845,6 +847,7 @@ enum tc_setup_type { 
TC_SETUP_QDISC_ETF, TC_SETUP_ROOT_QDISC, TC_SETUP_QDISC_GRED, + TC_SETUP_QDISC_TAPRIO, }; /* These structures hold the attributes of bpf state that are being passed @@ -899,6 +902,10 @@ struct netdev_bpf { }; }; +/* Flags for ndo_xsk_wakeup. */ +#define XDP_WAKEUP_RX (1 << 0) +#define XDP_WAKEUP_TX (1 << 1) + #ifdef CONFIG_XFRM_OFFLOAD struct xfrmdev_ops { int (*xdo_dev_state_add) (struct xfrm_state *x); @@ -918,6 +925,7 @@ struct dev_ifalias { struct devlink; struct tlsdev_ops; + /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -1225,6 +1233,12 @@ struct tlsdev_ops; * that got dropped are freed/returned via xdp_return_frame(). * Returns negative number, means general error invoking ndo, meaning * no frames were xmit'ed and core-caller will free all frames. + * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); + * This function is used to wake up the softirq, ksoftirqd or kthread + * responsible for sending and/or receiving packets on a specific + * queue id bound to an AF_XDP socket. The flags field specifies if + * only RX, only Tx, or both should be woken up using the flags + * XDP_WAKEUP_RX and XDP_WAKEUP_TX. * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev); * Get devlink port instance associated with a given netdev. * Called with a reference on the netdevice and devlink locks only, @@ -1408,7 +1422,6 @@ struct net_device_ops { void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv); - int (*ndo_get_lock_subclass)(struct net_device *dev); int (*ndo_set_tx_maxrate)(struct net_device *dev, int queue_index, u32 maxrate); @@ -1424,8 +1437,8 @@ struct net_device_ops { int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, u32 flags); - int (*ndo_xsk_async_xmit)(struct net_device *dev, - u32 queue_id); + int (*ndo_xsk_wakeup)(struct net_device *dev, + u32 queue_id, u32 flags); struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); }; @@ -1636,6 +1649,8 @@ enum netdev_priv_flags { * @perm_addr: Permanent hw address * @addr_assign_type: Hw address assignment type * @addr_len: Hardware address length + * @upper_level: Maximum depth level of upper devices. + * @lower_level: Maximum depth level of lower devices. * @neigh_priv_len: Used in neigh_alloc() * @dev_id: Used to differentiate devices that share * the same link layer address @@ -1745,9 +1760,13 @@ enum netdev_priv_flags { * @phydev: Physical device may attach itself * for hardware timestamping * @sfp_bus: attached &struct sfp_bus structure. 
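A hedged, driver-side sketch of the ndo_xsk_wakeup() callback documented above, using the new XDP_WAKEUP_RX/XDP_WAKEUP_TX flags. The my_* structures and the per-queue NAPI layout are assumptions for illustration, not taken from this patch:

#include <linux/netdevice.h>

struct my_ring {
        struct napi_struct rx_napi;
        struct napi_struct tx_napi;
};

struct my_priv {
        struct my_ring rings[64];
};

static int my_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
        struct my_priv *priv = netdev_priv(dev);
        struct my_ring *ring = &priv->rings[queue_id];

        if (flags & XDP_WAKEUP_RX)
                napi_schedule(&ring->rx_napi);  /* drain the fill ring, receive */
        if (flags & XDP_WAKEUP_TX)
                napi_schedule(&ring->tx_napi);  /* push out pending TX descriptors */
        return 0;
}

static const struct net_device_ops my_netdev_ops = {
        .ndo_xsk_wakeup = my_xsk_wakeup,
};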
- * - * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock - * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount + * @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock + spinlock + * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount + * @qdisc_xmit_lock_key: lockdep class annotating + * netdev_queue->_xmit_lock spinlock + * @addr_list_lock_key: lockdep class annotating + * net_device->addr_list_lock spinlock * * @proto_down: protocol port state information can be sent to the * switch driver and used to set the phys state of the @@ -1862,6 +1881,8 @@ struct net_device { unsigned char perm_addr[MAX_ADDR_LEN]; unsigned char addr_assign_type; unsigned char addr_len; + unsigned char upper_level; + unsigned char lower_level; unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; @@ -2032,8 +2053,10 @@ struct net_device { #endif struct phy_device *phydev; struct sfp_bus *sfp_bus; - struct lock_class_key *qdisc_tx_busylock; - struct lock_class_key *qdisc_running_key; + struct lock_class_key qdisc_tx_busylock_key; + struct lock_class_key qdisc_running_key; + struct lock_class_key qdisc_xmit_lock_key; + struct lock_class_key addr_list_lock_key; bool proto_down; unsigned wol_enabled:1; }; @@ -2111,23 +2134,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, f(dev, &dev->_tx[i], arg); } -#define netdev_lockdep_set_classes(dev) \ -{ \ - static struct lock_class_key qdisc_tx_busylock_key; \ - static struct lock_class_key qdisc_running_key; \ - static struct lock_class_key qdisc_xmit_lock_key; \ - static struct lock_class_key dev_addr_list_lock_key; \ - unsigned int i; \ - \ - (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ - (dev)->qdisc_running_key = &qdisc_running_key; \ - lockdep_set_class(&(dev)->addr_list_lock, \ - &dev_addr_list_lock_key); \ - for (i = 0; i < (dev)->num_tx_queues; i++) \ - lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ - &qdisc_xmit_lock_key); \ -} - u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, @@ -3126,6 +3132,7 @@ static inline void netif_stop_queue(struct net_device *dev) } void netif_tx_stop_all_queues(struct net_device *dev); +void netdev_update_lockdep_key(struct net_device *dev); static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) { @@ -3270,7 +3277,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, */ smp_mb(); - if (dql_avail(&dev_queue->dql) < 0) + if (unlikely(dql_avail(&dev_queue->dql) < 0)) return; if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) @@ -4043,16 +4050,6 @@ static inline void netif_addr_lock(struct net_device *dev) spin_lock(&dev->addr_list_lock); } -static inline void netif_addr_lock_nested(struct net_device *dev) -{ - int subclass = SINGLE_DEPTH_NESTING; - - if (dev->netdev_ops->ndo_get_lock_subclass) - subclass = dev->netdev_ops->ndo_get_lock_subclass(dev); - - spin_lock_nested(&dev->addr_list_lock, subclass); -} - static inline void netif_addr_lock_bh(struct net_device *dev) { spin_lock_bh(&dev->addr_list_lock); @@ -4239,6 +4236,7 @@ extern int dev_weight_rx_bias; extern int dev_weight_tx_bias; extern int dev_rx_weight; extern int dev_tx_weight; +extern int gro_normal_batch; bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, @@ -4315,6 +4313,16 
@@ int netdev_master_upper_dev_link(struct net_device *dev, struct netlink_ext_ack *extack); void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev); +int netdev_adjacent_change_prepare(struct net_device *old_dev, + struct net_device *new_dev, + struct net_device *dev, + struct netlink_ext_ack *extack); +void netdev_adjacent_change_commit(struct net_device *old_dev, + struct net_device *new_dev, + struct net_device *dev); +void netdev_adjacent_change_abort(struct net_device *old_dev, + struct net_device *new_dev, + struct net_device *dev); void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); @@ -4326,7 +4334,6 @@ void netdev_lower_state_changed(struct net_device *lower_dev, extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; void netdev_rss_key_fill(void *buffer, size_t len); -int dev_get_nest_level(struct net_device *dev); int skb_checksum_help(struct sk_buff *skb); int skb_crc32c_csum_help(struct sk_buff *skb); int skb_csum_hwoffload_help(struct sk_buff *skb, @@ -4870,4 +4877,6 @@ do { \ #define PTYPE_HASH_SIZE (16) #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) +extern struct net_device *blackhole_netdev; + #endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 996bc247ef6e..77ebb61faf48 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -15,7 +15,6 @@ #include <linux/netdevice.h> #include <net/net_namespace.h> -#ifdef CONFIG_NETFILTER static inline int NF_DROP_GETERR(int verdict) { return -(verdict >> NF_VERDICT_QBITS); @@ -118,6 +117,7 @@ struct nf_hook_entries { */ }; +#ifdef CONFIG_NETFILTER static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e) { unsigned int n = e->num_hook_entries; @@ -336,11 +336,6 @@ int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); #endif -/* Call this before modifying an existing packet: ensures it is - modifiable and linear to the point you care about (writable_len). - Returns true or false. 
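The new netdev_adjacent_change_{prepare,commit,abort}() helpers above follow the usual prepare/commit pattern. A hedged sketch of how an upper device might swap one lower device for another; the function names and the driver-specific re-pointing step are illustrative assumptions:

#include <linux/netdevice.h>

/* Driver-specific step, stubbed out for the sketch. */
static int my_repoint_datapath(struct net_device *upper,
                               struct net_device *new_lower)
{
        return 0;
}

static int my_swap_lower(struct net_device *upper,
                         struct net_device *old_lower,
                         struct net_device *new_lower,
                         struct netlink_ext_ack *extack)
{
        int err;

        err = netdev_adjacent_change_prepare(old_lower, new_lower, upper,
                                             extack);
        if (err)
                return err;

        err = my_repoint_datapath(upper, new_lower);
        if (err) {
                netdev_adjacent_change_abort(old_lower, new_lower, upper);
                return err;
        }

        netdev_adjacent_change_commit(old_lower, new_lower, upper);
        return 0;
}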
*/ -int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); - struct flowi; struct nf_queue_entry; @@ -427,7 +422,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) } #endif /*CONFIG_NETFILTER*/ -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) +#if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/netfilter/nf_conntrack_zones_common.h> extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index f5e03809cdb2..9bc255a8461b 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -2,7 +2,7 @@ /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> * Patrick Schaaf <bof@bof.de> * Martin Josefsson <gandalf@wlug.westbo.se> - * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> + * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ #ifndef _IP_SET_H #define _IP_SET_H @@ -452,10 +452,240 @@ bitmap_bytes(u32 a, u32 b) return 4 * ((((b - a + 8) / 8) + 3) / 4); } -#include <linux/netfilter/ipset/ip_set_timeout.h> -#include <linux/netfilter/ipset/ip_set_comment.h> -#include <linux/netfilter/ipset/ip_set_counter.h> -#include <linux/netfilter/ipset/ip_set_skbinfo.h> +/* How often should the gc be run by default */ +#define IPSET_GC_TIME (3 * 60) + +/* Timeout period depending on the timeout value of the given set */ +#define IPSET_GC_PERIOD(timeout) \ + ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1) + +/* Entry is set with no timeout value */ +#define IPSET_ELEM_PERMANENT 0 + +/* Set is defined with timeout support: timeout value may be 0 */ +#define IPSET_NO_TIMEOUT UINT_MAX + +/* Max timeout value, see msecs_to_jiffies() in jiffies.h */ +#define IPSET_MAX_TIMEOUT (UINT_MAX >> 1)/MSEC_PER_SEC + +#define ip_set_adt_opt_timeout(opt, set) \ +((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout) + +static inline unsigned int +ip_set_timeout_uget(struct nlattr *tb) +{ + unsigned int timeout = ip_set_get_h32(tb); + + /* Normalize to fit into jiffies */ + if (timeout > IPSET_MAX_TIMEOUT) + timeout = IPSET_MAX_TIMEOUT; + + return timeout; +} + +static inline bool +ip_set_timeout_expired(const unsigned long *t) +{ + return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); +} + +static inline void +ip_set_timeout_set(unsigned long *timeout, u32 value) +{ + unsigned long t; + + if (!value) { + *timeout = IPSET_ELEM_PERMANENT; + return; + } + + t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies; + if (t == IPSET_ELEM_PERMANENT) + /* Bingo! :-) */ + t--; + *timeout = t; +} + +static inline u32 +ip_set_timeout_get(const unsigned long *timeout) +{ + u32 t; + + if (*timeout == IPSET_ELEM_PERMANENT) + return 0; + + t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; + /* Zero value in userspace means no timeout */ + return t == 0 ? 1 : t; +} + +static inline char* +ip_set_comment_uget(struct nlattr *tb) +{ + return nla_data(tb); +} + +/* Called from uadd only, protected by the set spinlock. + * The kadt functions don't use the comment extensions in any way. + */ +static inline void +ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, + const struct ip_set_ext *ext) +{ + struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); + size_t len = ext->comment ? 
strlen(ext->comment) : 0; + + if (unlikely(c)) { + set->ext_size -= sizeof(*c) + strlen(c->str) + 1; + kfree_rcu(c, rcu); + rcu_assign_pointer(comment->c, NULL); + } + if (!len) + return; + if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) + len = IPSET_MAX_COMMENT_SIZE; + c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); + if (unlikely(!c)) + return; + strlcpy(c->str, ext->comment, len + 1); + set->ext_size += sizeof(*c) + strlen(c->str) + 1; + rcu_assign_pointer(comment->c, c); +} + +/* Used only when dumping a set, protected by rcu_read_lock() */ +static inline int +ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) +{ + struct ip_set_comment_rcu *c = rcu_dereference(comment->c); + + if (!c) + return 0; + return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str); +} + +/* Called from uadd/udel, flush or the garbage collectors protected + * by the set spinlock. + * Called when the set is destroyed and when there can't be any user + * of the set data anymore. + */ +static inline void +ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment) +{ + struct ip_set_comment_rcu *c; + + c = rcu_dereference_protected(comment->c, 1); + if (unlikely(!c)) + return; + set->ext_size -= sizeof(*c) + strlen(c->str) + 1; + kfree_rcu(c, rcu); + rcu_assign_pointer(comment->c, NULL); +} + +static inline void +ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) +{ + atomic64_add((long long)bytes, &(counter)->bytes); +} + +static inline void +ip_set_add_packets(u64 packets, struct ip_set_counter *counter) +{ + atomic64_add((long long)packets, &(counter)->packets); +} + +static inline u64 +ip_set_get_bytes(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->bytes); +} + +static inline u64 +ip_set_get_packets(const struct ip_set_counter *counter) +{ + return (u64)atomic64_read(&(counter)->packets); +} + +static inline bool +ip_set_match_counter(u64 counter, u64 match, u8 op) +{ + switch (op) { + case IPSET_COUNTER_NONE: + return true; + case IPSET_COUNTER_EQ: + return counter == match; + case IPSET_COUNTER_NE: + return counter != match; + case IPSET_COUNTER_LT: + return counter < match; + case IPSET_COUNTER_GT: + return counter > match; + } + return false; +} + +static inline void +ip_set_update_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext, u32 flags) +{ + if (ext->packets != ULLONG_MAX && + !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { + ip_set_add_bytes(ext->bytes, counter); + ip_set_add_packets(ext->packets, counter); + } +} + +static inline bool +ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) +{ + return nla_put_net64(skb, IPSET_ATTR_BYTES, + cpu_to_be64(ip_set_get_bytes(counter)), + IPSET_ATTR_PAD) || + nla_put_net64(skb, IPSET_ATTR_PACKETS, + cpu_to_be64(ip_set_get_packets(counter)), + IPSET_ATTR_PAD); +} + +static inline void +ip_set_init_counter(struct ip_set_counter *counter, + const struct ip_set_ext *ext) +{ + if (ext->bytes != ULLONG_MAX) + atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); + if (ext->packets != ULLONG_MAX) + atomic64_set(&(counter)->packets, (long long)(ext->packets)); +} + +static inline void +ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags) +{ + mext->skbinfo = *skbinfo; +} + +static inline bool +ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) +{ + /* Send nonzero parameters only */ + return ((skbinfo->skbmark || skbinfo->skbmarkmask) && + 
nla_put_net64(skb, IPSET_ATTR_SKBMARK, + cpu_to_be64((u64)skbinfo->skbmark << 32 | + skbinfo->skbmarkmask), + IPSET_ATTR_PAD)) || + (skbinfo->skbprio && + nla_put_net32(skb, IPSET_ATTR_SKBPRIO, + cpu_to_be32(skbinfo->skbprio))) || + (skbinfo->skbqueue && + nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, + cpu_to_be16(skbinfo->skbqueue))); +} + +static inline void +ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, + const struct ip_set_ext *ext) +{ + *skbinfo = ext->skbinfo; +} #define IP_SET_INIT_KEXT(skb, opt, set) \ { .bytes = (skb)->len, .packets = 1, \ diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h deleted file mode 100644 index 0b894d81bbf2..000000000000 --- a/include/linux/netfilter/ipset/ip_set_comment.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _IP_SET_COMMENT_H -#define _IP_SET_COMMENT_H - -/* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa> - */ - -#ifdef __KERNEL__ - -static inline char* -ip_set_comment_uget(struct nlattr *tb) -{ - return nla_data(tb); -} - -/* Called from uadd only, protected by the set spinlock. - * The kadt functions don't use the comment extensions in any way. - */ -static inline void -ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, - const struct ip_set_ext *ext) -{ - struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1); - size_t len = ext->comment ? strlen(ext->comment) : 0; - - if (unlikely(c)) { - set->ext_size -= sizeof(*c) + strlen(c->str) + 1; - kfree_rcu(c, rcu); - rcu_assign_pointer(comment->c, NULL); - } - if (!len) - return; - if (unlikely(len > IPSET_MAX_COMMENT_SIZE)) - len = IPSET_MAX_COMMENT_SIZE; - c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC); - if (unlikely(!c)) - return; - strlcpy(c->str, ext->comment, len + 1); - set->ext_size += sizeof(*c) + strlen(c->str) + 1; - rcu_assign_pointer(comment->c, c); -} - -/* Used only when dumping a set, protected by rcu_read_lock() */ -static inline int -ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) -{ - struct ip_set_comment_rcu *c = rcu_dereference(comment->c); - - if (!c) - return 0; - return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str); -} - -/* Called from uadd/udel, flush or the garbage collectors protected - * by the set spinlock. - * Called when the set is destroyed and when there can't be any user - * of the set data anymore. 
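The timeout helpers consolidated into ip_set.h above keep their existing semantics. A small sketch of the element lifecycle they support; the struct and field names are illustrative only:

#include <linux/netfilter/ipset/ip_set.h>

struct my_elem {
        unsigned long timeout;          /* jiffies, or IPSET_ELEM_PERMANENT */
};

static void my_elem_set_timeout(struct my_elem *e, u32 secs)
{
        /* secs == 0 stores IPSET_ELEM_PERMANENT ("no timeout") */
        ip_set_timeout_set(&e->timeout, secs);
}

static bool my_elem_alive(const struct my_elem *e)
{
        return !ip_set_timeout_expired(&e->timeout);
}

static u32 my_elem_remaining(const struct my_elem *e)
{
        /* 0 reported back to userspace means "permanent" */
        return ip_set_timeout_get(&e->timeout);
}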
- */ -static inline void -ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment) -{ - struct ip_set_comment_rcu *c; - - c = rcu_dereference_protected(comment->c, 1); - if (unlikely(!c)) - return; - set->ext_size -= sizeof(*c) + strlen(c->str) + 1; - kfree_rcu(c, rcu); - rcu_assign_pointer(comment->c, NULL); -} - -#endif -#endif diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h deleted file mode 100644 index 5477492c8374..000000000000 --- a/include/linux/netfilter/ipset/ip_set_counter.h +++ /dev/null @@ -1,85 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _IP_SET_COUNTER_H -#define _IP_SET_COUNTER_H - -/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> - */ - -#ifdef __KERNEL__ - -static inline void -ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) -{ - atomic64_add((long long)bytes, &(counter)->bytes); -} - -static inline void -ip_set_add_packets(u64 packets, struct ip_set_counter *counter) -{ - atomic64_add((long long)packets, &(counter)->packets); -} - -static inline u64 -ip_set_get_bytes(const struct ip_set_counter *counter) -{ - return (u64)atomic64_read(&(counter)->bytes); -} - -static inline u64 -ip_set_get_packets(const struct ip_set_counter *counter) -{ - return (u64)atomic64_read(&(counter)->packets); -} - -static inline bool -ip_set_match_counter(u64 counter, u64 match, u8 op) -{ - switch (op) { - case IPSET_COUNTER_NONE: - return true; - case IPSET_COUNTER_EQ: - return counter == match; - case IPSET_COUNTER_NE: - return counter != match; - case IPSET_COUNTER_LT: - return counter < match; - case IPSET_COUNTER_GT: - return counter > match; - } - return false; -} - -static inline void -ip_set_update_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext, u32 flags) -{ - if (ext->packets != ULLONG_MAX && - !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { - ip_set_add_bytes(ext->bytes, counter); - ip_set_add_packets(ext->packets, counter); - } -} - -static inline bool -ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) -{ - return nla_put_net64(skb, IPSET_ATTR_BYTES, - cpu_to_be64(ip_set_get_bytes(counter)), - IPSET_ATTR_PAD) || - nla_put_net64(skb, IPSET_ATTR_PACKETS, - cpu_to_be64(ip_set_get_packets(counter)), - IPSET_ATTR_PAD); -} - -static inline void -ip_set_init_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext) -{ - if (ext->bytes != ULLONG_MAX) - atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); - if (ext->packets != ULLONG_MAX) - atomic64_set(&(counter)->packets, (long long)(ext->packets)); -} - -#endif /* __KERNEL__ */ -#endif /* _IP_SET_COUNTER_H */ diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h index ac6a11d38a19..d74cd112b88a 100644 --- a/include/linux/netfilter/ipset/ip_set_getport.h +++ b/include/linux/netfilter/ipset/ip_set_getport.h @@ -2,10 +2,14 @@ #ifndef _IP_SET_GETPORT_H #define _IP_SET_GETPORT_H +#include <linux/skbuff.h> +#include <linux/types.h> +#include <uapi/linux/in.h> + extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto); -#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) +#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto); #else diff --git a/include/linux/netfilter/ipset/ip_set_skbinfo.h b/include/linux/netfilter/ipset/ip_set_skbinfo.h deleted file mode 100644 
index aae081e085c6..000000000000 --- a/include/linux/netfilter/ipset/ip_set_skbinfo.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _IP_SET_SKBINFO_H -#define _IP_SET_SKBINFO_H - -/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> - */ - -#ifdef __KERNEL__ - -static inline void -ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, - const struct ip_set_ext *ext, - struct ip_set_ext *mext, u32 flags) -{ - mext->skbinfo = *skbinfo; -} - -static inline bool -ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) -{ - /* Send nonzero parameters only */ - return ((skbinfo->skbmark || skbinfo->skbmarkmask) && - nla_put_net64(skb, IPSET_ATTR_SKBMARK, - cpu_to_be64((u64)skbinfo->skbmark << 32 | - skbinfo->skbmarkmask), - IPSET_ATTR_PAD)) || - (skbinfo->skbprio && - nla_put_net32(skb, IPSET_ATTR_SKBPRIO, - cpu_to_be32(skbinfo->skbprio))) || - (skbinfo->skbqueue && - nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, - cpu_to_be16(skbinfo->skbqueue))); -} - -static inline void -ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, - const struct ip_set_ext *ext) -{ - *skbinfo = ext->skbinfo; -} - -#endif /* __KERNEL__ */ -#endif /* _IP_SET_SKBINFO_H */ diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h deleted file mode 100644 index 88926b4c75f0..000000000000 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ /dev/null @@ -1,78 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _IP_SET_TIMEOUT_H -#define _IP_SET_TIMEOUT_H - -/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> - */ - -#ifdef __KERNEL__ - -/* How often should the gc be run by default */ -#define IPSET_GC_TIME (3 * 60) - -/* Timeout period depending on the timeout value of the given set */ -#define IPSET_GC_PERIOD(timeout) \ - ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1) - -/* Entry is set with no timeout value */ -#define IPSET_ELEM_PERMANENT 0 - -/* Set is defined with timeout support: timeout value may be 0 */ -#define IPSET_NO_TIMEOUT UINT_MAX - -/* Max timeout value, see msecs_to_jiffies() in jiffies.h */ -#define IPSET_MAX_TIMEOUT (UINT_MAX >> 1)/MSEC_PER_SEC - -#define ip_set_adt_opt_timeout(opt, set) \ -((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout) - -static inline unsigned int -ip_set_timeout_uget(struct nlattr *tb) -{ - unsigned int timeout = ip_set_get_h32(tb); - - /* Normalize to fit into jiffies */ - if (timeout > IPSET_MAX_TIMEOUT) - timeout = IPSET_MAX_TIMEOUT; - - return timeout; -} - -static inline bool -ip_set_timeout_expired(const unsigned long *t) -{ - return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t); -} - -static inline void -ip_set_timeout_set(unsigned long *timeout, u32 value) -{ - unsigned long t; - - if (!value) { - *timeout = IPSET_ELEM_PERMANENT; - return; - } - - t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies; - if (t == IPSET_ELEM_PERMANENT) - /* Bingo! :-) */ - t--; - *timeout = t; -} - -static inline u32 -ip_set_timeout_get(const unsigned long *timeout) -{ - u32 t; - - if (*timeout == IPSET_ELEM_PERMANENT) - return 0; - - t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; - /* Zero value in userspace means no timeout */ - return t == 0 ? 
1 : t; -} - -#endif /* __KERNEL__ */ -#endif /* _IP_SET_TIMEOUT_H */ diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h index 34345e543ba2..6f0ac896fcc9 100644 --- a/include/linux/netfilter/nf_conntrack_amanda.h +++ b/include/linux/netfilter/nf_conntrack_amanda.h @@ -3,6 +3,10 @@ #define _NF_CONNTRACK_AMANDA_H /* AMANDA tracking. */ +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <net/netfilter/nf_conntrack_expect.h> + extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index e142b2b5f1ea..1db83c931d9c 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -2,6 +2,7 @@ #ifndef _NF_CONNTRACK_COMMON_H #define _NF_CONNTRACK_COMMON_H +#include <linux/atomic.h> #include <uapi/linux/netfilter/nf_conntrack_common.h> struct ip_conntrack_stat { @@ -19,4 +20,23 @@ struct ip_conntrack_stat { unsigned int search_restart; }; +#define NFCT_INFOMASK 7UL +#define NFCT_PTRMASK ~(NFCT_INFOMASK) + +struct nf_conntrack { + atomic_t use; +}; + +void nf_conntrack_destroy(struct nf_conntrack *nfct); +static inline void nf_conntrack_put(struct nf_conntrack *nfct) +{ + if (nfct && atomic_dec_and_test(&nfct->use)) + nf_conntrack_destroy(nfct); +} +static inline void nf_conntrack_get(struct nf_conntrack *nfct) +{ + if (nfct) + atomic_inc(&nfct->use); +} + #endif /* _NF_CONNTRACK_COMMON_H */ diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h index ace0f952d50f..c509ed76e714 100644 --- a/include/linux/netfilter/nf_conntrack_dccp.h +++ b/include/linux/netfilter/nf_conntrack_dccp.h @@ -25,7 +25,6 @@ enum ct_dccp_roles { }; #define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1) -#ifdef __KERNEL__ #include <linux/netfilter/nf_conntrack_tuple_common.h> struct nf_ct_dccp { @@ -36,6 +35,4 @@ struct nf_ct_dccp { u_int64_t handshake_seq; }; -#endif /* __KERNEL__ */ - #endif /* _NF_CONNTRACK_DCCP_H */ diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h index 73a296dfd019..0e38302820b9 100644 --- a/include/linux/netfilter/nf_conntrack_ftp.h +++ b/include/linux/netfilter/nf_conntrack_ftp.h @@ -2,8 +2,12 @@ #ifndef _NF_CONNTRACK_FTP_H #define _NF_CONNTRACK_FTP_H +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <net/netfilter/nf_conntrack_expect.h> #include <uapi/linux/netfilter/nf_conntrack_ftp.h> - +#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> #define FTP_PORT 21 @@ -20,8 +24,6 @@ struct nf_ct_ftp_master { u_int16_t flags[IP_CT_DIR_MAX]; }; -struct nf_conntrack_expect; - /* For NAT to hook in when we find a packet which describes what other * connection we should expect. 
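struct nf_conntrack and its get/put helpers, now declared in nf_conntrack_common.h above, are plain reference counting: take a reference before stashing the pointer, drop it when done, and nf_conntrack_destroy() runs when the count reaches zero. A minimal sketch; the stash structure is hypothetical:

#include <linux/netfilter/nf_conntrack_common.h>

struct my_stash {
        struct nf_conntrack *nfct;
};

static void my_stash_replace(struct my_stash *s, struct nf_conntrack *nfct)
{
        nf_conntrack_get(nfct);         /* both helpers are NULL-safe */
        nf_conntrack_put(s->nfct);      /* may trigger nf_conntrack_destroy() */
        s->nfct = nfct;
}

static void my_stash_clear(struct my_stash *s)
{
        nf_conntrack_put(s->nfct);
        s->nfct = NULL;
}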
*/ extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h index f76ed373a2a5..4561ec0fcea4 100644 --- a/include/linux/netfilter/nf_conntrack_h323.h +++ b/include/linux/netfilter/nf_conntrack_h323.h @@ -2,9 +2,12 @@ #ifndef _NF_CONNTRACK_H323_H #define _NF_CONNTRACK_H323_H -#ifdef __KERNEL__ - +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <linux/types.h> #include <linux/netfilter/nf_conntrack_h323_asn1.h> +#include <net/netfilter/nf_conntrack_expect.h> +#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> #define RAS_PORT 1719 #define Q931_PORT 1720 @@ -28,8 +31,6 @@ struct nf_ct_h323_master { }; }; -struct nf_conn; - int get_h225_addr(struct nf_conn *ct, unsigned char *data, TransportAddress *taddr, union nf_inet_addr *addr, __be16 *port); @@ -94,5 +95,3 @@ extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct, struct nf_conntrack_expect *exp); #endif - -#endif diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h index 91d6275292a5..bd6797f823b2 100644 --- a/include/linux/netfilter/nf_conntrack_h323_asn1.h +++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /**************************************************************************** - * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323 - * conntrack/NAT module. + * BER and PER decoding library for H.323 conntrack/NAT module. * * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net> * @@ -38,6 +37,8 @@ /***************************************************************************** * H.323 Types ****************************************************************************/ + +#include <linux/types.h> #include <linux/netfilter/nf_conntrack_h323_types.h> typedef struct { diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h index 7a6871ac8784..74c6f9241944 100644 --- a/include/linux/netfilter/nf_conntrack_h323_types.h +++ b/include/linux/netfilter/nf_conntrack_h323_types.h @@ -4,6 +4,9 @@ * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> */ +#ifndef _NF_CONNTRACK_H323_TYPES_H +#define _NF_CONNTRACK_H323_TYPES_H + typedef struct TransportAddress_ipAddress { /* SEQUENCE */ int options; /* No use */ unsigned int ip; @@ -931,3 +934,5 @@ typedef struct RasMessage { /* CHOICE */ InfoRequestResponse infoRequestResponse; }; } RasMessage; + +#endif /* _NF_CONNTRACK_H323_TYPES_H */ diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h index 00c2b74206e1..d02255f721e1 100644 --- a/include/linux/netfilter/nf_conntrack_irc.h +++ b/include/linux/netfilter/nf_conntrack_irc.h @@ -2,7 +2,9 @@ #ifndef _NF_CONNTRACK_IRC_H #define _NF_CONNTRACK_IRC_H -#ifdef __KERNEL__ +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <net/netfilter/nf_conntrack_expect.h> #define IRC_PORT 6667 @@ -13,5 +15,4 @@ extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb, unsigned int matchlen, struct nf_conntrack_expect *exp); -#endif /* __KERNEL__ */ #endif /* _NF_CONNTRACK_IRC_H */ diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h index 833a5b2255ea..fcc409de31a4 100644 --- a/include/linux/netfilter/nf_conntrack_pptp.h +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -3,7 +3,12 
@@ #ifndef _NF_CONNTRACK_PPTP_H #define _NF_CONNTRACK_PPTP_H +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <linux/types.h> #include <linux/netfilter/nf_conntrack_common.h> +#include <net/netfilter/nf_conntrack_expect.h> +#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> extern const char *const pptp_msg_name[]; @@ -45,8 +50,6 @@ struct nf_nat_pptp { __be16 pac_call_id; /* NAT'ed PAC call id */ }; -#ifdef __KERNEL__ - #define PPTP_CONTROL_PORT 1723 #define PPTP_PACKET_CONTROL 1 @@ -297,10 +300,6 @@ union pptp_ctrl_union { struct PptpSetLinkInfo setlink; }; -/* crap needed for nf_conntrack_compat.h */ -struct nf_conn; -struct nf_conntrack_expect; - extern int (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, @@ -323,5 +322,4 @@ extern void (*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, struct nf_conntrack_expect *exp); -#endif /* __KERNEL__ */ #endif /* _NF_CONNTRACK_PPTP_H */ diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index 25f9a770fb84..f33aa6021364 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -10,7 +10,6 @@ struct nf_ct_gre { unsigned int timeout; }; -#ifdef __KERNEL__ #include <net/netfilter/nf_conntrack_tuple.h> struct nf_conn; @@ -32,5 +31,4 @@ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct net *net, struct nf_conntrack_tuple *tuple); -#endif /* __KERNEL__ */ #endif /* _CONNTRACK_PROTO_GRE_H */ diff --git a/include/linux/netfilter/nf_conntrack_sane.h b/include/linux/netfilter/nf_conntrack_sane.h index 7d2de44edce3..46c7acd1b4a7 100644 --- a/include/linux/netfilter/nf_conntrack_sane.h +++ b/include/linux/netfilter/nf_conntrack_sane.h @@ -3,8 +3,6 @@ #define _NF_CONNTRACK_SANE_H /* SANE tracking. 
*/ -#ifdef __KERNEL__ - #define SANE_PORT 6566 enum sane_state { @@ -17,6 +15,4 @@ struct nf_ct_sane_master { enum sane_state state; }; -#endif /* __KERNEL__ */ - #endif /* _NF_CONNTRACK_SANE_H */ diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h index c7fc38807a33..c620521c42bc 100644 --- a/include/linux/netfilter/nf_conntrack_sip.h +++ b/include/linux/netfilter/nf_conntrack_sip.h @@ -1,11 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NF_CONNTRACK_SIP_H__ #define __NF_CONNTRACK_SIP_H__ -#ifdef __KERNEL__ - -#include <net/netfilter/nf_conntrack_expect.h> +#include <linux/skbuff.h> #include <linux/types.h> +#include <net/netfilter/nf_conntrack_expect.h> #define SIP_PORT 5060 #define SIP_TIMEOUT 3600 @@ -196,5 +195,4 @@ int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr, enum sdp_header_types term, unsigned int *matchoff, unsigned int *matchlen); -#endif /* __KERNEL__ */ #endif /* __NF_CONNTRACK_SIP_H__ */ diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h index 818088c47475..87e4f33eb55f 100644 --- a/include/linux/netfilter/nf_conntrack_snmp.h +++ b/include/linux/netfilter/nf_conntrack_snmp.h @@ -2,6 +2,9 @@ #ifndef _NF_CONNTRACK_SNMP_H #define _NF_CONNTRACK_SNMP_H +#include <linux/netfilter.h> +#include <linux/skbuff.h> + extern int (*nf_nat_snmp_hook)(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, diff --git a/include/linux/netfilter/nf_conntrack_tftp.h b/include/linux/netfilter/nf_conntrack_tftp.h index 5769e12dd0a2..dc4c1b9beac0 100644 --- a/include/linux/netfilter/nf_conntrack_tftp.h +++ b/include/linux/netfilter/nf_conntrack_tftp.h @@ -4,6 +4,11 @@ #define TFTP_PORT 69 +#include <linux/netfilter.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <net/netfilter/nf_conntrack_expect.h> + struct tftphdr { __be16 opcode; }; diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 1f852ef7b098..1b261c51b3a3 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -336,7 +336,7 @@ void xt_free_table_info(struct xt_table_info *info); /** * xt_recseq - recursive seqcount for netfilter use - * + * * Packet processing changes the seqcount only if no recursion happened * get_counters() can use read_seqcount_begin()/read_seqcount_retry(), * because we use the normal seqcount convention : diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h deleted file mode 100644 index 169d03983589..000000000000 --- a/include/linux/netfilter/xt_hashlimit.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _XT_HASHLIMIT_H -#define _XT_HASHLIMIT_H - -#include <uapi/linux/netfilter/xt_hashlimit.h> - -#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \ - XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \ - XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES |\ - XT_HASHLIMIT_RATE_MATCH) -#endif /*_XT_HASHLIMIT_H*/ diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h deleted file mode 100644 index 4ca0593949cd..000000000000 --- a/include/linux/netfilter/xt_physdev.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _XT_PHYSDEV_H -#define _XT_PHYSDEV_H - -#include <linux/if.h> -#include <uapi/linux/netfilter/xt_physdev.h> - -#endif /*_XT_PHYSDEV_H*/ diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 
5f2614d02e03..f980edfdd278 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -5,6 +5,13 @@ #include <uapi/linux/netfilter_bridge.h> #include <linux/skbuff.h> +struct nf_bridge_frag_data { + char mac[ETH_HLEN]; + bool vlan_present; + u16 vlan_tci; + __be16 vlan_proto; +}; + #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h deleted file mode 100644 index c6147f9c0d80..000000000000 --- a/include/linux/netfilter_bridge/ebt_802_3.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_BRIDGE_EBT_802_3_H -#define __LINUX_BRIDGE_EBT_802_3_H - -#include <linux/skbuff.h> -#include <uapi/linux/netfilter_bridge/ebt_802_3.h> - -static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb) -{ - return (struct ebt_802_3_hdr *)skb_mac_header(skb); -} -#endif diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index c6935be7c6ca..162f59d0d17a 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -105,6 +105,7 @@ struct ebt_table { #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ ~(__alignof__(struct _xt_align)-1)) + extern int ebt_register_table(struct net *net, const struct ebt_table *table, const struct nf_hook_ops *ops, diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index d026e63a5aa4..e9e1ed74cdf1 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -17,14 +17,11 @@ #include <linux/if.h> #include <linux/in.h> +#include <linux/init.h> #include <linux/ip.h> #include <linux/skbuff.h> - -#include <linux/init.h> #include <uapi/linux/netfilter_ipv4/ip_tables.h> -extern void ipt_init(void) __init; - int ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res); diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 12113e502656..aac42c28fe62 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -1,13 +1,26 @@ /* IPv6-specific defines for netfilter. * (C)1998 Rusty Russell -- This code is GPL. * (C)1999 David Jeffery - * this header was blatantly ripped from netfilter_ipv4.h + * this header was blatantly ripped from netfilter_ipv4.h * it's amazing what adding a bunch of 6s can do =8^) */ #ifndef __LINUX_IP6_NETFILTER_H #define __LINUX_IP6_NETFILTER_H #include <uapi/linux/netfilter_ipv6.h> +#include <net/tcp.h> + +/* Check for an extension */ +static inline int +nf_ip6_ext_hdr(u8 nexthdr) +{ return (nexthdr == IPPROTO_HOPOPTS) || + (nexthdr == IPPROTO_ROUTING) || + (nexthdr == IPPROTO_FRAGMENT) || + (nexthdr == IPPROTO_ESP) || + (nexthdr == IPPROTO_AH) || + (nexthdr == IPPROTO_NONE) || + (nexthdr == IPPROTO_DSTOPTS); +} /* Extra routing may needed on local out, as the QUEUE target never returns * control to the table. 
@@ -19,6 +32,7 @@ struct ip6_rt_info { }; struct nf_queue_entry; +struct nf_bridge_frag_data; /* * Hook functions for ipv6 to allow xt_* modules to be built-in even @@ -34,11 +48,24 @@ struct nf_ipv6_ops { struct in6_addr *saddr); int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); + u32 (*cookie_init_sequence)(const struct ipv6hdr *iph, + const struct tcphdr *th, u16 *mssp); + int (*cookie_v6_check)(const struct ipv6hdr *iph, + const struct tcphdr *th, __u32 cookie); #endif void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); +#if IS_MODULE(CONFIG_IPV6) + int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user); + int (*br_fragment)(struct net *net, struct sock *sk, + struct sk_buff *skb, + struct nf_bridge_frag_data *data, + int (*output)(struct net *, struct sock *sk, + const struct nf_bridge_frag_data *data, + struct sk_buff *)); +#endif }; #ifdef CONFIG_NETFILTER @@ -60,8 +87,10 @@ static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, return 1; return v6_ops->chk_addr(net, addr, dev, strict); -#else +#elif IS_BUILTIN(CONFIG_IPV6) return ipv6_chk_addr(net, addr, dev, strict); +#else + return 1; #endif } @@ -86,6 +115,52 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, #endif } +#include <net/netfilter/ipv6/nf_defrag_ipv6.h> + +static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb, + u32 user) +{ +#if IS_MODULE(CONFIG_IPV6) + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); + + if (!v6_ops) + return 1; + + return v6_ops->br_defrag(net, skb, user); +#elif IS_BUILTIN(CONFIG_IPV6) + return nf_ct_frag6_gather(net, skb, user); +#else + return 1; +#endif +} + +int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + struct nf_bridge_frag_data *data, + int (*output)(struct net *, struct sock *sk, + const struct nf_bridge_frag_data *data, + struct sk_buff *)); + +static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, + struct sk_buff *skb, + struct nf_bridge_frag_data *data, + int (*output)(struct net *, struct sock *sk, + const struct nf_bridge_frag_data *data, + struct sk_buff *)) +{ +#if IS_MODULE(CONFIG_IPV6) + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); + + if (!v6_ops) + return 1; + + return v6_ops->br_fragment(net, sk, skb, data, output); +#elif IS_BUILTIN(CONFIG_IPV6) + return br_ip6_fragment(net, sk, skb, data, output); +#else + return 1; +#endif +} + int ip6_route_me_harder(struct net *net, struct sk_buff *skb); static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) @@ -97,9 +172,44 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) return -EHOSTUNREACH; return v6_ops->route_me_harder(net, skb); -#else +#elif IS_BUILTIN(CONFIG_IPV6) return ip6_route_me_harder(net, skb); +#else + return -EHOSTUNREACH; +#endif +} + +static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph, + const struct tcphdr *th, + u16 *mssp) +{ +#if IS_ENABLED(CONFIG_SYN_COOKIES) +#if IS_MODULE(CONFIG_IPV6) + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); + + if (v6_ops) + return v6_ops->cookie_init_sequence(iph, th, mssp); +#elif IS_BUILTIN(CONFIG_IPV6) + return __cookie_v6_init_sequence(iph, th, mssp); +#endif +#endif + return 0; +} + +static inline int nf_cookie_v6_check(const 
struct ipv6hdr *iph, + const struct tcphdr *th, __u32 cookie) +{ +#if IS_ENABLED(CONFIG_SYN_COOKIES) +#if IS_MODULE(CONFIG_IPV6) + const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); + + if (v6_ops) + return v6_ops->cookie_v6_check(iph, th, cookie); +#elif IS_BUILTIN(CONFIG_IPV6) + return __cookie_v6_check(iph, th, cookie); +#endif #endif + return 0; } __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 99cbfd3add40..78ab959c4575 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -17,15 +17,13 @@ #include <linux/if.h> #include <linux/in6.h> +#include <linux/init.h> #include <linux/ipv6.h> #include <linux/skbuff.h> - -#include <linux/init.h> #include <uapi/linux/netfilter_ipv6/ip6_tables.h> -extern void ip6t_init(void) __init; - extern void *ip6t_alloc_initial_table(const struct xt_table *); + int ip6t_register_table(struct net *net, const struct xt_table *table, const struct ip6t_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res); @@ -35,18 +33,6 @@ extern unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); -/* Check for an extension */ -static inline int -ip6t_ext_hdr(u8 nexthdr) -{ return (nexthdr == IPPROTO_HOPOPTS) || - (nexthdr == IPPROTO_ROUTING) || - (nexthdr == IPPROTO_FRAGMENT) || - (nexthdr == IPPROTO_ESP) || - (nexthdr == IPPROTO_AH) || - (nexthdr == IPPROTO_NONE) || - (nexthdr == IPPROTO_DSTOPTS); -} - #ifdef CONFIG_COMPAT #include <net/compat.h> diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 593d1b9c33a8..205fa7b1f07a 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -192,7 +192,14 @@ struct netlink_callback { bool strict_check; u16 answer_flags; unsigned int prev_seq, seq; - long args[6]; + union { + u8 ctx[48]; + + /* args is deprecated. Cast a struct over ctx instead + * for proper type safety. 
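The ctx[] member introduced above is meant to be overlaid with a caller-defined cursor structure instead of indexing cb->args[] by hand. A minimal sketch of that pattern, assuming a hypothetical dump handler (the foo_* names are illustrative, not part of this patch):

#include <linux/netlink.h>
#include <linux/build_bug.h>

/* Hypothetical per-dump cursor; must fit inside netlink_callback::ctx. */
struct foo_dump_ctx {
	u32 last_idx;		/* index of the last object already dumped */
	u32 dev_ifindex;	/* filter carried between dump rounds */
};

static inline struct foo_dump_ctx *foo_dump_ctx(struct netlink_callback *cb)
{
	/* Overlay the typed cursor on the opaque ctx area. */
	BUILD_BUG_ON(sizeof(struct foo_dump_ctx) > sizeof(cb->ctx));
	return (struct foo_dump_ctx *)cb->ctx;
}

static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct foo_dump_ctx *ctx = foo_dump_ctx(cb);

	/* ... emit objects with index > ctx->last_idx, updating it ... */
	return skb->len;
}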
+ */ + long args[6]; + }; }; struct netlink_notify { diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 22494d170619..fd59904a282c 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -660,6 +660,7 @@ enum pnfs_update_layout_reason { PNFS_UPDATE_LAYOUT_BLOCKED, PNFS_UPDATE_LAYOUT_INVALID_OPEN, PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, + PNFS_UPDATE_LAYOUT_EXIT, }; #define NFS4_OP_MAP_NUM_LONGS \ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index d363d5765cdf..570a60c2f4f4 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -223,6 +223,8 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */ #define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */ #define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ +#define NFS_INO_DATA_INVAL_DEFER \ + BIT(13) /* Deferred cache invalidation */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ @@ -488,6 +490,9 @@ extern const struct file_operations nfs_dir_operations; extern const struct dentry_operations nfs_dentry_operations; extern void nfs_force_lookup_revalidate(struct inode *dir); +extern struct dentry *nfs_add_or_obtain(struct dentry *dentry, + struct nfs_fh *fh, struct nfs_fattr *fattr, + struct nfs4_label *label); extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label); extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 1e78032a174b..a87fe854f008 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -58,6 +58,7 @@ struct nfs_client { struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */ u32 cl_minorversion;/* NFSv4 minorversion */ + unsigned int cl_nconnect; /* Number of connections */ const char * cl_principal; /* used for machine cred */ #if IS_ENABLED(CONFIG_NFS_V4) diff --git a/include/linux/node.h b/include/linux/node.h index 1a557c589ecb..4866f32a02d8 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -137,10 +137,7 @@ static inline int register_one_node(int nid) extern void unregister_one_node(int nid); extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); -extern int register_mem_sect_under_node(struct memory_block *mem_blk, - void *arg); -extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, - unsigned long phys_index); +extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk); extern int register_memory_node_under_compute_node(unsigned int mem_nid, unsigned int cpu_nid, @@ -171,15 +168,8 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) { return 0; } -static inline int register_mem_sect_under_node(struct memory_block *mem_blk, - void *arg) +static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk) { - return 0; -} -static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, - unsigned long phys_index) -{ - return 0; } static inline void register_hugetlbfs_with_node(node_registration_func_t reg, diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 56a92e3ae3ae..8c13538aeffe 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -58,9 +58,11 @@ #include <linux/completion.h> #include <linux/device.h> +#include <linux/interrupt.h> struct ntb_client; struct 
ntb_dev; +struct ntb_msi; struct pci_dev; /** @@ -205,7 +207,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops) } /** - * struct ntb_ctx_ops - ntb device operations + * struct ntb_dev_ops - ntb device operations * @port_number: See ntb_port_number(). * @peer_port_count: See ntb_peer_port_count(). * @peer_port_number: See ntb_peer_port_number(). @@ -404,7 +406,7 @@ struct ntb_client { #define drv_ntb_client(__drv) container_of((__drv), struct ntb_client, drv) /** - * struct ntb_device - ntb device + * struct ntb_dev - ntb device * @dev: Linux device object. * @pdev: PCI device entry of the ntb. * @topo: Detected topology of the ntb. @@ -426,6 +428,10 @@ struct ntb_dev { spinlock_t ctx_lock; /* block unregister until device is fully released */ struct completion released; + +#ifdef CONFIG_NTB_MSI + struct ntb_msi *msi; +#endif }; #define dev_ntb(__dev) container_of((__dev), struct ntb_dev, dev) @@ -616,7 +622,6 @@ static inline int ntb_port_number(struct ntb_dev *ntb) return ntb->ops->port_number(ntb); } - /** * ntb_peer_port_count() - get the number of peer device ports * @ntb: NTB device context. @@ -654,6 +659,58 @@ static inline int ntb_peer_port_number(struct ntb_dev *ntb, int pidx) } /** + * ntb_logical_port_number() - get the logical port number of the local port + * @ntb: NTB device context. + * + * The Logical Port Number is defined to be a unique number for each + * port starting from zero through to the number of ports minus one. + * This is in contrast to the Port Number where each port can be assigned + * any unique physical number by the hardware. + * + * The logical port number is useful for calculating the resource indexes + * used by peers. + * + * Return: the logical port number or negative value indicating an error + */ +static inline int ntb_logical_port_number(struct ntb_dev *ntb) +{ + int lport = ntb_port_number(ntb); + int pidx; + + if (lport < 0) + return lport; + + for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++) + if (lport <= ntb_peer_port_number(ntb, pidx)) + return pidx; + + return pidx; +} + +/** + * ntb_peer_logical_port_number() - get the logical peer port by given index + * @ntb: NTB device context. + * @pidx: Peer port index. + * + * The Logical Port Number is defined to be a unique number for each + * port starting from zero through to the number of ports minus one. + * This is in contrast to the Port Number where each port can be assigned + * any unique physical number by the hardware. + * + * The logical port number is useful for calculating the resource indexes + * used by peers. + * + * Return: the peer's logical port number or negative value indicating an error + */ +static inline int ntb_peer_logical_port_number(struct ntb_dev *ntb, int pidx) +{ + if (ntb_peer_port_number(ntb, pidx) < ntb_port_number(ntb)) + return pidx; + else + return pidx + 1; +} + +/** * ntb_peer_port_idx() - get the peer device port index by given port number * @ntb: NTB device context. * @port: Peer port number. @@ -1506,4 +1563,141 @@ static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx, return ntb->ops->peer_msg_write(ntb, pidx, midx, msg); } +/** + * ntb_peer_resource_idx() - get a resource index for a given peer idx + * @ntb: NTB device context. + * @pidx: Peer port index. + * + * When constructing a graph of peers, each remote peer must use a different + * resource index (mw, doorbell, etc) to communicate with each other + * peer. 
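A hedged sketch of how a multi-port NTB client might consume the logical-port helpers added here, together with ntb_peer_resource_idx()/ntb_peer_highest_mw_idx() defined further down in this header; the foo_* function is illustrative only, and a real client would go on to program the chosen memory window or doorbell:

#include <linux/ntb.h>

static void foo_ntb_report_peer_resources(struct ntb_dev *ntb)
{
	int pidx;

	for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++) {
		int ridx = ntb_peer_resource_idx(ntb, pidx);
		int widx = ntb_peer_highest_mw_idx(ntb, pidx);

		if (ridx < 0 || widx < 0)
			continue;	/* invalid peer index */

		/* 'ridx' is the MW/doorbell this peer expects us to use;
		 * 'widx' is the same assignment counted from the top. */
		dev_dbg(&ntb->dev, "peer %d: resource idx %d, highest MW idx %d\n",
			pidx, ridx, widx);
	}
}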
+ * + * In a two peer system, this function should always return 0 such that + * resource 0 points to the remote peer on both ports. + * + * In a 5 peer system, this function will return the following matrix + * + * pidx \ port 0 1 2 3 4 + * 0 0 0 1 2 3 + * 1 0 1 1 2 3 + * 2 0 1 2 2 3 + * 3 0 1 2 3 3 + * + * For example, if this function is used to program peer's memory + * windows, port 0 will program MW 0 on all it's peers to point to itself. + * port 1 will program MW 0 in port 0 to point to itself and MW 1 on all + * other ports. etc. + * + * For the legacy two host case, ntb_port_number() and ntb_peer_port_number() + * both return zero and therefore this function will always return zero. + * So MW 0 on each host would be programmed to point to the other host. + * + * Return: the resource index to use for that peer. + */ +static inline int ntb_peer_resource_idx(struct ntb_dev *ntb, int pidx) +{ + int local_port, peer_port; + + if (pidx >= ntb_peer_port_count(ntb)) + return -EINVAL; + + local_port = ntb_logical_port_number(ntb); + peer_port = ntb_peer_logical_port_number(ntb, pidx); + + if (peer_port < local_port) + return local_port - 1; + else + return local_port; +} + +/** + * ntb_peer_highest_mw_idx() - get a memory window index for a given peer idx + * using the highest index memory windows first + * + * @ntb: NTB device context. + * @pidx: Peer port index. + * + * Like ntb_peer_resource_idx(), except it returns indexes starting with + * last memory window index. + * + * Return: the resource index to use for that peer. + */ +static inline int ntb_peer_highest_mw_idx(struct ntb_dev *ntb, int pidx) +{ + int ret; + + ret = ntb_peer_resource_idx(ntb, pidx); + if (ret < 0) + return ret; + + return ntb_mw_count(ntb, pidx) - ret - 1; +} + +struct ntb_msi_desc { + u32 addr_offset; + u32 data; +}; + +#ifdef CONFIG_NTB_MSI + +int ntb_msi_init(struct ntb_dev *ntb, void (*desc_changed)(void *ctx)); +int ntb_msi_setup_mws(struct ntb_dev *ntb); +void ntb_msi_clear_mws(struct ntb_dev *ntb); +int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler, + irq_handler_t thread_fn, + const char *name, void *dev_id, + struct ntb_msi_desc *msi_desc); +void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id); +int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer, + struct ntb_msi_desc *desc); +int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer, + struct ntb_msi_desc *desc, + phys_addr_t *msi_addr); + +#else /* not CONFIG_NTB_MSI */ + +static inline int ntb_msi_init(struct ntb_dev *ntb, + void (*desc_changed)(void *ctx)) +{ + return -EOPNOTSUPP; +} +static inline int ntb_msi_setup_mws(struct ntb_dev *ntb) +{ + return -EOPNOTSUPP; +} +static inline void ntb_msi_clear_mws(struct ntb_dev *ntb) {} +static inline int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, + irq_handler_t handler, + irq_handler_t thread_fn, + const char *name, void *dev_id, + struct ntb_msi_desc *msi_desc) +{ + return -EOPNOTSUPP; +} +static inline void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, + void *dev_id) {} +static inline int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer, + struct ntb_msi_desc *desc) +{ + return -EOPNOTSUPP; +} +static inline int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer, + struct ntb_msi_desc *desc, + phys_addr_t *msi_addr) +{ + return -EOPNOTSUPP; + +} + +#endif /* CONFIG_NTB_MSI */ + +static inline int ntbm_msi_request_irq(struct ntb_dev *ntb, + irq_handler_t handler, + const char *name, void *dev_id, + struct ntb_msi_desc *msi_desc) +{ + 
return ntbm_msi_request_threaded_irq(ntb, handler, NULL, name, + dev_id, msi_desc); +} + #endif diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index c48e96436f56..10f81629b9ce 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -6,6 +6,8 @@ #ifndef _NVME_FC_DRIVER_H #define _NVME_FC_DRIVER_H 1 +#include <linux/scatterlist.h> + /* * ********************** LLDD FC-NVME Host API ******************** @@ -791,6 +793,11 @@ struct nvmet_fc_target_port { * nvmefc_tgt_fcp_req. * Entrypoint is Optional. * + * @discovery_event: Called by the transport to generate an RSCN + * change notifications to NVME initiators. The RSCN notifications + * should cause the initiator to rescan the discovery controller + * on the targetport. + * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. @@ -832,6 +839,7 @@ struct nvmet_fc_target_template { struct nvmefc_tgt_fcp_req *fcpreq); void (*defer_rcv)(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq); + void (*discovery_event)(struct nvmet_fc_target_port *tgtport); u32 max_hw_queues; u16 max_sgl_segments; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 8028adacaff3..f61d6906e59d 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -140,6 +140,7 @@ enum { * Submission and Completion Queue Entry Sizes for the NVM command set. * (In bytes and specified as a power of two (2^n)). */ +#define NVME_ADM_SQES 6 #define NVME_NVM_IOSQES 6 #define NVME_NVM_IOCQES 4 @@ -315,7 +316,7 @@ struct nvme_id_ns { __u8 nmic; __u8 rescap; __u8 fpi; - __u8 rsvd33; + __u8 dlfeat; __le16 nawun; __le16 nawupf; __le16 nacwu; @@ -324,11 +325,17 @@ struct nvme_id_ns { __le16 nabspf; __le16 noiob; __u8 nvmcap[16]; - __u8 rsvd64[28]; + __le16 npwg; + __le16 npwa; + __le16 npdg; + __le16 npda; + __le16 nows; + __u8 rsvd74[18]; __le32 anagrpid; __u8 rsvd96[3]; __u8 nsattr; - __u8 rsvd100[4]; + __le16 nvmsetid; + __le16 endgid; __u8 nguid[16]; __u8 eui64[8]; struct nvme_lbaf lbaf[16]; @@ -562,6 +569,22 @@ enum nvme_opcode { nvme_cmd_resv_release = 0x15, }; +#define nvme_opcode_name(opcode) { opcode, #opcode } +#define show_nvm_opcode_name(val) \ + __print_symbolic(val, \ + nvme_opcode_name(nvme_cmd_flush), \ + nvme_opcode_name(nvme_cmd_write), \ + nvme_opcode_name(nvme_cmd_read), \ + nvme_opcode_name(nvme_cmd_write_uncor), \ + nvme_opcode_name(nvme_cmd_compare), \ + nvme_opcode_name(nvme_cmd_write_zeroes), \ + nvme_opcode_name(nvme_cmd_dsm), \ + nvme_opcode_name(nvme_cmd_resv_register), \ + nvme_opcode_name(nvme_cmd_resv_report), \ + nvme_opcode_name(nvme_cmd_resv_acquire), \ + nvme_opcode_name(nvme_cmd_resv_release)) + + /* * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier * @@ -792,7 +815,35 @@ enum nvme_admin_opcode { nvme_admin_security_send = 0x81, nvme_admin_security_recv = 0x82, nvme_admin_sanitize_nvm = 0x84, -}; + nvme_admin_get_lba_status = 0x86, +}; + +#define nvme_admin_opcode_name(opcode) { opcode, #opcode } +#define show_admin_opcode_name(val) \ + __print_symbolic(val, \ + nvme_admin_opcode_name(nvme_admin_delete_sq), \ + nvme_admin_opcode_name(nvme_admin_create_sq), \ + nvme_admin_opcode_name(nvme_admin_get_log_page), \ + nvme_admin_opcode_name(nvme_admin_delete_cq), \ + nvme_admin_opcode_name(nvme_admin_create_cq), \ + nvme_admin_opcode_name(nvme_admin_identify), \ + nvme_admin_opcode_name(nvme_admin_abort_cmd), \ + 
nvme_admin_opcode_name(nvme_admin_set_features), \ + nvme_admin_opcode_name(nvme_admin_get_features), \ + nvme_admin_opcode_name(nvme_admin_async_event), \ + nvme_admin_opcode_name(nvme_admin_ns_mgmt), \ + nvme_admin_opcode_name(nvme_admin_activate_fw), \ + nvme_admin_opcode_name(nvme_admin_download_fw), \ + nvme_admin_opcode_name(nvme_admin_ns_attach), \ + nvme_admin_opcode_name(nvme_admin_keep_alive), \ + nvme_admin_opcode_name(nvme_admin_directive_send), \ + nvme_admin_opcode_name(nvme_admin_directive_recv), \ + nvme_admin_opcode_name(nvme_admin_dbbuf), \ + nvme_admin_opcode_name(nvme_admin_format_nvm), \ + nvme_admin_opcode_name(nvme_admin_security_send), \ + nvme_admin_opcode_name(nvme_admin_security_recv), \ + nvme_admin_opcode_name(nvme_admin_sanitize_nvm), \ + nvme_admin_opcode_name(nvme_admin_get_lba_status)) enum { NVME_QUEUE_PHYS_CONTIG = (1 << 0), @@ -1008,6 +1059,23 @@ enum nvmf_capsule_command { nvme_fabrics_type_property_get = 0x04, }; +#define nvme_fabrics_type_name(type) { type, #type } +#define show_fabrics_type_name(type) \ + __print_symbolic(type, \ + nvme_fabrics_type_name(nvme_fabrics_type_property_set), \ + nvme_fabrics_type_name(nvme_fabrics_type_connect), \ + nvme_fabrics_type_name(nvme_fabrics_type_property_get)) + +/* + * If not fabrics command, fctype will be ignored. + */ +#define show_opcode_name(qid, opcode, fctype) \ + ((opcode) == nvme_fabrics_command ? \ + show_fabrics_type_name(fctype) : \ + ((qid) ? \ + show_nvm_opcode_name(opcode) : \ + show_admin_opcode_name(opcode))) + struct nvmf_common_command { __u8 opcode; __u8 resv1; @@ -1165,6 +1233,11 @@ struct nvme_command { }; }; +static inline bool nvme_is_fabrics(struct nvme_command *cmd) +{ + return cmd->common.opcode == nvme_fabrics_command; +} + struct nvme_error_slot { __le64 error_count; __le16 sqid; @@ -1186,7 +1259,7 @@ static inline bool nvme_is_write(struct nvme_command *cmd) * * Why can't we simply have a Fabrics In and Fabrics out command? */ - if (unlikely(cmd->common.opcode == nvme_fabrics_command)) + if (unlikely(nvme_is_fabrics(cmd))) return cmd->fabrics.fctype & 1; return cmd->common.opcode & 1; } diff --git a/include/linux/of.h b/include/linux/of.h index 0cf857012f11..844f89e1b039 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1164,7 +1164,7 @@ static inline int of_property_read_string_index(const struct device_node *np, } /** - * of_property_read_bool - Findfrom a property + * of_property_read_bool - Find a property * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. 
* diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index a713e5d156d8..acf820e88952 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -23,15 +23,6 @@ struct device_node; /* For scanning an arbitrary device-tree at any time */ -extern char *of_fdt_get_string(const void *blob, u32 offset); -extern void *of_fdt_get_property(const void *blob, - unsigned long node, - const char *name, - int *size); -extern bool of_fdt_is_big_endian(const void *blob, - unsigned long node); -extern int of_fdt_match(const void *blob, unsigned long node, - const char *const *compat); extern void *of_fdt_unflatten_tree(const unsigned long *blob, struct device_node *dad, struct device_node **mynodes); @@ -64,9 +55,7 @@ extern int of_get_flat_dt_subnode_by_name(unsigned long node, extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, int *size); extern int of_flat_dt_is_compatible(unsigned long node, const char *name); -extern int of_flat_dt_match(unsigned long node, const char *const *matches); extern unsigned long of_get_flat_dt_root(void); -extern int of_get_flat_dt_size(void); extern uint32_t of_get_flat_dt_phandle(unsigned long node); extern int early_init_dt_scan_chosen(unsigned long node, const char *uname, diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index f9737dea9d1f..16967390a3fe 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -61,10 +61,6 @@ static inline int of_mm_gpiochip_add(struct device_node *np, } extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); -extern int of_gpio_simple_xlate(struct gpio_chip *gc, - const struct of_phandle_args *gpiospec, - u32 *flags); - #else /* CONFIG_OF_GPIO */ /* Drivers may not strictly depend on the GPIO support, so let them link. 
*/ @@ -77,13 +73,6 @@ static inline int of_get_named_gpio_flags(struct device_node *np, return -ENOSYS; } -static inline int of_gpio_simple_xlate(struct gpio_chip *gc, - const struct of_phandle_args *gpiospec, - u32 *flags) -{ - return -ENOSYS; -} - #endif /* CONFIG_OF_GPIO */ /** diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h index 79bdc6328c52..c4602364e909 100644 --- a/include/linux/olpc-ec.h +++ b/include/linux/olpc-ec.h @@ -2,6 +2,8 @@ #ifndef _LINUX_OLPC_EC_H #define _LINUX_OLPC_EC_H +#include <linux/bits.h> + /* XO-1 EC commands */ #define EC_FIRMWARE_REV 0x08 #define EC_WRITE_SCI_MASK 0x1b @@ -16,28 +18,57 @@ #define EC_SCI_QUERY 0x84 #define EC_EXT_SCI_QUERY 0x85 +/* SCI source values */ +#define EC_SCI_SRC_GAME BIT(0) +#define EC_SCI_SRC_BATTERY BIT(1) +#define EC_SCI_SRC_BATSOC BIT(2) +#define EC_SCI_SRC_BATERR BIT(3) +#define EC_SCI_SRC_EBOOK BIT(4) /* XO-1 only */ +#define EC_SCI_SRC_WLAN BIT(5) /* XO-1 only */ +#define EC_SCI_SRC_ACPWR BIT(6) +#define EC_SCI_SRC_BATCRIT BIT(7) +#define EC_SCI_SRC_GPWAKE BIT(8) /* XO-1.5 only */ +#define EC_SCI_SRC_ALL GENMASK(8, 0) + struct platform_device; struct olpc_ec_driver { - int (*probe)(struct platform_device *); int (*suspend)(struct platform_device *); int (*resume)(struct platform_device *); int (*ec_cmd)(u8, u8 *, size_t, u8 *, size_t, void *); + + bool wakeup_available; }; -#ifdef CONFIG_OLPC +#ifdef CONFIG_OLPC_EC extern void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg); extern int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen); +extern void olpc_ec_wakeup_set(u16 value); +extern void olpc_ec_wakeup_clear(u16 value); + +extern int olpc_ec_mask_write(u16 bits); +extern int olpc_ec_sci_query(u16 *sci_value); + +extern bool olpc_ec_wakeup_available(void); + #else static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen) { return -ENODEV; } -#endif /* CONFIG_OLPC */ +static inline void olpc_ec_wakeup_set(u16 value) { } +static inline void olpc_ec_wakeup_clear(u16 value) { } + +static inline bool olpc_ec_wakeup_available(void) +{ + return false; +} + +#endif /* CONFIG_OLPC_EC */ #endif /* _LINUX_OLPC_EC_H */ diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index 840ce551e773..ba3cfbb52312 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h @@ -1,8 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_OMAP_DMA_H #define __LINUX_OMAP_DMA_H -#include <linux/omap-dmaengine.h> - /* * Legacy OMAP DMA handling defines and functions * diff --git a/include/linux/omap-dmaengine.h b/include/linux/omap-dmaengine.h deleted file mode 100644 index b6e42f933c40..000000000000 --- a/include/linux/omap-dmaengine.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * OMAP DMA Engine support - */ -#ifndef __LINUX_OMAP_DMAENGINE_H -#define __LINUX_OMAP_DMAENGINE_H - -struct dma_chan; - -#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE)) -bool omap_dma_filter_fn(struct dma_chan *, void *); -#else -static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) -{ - return false; -} -#endif -#endif /* __LINUX_OMAP_DMAENGINE_H */ diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h index 153bf25b4df3..2c32ca09df02 100644 --- a/include/linux/omap-iommu.h +++ b/include/linux/omap-iommu.h @@ -10,12 +10,27 @@ #ifndef _OMAP_IOMMU_H_ #define _OMAP_IOMMU_H_ +struct iommu_domain; + #ifdef CONFIG_OMAP_IOMMU extern void omap_iommu_save_ctx(struct device 
*dev); extern void omap_iommu_restore_ctx(struct device *dev); + +int omap_iommu_domain_deactivate(struct iommu_domain *domain); +int omap_iommu_domain_activate(struct iommu_domain *domain); #else static inline void omap_iommu_save_ctx(struct device *dev) {} static inline void omap_iommu_restore_ctx(struct device *dev) {} + +static inline int omap_iommu_domain_deactivate(struct iommu_domain *domain) +{ + return -ENODEV; +} + +static inline int omap_iommu_domain_activate(struct iommu_domain *domain) +{ + return -ENODEV; +} #endif #endif diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index 6dbcd2da0332..8aa984ec1f38 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h @@ -6,7 +6,9 @@ #ifndef OMAP_MAILBOX_H #define OMAP_MAILBOX_H -typedef u32 mbox_msg_t; +typedef uintptr_t mbox_msg_t; + +#define omap_mbox_message(data) (u32)(mbox_msg_t)(data) typedef int __bitwise omap_mbox_irq_t; #define IRQ_TX ((__force omap_mbox_irq_t) 1) diff --git a/include/linux/oom.h b/include/linux/oom.h index d07992009265..c696c265f019 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -108,7 +108,6 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm) bool __oom_reap_task_mm(struct mm_struct *mm); extern unsigned long oom_badness(struct task_struct *p, - struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages); extern bool out_of_memory(struct oom_control *oc); diff --git a/include/linux/oxu210hp.h b/include/linux/oxu210hp.h deleted file mode 100644 index 94cd25165c08..000000000000 --- a/include/linux/oxu210hp.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* platform data for the OXU210HP HCD */ - -struct oxu210hp_platform_data { - unsigned int bus16:1; - unsigned int use_hcd_otg:1; - unsigned int use_hcd_sph:1; -}; diff --git a/include/linux/padata.h b/include/linux/padata.h index 56f09e36f770..23717eeaad23 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -12,7 +12,6 @@ #include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/list.h> -#include <linux/timer.h> #include <linux/notifier.h> #include <linux/kobject.h> @@ -36,6 +35,7 @@ struct padata_priv { struct parallel_data *pd; int cb_cpu; int cpu; + unsigned int seq_nr; int info; void (*parallel)(struct padata_priv *padata); void (*serial)(struct padata_priv *padata); @@ -73,20 +73,14 @@ struct padata_serial_queue { * @serial: List to wait for serialization after reordering. * @pwork: work struct for parallelization. * @swork: work struct for serialization. - * @pd: Backpointer to the internal control structure. * @work: work struct for parallelization. - * @reorder_work: work struct for reordering. * @num_obj: Number of objects that are processed by this cpu. - * @cpu_index: Index of the cpu. */ struct padata_parallel_queue { struct padata_list parallel; struct padata_list reorder; - struct parallel_data *pd; struct work_struct work; - struct work_struct reorder_work; atomic_t num_obj; - int cpu_index; }; /** @@ -110,10 +104,11 @@ struct padata_cpumask { * @reorder_objects: Number of objects waiting in the reorder queues. * @refcnt: Number of objects holding a reference on this parallel_data. * @max_seq_nr: Maximal used sequence number. + * @processed: Number of already processed objects. + * @cpu: Next CPU to be processed. * @cpumask: The cpumasks in use for parallel and serial workers. + * @reorder_work: work struct for reordering. * @lock: Reorder lock. 
- * @processed: Number of already processed objects. - * @timer: Reorder timer. */ struct parallel_data { struct padata_instance *pinst; @@ -122,17 +117,19 @@ struct parallel_data { atomic_t reorder_objects; atomic_t refcnt; atomic_t seq_nr; + unsigned int processed; + int cpu; struct padata_cpumask cpumask; + struct work_struct reorder_work; spinlock_t lock ____cacheline_aligned; - unsigned int processed; - struct timer_list timer; }; /** * struct padata_instance - The overall control structure. * * @cpu_notifier: cpu hotplug notifier. - * @wq: The workqueue in use. + * @parallel_wq: The workqueue used for parallel work. + * @serial_wq: The workqueue used for serial work. * @pd: The internal control structure. * @cpumask: User supplied cpumasks for parallel and serial works. * @cpumask_change_notifier: Notifiers chain for user-defined notify @@ -144,7 +141,8 @@ struct parallel_data { */ struct padata_instance { struct hlist_node node; - struct workqueue_struct *wq; + struct workqueue_struct *parallel_wq; + struct workqueue_struct *serial_wq; struct parallel_data *pd; struct padata_cpumask cpumask; struct blocking_notifier_head cpumask_change_notifier; @@ -156,11 +154,10 @@ struct padata_instance { #define PADATA_INVALID 4 }; -extern struct padata_instance *padata_alloc_possible( - struct workqueue_struct *wq); +extern struct padata_instance *padata_alloc_possible(const char *name); extern void padata_free(struct padata_instance *pinst); extern int padata_do_parallel(struct padata_instance *pinst, - struct padata_priv *padata, int cb_cpu); + struct padata_priv *padata, int *cb_cpu); extern void padata_do_serial(struct padata_priv *padata); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 1dda31825ec4..71283739ffd2 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -32,6 +32,7 @@ #endif /* CONFIG_SPARSEMEM */ +#ifndef BUILD_VDSO32_64 /* * page->flags layout: * @@ -76,20 +77,22 @@ #define LAST_CPUPID_SHIFT 0 #endif -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS +#ifdef CONFIG_KASAN_SW_TAGS +#define KASAN_TAG_WIDTH 8 +#else +#define KASAN_TAG_WIDTH 0 +#endif + +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \ + <= BITS_PER_LONG - NR_PAGEFLAGS #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT #else #define LAST_CPUPID_WIDTH 0 #endif -#ifdef CONFIG_KASAN_SW_TAGS -#define KASAN_TAG_WIDTH 8 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \ > BITS_PER_LONG - NR_PAGEFLAGS -#error "KASAN: not enough bits in page flags for tag" -#endif -#else -#define KASAN_TAG_WIDTH 0 +#error "Not enough bits in page flags" #endif /* @@ -104,4 +107,5 @@ #define LAST_CPUPID_NOT_IN_PAGE_FLAGS #endif +#endif #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 9f8712a4b1a5..1bf83c8fcaa7 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -152,6 +152,8 @@ enum pageflags { PG_savepinned = PG_dirty, /* Has a grant mapping of another (foreign) domain's page. */ PG_foreign = PG_owner_priv_1, + /* Remapped by swiotlb-xen. 
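With the reorder timer gone, padata callers now name the instance and pass the serial callback CPU by reference. A rough sketch of the updated calling convention, assuming a hypothetical foo_job wrapper and an instance that was created with padata_alloc_possible("foo") and started with padata_start():

#include <linux/kernel.h>
#include <linux/padata.h>

struct foo_job {
	struct padata_priv padata;	/* embedded so container_of() works */
	/* ... job payload ... */
};

static void foo_parallel(struct padata_priv *padata)
{
	struct foo_job *job = container_of(padata, struct foo_job, padata);

	/* heavy per-object work runs here, on some CPU in the parallel mask */
	(void)job;
	padata_do_serial(padata);	/* hand the object back for in-order completion */
}

static void foo_serial(struct padata_priv *padata)
{
	/* objects arrive here in the order they were submitted */
}

static int foo_submit(struct padata_instance *pinst, struct foo_job *job)
{
	int cb_cpu = 0;	/* requested serial CPU; padata may pick another from the serial mask */

	job->padata.parallel = foo_parallel;
	job->padata.serial = foo_serial;
	return padata_do_parallel(pinst, &job->padata, &cb_cpu);
}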
*/ + PG_xen_remapped = PG_owner_priv_1, /* SLOB */ PG_slob_free = PG_private, @@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); +PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) + TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) @@ -618,12 +622,28 @@ static inline int PageTransCompound(struct page *page) * * Unlike PageTransCompound, this is safe to be called only while * split_huge_pmd() cannot run from under us, like if protected by the - * MMU notifier, otherwise it may result in page->_mapcount < 0 false + * MMU notifier, otherwise it may result in page->_mapcount check false * positives. + * + * We have to treat page cache THP differently since every subpage of it + * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE + * mapped in the current process so comparing subpage's _mapcount to + * compound_mapcount to filter out PTE mapped case. */ static inline int PageTransCompoundMap(struct page *page) { - return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0; + struct page *head; + + if (!PageTransCompound(page)) + return 0; + + if (PageAnon(page)) + return atomic_read(&page->_mapcount) < 0; + + head = compound_head(page); + /* File THP is PMD mapped and not PTE mapped */ + return atomic_read(&page->_mapcount) == + atomic_read(compound_mapcount_ptr(head)); } /* @@ -703,6 +723,7 @@ PAGEFLAG_FALSE(DoubleMap) #define PG_offline 0x00000100 #define PG_kmemcg 0x00000200 #define PG_table 0x00000400 +#define PG_guard 0x00000800 #define PageType(page, flag) \ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) @@ -754,6 +775,11 @@ PAGE_TYPE_OPS(Kmemcg, kmemcg) */ PAGE_TYPE_OPS(Table, table) +/* + * Marks guardpages used with debug_pagealloc. + */ +PAGE_TYPE_OPS(Guard, guard) + extern bool is_free_buddy_page(struct page *page); __PAGEFLAG(Isolated, isolated, PF_ANY); diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 280ae96dc4c3..1099c2fee20f 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -50,7 +50,7 @@ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. 
* target range is [start_pfn, end_pfn) */ -int +void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, unsigned migratetype); diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index f84f167ec04c..cfce186f0c4e 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -17,8 +17,8 @@ struct page_ext_operations { #ifdef CONFIG_PAGE_EXTENSION enum page_ext_flags { - PAGE_EXT_DEBUG_GUARD, PAGE_EXT_OWNER, + PAGE_EXT_OWNER_ALLOCATED, #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) PAGE_EXT_YOUNG, PAGE_EXT_IDLE, @@ -36,6 +36,7 @@ struct page_ext { unsigned long flags; }; +extern unsigned long page_ext_size; extern void pgdat_page_ext_init(struct pglist_data *pgdat); #ifdef CONFIG_SPARSEMEM @@ -52,6 +53,13 @@ static inline void page_ext_init(void) struct page_ext *lookup_page_ext(const struct page *page); +static inline struct page_ext *page_ext_next(struct page_ext *curr) +{ + void *next = curr; + next += page_ext_size; + return next; +} + #else /* !CONFIG_PAGE_EXTENSION */ struct page_ext; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 9ec3544baee2..37a4d9e32cd3 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -335,15 +335,12 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, static inline struct page *find_subpage(struct page *page, pgoff_t offset) { - unsigned long mask; - if (PageHuge(page)) return page; VM_BUG_ON_PAGE(PageTail(page), page); - mask = (1UL << compound_order(page)) - 1; - return page + (offset & mask); + return page + (offset & (compound_nr(page) - 1)); } struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); @@ -396,8 +393,7 @@ extern int read_cache_pages(struct address_space *mapping, static inline struct page *read_mapping_page(struct address_space *mapping, pgoff_t index, void *data) { - filler_t *filler = (filler_t *)mapping->a_ops->readpage; - return read_cache_page(mapping, index, filler, data); + return read_cache_page(mapping, index, NULL, data); } /* @@ -465,6 +461,9 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags); extern void unlock_page(struct page *page); +/* + * Return true if the page was successfully locked + */ static inline int trylock_page(struct page *page) { page = compound_head(page); diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h new file mode 100644 index 000000000000..bddd9759bab9 --- /dev/null +++ b/include/linux/pagewalk.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PAGEWALK_H +#define _LINUX_PAGEWALK_H + +#include <linux/mm.h> + +struct mm_walk; + +/** + * mm_walk_ops - callbacks for walk_page_range + * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry + * this handler should only handle pud_trans_huge() puds. + * the pmd_entry or pte_entry callbacks will be used for + * regular PUDs. + * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry + * this handler is required to be able to handle + * pmd_trans_huge() pmds. They may simply choose to + * split_huge_page() instead of handling it explicitly. + * @pte_entry: if set, called for each non-empty PTE (4th-level) entry + * @pte_hole: if set, called for each hole at all levels + * @hugetlb_entry: if set, called for each hugetlb entry + * @test_walk: caller specific callback function to determine whether + * we walk over the current vma or not. 
Returning 0 means + * "do page table walk over the current vma", returning + * a negative value means "abort current page table walk + * right now" and returning 1 means "skip the current vma" + */ +struct mm_walk_ops { + int (*pud_entry)(pud_t *pud, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pmd_entry)(pmd_t *pmd, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pte_entry)(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk); + int (*pte_hole)(unsigned long addr, unsigned long next, + struct mm_walk *walk); + int (*hugetlb_entry)(pte_t *pte, unsigned long hmask, + unsigned long addr, unsigned long next, + struct mm_walk *walk); + int (*test_walk)(unsigned long addr, unsigned long next, + struct mm_walk *walk); +}; + +/** + * mm_walk - walk_page_range data + * @ops: operation to call during the walk + * @mm: mm_struct representing the target process of page table walk + * @vma: vma currently walked (NULL if walking outside vmas) + * @private: private data for callbacks' usage + * + * (see the comment on walk_page_range() for more details) + */ +struct mm_walk { + const struct mm_walk_ops *ops; + struct mm_struct *mm; + struct vm_area_struct *vma; + void *private; +}; + +int walk_page_range(struct mm_struct *mm, unsigned long start, + unsigned long end, const struct mm_walk_ops *ops, + void *private); +int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops, + void *private); + +#endif /* _LINUX_PAGEWALK_H */ diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 8082b612f561..62b7fdcc661c 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -107,9 +107,10 @@ static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { } #endif extern const guid_t pci_acpi_dsm_guid; -#define DEVICE_LABEL_DSM 0x07 -#define RESET_DELAY_DSM 0x08 -#define FUNCTION_DELAY_DSM 0x09 +#define IGNORE_PCI_BOOT_CONFIG_DSM 0x05 +#define DEVICE_LABEL_DSM 0x07 +#define RESET_DELAY_DSM 0x08 +#define FUNCTION_DELAY_DSM 0x09 #else /* CONFIG_ACPI */ static inline void acpi_pci_add_bus(struct pci_bus *bus) { } diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h deleted file mode 100644 index df28af5cef21..000000000000 --- a/include/linux/pci-aspm.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * aspm.h - * - * PCI Express ASPM defines and function prototypes - * - * Copyright (C) 2007 Intel Corp. 
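The new <linux/pagewalk.h> interface above gathers the callbacks into a const ops table rather than per-walk function pointers. A minimal sketch of a walker that counts present PTEs in a range; the function names and the mmap_sem locking site are assumptions of the example, not part of this patch:

#include <linux/pagewalk.h>
#include <linux/rwsem.h>

static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* keep walking */
}

static const struct mm_walk_ops count_walk_ops = {
	.pte_entry = count_pte_entry,
};

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	/* walk_page_range() expects the caller to hold mmap_sem */
	down_read(&mm->mmap_sem);
	walk_page_range(mm, start, end, &count_walk_ops, &count);
	up_read(&mm->mmap_sem);

	return count;
}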
- * Zhang Yanmin (yanmin.zhang@intel.com) - * Shaohua Li (shaohua.li@intel.com) - * - * For more information, please consult the following manuals (look at - * http://www.pcisig.com/ for how to get them): - * - * PCI Express Specification - */ - -#ifndef LINUX_ASPM_H -#define LINUX_ASPM_H - -#include <linux/pci.h> - -#define PCIE_LINK_STATE_L0S 1 -#define PCIE_LINK_STATE_L1 2 -#define PCIE_LINK_STATE_CLKPM 4 - -#ifdef CONFIG_PCIEASPM -void pci_disable_link_state(struct pci_dev *pdev, int state); -void pci_disable_link_state_locked(struct pci_dev *pdev, int state); -void pcie_no_aspm(void); -#else -static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { } -static inline void pcie_no_aspm(void) { } -#endif - -#endif /* LINUX_ASPM_H */ diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h index bca9bc3e5be7..8318a97c9c61 100644 --- a/include/linux/pci-p2pdma.h +++ b/include/linux/pci-p2pdma.h @@ -30,8 +30,10 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, unsigned int *nents, u32 length); void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl); void pci_p2pmem_publish(struct pci_dev *pdev, bool publish); -int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir); +int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); +void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma); ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, @@ -81,11 +83,17 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev, static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) { } -static inline int pci_p2pdma_map_sg(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir) +static inline int pci_p2pdma_map_sg_attrs(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) { return 0; } +static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ +} static inline int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma) { @@ -111,4 +119,16 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client) return pci_p2pmem_find_many(&client, 1); } +static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0); +} + +static inline void pci_p2pdma_unmap_sg(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir) +{ + pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0); +} + #endif /* _LINUX_PCI_P2P_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index dd436da7eccc..f9088c89a534 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -6,12 +6,18 @@ * Copyright 1994, Drew Eckhardt * Copyright 1997--1999 Martin Mares <mj@ucw.cz> * + * PCI Express ASPM defines and function prototypes + * Copyright (c) 2007 Intel Corp. 
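The _attrs variants above let DMA attributes reach P2PDMA mappings, while pci_p2pdma_map_sg()/pci_p2pdma_unmap_sg() remain as zero-attrs wrappers. A hedged sketch of a consumer allocating peer-to-peer memory from a providing device and mapping it for DMA; the foo_* name and error handling are illustrative only:

#include <linux/dma-direction.h>
#include <linux/errno.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

static int foo_map_p2p_buffer(struct pci_dev *provider, struct device *dma_dev,
			      u32 length)
{
	struct scatterlist *sgl;
	unsigned int nents;

	sgl = pci_p2pmem_alloc_sgl(provider, &nents, length);
	if (!sgl)
		return -ENOMEM;

	/* attrs == 0 is exactly what the pci_p2pdma_map_sg() wrapper passes */
	if (!pci_p2pdma_map_sg_attrs(dma_dev, sgl, nents, DMA_TO_DEVICE, 0)) {
		pci_p2pmem_free_sgl(provider, sgl);
		return -EIO;
	}

	/* ... issue the DMA ... */

	pci_p2pdma_unmap_sg_attrs(dma_dev, sgl, nents, DMA_TO_DEVICE, 0);
	pci_p2pmem_free_sgl(provider, sgl);
	return 0;
}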
+ * Zhang Yanmin (yanmin.zhang@intel.com) + * Shaohua Li (shaohua.li@intel.com) + * * For more information, please consult the following manuals (look at * http://www.pcisig.com/ for how to get them): * * PCI BIOS Specification * PCI Local Bus Specification * PCI to PCI Bridge Specification + * PCI Express Specification * PCI System Design Guide */ #ifndef LINUX_PCI_H @@ -145,12 +151,9 @@ static inline const char *pci_power_name(pci_power_t state) return pci_power_names[1 + (__force int) state]; } -#define PCI_PM_D2_DELAY 200 -#define PCI_PM_D3_WAIT 10 -#define PCI_PM_D3COLD_WAIT 100 -#define PCI_PM_BUS_WAIT 50 - /** + * typedef pci_channel_state_t + * * The pci_channel state describes connectivity between the CPU and * the PCI device. If some PCI bus between here and the PCI device * has crashed or locked up, this info is reflected here. @@ -258,6 +261,7 @@ enum pci_bus_speed { PCIE_SPEED_5_0GT = 0x15, PCIE_SPEED_8_0GT = 0x16, PCIE_SPEED_16_0GT = 0x17, + PCIE_SPEED_32_0GT = 0x18, PCI_SPEED_UNKNOWN = 0xff, }; @@ -383,7 +387,7 @@ struct pci_dev { unsigned int is_busmaster:1; /* Is busmaster */ unsigned int no_msi:1; /* May not use MSI */ - unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ + unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ unsigned int block_cfg_access:1; /* Config space access blocked */ unsigned int broken_parity_status:1; /* Generates false positive parity */ unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */ @@ -415,7 +419,6 @@ struct pci_dev { unsigned int broken_intx_masking:1; /* INTx masking can't be used */ unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ unsigned int irq_managed:1; - unsigned int has_secondary_link:1; unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ unsigned int is_probed:1; /* Device probing in progress */ unsigned int link_active_reporting:1;/* Device capable of reporting link active */ @@ -506,6 +509,8 @@ struct pci_host_bridge { unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */ unsigned int native_pme:1; /* OS may use PCIe PME */ unsigned int native_ltr:1; /* OS may use PCIe LTR */ + unsigned int preserve_config:1; /* Preserve FW resource setup */ + /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, const struct resource *res, @@ -644,9 +649,6 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) return dev->bus->self; } -struct device *pci_get_host_bridge_device(struct pci_dev *dev); -void pci_put_host_bridge_device(struct device *dev); - #ifdef CONFIG_PCI_MSI static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { @@ -776,6 +778,50 @@ struct pci_error_handlers { struct module; + +/** + * struct pci_driver - PCI driver structure + * @node: List of driver structures. + * @name: Driver name. + * @id_table: Pointer to table of device IDs the driver is + * interested in. Most drivers should export this + * table using MODULE_DEVICE_TABLE(pci,...). + * @probe: This probing function gets called (during execution + * of pci_register_driver() for already existing + * devices or later if a new device gets inserted) for + * all PCI devices which match the ID table and are not + * "owned" by the other drivers yet. This function gets + * passed a "struct pci_dev \*" for each device whose + * entry in the ID table matches the device. The probe + * function returns zero when the driver chooses to + * take "ownership" of the device or an error code + * (negative number) otherwise. 
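To make the struct pci_driver kernel-doc being added here concrete, a bare-bones skeleton of the kind of driver it describes follows; the vendor/device IDs and foo_* names are placeholders, not something this patch introduces:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/device ID */
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_ids);

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);	/* managed enable, undone automatically */
	if (ret)
		return ret;

	/* claim BARs, allocate IRQ vectors, register with a subsystem, ... */
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/* undo whatever probe set up that is not devres-managed */
}

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
};
module_pci_driver(foo_driver);

MODULE_LICENSE("GPL v2");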
+ * The probe function always gets called from process + * context, so it can sleep. + * @remove: The remove() function gets called whenever a device + * being handled by this driver is removed (either during + * deregistration of the driver or when it's manually + * pulled out of a hot-pluggable slot). + * The remove function always gets called from process + * context, so it can sleep. + * @suspend: Put device into low power state. + * @suspend_late: Put device into low power state. + * @resume_early: Wake device from low power state. + * @resume: Wake device from low power state. + * (Please see Documentation/power/pci.rst for descriptions + * of PCI Power Management and the related functions.) + * @shutdown: Hook into reboot_notifier_list (kernel/sys.c). + * Intended to stop any idling DMA operations. + * Useful for enabling wake-on-lan (NIC) or changing + * the power state of a device before reboot. + * e.g. drivers/net/e100.c. + * @sriov_configure: Optional driver callback to allow configuration of + * number of VFs to enable via sysfs "sriov_numvfs" file. + * @err_handler: See Documentation/PCI/pci-error-recovery.rst + * @groups: Sysfs attribute groups. + * @driver: Driver model structure. + * @dynids: List of dynamically added device IDs. + */ struct pci_driver { struct list_head node; const char *name; @@ -876,6 +922,11 @@ enum { PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */ }; +#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ +#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ +#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ + /* These external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI @@ -920,7 +971,7 @@ resource_size_t pcibios_align_resource(void *, const struct resource *, resource_size_t, resource_size_t); -/* Weak but can be overriden by arch */ +/* Weak but can be overridden by arch */ void pci_fixup_cardbus(struct pci_bus *); /* Generic PCI functions used internally */ @@ -946,7 +997,6 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); -void pcie_update_link_speed(struct pci_bus *bus, u16 link_status); struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, const char *name, struct hotplug_slot *hotplug); @@ -1192,19 +1242,12 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable); int pci_prepare_to_sleep(struct pci_dev *dev); int pci_back_from_sleep(struct pci_dev *dev); bool pci_dev_run_wake(struct pci_dev *dev); -bool pci_check_pme_status(struct pci_dev *dev); -void pci_pme_wakeup_bus(struct pci_bus *bus); void pci_d3cold_enable(struct pci_dev *dev); void pci_d3cold_disable(struct pci_dev *dev); bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); void pci_wakeup_bus(struct pci_bus *bus); void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state); -/* PCI Virtual Channel */ -int pci_save_vc_state(struct pci_dev *dev); -void pci_restore_vc_state(struct pci_dev *dev); -void pci_allocate_vc_save_buffers(struct pci_dev *dev); - /* For use by arch with custom probe code */ void set_pcie_port_type(struct pci_dev *pdev); void set_pcie_hotplug_bridge(struct pci_dev *pdev); @@ -1248,8 +1291,6 @@ int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); void pci_release_selected_regions(struct pci_dev *, int); /* 
drivers/pci/bus.c */ -struct pci_bus *pci_bus_get(struct pci_bus *bus); -void pci_bus_put(struct pci_bus *bus); void pci_add_resource(struct list_head *resources, struct resource *res); void pci_add_resource_offset(struct list_head *resources, struct resource *res, resource_size_t offset); @@ -1359,10 +1400,14 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus, int pci_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags); -#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ -#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ -#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ -#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ +/* + * Virtual interrupts allow for more interrupts to be allocated + * than the device has interrupts for. These are not programmed + * into the device's MSI-X table and must be handled by some + * other driver means. + */ +#define PCI_IRQ_VIRTUAL (1 << 4) + #define PCI_IRQ_ALL_TYPES \ (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) @@ -1459,14 +1504,6 @@ static inline int pci_irq_get_node(struct pci_dev *pdev, int vec) } #endif -static inline int -pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, - unsigned int max_vecs, unsigned int flags) -{ - return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags, - NULL); -} - /** * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq * @d: the INTx IRQ domain @@ -1507,10 +1544,24 @@ extern bool pcie_ports_native; #define pcie_ports_native false #endif +#define PCIE_LINK_STATE_L0S 1 +#define PCIE_LINK_STATE_L1 2 +#define PCIE_LINK_STATE_CLKPM 4 + #ifdef CONFIG_PCIEASPM +int pci_disable_link_state(struct pci_dev *pdev, int state); +int pci_disable_link_state_locked(struct pci_dev *pdev, int state); +void pcie_no_aspm(void); bool pcie_aspm_support_enabled(void); +bool pcie_aspm_enabled(struct pci_dev *pdev); #else +static inline int pci_disable_link_state(struct pci_dev *pdev, int state) +{ return 0; } +static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state) +{ return 0; } +static inline void pcie_no_aspm(void) { } static inline bool pcie_aspm_support_enabled(void) { return false; } +static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } #endif #ifdef CONFIG_PCIEAER @@ -1519,23 +1570,8 @@ bool pci_aer_available(void); static inline bool pci_aer_available(void) { return false; } #endif -#ifdef CONFIG_PCIE_ECRC -void pcie_set_ecrc_checking(struct pci_dev *dev); -void pcie_ecrc_get_policy(char *str); -#else -static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } -static inline void pcie_ecrc_get_policy(char *str) { } -#endif - bool pci_ats_disabled(void); -#ifdef CONFIG_PCIE_PTM -int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); -#else -static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) -{ return -EINVAL; } -#endif - void pci_cfg_access_lock(struct pci_dev *dev); bool pci_cfg_access_trylock(struct pci_dev *dev); void pci_cfg_access_unlock(struct pci_dev *dev); @@ -1689,11 +1725,6 @@ static inline void pci_release_regions(struct pci_dev *dev) { } static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } -static inline void pci_block_cfg_access(struct pci_dev *dev) { } -static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev) -{ return 0; } -static inline void pci_unblock_cfg_access(struct pci_dev *dev) { } - static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) { 
return NULL; } static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, @@ -1722,17 +1753,36 @@ static inline const struct pci_device_id *pci_match_id(const struct pci_device_i struct pci_dev *dev) { return NULL; } static inline bool pci_ats_disabled(void) { return true; } + +static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + return -EINVAL; +} + +static inline int +pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags, + struct irq_affinity *aff_desc) +{ + return -ENOSPC; +} #endif /* CONFIG_PCI */ +static inline int +pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags, + NULL); +} + #ifdef CONFIG_PCI_ATS /* Address Translation Service */ -void pci_ats_init(struct pci_dev *dev); int pci_enable_ats(struct pci_dev *dev, int ps); void pci_disable_ats(struct pci_dev *dev); int pci_ats_queue_depth(struct pci_dev *dev); int pci_ats_page_aligned(struct pci_dev *dev); #else -static inline void pci_ats_init(struct pci_dev *d) { } static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; } static inline void pci_disable_ats(struct pci_dev *d) { } static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; } @@ -1743,7 +1793,7 @@ static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; } #include <asm/pci.h> -/* These two functions provide almost identical functionality. Depennding +/* These two functions provide almost identical functionality. Depending * on the architecture, one will be implemented as a wrapper around the * other (in drivers/pci/mmap.c). * @@ -1812,25 +1862,9 @@ static inline const char *pci_name(const struct pci_dev *pdev) return dev_name(&pdev->dev); } - -/* - * Some archs don't want to expose struct resource to userland as-is - * in sysfs and /proc - */ -#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end); -#else -static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, - const struct resource *rsrc, resource_size_t *start, - resource_size_t *end) -{ - *start = rsrc->start; - *end = rsrc->end; -} -#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ - /* * The world is not perfect and supplies us with broken PCI devices. @@ -1972,10 +2006,6 @@ extern unsigned long pci_cardbus_mem_size; extern u8 pci_dfl_cache_line_size; extern u8 pci_cache_line_size; -extern unsigned long pci_hotplug_io_size; -extern unsigned long pci_hotplug_mem_size; -extern unsigned long pci_hotplug_bus_size; - /* Architecture-specific versions may override these (weak) */ void pcibios_disable_device(struct pci_dev *dev); void pcibios_set_master(struct pci_dev *dev); @@ -2207,7 +2237,7 @@ static inline u8 pci_vpd_srdt_tag(const u8 *srdt) /** * pci_vpd_info_field_size - Extracts the information field length - * @lrdt: Pointer to the beginning of an information field header + * @info_field: Pointer to the beginning of an information field header * * Returns the extracted information field length. 
*/ @@ -2245,10 +2275,6 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, #ifdef CONFIG_OF struct device_node; struct irq_domain; -void pci_set_of_node(struct pci_dev *dev); -void pci_release_of_node(struct pci_dev *dev); -void pci_set_bus_of_node(struct pci_bus *bus); -void pci_release_bus_of_node(struct pci_bus *bus); struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); int pci_parse_request_of_pci_ranges(struct device *dev, struct list_head *resources, @@ -2258,10 +2284,6 @@ int pci_parse_request_of_pci_ranges(struct device *dev, struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); #else /* CONFIG_OF */ -static inline void pci_set_of_node(struct pci_dev *dev) { } -static inline void pci_release_of_node(struct pci_dev *dev) { } -static inline void pci_set_bus_of_node(struct pci_bus *bus) { } -static inline void pci_release_bus_of_node(struct pci_bus *bus) { } static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } static inline int pci_parse_request_of_pci_ranges(struct device *dev, @@ -2375,4 +2397,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); #define pci_notice_ratelimited(pdev, fmt, arg...) \ dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg) +#define pci_info_ratelimited(pdev, fmt, arg...) \ + dev_info_ratelimited(&(pdev)->dev, fmt, ##arg) + #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index f694eb2ca978..b482e42d7153 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -86,114 +86,14 @@ void pci_hp_deregister(struct hotplug_slot *slot); #define pci_hp_initialize(slot, bus, nr, name) \ __pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME) -/* PCI Setting Record (Type 0) */ -struct hpp_type0 { - u32 revision; - u8 cache_line_size; - u8 latency_timer; - u8 enable_serr; - u8 enable_perr; -}; - -/* PCI-X Setting Record (Type 1) */ -struct hpp_type1 { - u32 revision; - u8 max_mem_read; - u8 avg_max_split; - u16 tot_max_split; -}; - -/* PCI Express Setting Record (Type 2) */ -struct hpp_type2 { - u32 revision; - u32 unc_err_mask_and; - u32 unc_err_mask_or; - u32 unc_err_sever_and; - u32 unc_err_sever_or; - u32 cor_err_mask_and; - u32 cor_err_mask_or; - u32 adv_err_cap_and; - u32 adv_err_cap_or; - u16 pci_exp_devctl_and; - u16 pci_exp_devctl_or; - u16 pci_exp_lnkctl_and; - u16 pci_exp_lnkctl_or; - u32 sec_unc_err_sever_and; - u32 sec_unc_err_sever_or; - u32 sec_unc_err_mask_and; - u32 sec_unc_err_mask_or; -}; - -/* - * _HPX PCI Express Setting Record (Type 3) - */ -struct hpx_type3 { - u16 device_type; - u16 function_type; - u16 config_space_location; - u16 pci_exp_cap_id; - u16 pci_exp_cap_ver; - u16 pci_exp_vendor_id; - u16 dvsec_id; - u16 dvsec_rev; - u16 match_offset; - u32 match_mask_and; - u32 match_value; - u16 reg_offset; - u32 reg_mask_and; - u32 reg_mask_or; -}; - -struct hotplug_program_ops { - void (*program_type0)(struct pci_dev *dev, struct hpp_type0 *hpp); - void (*program_type1)(struct pci_dev *dev, struct hpp_type1 *hpp); - void (*program_type2)(struct pci_dev *dev, struct hpp_type2 *hpp); - void (*program_type3)(struct pci_dev *dev, struct hpx_type3 *hpp); -}; - -enum hpx_type3_dev_type { - HPX_TYPE_ENDPOINT = BIT(0), - HPX_TYPE_LEG_END = BIT(1), - HPX_TYPE_RC_END = BIT(2), - HPX_TYPE_RC_EC = BIT(3), - HPX_TYPE_ROOT_PORT = BIT(4), - HPX_TYPE_UPSTREAM = BIT(5), - HPX_TYPE_DOWNSTREAM = BIT(6), - HPX_TYPE_PCI_BRIDGE = BIT(7), - HPX_TYPE_PCIE_BRIDGE = BIT(8), -}; - 
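As a brief aside to the pci.h hunks above, which make pci_alloc_irq_vectors() an unconditional inline wrapper around pci_alloc_irq_vectors_affinity() and keep the PCI_IRQ_LEGACY/MSI/MSIX capability flags next to the new PCI_IRQ_VIRTUAL flag, a minimal consumer sketch may help. The vector count, handler and name below are illustrative placeholders, not taken from the patch, and real code would unwind on failure:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Illustrative only: request up to four vectors of any supported type and
 * wire up a handler for each. */
static int example_setup_irqs(struct pci_dev *pdev, irq_handler_t handler)
{
	int nvec, i, ret;

	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps a vector index to a Linux IRQ number */
		ret = request_irq(pci_irq_vector(pdev, i), handler, 0,
				  "example", pdev);
		if (ret)
			return ret;
	}
	return 0;
}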
-enum hpx_type3_fn_type { - HPX_FN_NORMAL = BIT(0), - HPX_FN_SRIOV_PHYS = BIT(1), - HPX_FN_SRIOV_VIRT = BIT(2), -}; - -enum hpx_type3_cfg_loc { - HPX_CFG_PCICFG = 0, - HPX_CFG_PCIE_CAP = 1, - HPX_CFG_PCIE_CAP_EXT = 2, - HPX_CFG_VEND_CAP = 3, - HPX_CFG_DVSEC = 4, - HPX_CFG_MAX, -}; - #ifdef CONFIG_ACPI #include <linux/acpi.h> -int pci_acpi_program_hp_params(struct pci_dev *dev, - const struct hotplug_program_ops *hp_ops); bool pciehp_is_native(struct pci_dev *bridge); int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge); bool shpchp_is_native(struct pci_dev *bridge); int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); int acpi_pci_detect_ejectable(acpi_handle handle); #else -static inline int pci_acpi_program_hp_params(struct pci_dev *dev, - const struct hotplug_program_ops *hp_ops) -{ - return -ENODEV; -} - static inline int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge) { return 0; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 70e86148cb1e..21a572469a4e 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -548,6 +548,7 @@ #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -1070,7 +1071,6 @@ #define PCI_VENDOR_ID_SGI 0x10a9 #define PCI_DEVICE_ID_SGI_IOC3 0x0003 #define PCI_DEVICE_ID_SGI_LITHIUM 0x1002 -#define PCI_DEVICE_ID_SGI_IOC4 0x100a #define PCI_VENDOR_ID_WINBOND 0x10ad #define PCI_DEVICE_ID_WINBOND_82C105 0x0105 @@ -1112,7 +1112,7 @@ #define PCI_VENDOR_ID_AL 0x10b9 #define PCI_DEVICE_ID_AL_M1533 0x1533 -#define PCI_DEVICE_ID_AL_M1535 0x1535 +#define PCI_DEVICE_ID_AL_M1535 0x1535 #define PCI_DEVICE_ID_AL_M1541 0x1541 #define PCI_DEVICE_ID_AL_M1563 0x1563 #define PCI_DEVICE_ID_AL_M1621 0x1621 @@ -1336,6 +1336,7 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 +#define PCI_DEVICE_ID_NVIDIA_GEFORCE_320M 0x08A0 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85 @@ -1752,7 +1753,7 @@ #define PCI_VENDOR_ID_STALLION 0x124d /* Allied Telesyn */ -#define PCI_VENDOR_ID_AT 0x1259 +#define PCI_VENDOR_ID_AT 0x1259 #define PCI_SUBDEVICE_ID_AT_2700FX 0x2701 #define PCI_SUBDEVICE_ID_AT_2701FX 0x2703 @@ -1950,6 +1951,8 @@ #define PCI_VENDOR_ID_DIGIGRAM 0x1369 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001 #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_SERIAL_SUBSYSTEM 0xc021 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ESE_CAE_SERIAL_SUBSYSTEM 0xc022 #define PCI_VENDOR_ID_KAWASAKI 0x136b #define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 @@ -2131,6 +2134,7 @@ #define PCI_VENDOR_ID_MYRICOM 0x14c1 #define PCI_VENDOR_ID_MEDIATEK 0x14c3 +#define PCI_DEVICE_ID_MEDIATEK_7629 0x7629 #define PCI_VENDOR_ID_TITAN 0x14D2 #define PCI_DEVICE_ID_TITAN_010L 0x8001 @@ -2366,6 +2370,7 @@ #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf +#define PCI_DEVICE_ID_SYNOPSYS_EDDA 0xedda #define PCI_VENDOR_ID_USR 0x16ec @@ -2403,6 +2408,8 @@ #define PCI_DEVICE_ID_RDC_R6061 0x6061 #define PCI_DEVICE_ID_RDC_D1010 0x1010 +#define 
PCI_VENDOR_ID_GLI 0x17a0 + #define PCI_VENDOR_ID_LENOVO 0x17aa #define PCI_VENDOR_ID_QCOM 0x17cb @@ -2550,7 +2557,7 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff -#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define PCI_VENDOR_ID_HUAWEI 0x19e5 #define PCI_VENDOR_ID_NETRONOME 0x19ee #define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 @@ -2568,6 +2575,8 @@ #define PCI_VENDOR_ID_ASMEDIA 0x1b21 +#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 + #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 #define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001 diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index b297cd1cd4f1..7aef0abc194a 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -75,14 +75,21 @@ enum { * operation using percpu_ref_switch_to_percpu(). If initialized * with this flag, the ref will stay in atomic mode until * percpu_ref_switch_to_percpu() is invoked on it. + * Implies ALLOW_REINIT. */ PERCPU_REF_INIT_ATOMIC = 1 << 0, /* * Start dead w/ ref == 0 in atomic mode. Must be revived with - * percpu_ref_reinit() before used. Implies INIT_ATOMIC. + * percpu_ref_reinit() before used. Implies INIT_ATOMIC and + * ALLOW_REINIT. */ PERCPU_REF_INIT_DEAD = 1 << 1, + + /* + * Allow switching from atomic mode to percpu mode. + */ + PERCPU_REF_ALLOW_REINIT = 1 << 2, }; struct percpu_ref { @@ -95,6 +102,7 @@ struct percpu_ref { percpu_ref_func_t *release; percpu_ref_func_t *confirm_switch; bool force_atomic:1; + bool allow_reinit:1; struct rcu_head rcu; }; diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 03cb4b6f842e..3998cdf9cd14 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -17,14 +17,18 @@ struct percpu_rw_semaphore { int readers_block; }; -#define DEFINE_STATIC_PERCPU_RWSEM(name) \ +#define __DEFINE_PERCPU_RWSEM(name, is_static) \ static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \ -static struct percpu_rw_semaphore name = { \ - .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \ +is_static struct percpu_rw_semaphore name = { \ + .rss = __RCU_SYNC_INITIALIZER(name.rss), \ .read_count = &__percpu_rwsem_rc_##name, \ .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ .writer = __RCUWAIT_INITIALIZER(name.writer), \ } +#define DEFINE_PERCPU_RWSEM(name) \ + __DEFINE_PERCPU_RWSEM(name, /* not static */) +#define DEFINE_STATIC_PERCPU_RWSEM(name) \ + __DEFINE_PERCPU_RWSEM(name, static) extern int __percpu_down_read(struct percpu_rw_semaphore *, int); extern void __percpu_up_read(struct percpu_rw_semaphore *); @@ -117,7 +121,7 @@ static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, lock_release(&sem->rw_sem.dep_map, 1, ip); #ifdef CONFIG_RWSEM_SPIN_ON_OWNER if (!read) - sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN; + atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN); #endif } @@ -127,7 +131,7 @@ static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip); #ifdef CONFIG_RWSEM_SPIN_ON_OWNER if (!read) - sem->rw_sem.owner = current; + atomic_long_set(&sem->rw_sem.owner, (long)current); #endif } diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 9909dc0e273a..5e76af742c80 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -105,7 +105,7 @@ extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, int nr_units); extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai); -extern int __init 
pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, +extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr); #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index a9b0ee408fbd..71f525a35ac2 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -171,4 +171,6 @@ void armpmu_free_irq(int irq, int cpu); #endif /* CONFIG_ARM_PMU */ +#define ARMV8_SPE_PDEV_NAME "arm,spe-v1" + #endif /* __ARM_PMU_H__ */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2bca72f3028b..68ccc5b1913b 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -246,6 +246,7 @@ struct perf_event; #define PERF_PMU_CAP_ITRACE 0x20 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 #define PERF_PMU_CAP_NO_EXCLUDE 0x80 +#define PERF_PMU_CAP_AUX_OUTPUT 0x100 /** * struct pmu - generic performance monitoring unit @@ -256,6 +257,7 @@ struct pmu { struct module *module; struct device *dev; const struct attribute_group **attr_groups; + const struct attribute_group **attr_update; const char *name; int type; @@ -290,7 +292,7 @@ struct pmu { * -EBUSY -- @event is for this PMU but PMU temporarily unavailable * -EINVAL -- @event is for this PMU but @event is not valid * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported - * -EACCESS -- @event is for this PMU, @event is valid, but no privilidges + * -EACCES -- @event is for this PMU, @event is valid, but no privileges * * 0 -- @event is for this PMU and valid * @@ -446,6 +448,16 @@ struct pmu { /* optional */ /* + * Check if event can be used for aux_output purposes for + * events of this PMU. + * + * Runs from perf_event_open(). Should return 0 for "no match" + * or non-zero for "match". + */ + int (*aux_output_match) (struct perf_event *event); + /* optional */ + + /* * Filter events for PMU-specific reasons. */ int (*filter_match) (struct perf_event *event); /* optional */ @@ -680,6 +692,9 @@ struct perf_event { struct perf_addr_filter_range *addr_filter_ranges; unsigned long addr_filters_gen; + /* for aux_output events */ + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); struct rcu_head rcu_head; @@ -749,6 +764,11 @@ struct perf_event_context { int nr_stat; int nr_freq; int rotate_disable; + /* + * Set when nr_events != nr_active, except tolerant to events not + * necessary to be active due to scheduling constraints, such as cgroups. 
+ */ + int rotate_necessary; refcount_t refcount; struct task_struct *task; @@ -1049,6 +1069,11 @@ static inline int in_software_context(struct perf_event *event) return event->ctx->pmu->task_ctx_nr == perf_sw_context; } +static inline int is_exclusive_pmu(struct pmu *pmu) +{ + return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; +} + extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 3c202a11a79e..2d9148221e9a 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -66,13 +66,6 @@ static inline phys_addr_t pfn_t_to_phys(pfn_t pfn) return PFN_PHYS(pfn_t_to_pfn(pfn)); } -static inline void *pfn_t_to_virt(pfn_t pfn) -{ - if (pfn_t_has_page(pfn) && !is_device_private_page(pfn_t_to_page(pfn))) - return __va(pfn_t_to_phys(pfn)); - return NULL; -} - static inline pfn_t page_to_pfn_t(struct page *page) { return pfn_to_pfn_t(page_to_pfn(page)); @@ -104,7 +97,7 @@ static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) #endif #endif -#ifdef __HAVE_ARCH_PTE_DEVMAP +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP static inline bool pfn_t_devmap(pfn_t pfn) { const u64 flags = PFN_DEV|PFN_MAP; @@ -122,7 +115,7 @@ pmd_t pmd_mkdevmap(pmd_t pmd); defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) pud_t pud_mkdevmap(pud_t pud); #endif -#endif /* __HAVE_ARCH_PTE_DEVMAP */ +#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */ #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL static inline bool pfn_t_special(pfn_t pfn) diff --git a/include/linux/phy.h b/include/linux/phy.h index 6424586fe2d6..9a0e981df502 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -55,6 +55,9 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini #define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features) #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) +extern const int phy_basic_ports_array[3]; +extern const int phy_fibre_port_array[1]; +extern const int phy_all_ports_features_array[7]; extern const int phy_10_100_features_array[4]; extern const int phy_basic_t1_features_array[2]; extern const int phy_gbit_features_array[2]; @@ -98,6 +101,7 @@ typedef enum { PHY_INTERFACE_MODE_XAUI, /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */ PHY_INTERFACE_MODE_10GKR, + PHY_INTERFACE_MODE_USXGMII, PHY_INTERFACE_MODE_MAX, } phy_interface_t; @@ -173,6 +177,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "xaui"; case PHY_INTERFACE_MODE_10GKR: return "10gbase-kr"; + case PHY_INTERFACE_MODE_USXGMII: + return "usxgmii"; default: return "unknown"; } @@ -180,7 +186,6 @@ static inline const char *phy_modes(phy_interface_t interface) #define PHY_INIT_TIMEOUT 100000 -#define PHY_STATE_TIME 1 #define PHY_FORCE_TIMEOUT 10 #define PHY_MAX_ADDR 32 @@ -193,6 +198,8 @@ static inline const char *phy_modes(phy_interface_t interface) /* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. 
*/ #define MII_ADDR_C45 (1<<30) +#define MII_DEVADDR_C45_SHIFT 16 +#define MII_REGADDR_C45_MASK GENMASK(15, 0) struct device; struct phylink; @@ -290,12 +297,6 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); * - irq or timer will set RUNNING if link comes back * - phy_stop moves to HALTED * - * FORCING: PHY is being configured with forced settings - * - if link is up, move to RUNNING - * - If link is down, we drop to the next highest setting, and - * retry (FORCING) after a timeout - * - phy_stop moves to HALTED - * * RUNNING: PHY is currently up, running, and possibly sending * and/or receiving packets * - irq or timer will set NOLINK if link goes down @@ -312,7 +313,6 @@ enum phy_state { PHY_UP, PHY_RUNNING, PHY_NOLINK, - PHY_FORCING, }; /** @@ -340,8 +340,6 @@ struct phy_c45_device_ids { * loopback_enabled: Set true if this phy has been loopbacked successfully. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. - * link_timeout: The number of timer firings to wait before the - * giving up on the current attempt at acquiring a link * irq: IRQ number of the PHY's interrupt (-1 if none) * phy_timer: The timer for handling the state machine * attached_dev: The attached enet driver's device instance ptr @@ -405,12 +403,12 @@ struct phy_device { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); + /* used with phy_speed_down */ + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); /* Energy efficient ethernet modes which should be prohibited */ u32 eee_broken_modes; - int link_timeout; - #ifdef CONFIG_LED_TRIGGER_PHY struct phy_led_trigger *phy_led_triggers; unsigned int phy_num_led_triggers; @@ -529,6 +527,9 @@ struct phy_driver { */ int (*did_interrupt)(struct phy_device *phydev); + /* Override default interrupt handling */ + int (*handle_interrupt)(struct phy_device *phydev); + /* Clears up any memory if needed */ void (*remove)(struct phy_device *phydev); @@ -666,6 +667,7 @@ size_t phy_speeds(unsigned int *speeds, size_t size, unsigned long *mask); void of_set_phy_supported(struct phy_device *phydev); void of_set_phy_eee_broken(struct phy_device *phydev); +int phy_speed_down_core(struct phy_device *phydev); /** * phy_is_started - Convenience function to check whether PHY is started @@ -676,6 +678,7 @@ static inline bool phy_is_started(struct phy_device *phydev) return phydev->state >= PHY_UP; } +void phy_resolve_aneg_pause(struct phy_device *phydev); void phy_resolve_aneg_linkmode(struct phy_device *phydev); /** @@ -985,6 +988,8 @@ int phy_select_page(struct phy_device *phydev, int page); int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); int phy_read_paged(struct phy_device *phydev, int page, u32 regnum); int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val); +int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set); int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, u16 mask, u16 set); @@ -1065,19 +1070,25 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) 
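The MII_DEVADDR_C45_SHIFT and MII_REGADDR_C45_MASK additions above make explicit how a clause 45 register address is packed into the regnum passed to the mii_bus accessors together with MII_ADDR_C45. A minimal sketch follows; the helper name and the mdiobus_read() call site are illustrative and not part of this patch:

#include <linux/phy.h>

/* Illustrative helper: pack a clause 45 device address and register number
 * into the regnum used by the mii_bus accessors. */
static inline u32 example_c45_regnum(int devad, u16 regnum)
{
	return MII_ADDR_C45 |
	       (devad << MII_DEVADDR_C45_SHIFT) |
	       (regnum & MII_REGADDR_C45_MASK);
}

/* e.g. read register 1 of MMD 1 (PMA/PMD) on the PHY at address 4 */
static int example_read_pma_status(struct mii_bus *bus)
{
	return mdiobus_read(bus, 4, example_c45_regnum(1, 1));
}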
void phy_attached_info(struct phy_device *phydev); /* Clause 22 PHY */ -int genphy_config_init(struct phy_device *phydev); int genphy_read_abilities(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); int genphy_config_eee_advert(struct phy_device *phydev); -int genphy_config_aneg(struct phy_device *phydev); +int __genphy_config_aneg(struct phy_device *phydev, bool changed); int genphy_aneg_done(struct phy_device *phydev); int genphy_update_link(struct phy_device *phydev); +int genphy_read_lpa(struct phy_device *phydev); int genphy_read_status(struct phy_device *phydev); int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); int genphy_loopback(struct phy_device *phydev, bool enable); int genphy_soft_reset(struct phy_device *phydev); + +static inline int genphy_config_aneg(struct phy_device *phydev) +{ + return __genphy_config_aneg(phydev, false); +} + static inline int genphy_no_soft_reset(struct phy_device *phydev) { return 0; @@ -1108,6 +1119,7 @@ int genphy_c45_an_disable_aneg(struct phy_device *phydev); int genphy_c45_read_mdix(struct phy_device *phydev); int genphy_c45_pma_read_abilities(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); +int genphy_c45_config_aneg(struct phy_device *phydev); /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); @@ -1129,6 +1141,7 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); void phy_state_machine(struct work_struct *work); +void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); @@ -1139,6 +1152,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, const struct ethtool_link_ksettings *cmd); int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); void phy_request_interrupt(struct phy_device *phydev); +void phy_free_interrupt(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 1e5d86ebdaeb..52bc8e487ef7 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -11,6 +11,7 @@ struct fixed_phy_status { }; struct device_node; +struct gpio_desc; #if IS_ENABLED(CONFIG_FIXED_PHY) extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 2d2e55dfea94..300ecdb6790a 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -54,6 +54,21 @@ struct phylink_link_state { unsigned int an_complete:1; }; +enum phylink_op_type { + PHYLINK_NETDEV = 0, + PHYLINK_DEV, +}; + +/** + * struct phylink_config - PHYLINK configuration structure + * @dev: a pointer to a struct device associated with the MAC + * @type: operation type of PHYLINK instance + */ +struct phylink_config { + struct device *dev; + enum phylink_op_type type; +}; + /** * struct phylink_mac_ops - MAC operations structure. * @validate: Validate and update the link configuration. 
@@ -66,16 +81,17 @@ struct phylink_link_state { * The individual methods are described more fully below. */ struct phylink_mac_ops { - void (*validate)(struct net_device *ndev, unsigned long *supported, + void (*validate)(struct phylink_config *config, + unsigned long *supported, struct phylink_link_state *state); - int (*mac_link_state)(struct net_device *ndev, + int (*mac_link_state)(struct phylink_config *config, struct phylink_link_state *state); - void (*mac_config)(struct net_device *ndev, unsigned int mode, + void (*mac_config)(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); - void (*mac_an_restart)(struct net_device *ndev); - void (*mac_link_down)(struct net_device *ndev, unsigned int mode, + void (*mac_an_restart)(struct phylink_config *config); + void (*mac_link_down)(struct phylink_config *config, unsigned int mode, phy_interface_t interface); - void (*mac_link_up)(struct net_device *ndev, unsigned int mode, + void (*mac_link_up)(struct phylink_config *config, unsigned int mode, phy_interface_t interface, struct phy_device *phy); }; @@ -83,7 +99,7 @@ struct phylink_mac_ops { #if 0 /* For kernel-doc purposes only. */ /** * validate - Validate and update the link configuration - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @supported: ethtool bitmask for supported link modes. * @state: a pointer to a &struct phylink_link_state. * @@ -93,19 +109,26 @@ struct phylink_mac_ops { * Note that the PHY may be able to transform from one connection * technology to another, so, eg, don't clear 1000BaseX just * because the MAC is unable to BaseX mode. This is more about - * clearing unsupported speeds and duplex settings. + * clearing unsupported speeds and duplex settings. The port modes + * should not be cleared; phylink_set_port_modes() will help with this. * * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode * based on @state->advertising and/or @state->speed and update - * @state->interface accordingly. + * @state->interface accordingly. See phylink_helper_basex_speed(). + * + * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the + * MAC driver to return all supported link modes. + * + * If the @state->interface mode is not supported, then the @supported + * mask must be cleared. */ -void validate(struct net_device *ndev, unsigned long *supported, +void validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state); /** * mac_link_state() - Read the current link state from the hardware - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @state: a pointer to a &struct phylink_link_state. * * Read the current link state from the MAC, reporting the current @@ -114,12 +137,12 @@ void validate(struct net_device *ndev, unsigned long *supported, * negotiation completion state in @state->an_complete, and link * up state in @state->link. */ -int mac_link_state(struct net_device *ndev, +int mac_link_state(struct phylink_config *config, struct phylink_link_state *state); /** * mac_config() - configure the MAC for the selected mode and state - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. * @state: a pointer to a &struct phylink_link_state. 
* @@ -168,18 +191,18 @@ int mac_link_state(struct net_device *ndev, * down. This "update" behaviour is critical to avoid bouncing the * link up status. */ -void mac_config(struct net_device *ndev, unsigned int mode, +void mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); /** * mac_an_restart() - restart 802.3z BaseX autonegotiation - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. */ -void mac_an_restart(struct net_device *ndev); +void mac_an_restart(struct phylink_config *config); /** * mac_link_down() - take the link down - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @mode: link autonegotiation mode * @interface: link &typedef phy_interface_t mode * @@ -188,12 +211,12 @@ void mac_an_restart(struct net_device *ndev); * Energy Efficient Ethernet MAC configuration. Interface type * selection must be done in mac_config(). */ -void mac_link_down(struct net_device *ndev, unsigned int mode, +void mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface); /** * mac_link_up() - allow the link to come up - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @mode: link autonegotiation mode * @interface: link &typedef phy_interface_t mode * @phy: any attached phy @@ -204,13 +227,14 @@ void mac_link_down(struct net_device *ndev, unsigned int mode, * phy_init_eee() and perform appropriate MAC configuration for EEE. * Interface type selection must be done in mac_config(). */ -void mac_link_up(struct net_device *ndev, unsigned int mode, +void mac_link_up(struct phylink_config *config, unsigned int mode, phy_interface_t interface, struct phy_device *phy); #endif -struct phylink *phylink_create(struct net_device *, struct fwnode_handle *, - phy_interface_t iface, const struct phylink_mac_ops *ops); +struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, + phy_interface_t iface, + const struct phylink_mac_ops *ops); void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); diff --git a/include/linux/pid.h b/include/linux/pid.h index 3c8ef5a199ca..9645b1194c98 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -3,6 +3,8 @@ #define _LINUX_PID_H #include <linux/rculist.h> +#include <linux/wait.h> +#include <linux/refcount.h> enum pid_type { @@ -56,10 +58,12 @@ struct upid { struct pid { - atomic_t count; + refcount_t count; unsigned int level; /* lists of tasks that use this pid */ struct hlist_head tasks[PIDTYPE_MAX]; + /* wait queue for pidfd notifications */ + wait_queue_head_t wait_pidfd; struct rcu_head rcu; struct upid numbers[1]; }; @@ -68,10 +72,14 @@ extern struct pid init_struct_pid; extern const struct file_operations pidfd_fops; +struct file; + +extern struct pid *pidfd_pid(const struct file *file); + static inline struct pid *get_pid(struct pid *pid) { if (pid) - atomic_inc(&pid->count); + refcount_inc(&pid->count); return pid; } diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 86720a5a384f..7f8c7d9583d3 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -24,6 +24,7 @@ struct device; #ifdef CONFIG_PINCTRL /* External interface to pin control */ +extern bool pinctrl_gpio_can_use_line(unsigned gpio); extern int pinctrl_gpio_request(unsigned gpio); extern void 
pinctrl_gpio_free(unsigned gpio); extern int pinctrl_gpio_direction_input(unsigned gpio); @@ -61,6 +62,11 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev) #else /* !CONFIG_PINCTRL */ +static inline bool pinctrl_gpio_can_use_line(unsigned gpio) +{ + return true; +} + static inline int pinctrl_gpio_request(unsigned gpio) { return 0; diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 6f260c1d3467..6aeb711f7cd1 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -11,6 +11,12 @@ #ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H #define __LINUX_PINCTRL_PINCONF_GENERIC_H +#include <linux/device.h> +#include <linux/pinctrl/machine.h> + +struct pinctrl_dev; +struct pinctrl_map; + /** * enum pin_config_param - possible pin configuration parameters * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it @@ -54,6 +60,8 @@ * push-pull mode, the argument is ignored. * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current * passed as argument. The argument is in mA. + * @PIN_CONFIG_DRIVE_STRENGTH_UA: the pin will sink or source at most the current + * passed as argument. The argument is in uA. * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, * which means it will wait for signals to settle when reading inputs. The * argument gives the debounce time in usecs. Setting the @@ -111,6 +119,7 @@ enum pin_config_param { PIN_CONFIG_DRIVE_OPEN_SOURCE, PIN_CONFIG_DRIVE_PUSH_PULL, PIN_CONFIG_DRIVE_STRENGTH, + PIN_CONFIG_DRIVE_STRENGTH_UA, PIN_CONFIG_INPUT_DEBOUNCE, PIN_CONFIG_INPUT_ENABLE, PIN_CONFIG_INPUT_SCHMITT, @@ -155,9 +164,6 @@ static inline unsigned long pinconf_to_config_packed(enum pin_config_param param return PIN_CONF_PACKED(param, argument); } -#ifdef CONFIG_GENERIC_PINCONF - -#ifdef CONFIG_DEBUG_FS #define PCONFDUMP(a, b, c, d) { \ .param = a, .display = b, .format = c, .has_arg = d \ } @@ -168,14 +174,6 @@ struct pin_config_item { const char * const format; bool has_arg; }; -#endif /* CONFIG_DEBUG_FS */ - -#ifdef CONFIG_OF - -#include <linux/device.h> -#include <linux/pinctrl/machine.h> -struct pinctrl_dev; -struct pinctrl_map; struct pinconf_generic_params { const char * const property; @@ -220,8 +218,5 @@ static inline int pinconf_generic_dt_node_to_map_all( return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, PIN_MAP_TYPE_INVALID); } -#endif - -#endif /* CONFIG_GENERIC_PINCONF */ #endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */ diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h index 514414a5ad01..f8a8215e9021 100644 --- a/include/linux/pinctrl/pinconf.h +++ b/include/linux/pinctrl/pinconf.h @@ -11,7 +11,7 @@ #ifndef __LINUX_PINCTRL_PINCONF_H #define __LINUX_PINCTRL_PINCONF_H -#ifdef CONFIG_PINCONF +#include <linux/types.h> struct pinctrl_dev; struct seq_file; @@ -64,6 +64,4 @@ struct pinconf_ops { unsigned long config); }; -#endif - #endif /* __LINUX_PINCTRL_PINCONF_H */ diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h index a0e785815a64..635d97e9285e 100644 --- a/include/linux/pinctrl/pinctrl-state.h +++ b/include/linux/pinctrl/pinctrl-state.h @@ -3,6 +3,9 @@ * Standard pin control state definitions */ +#ifndef __LINUX_PINCTRL_PINCTRL_STATE_H +#define __LINUX_PINCTRL_PINCTRL_STATE_H + /** * @PINCTRL_STATE_DEFAULT: the state the pinctrl handle shall be put * into as default, usually this means the pins are up and ready to @@ -31,3 
+34,5 @@ #define PINCTRL_STATE_INIT "init" #define PINCTRL_STATE_IDLE "idle" #define PINCTRL_STATE_SLEEP "sleep" + +#endif /* __LINUX_PINCTRL_PINCTRL_STATE_H */ diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index e429e5d92dd6..7ce23450a1cb 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -11,8 +11,6 @@ #ifndef __LINUX_PINCTRL_PINCTRL_H #define __LINUX_PINCTRL_PINCTRL_H -#ifdef CONFIG_PINCTRL - #include <linux/radix-tree.h> #include <linux/list.h> #include <linux/seq_file.h> @@ -124,6 +122,10 @@ struct pinctrl_ops { * the hardware description * @custom_conf_items: Information how to print @params in debugfs, must be * the same size as the @custom_params, i.e. @num_custom_params + * @link_consumers: If true create a device link between pinctrl and its + * consumers (i.e. the devices requesting pin control states). This is + * sometimes necessary to ascertain the right suspend/resume order for + * example. */ struct pinctrl_desc { const char *name; @@ -138,6 +140,7 @@ struct pinctrl_desc { const struct pinconf_generic_params *custom_params; const struct pin_config_item *custom_conf_items; #endif + bool link_consumers; }; /* External interface to pin controller */ @@ -166,7 +169,6 @@ extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev, extern void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev); -extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin); extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range); extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev, @@ -197,16 +199,5 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np) extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev); extern const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev); extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev); -#else - -struct pinctrl_dev; - -/* Sufficiently stupid default functions when pinctrl is not in use */ -static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin) -{ - return pin >= 0; -} - -#endif /* !CONFIG_PINCTRL */ #endif /* __LINUX_PINCTRL_PINCTRL_H */ diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h index e873ed97d79e..9a647fa5c8f1 100644 --- a/include/linux/pinctrl/pinmux.h +++ b/include/linux/pinctrl/pinmux.h @@ -15,8 +15,6 @@ #include <linux/seq_file.h> #include <linux/pinctrl/pinctrl.h> -#ifdef CONFIG_PINMUX - struct pinctrl_dev; /** @@ -84,6 +82,4 @@ struct pinmux_ops { bool strict; }; -#endif /* CONFIG_PINMUX */ - #endif /* __LINUX_PINCTRL_PINMUX_H */ diff --git a/include/linux/platform_data/cros_ec_chardev.h b/include/linux/platform_data/cros_ec_chardev.h new file mode 100644 index 000000000000..7de8faaf77df --- /dev/null +++ b/include/linux/platform_data/cros_ec_chardev.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ChromeOS EC device interface. + * + * Copyright (C) 2014 Google, Inc. + */ + +#ifndef _UAPI_LINUX_CROS_EC_DEV_H_ +#define _UAPI_LINUX_CROS_EC_DEV_H_ + +#include <linux/bits.h> +#include <linux/ioctl.h> +#include <linux/types.h> + +#include <linux/platform_data/cros_ec_commands.h> + +#define CROS_EC_DEV_VERSION "1.0.0" + +/** + * struct cros_ec_readmem - Struct used to read mapped memory. + * @offset: Within EC_LPC_ADDR_MEMMAP region. + * @bytes: Number of bytes to read. Zero means "read a string" (including '\0') + * At most only EC_MEMMAP_SIZE bytes can be read. 
+ * @buffer: Where to store the result. The ioctl returns the number of bytes + * read or negative on error. + */ +struct cros_ec_readmem { + uint32_t offset; + uint32_t bytes; + uint8_t buffer[EC_MEMMAP_SIZE]; +}; + +#define CROS_EC_DEV_IOC 0xEC +#define CROS_EC_DEV_IOCXCMD _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command) +#define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem) +#define CROS_EC_DEV_IOCEVENTMASK _IO(CROS_EC_DEV_IOC, 2) + +#endif /* _CROS_EC_DEV_H_ */ diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 114614e20e4d..98415686cbfa 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4,17 +4,20 @@ * * Copyright (C) 2012 Google, Inc * - * The ChromeOS EC multi function device is used to mux all the requests - * to the EC device for its multiple features: keyboard controller, - * battery charging and regulator control, firmware update. - * - * NOTE: This file is copied verbatim from the ChromeOS EC Open Source - * project in an attempt to make future updates easy to make. + * NOTE: This file is auto-generated from ChromeOS EC Open Source code from + * https://chromium.googlesource.com/chromiumos/platform/ec/+/master/include/ec_commands.h */ +/* Host communication command constants for Chrome EC */ + #ifndef __CROS_EC_COMMANDS_H #define __CROS_EC_COMMANDS_H + + + +#define BUILD_ASSERT(_cond) + /* * Current version of this protocol * @@ -25,7 +28,7 @@ #define EC_PROTO_VERSION 0x00000002 /* Command version mask */ -#define EC_VER_MASK(version) (1UL << (version)) +#define EC_VER_MASK(version) BIT(version) /* I/O addresses for ACPI commands */ #define EC_LPC_ADDR_ACPI_DATA 0x62 @@ -39,25 +42,28 @@ /* Protocol version 2 */ #define EC_LPC_ADDR_HOST_ARGS 0x800 /* And 0x801, 0x802, 0x803 */ #define EC_LPC_ADDR_HOST_PARAM 0x804 /* For version 2 params; size is - * EC_PROTO2_MAX_PARAM_SIZE */ + * EC_PROTO2_MAX_PARAM_SIZE + */ /* Protocol version 3 */ #define EC_LPC_ADDR_HOST_PACKET 0x800 /* Offset of version 3 packet */ #define EC_LPC_HOST_PACKET_SIZE 0x100 /* Max size of version 3 packet */ -/* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff - * and they tell the kernel that so we have to think of it as two parts. */ +/* + * The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff + * and they tell the kernel that so we have to think of it as two parts. 
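The new cros_ec_chardev.h header above defines the ioctl interface of the EC character device. A hedged userspace sketch of CROS_EC_DEV_IOCRDMEM follows; it assumes the device node is /dev/cros_ec, and since the header lives under include/linux/platform_data/ the definitions are copied into the example rather than included:

/* Userspace sketch; all EC definitions below are copied from
 * cros_ec_chardev.h / cros_ec_commands.h for illustration. */
#include <fcntl.h>
#include <linux/ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define EC_MEMMAP_SIZE		255
#define EC_MEMMAP_BATT_VOLT	0x40	/* battery present voltage */

struct cros_ec_readmem {
	uint32_t offset;
	uint32_t bytes;
	uint8_t buffer[EC_MEMMAP_SIZE];
};

#define CROS_EC_DEV_IOC		0xEC
#define CROS_EC_DEV_IOCRDMEM	_IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem)

int main(void)
{
	struct cros_ec_readmem rd;
	uint32_t volt;
	int fd, ret;

	fd = open("/dev/cros_ec", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&rd, 0, sizeof(rd));
	rd.offset = EC_MEMMAP_BATT_VOLT;
	rd.bytes = sizeof(volt);

	ret = ioctl(fd, CROS_EC_DEV_IOCRDMEM, &rd);	/* returns bytes read */
	if (ret == (int)sizeof(volt)) {
		memcpy(&volt, rd.buffer, sizeof(volt));
		printf("battery voltage: %u\n", volt);
	}

	close(fd);
	return 0;
}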
+ */ #define EC_HOST_CMD_REGION0 0x800 #define EC_HOST_CMD_REGION1 0x880 #define EC_HOST_CMD_REGION_SIZE 0x80 /* EC command register bit functions */ -#define EC_LPC_CMDR_DATA (1 << 0) /* Data ready for host to read */ -#define EC_LPC_CMDR_PENDING (1 << 1) /* Write pending to EC */ -#define EC_LPC_CMDR_BUSY (1 << 2) /* EC is busy processing a command */ -#define EC_LPC_CMDR_CMD (1 << 3) /* Last host write was a command */ -#define EC_LPC_CMDR_ACPI_BRST (1 << 4) /* Burst mode (not used) */ -#define EC_LPC_CMDR_SCI (1 << 5) /* SCI event is pending */ -#define EC_LPC_CMDR_SMI (1 << 6) /* SMI event is pending */ +#define EC_LPC_CMDR_DATA BIT(0) /* Data ready for host to read */ +#define EC_LPC_CMDR_PENDING BIT(1) /* Write pending to EC */ +#define EC_LPC_CMDR_BUSY BIT(2) /* EC is busy processing a command */ +#define EC_LPC_CMDR_CMD BIT(3) /* Last host write was a command */ +#define EC_LPC_CMDR_ACPI_BRST BIT(4) /* Burst mode (not used) */ +#define EC_LPC_CMDR_SCI BIT(5) /* SCI event is pending */ +#define EC_LPC_CMDR_SMI BIT(6) /* SMI event is pending */ #define EC_LPC_ADDR_MEMMAP 0x900 #define EC_MEMMAP_SIZE 255 /* ACPI IO buffer max is 255 bytes */ @@ -77,13 +83,15 @@ /* Unused 0x28 - 0x2f */ #define EC_MEMMAP_SWITCHES 0x30 /* 8 bits */ /* Unused 0x31 - 0x33 */ -#define EC_MEMMAP_HOST_EVENTS 0x34 /* 32 bits */ -/* Reserve 0x38 - 0x3f for additional host event-related stuff */ -/* Battery values are all 32 bits */ +#define EC_MEMMAP_HOST_EVENTS 0x34 /* 64 bits */ +/* Battery values are all 32 bits, unless otherwise noted. */ #define EC_MEMMAP_BATT_VOLT 0x40 /* Battery Present Voltage */ #define EC_MEMMAP_BATT_RATE 0x44 /* Battery Present Rate */ #define EC_MEMMAP_BATT_CAP 0x48 /* Battery Remaining Capacity */ -#define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, defined below */ +#define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, see below (8-bit) */ +#define EC_MEMMAP_BATT_COUNT 0x4d /* Battery Count (8-bit) */ +#define EC_MEMMAP_BATT_INDEX 0x4e /* Current Battery Data Index (8-bit) */ +/* Unused 0x4f */ #define EC_MEMMAP_BATT_DCAP 0x50 /* Battery Design Capacity */ #define EC_MEMMAP_BATT_DVLT 0x54 /* Battery Design Voltage */ #define EC_MEMMAP_BATT_LFCC 0x58 /* Battery Last Full Charge Capacity */ @@ -97,15 +105,24 @@ /* Unused 0x84 - 0x8f */ #define EC_MEMMAP_ACC_STATUS 0x90 /* Accelerometer status (8 bits )*/ /* Unused 0x91 */ -#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometer data 0x92 - 0x9f */ +#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometers data 0x92 - 0x9f */ +/* 0x92: Lid Angle if available, LID_ANGLE_UNRELIABLE otherwise */ +/* 0x94 - 0x99: 1st Accelerometer */ +/* 0x9a - 0x9f: 2nd Accelerometer */ #define EC_MEMMAP_GYRO_DATA 0xa0 /* Gyroscope data 0xa0 - 0xa5 */ -/* Unused 0xa6 - 0xfe (remember, 0xff is NOT part of the memmap region) */ +/* Unused 0xa6 - 0xdf */ +/* + * ACPI is unable to access memory mapped data at or above this offset due to + * limitations of the ACPI protocol. Do not place data in the range 0xe0 - 0xfe + * which might be needed by ACPI. + */ +#define EC_MEMMAP_NO_ACPI 0xe0 /* Define the format of the accelerometer mapped memory status byte. 
*/ #define EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK 0x0f -#define EC_MEMMAP_ACC_STATUS_BUSY_BIT (1 << 4) -#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT (1 << 7) +#define EC_MEMMAP_ACC_STATUS_BUSY_BIT BIT(4) +#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT BIT(7) /* Number of temp sensors at EC_MEMMAP_TEMP_SENSOR */ #define EC_TEMP_SENSOR_ENTRIES 16 @@ -149,6 +166,8 @@ #define EC_BATT_FLAG_DISCHARGING 0x04 #define EC_BATT_FLAG_CHARGING 0x08 #define EC_BATT_FLAG_LEVEL_CRITICAL 0x10 +/* Set if some of the static/dynamic data is invalid (or outdated). */ +#define EC_BATT_FLAG_INVALID_DATA 0x20 /* Switch flags at EC_MEMMAP_SWITCHES */ #define EC_SWITCH_LID_OPEN 0x01 @@ -174,20 +193,242 @@ #define EC_WIRELESS_SWITCH_WWAN 0x04 /* WWAN power */ #define EC_WIRELESS_SWITCH_WLAN_POWER 0x08 /* WLAN power */ +/*****************************************************************************/ +/* + * ACPI commands + * + * These are valid ONLY on the ACPI command/data port. + */ + +/* + * ACPI Read Embedded Controller + * + * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). + * + * Use the following sequence: + * + * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write address to EC_LPC_ADDR_ACPI_DATA + * - Wait for EC_LPC_CMDR_DATA bit to set + * - Read value from EC_LPC_ADDR_ACPI_DATA + */ +#define EC_CMD_ACPI_READ 0x0080 + +/* + * ACPI Write Embedded Controller + * + * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). + * + * Use the following sequence: + * + * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write address to EC_LPC_ADDR_ACPI_DATA + * - Wait for EC_LPC_CMDR_PENDING bit to clear + * - Write value to EC_LPC_ADDR_ACPI_DATA + */ +#define EC_CMD_ACPI_WRITE 0x0081 + +/* + * ACPI Burst Enable Embedded Controller + * + * This enables burst mode on the EC to allow the host to issue several + * commands back-to-back. While in this mode, writes to mapped multi-byte + * data are locked out to ensure data consistency. + */ +#define EC_CMD_ACPI_BURST_ENABLE 0x0082 + +/* + * ACPI Burst Disable Embedded Controller + * + * This disables burst mode on the EC and stops preventing EC writes to mapped + * multi-byte data. + */ +#define EC_CMD_ACPI_BURST_DISABLE 0x0083 + +/* + * ACPI Query Embedded Controller + * + * This clears the lowest-order bit in the currently pending host events, and + * sets the result code to the 1-based index of the bit (event 0x00000001 = 1, + * event 0x80000000 = 32), or 0 if no event was pending. + */ +#define EC_CMD_ACPI_QUERY_EVENT 0x0084 + +/* Valid addresses in ACPI memory space, for read/write commands */ + +/* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */ +#define EC_ACPI_MEM_VERSION 0x00 +/* + * Test location; writing value here updates test compliment byte to (0xff - + * value). + */ +#define EC_ACPI_MEM_TEST 0x01 +/* Test compliment; writes here are ignored. */ +#define EC_ACPI_MEM_TEST_COMPLIMENT 0x02 + +/* Keyboard backlight brightness percent (0 - 100) */ +#define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03 +/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */ +#define EC_ACPI_MEM_FAN_DUTY 0x04 + +/* + * DPTF temp thresholds. Any of the EC's temp sensors can have up to two + * independent thresholds attached to them. The current value of the ID + * register determines which sensor is affected by the THRESHOLD and COMMIT + * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme + * as the memory-mapped sensors. 
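The EC_CMD_ACPI_READ sequence spelled out above translates almost directly into port I/O. A minimal sketch, with no timeouts or locking, is shown here; it assumes the status bits are read back from the command/status port (EC_LPC_ADDR_ACPI_CMD), and the function name is illustrative:

#include <linux/types.h>
#include <asm/io.h>
#include <linux/platform_data/cros_ec_commands.h>

/* Illustrative only: read one byte of the EC's ACPI memory space using the
 * EC_CMD_ACPI_READ sequence described above. */
static u8 example_ec_acpi_read(u8 offset)
{
	outb(EC_CMD_ACPI_READ, EC_LPC_ADDR_ACPI_CMD);
	while (inb(EC_LPC_ADDR_ACPI_CMD) & EC_LPC_CMDR_PENDING)
		;	/* wait for the EC to consume the command */

	outb(offset, EC_LPC_ADDR_ACPI_DATA);
	while (!(inb(EC_LPC_ADDR_ACPI_CMD) & EC_LPC_CMDR_DATA))
		;	/* wait for the EC to post the result byte */

	return inb(EC_LPC_ADDR_ACPI_DATA);
}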
The COMMIT register applies those settings. + * + * The spec does not mandate any way to read back the threshold settings + * themselves, but when a threshold is crossed the AP needs a way to determine + * which sensor(s) are responsible. Each reading of the ID register clears and + * returns one sensor ID that has crossed one of its threshold (in either + * direction) since the last read. A value of 0xFF means "no new thresholds + * have tripped". Setting or enabling the thresholds for a sensor will clear + * the unread event count for that sensor. + */ +#define EC_ACPI_MEM_TEMP_ID 0x05 +#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06 +#define EC_ACPI_MEM_TEMP_COMMIT 0x07 +/* + * Here are the bits for the COMMIT register: + * bit 0 selects the threshold index for the chosen sensor (0/1) + * bit 1 enables/disables the selected threshold (0 = off, 1 = on) + * Each write to the commit register affects one threshold. + */ +#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK BIT(0) +#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK BIT(1) +/* + * Example: + * + * Set the thresholds for sensor 2 to 50 C and 60 C: + * write 2 to [0x05] -- select temp sensor 2 + * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET + * write 0x2 to [0x07] -- enable threshold 0 with this value + * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET + * write 0x3 to [0x07] -- enable threshold 1 with this value + * + * Disable the 60 C threshold, leaving the 50 C threshold unchanged: + * write 2 to [0x05] -- select temp sensor 2 + * write 0x1 to [0x07] -- disable threshold 1 + */ + +/* DPTF battery charging current limit */ +#define EC_ACPI_MEM_CHARGING_LIMIT 0x08 + +/* Charging limit is specified in 64 mA steps */ +#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64 +/* Value to disable DPTF battery charging limit */ +#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff + +/* + * Report device orientation + * Bits Definition + * 3:1 Device DPTF Profile Number (DDPN) + * 0 = Reserved for backward compatibility (indicates no valid + * profile number. Host should fall back to using TBMD). + * 1..7 = DPTF Profile number to indicate to host which table needs + * to be loaded. + * 0 Tablet Mode Device Indicator (TBMD) + */ +#define EC_ACPI_MEM_DEVICE_ORIENTATION 0x09 +#define EC_ACPI_MEM_TBMD_SHIFT 0 +#define EC_ACPI_MEM_TBMD_MASK 0x1 +#define EC_ACPI_MEM_DDPN_SHIFT 1 +#define EC_ACPI_MEM_DDPN_MASK 0x7 + +/* + * Report device features. Uses the same format as the host command, except: + * + * bit 0 (EC_FEATURE_LIMITED) changes meaning from "EC code has a limited set + * of features", which is of limited interest when the system is already + * interpreting ACPI bytecode, to "EC_FEATURES[0-7] is not supported". Since + * these are supported, it defaults to 0. + * This allows detecting the presence of this field since older versions of + * the EC codebase would simply return 0xff to that unknown address. Check + * FEATURES0 != 0xff (or FEATURES0[0] == 0) to make sure that the other bits + * are valid. + */ +#define EC_ACPI_MEM_DEVICE_FEATURES0 0x0a +#define EC_ACPI_MEM_DEVICE_FEATURES1 0x0b +#define EC_ACPI_MEM_DEVICE_FEATURES2 0x0c +#define EC_ACPI_MEM_DEVICE_FEATURES3 0x0d +#define EC_ACPI_MEM_DEVICE_FEATURES4 0x0e +#define EC_ACPI_MEM_DEVICE_FEATURES5 0x0f +#define EC_ACPI_MEM_DEVICE_FEATURES6 0x10 +#define EC_ACPI_MEM_DEVICE_FEATURES7 0x11 + +#define EC_ACPI_MEM_BATTERY_INDEX 0x12 + +/* + * USB Port Power. Each bit indicates whether the corresponding USB ports' power + * is enabled (1) or disabled (0). + * bit 0 USB port ID 0 + * ... 
+ * bit 7 USB port ID 7 + */ +#define EC_ACPI_MEM_USB_PORT_POWER 0x13 + +/* + * ACPI addresses 0x20 - 0xff map to EC_MEMMAP offset 0x00 - 0xdf. This data + * is read-only from the AP. Added in EC_ACPI_MEM_VERSION 2. + */ +#define EC_ACPI_MEM_MAPPED_BEGIN 0x20 +#define EC_ACPI_MEM_MAPPED_SIZE 0xe0 + +/* Current version of ACPI memory address space */ +#define EC_ACPI_MEM_VERSION_CURRENT 2 + + /* * This header file is used in coreboot both in C and ACPI code. The ACPI code * is pre-processed to handle constants but the ASL compiler is unable to * handle actual C code so keep it separate. */ -#ifndef __ACPI__ + + +/* + * Attributes for EC request and response packets. Just defining __packed + * results in inefficient assembly code on ARM, if the structure is actually + * 32-bit aligned, as it should be for all buffers. + * + * Be very careful when adding these to existing structures. They will round + * up the structure size to the specified boundary. + * + * Also be very careful to make that if a structure is included in some other + * parent structure that the alignment will still be true given the packing of + * the parent structure. This is particularly important if the sub-structure + * will be passed as a pointer to another function, since that function will + * not know about the misaligment caused by the parent structure's packing. + * + * Also be very careful using __packed - particularly when nesting non-packed + * structures inside packed ones. In fact, DO NOT use __packed directly; + * always use one of these attributes. + * + * Once everything is annotated properly, the following search strings should + * not return ANY matches in this file other than right here: + * + * "__packed" - generates inefficient code; all sub-structs must also be packed + * + * "struct [^_]" - all structs should be annotated, except for structs that are + * members of other structs/unions (and their original declarations should be + * annotated). + */ /* - * Define __packed if someone hasn't beat us to it. Linux kernel style - * checking prefers __packed over __attribute__((packed)). + * Packed structures make no assumption about alignment, so they do inefficient + * byte-wise reads. */ -#ifndef __packed -#define __packed __attribute__((packed)) -#endif +#define __ec_align1 __packed +#define __ec_align2 __packed +#define __ec_align4 __packed +#define __ec_align_size1 __packed +#define __ec_align_offset1 __packed +#define __ec_align_offset2 __packed +#define __ec_todo_packed __packed +#define __ec_todo_unpacked + /* LPC command status byte masks */ /* EC has written a byte in the data register and host hasn't read it yet */ @@ -198,7 +439,7 @@ #define EC_LPC_STATUS_PROCESSING 0x04 /* Last write to EC was a command, not data */ #define EC_LPC_STATUS_LAST_CMD 0x08 -/* EC is in burst mode. Unsupported by Chrome EC, so this bit is never set */ +/* EC is in burst mode */ #define EC_LPC_STATUS_BURST_MODE 0x10 /* SCI event is pending (requesting SCI query) */ #define EC_LPC_STATUS_SCI_PENDING 0x20 @@ -214,7 +455,10 @@ #define EC_LPC_STATUS_BUSY_MASK \ (EC_LPC_STATUS_FROM_HOST | EC_LPC_STATUS_PROCESSING) -/* Host command response codes */ +/* + * Host command response codes (16-bit). Note that response codes should be + * stored in a uint16_t rather than directly in a value of this type. 
+ */ enum ec_status { EC_RES_SUCCESS = 0, EC_RES_INVALID_COMMAND = 1, @@ -230,7 +474,13 @@ enum ec_status { EC_RES_OVERFLOW = 11, /* Table / data overflow */ EC_RES_INVALID_HEADER = 12, /* Header contains invalid data */ EC_RES_REQUEST_TRUNCATED = 13, /* Didn't get the entire request */ - EC_RES_RESPONSE_TOO_BIG = 14 /* Response was too big to handle */ + EC_RES_RESPONSE_TOO_BIG = 14, /* Response was too big to handle */ + EC_RES_BUS_ERROR = 15, /* Communications bus error */ + EC_RES_BUSY = 16, /* Up but too busy. Should retry */ + EC_RES_INVALID_HEADER_VERSION = 17, /* Header version invalid */ + EC_RES_INVALID_HEADER_CRC = 18, /* Header CRC invalid */ + EC_RES_INVALID_DATA_CRC = 19, /* Data CRC invalid */ + EC_RES_DUP_UNAVAILABLE = 20, /* Can't resend response */ }; /* @@ -250,7 +500,8 @@ enum host_event_code { EC_HOST_EVENT_BATTERY_CRITICAL = 7, EC_HOST_EVENT_BATTERY = 8, EC_HOST_EVENT_THERMAL_THRESHOLD = 9, - EC_HOST_EVENT_THERMAL_OVERLOAD = 10, + /* Event generated by a device attached to the EC */ + EC_HOST_EVENT_DEVICE = 10, EC_HOST_EVENT_THERMAL = 11, EC_HOST_EVENT_USB_CHARGER = 12, EC_HOST_EVENT_KEY_PRESSED = 13, @@ -277,15 +528,34 @@ enum host_event_code { EC_HOST_EVENT_HANG_DETECT = 20, /* Hang detect logic detected a hang and warm rebooted the AP */ EC_HOST_EVENT_HANG_REBOOT = 21, + /* PD MCU triggering host event */ EC_HOST_EVENT_PD_MCU = 22, - /* EC desires to change state of host-controlled USB mux */ - EC_HOST_EVENT_USB_MUX = 28, + /* Battery Status flags have changed */ + EC_HOST_EVENT_BATTERY_STATUS = 23, + + /* EC encountered a panic, triggering a reset */ + EC_HOST_EVENT_PANIC = 24, + + /* Keyboard fastboot combo has been pressed */ + EC_HOST_EVENT_KEYBOARD_FASTBOOT = 25, /* EC RTC event occurred */ EC_HOST_EVENT_RTC = 26, + /* Emulate MKBP event */ + EC_HOST_EVENT_MKBP = 27, + + /* EC desires to change state of host-controlled USB mux */ + EC_HOST_EVENT_USB_MUX = 28, + + /* TABLET/LAPTOP mode or detachable base attach/detach event */ + EC_HOST_EVENT_MODE_CHANGE = 29, + + /* Keyboard recovery combo with hardware reinitialization */ + EC_HOST_EVENT_KEYBOARD_RECOVERY_HW_REINIT = 30, + /* * The high bit of the event mask is not used as a host event code. If * it reads back as set, then the entire event mask should be @@ -296,7 +566,7 @@ enum host_event_code { EC_HOST_EVENT_INVALID = 32 }; /* Host event mask */ -#define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1)) +#define EC_HOST_EVENT_MASK(event_code) BIT_ULL((event_code) - 1) /** * struct ec_lpc_host_args - Arguments at EC_LPC_ADDR_HOST_ARGS @@ -311,7 +581,7 @@ struct ec_lpc_host_args { uint8_t command_version; uint8_t data_size; uint8_t checksum; -} __packed; +} __ec_align4; /* Flags for ec_lpc_host_args.flags */ /* @@ -321,7 +591,7 @@ struct ec_lpc_host_args { * If EC gets a command and this flag is not set, this is an old-style command. * Command version is 0 and params from host are at EC_LPC_ADDR_OLD_PARAM with * unknown length. EC must respond with an old-style response (that is, - * withouth setting EC_HOST_ARGS_FLAG_TO_HOST). + * without setting EC_HOST_ARGS_FLAG_TO_HOST). 
*/ #define EC_HOST_ARGS_FLAG_FROM_HOST 0x01 /* @@ -482,7 +752,7 @@ struct ec_host_request { uint8_t command_version; uint8_t reserved; uint16_t data_len; -} __packed; +} __ec_align4; #define EC_HOST_RESPONSE_VERSION 3 @@ -501,18 +771,151 @@ struct ec_host_response { uint16_t result; uint16_t data_len; uint16_t reserved; -} __packed; +} __ec_align4; + +/*****************************************************************************/ + +/* + * Host command protocol V4. + * + * Packets always start with a request or response header. They are followed + * by data_len bytes of data. If the data_crc_present flag is set, the data + * bytes are followed by a CRC-8 of that data, using using x^8 + x^2 + x + 1 + * polynomial. + * + * Host algorithm when sending a request q: + * + * 101) tries_left=(some value, e.g. 3); + * 102) q.seq_num++ + * 103) q.seq_dup=0 + * 104) Calculate q.header_crc. + * 105) Send request q to EC. + * 106) Wait for response r. Go to 201 if received or 301 if timeout. + * + * 201) If r.struct_version != 4, go to 301. + * 202) If r.header_crc mismatches calculated CRC for r header, go to 301. + * 203) If r.data_crc_present and r.data_crc mismatches, go to 301. + * 204) If r.seq_num != q.seq_num, go to 301. + * 205) If r.seq_dup == q.seq_dup, return success. + * 207) If r.seq_dup == 1, go to 301. + * 208) Return error. + * + * 301) If --tries_left <= 0, return error. + * 302) If q.seq_dup == 1, go to 105. + * 303) q.seq_dup = 1 + * 304) Go to 104. + * + * EC algorithm when receiving a request q. + * EC has response buffer r, error buffer e. + * + * 101) If q.struct_version != 4, set e.result = EC_RES_INVALID_HEADER_VERSION + * and go to 301 + * 102) If q.header_crc mismatches calculated CRC, set e.result = + * EC_RES_INVALID_HEADER_CRC and go to 301 + * 103) If q.data_crc_present, calculate data CRC. If that mismatches the CRC + * byte at the end of the packet, set e.result = EC_RES_INVALID_DATA_CRC + * and go to 301. + * 104) If q.seq_dup == 0, go to 201. + * 105) If q.seq_num != r.seq_num, go to 201. + * 106) If q.seq_dup == r.seq_dup, go to 205, else go to 203. + * + * 201) Process request q into response r. + * 202) r.seq_num = q.seq_num + * 203) r.seq_dup = q.seq_dup + * 204) Calculate r.header_crc + * 205) If r.data_len > 0 and data is no longer available, set e.result = + * EC_RES_DUP_UNAVAILABLE and go to 301. + * 206) Send response r. + * + * 301) e.seq_num = q.seq_num + * 302) e.seq_dup = q.seq_dup + * 303) Calculate e.header_crc. + * 304) Send error response e. 
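The CRC-8 referred to throughout the V4 protocol description uses the x^8 + x^2 + x + 1 polynomial (0x07). A minimal bitwise sketch, assuming a zero initial value and no final XOR (the function name is illustrative; it is not part of this header):

static uint8_t ec_crc8(const uint8_t *data, int len)
{
	uint8_t crc = 0;
	int i, j;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (j = 0; j < 8; j++) {
			if (crc & 0x80)
				crc = (crc << 1) ^ 0x07; /* x^8 + x^2 + x + 1 */
			else
				crc <<= 1;
		}
	}
	return crc;
}

Under that assumption, header_crc would cover every header byte that precedes it, and the optional data CRC would cover the data_len payload bytes.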
+ */ + +/* Version 4 request from host */ +struct ec_host_request4 { + /* + * bits 0-3: struct_version: Structure version (=4) + * bit 4: is_response: Is response (=0) + * bits 5-6: seq_num: Sequence number + * bit 7: seq_dup: Sequence duplicate flag + */ + uint8_t fields0; + + /* + * bits 0-4: command_version: Command version + * bits 5-6: Reserved (set 0, ignore on read) + * bit 7: data_crc_present: Is data CRC present after data + */ + uint8_t fields1; + + /* Command code (EC_CMD_*) */ + uint16_t command; + + /* Length of data which follows this header (not including data CRC) */ + uint16_t data_len; + + /* Reserved (set 0, ignore on read) */ + uint8_t reserved; + + /* CRC-8 of above fields, using x^8 + x^2 + x + 1 polynomial */ + uint8_t header_crc; +} __ec_align4; + +/* Version 4 response from EC */ +struct ec_host_response4 { + /* + * bits 0-3: struct_version: Structure version (=4) + * bit 4: is_response: Is response (=1) + * bits 5-6: seq_num: Sequence number + * bit 7: seq_dup: Sequence duplicate flag + */ + uint8_t fields0; + + /* + * bits 0-6: Reserved (set 0, ignore on read) + * bit 7: data_crc_present: Is data CRC present after data + */ + uint8_t fields1; + + /* Result code (EC_RES_*) */ + uint16_t result; + + /* Length of data which follows this header (not including data CRC) */ + uint16_t data_len; + + /* Reserved (set 0, ignore on read) */ + uint8_t reserved; + + /* CRC-8 of above fields, using x^8 + x^2 + x + 1 polynomial */ + uint8_t header_crc; +} __ec_align4; + +/* Fields in fields0 byte */ +#define EC_PACKET4_0_STRUCT_VERSION_MASK 0x0f +#define EC_PACKET4_0_IS_RESPONSE_MASK 0x10 +#define EC_PACKET4_0_SEQ_NUM_SHIFT 5 +#define EC_PACKET4_0_SEQ_NUM_MASK 0x60 +#define EC_PACKET4_0_SEQ_DUP_MASK 0x80 + +/* Fields in fields1 byte */ +#define EC_PACKET4_1_COMMAND_VERSION_MASK 0x1f /* (request only) */ +#define EC_PACKET4_1_DATA_CRC_PRESENT_MASK 0x80 /*****************************************************************************/ /* * Notes on commands: * * Each command is an 16-bit command value. Commands which take params or - * return response data specify structs for that data. If no struct is + * return response data specify structures for that data. If no structure is * specified, the command does not input or output data, respectively. * Parameter/response length is implicit in the structs. Some underlying * communication protocols (I2C, SPI) may add length or checksum headers, but * those are implementation-dependent and not defined here. + * + * All commands MUST be #defined to be 4-digit UPPER CASE hex values + * (e.g., 0x00AB, not 0xab) for CONFIG_HOSTCMD_SECTION_SORTED to work. */ /*****************************************************************************/ @@ -522,7 +925,7 @@ struct ec_host_response { * Get protocol version, used to deal with non-backward compatible protocol * changes. */ -#define EC_CMD_PROTO_VERSION 0x00 +#define EC_CMD_PROTO_VERSION 0x0000 /** * struct ec_response_proto_version - Response to the proto version command. @@ -530,13 +933,13 @@ struct ec_host_response { */ struct ec_response_proto_version { uint32_t version; -} __packed; +} __ec_align4; /* * Hello. This is a simple command to test the EC is responsive to * commands. */ -#define EC_CMD_HELLO 0x01 +#define EC_CMD_HELLO 0x0001 /** * struct ec_params_hello - Parameters to the hello command. @@ -544,7 +947,7 @@ struct ec_response_proto_version { */ struct ec_params_hello { uint32_t in_data; -} __packed; +} __ec_align4; /** * struct ec_response_hello - Response to the hello command. 
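A minimal sketch of packing and unpacking the fields0 byte with the EC_PACKET4_0_* masks defined above (helper names are illustrative, not part of this header):

static void ec_packet4_fill_request_fields0(struct ec_host_request4 *rq,
					    uint8_t seq_num, uint8_t seq_dup)
{
	/* struct_version = 4, is_response = 0 for requests. */
	rq->fields0 = (4 & EC_PACKET4_0_STRUCT_VERSION_MASK) |
		      ((seq_num << EC_PACKET4_0_SEQ_NUM_SHIFT) &
		       EC_PACKET4_0_SEQ_NUM_MASK) |
		      (seq_dup ? EC_PACKET4_0_SEQ_DUP_MASK : 0);
}

static uint8_t ec_packet4_seq_num(uint8_t fields0)
{
	return (fields0 & EC_PACKET4_0_SEQ_NUM_MASK) >>
	       EC_PACKET4_0_SEQ_NUM_SHIFT;
}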
@@ -552,10 +955,10 @@ struct ec_params_hello { */ struct ec_response_hello { uint32_t out_data; -} __packed; +} __ec_align4; /* Get version number */ -#define EC_CMD_GET_VERSION 0x02 +#define EC_CMD_GET_VERSION 0x0002 enum ec_current_image { EC_IMAGE_UNKNOWN = 0, @@ -575,10 +978,10 @@ struct ec_response_get_version { char version_string_rw[32]; char reserved[32]; uint32_t current_image; -} __packed; +} __ec_align4; /* Read test */ -#define EC_CMD_READ_TEST 0x03 +#define EC_CMD_READ_TEST 0x0003 /** * struct ec_params_read_test - Parameters for the read test command. @@ -588,7 +991,7 @@ struct ec_response_get_version { struct ec_params_read_test { uint32_t offset; uint32_t size; -} __packed; +} __ec_align4; /** * struct ec_response_read_test - Response to the read test command. @@ -596,17 +999,17 @@ struct ec_params_read_test { */ struct ec_response_read_test { uint32_t data[32]; -} __packed; +} __ec_align4; /* * Get build information * * Response is null-terminated string. */ -#define EC_CMD_GET_BUILD_INFO 0x04 +#define EC_CMD_GET_BUILD_INFO 0x0004 /* Get chip info */ -#define EC_CMD_GET_CHIP_INFO 0x05 +#define EC_CMD_GET_CHIP_INFO 0x0005 /** * struct ec_response_get_chip_info - Response to the get chip info command. @@ -618,10 +1021,10 @@ struct ec_response_get_chip_info { char vendor[32]; char name[32]; char revision[32]; -} __packed; +} __ec_align4; /* Get board HW version */ -#define EC_CMD_GET_BOARD_VERSION 0x06 +#define EC_CMD_GET_BOARD_VERSION 0x0006 /** * struct ec_response_board_version - Response to the board version command. @@ -629,7 +1032,7 @@ struct ec_response_get_chip_info { */ struct ec_response_board_version { uint16_t board_version; -} __packed; +} __ec_align2; /* * Read memory-mapped data. @@ -639,7 +1042,7 @@ struct ec_response_board_version { * * Response is params.size bytes of data. */ -#define EC_CMD_READ_MEMMAP 0x07 +#define EC_CMD_READ_MEMMAP 0x0007 /** * struct ec_params_read_memmap - Parameters for the read memory map command. @@ -649,10 +1052,10 @@ struct ec_response_board_version { struct ec_params_read_memmap { uint8_t offset; uint8_t size; -} __packed; +} __ec_align1; /* Read versions supported for a command */ -#define EC_CMD_GET_CMD_VERSIONS 0x08 +#define EC_CMD_GET_CMD_VERSIONS 0x0008 /** * struct ec_params_get_cmd_versions - Parameters for the get command versions. @@ -660,7 +1063,7 @@ struct ec_params_read_memmap { */ struct ec_params_get_cmd_versions { uint8_t cmd; -} __packed; +} __ec_align1; /** * struct ec_params_get_cmd_versions_v1 - Parameters for the get command @@ -669,7 +1072,7 @@ struct ec_params_get_cmd_versions { */ struct ec_params_get_cmd_versions_v1 { uint16_t cmd; -} __packed; +} __ec_align2; /** * struct ec_response_get_cmd_version - Response to the get command versions. @@ -678,20 +1081,20 @@ struct ec_params_get_cmd_versions_v1 { */ struct ec_response_get_cmd_versions { uint32_t version_mask; -} __packed; +} __ec_align4; /* - * Check EC communcations status (busy). This is needed on i2c/spi but not + * Check EC communications status (busy). This is needed on i2c/spi but not * on lpc since it has its own out-of-band busy indicator. * * lpc must read the status from the command register. Attempting this on * lpc will overwrite the args/parameter space and corrupt its data. 
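A minimal sketch of interpreting version_mask from EC_CMD_GET_CMD_VERSIONS, assuming bit n set means command version n is supported (the helper name is illustrative, not part of this header):

static int ec_cmd_version_supported(const struct ec_response_get_cmd_versions *r,
				    unsigned int version)
{
	return (r->version_mask & BIT(version)) != 0;
}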
*/ -#define EC_CMD_GET_COMMS_STATUS 0x09 +#define EC_CMD_GET_COMMS_STATUS 0x0009 /* Avoid using ec_status which is for return values */ enum ec_comms_status { - EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */ + EC_COMMS_STATUS_PROCESSING = BIT(0), /* Processing cmd */ }; /** @@ -701,29 +1104,29 @@ enum ec_comms_status { */ struct ec_response_get_comms_status { uint32_t flags; /* Mask of enum ec_comms_status */ -} __packed; +} __ec_align4; /* Fake a variety of responses, purely for testing purposes. */ -#define EC_CMD_TEST_PROTOCOL 0x0a +#define EC_CMD_TEST_PROTOCOL 0x000A /* Tell the EC what to send back to us. */ struct ec_params_test_protocol { uint32_t ec_result; uint32_t ret_len; uint8_t buf[32]; -} __packed; +} __ec_align4; /* Here it comes... */ struct ec_response_test_protocol { uint8_t buf[32]; -} __packed; +} __ec_align4; -/* Get prococol information */ -#define EC_CMD_GET_PROTOCOL_INFO 0x0b +/* Get protocol information */ +#define EC_CMD_GET_PROTOCOL_INFO 0x000B /* Flags for ec_response_get_protocol_info.flags */ /* EC_RES_IN_PROGRESS may be returned if a command is slow */ -#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0) +#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED BIT(0) /** * struct ec_response_get_protocol_info - Response to the get protocol info. @@ -739,7 +1142,7 @@ struct ec_response_get_protocol_info { uint16_t max_request_packet_size; uint16_t max_response_packet_size; uint32_t flags; -} __packed; +} __ec_align4; /*****************************************************************************/ @@ -757,19 +1160,19 @@ struct ec_response_get_protocol_info { struct ec_params_get_set_value { uint32_t flags; uint32_t value; -} __packed; +} __ec_align4; struct ec_response_get_set_value { uint32_t flags; uint32_t value; -} __packed; +} __ec_align4; -/* More than one command can use these structs to get/set paramters. */ -#define EC_CMD_GSV_PAUSE_IN_S5 0x0c +/* More than one command can use these structs to get/set parameters. */ +#define EC_CMD_GSV_PAUSE_IN_S5 0x000C /*****************************************************************************/ /* List the features supported by the firmware */ -#define EC_CMD_GET_FEATURES 0x0d +#define EC_CMD_GET_FEATURES 0x000D /* Supported features */ enum ec_feature_code { @@ -876,24 +1279,36 @@ enum ec_feature_code { EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS = 37, /* EC supports audio codec. */ EC_FEATURE_AUDIO_CODEC = 38, - /* EC Supports SCP. */ + /* The MCU is a System Companion Processor (SCP). */ EC_FEATURE_SCP = 39, /* The MCU is an Integrated Sensor Hub */ EC_FEATURE_ISH = 40, }; -#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) -#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32)) +#define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32) +#define EC_FEATURE_MASK_1(event_code) BIT(event_code - 32) struct ec_response_get_features { uint32_t flags[2]; -} __packed; +} __ec_align4; + +/*****************************************************************************/ +/* Get the board's SKU ID from EC */ +#define EC_CMD_GET_SKU_ID 0x000E + +/* Set SKU ID from AP */ +#define EC_CMD_SET_SKU_ID 0x000F + +struct ec_sku_id_info { + uint32_t sku_id; +} __ec_align4; /*****************************************************************************/ /* Flash commands */ /* Get flash info */ -#define EC_CMD_FLASH_INFO 0x10 +#define EC_CMD_FLASH_INFO 0x0010 +#define EC_VER_FLASH_INFO 2 /** * struct ec_response_flash_info - Response to the flash info command. 
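A minimal sketch of testing a feature bit from EC_CMD_GET_FEATURES with the two-word mask macros above (the helper name is illustrative, not part of this header):

static int ec_has_feature(const struct ec_response_get_features *r,
			  enum ec_feature_code code)
{
	if (code < 32)
		return !!(r->flags[0] & EC_FEATURE_MASK_0(code));
	return !!(r->flags[1] & EC_FEATURE_MASK_1(code));
}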
@@ -912,11 +1327,22 @@ struct ec_response_flash_info { uint32_t write_block_size; uint32_t erase_block_size; uint32_t protect_block_size; -} __packed; +} __ec_align4; -/* Flags for version 1+ flash info command */ -/* EC flash erases bits to 0 instead of 1 */ -#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0) +/* + * Flags for version 1+ flash info command + * EC flash erases bits to 0 instead of 1. + */ +#define EC_FLASH_INFO_ERASE_TO_0 BIT(0) + +/* + * Flash must be selected for read/write/erase operations to succeed. This may + * be necessary on a chip where write/erase can be corrupted by other board + * activity, or where the chip needs to enable some sort of programming voltage, + * or where the read/write/erase operations require cleanly suspending other + * chip functionality. + */ +#define EC_FLASH_INFO_SELECT_REQUIRED BIT(1) /** * struct ec_response_flash_info_1 - Response to the flash info v1 command. @@ -938,7 +1364,14 @@ struct ec_response_flash_info { * fields following. * * gcc anonymous structs don't seem to get along with the __packed directive; - * if they did we'd define the version 0 struct as a sub-struct of this one. + * if they did we'd define the version 0 structure as a sub-structure of this + * one. + * + * Version 2 supports flash banks of different sizes: + * The caller specifies the number of banks it has preallocated + * (num_banks_desc) + * The EC returns the number of banks describing the flash memory. + * It adds bank descriptions up to num_banks_desc. */ struct ec_response_flash_info_1 { /* Version 0 fields; see above for description */ @@ -950,14 +1383,50 @@ struct ec_response_flash_info { /* Version 1 adds these fields: */ uint32_t write_ideal_size; uint32_t flags; -} __packed; +} __ec_align4; + +struct ec_params_flash_info_2 { + /* Number of banks to describe */ + uint16_t num_banks_desc; + /* Reserved; set 0; ignore on read */ + uint8_t reserved[2]; +} __ec_align4; + +struct ec_flash_bank { + /* Number of sectors in this bank. */ + uint16_t count; + /* Size in power of 2 of each sector (8 --> 256 bytes) */ + uint8_t size_exp; + /* Minimal write size for the sectors in this bank */ + uint8_t write_size_exp; + /* Erase size for the sectors in this bank */ + uint8_t erase_size_exp; + /* Size for write protection, usually identical to erase size. */ + uint8_t protect_size_exp; + /* Reserved; set 0; ignore on read */ + uint8_t reserved[2]; +}; + +struct ec_response_flash_info_2 { + /* Total flash in the EC. */ + uint32_t flash_size; + /* Flags; see EC_FLASH_INFO_* */ + uint32_t flags; + /* Maximum size to use to send data to write to the EC. */ + uint32_t write_ideal_size; + /* Number of banks present in the EC. */ + uint16_t num_banks_total; + /* Number of banks described in banks array. */ + uint16_t num_banks_desc; + struct ec_flash_bank banks[0]; +} __ec_align4; /* * Read flash * * Response is params.size bytes of data. */ -#define EC_CMD_FLASH_READ 0x11 +#define EC_CMD_FLASH_READ 0x0011 /** * struct ec_params_flash_read - Parameters for the flash read command.
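Since the version 2 info response describes banks by power-of-two exponents, a minimal sketch of recovering byte sizes from a struct ec_flash_bank (helper names are illustrative, not part of this header):

static uint32_t ec_flash_bank_sector_size(const struct ec_flash_bank *bank)
{
	/* size_exp = 8 means 256-byte sectors, and so on. */
	return 1u << bank->size_exp;
}

static uint32_t ec_flash_bank_total_size(const struct ec_flash_bank *bank)
{
	return (uint32_t)bank->count << bank->size_exp;
}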
@@ -967,10 +1436,10 @@ struct ec_response_flash_info_1 { struct ec_params_flash_read { uint32_t offset; uint32_t size; -} __packed; +} __ec_align4; /* Write flash */ -#define EC_CMD_FLASH_WRITE 0x12 +#define EC_CMD_FLASH_WRITE 0x0012 #define EC_VER_FLASH_WRITE 1 /* Version 0 of the flash command supported only 64 bytes of data */ @@ -985,20 +1454,57 @@ struct ec_params_flash_write { uint32_t offset; uint32_t size; /* Followed by data to write */ -} __packed; +} __ec_align4; /* Erase flash */ -#define EC_CMD_FLASH_ERASE 0x13 +#define EC_CMD_FLASH_ERASE 0x0013 /** - * struct ec_params_flash_erase - Parameters for the flash erase command. + * struct ec_params_flash_erase - Parameters for the flash erase command, v0. * @offset: Byte offset to erase. * @size: Size to erase in bytes. */ struct ec_params_flash_erase { uint32_t offset; uint32_t size; -} __packed; +} __ec_align4; + +/* + * V1 adds async erase: + * subcommands can return: + * EC_RES_SUCCESS : erased (see ERASE_SECTOR_ASYNC case below). + * EC_RES_INVALID_PARAM : offset/size are not aligned on an erase boundary. + * EC_RES_ERROR : other errors. + * EC_RES_BUSY : an existing erase operation is in progress. + * EC_RES_ACCESS_DENIED: Trying to erase running image. + * + * When ERASE_SECTOR_ASYNC returns EC_RES_SUCCESS, the operation is just + * properly queued. The user must call ERASE_GET_RESULT subcommand to get + * the proper result. + * When ERASE_GET_RESULT returns EC_RES_BUSY, the caller must wait and send + * ERASE_GET_RESULT again to get the result of ERASE_SECTOR_ASYNC. + * ERASE_GET_RESULT command may time out on an EC where flash access is not + * permitted while erasing. (For instance, STM32F4). + */ +enum ec_flash_erase_cmd { + FLASH_ERASE_SECTOR, /* Erase and wait for result */ + FLASH_ERASE_SECTOR_ASYNC, /* Erase and return immediately. */ + FLASH_ERASE_GET_RESULT, /* Ask for last erase result */ +}; + +/** + * struct ec_params_flash_erase_v1 - Parameters for the flash erase command, v1. + * @cmd: One of ec_flash_erase_cmd. + * @reserved: Pad byte; currently always contains 0. + * @flag: No flags defined yet; set to 0. + * @params: Same as v0 parameters. + */ +struct ec_params_flash_erase_v1 { + uint8_t cmd; + uint8_t reserved; + uint16_t flag; + struct ec_params_flash_erase params; +} __ec_align4; /* * Get/set flash protection. @@ -1010,31 +1516,40 @@ struct ec_params_flash_erase { * * If mask=0, simply returns the current flags state. */ -#define EC_CMD_FLASH_PROTECT 0x15 +#define EC_CMD_FLASH_PROTECT 0x0015 #define EC_VER_FLASH_PROTECT 1 /* Command version 1 */ /* Flags for flash protection */ /* RO flash code protected when the EC boots */ -#define EC_FLASH_PROTECT_RO_AT_BOOT (1 << 0) +#define EC_FLASH_PROTECT_RO_AT_BOOT BIT(0) /* * RO flash code protected now. If this bit is set, at-boot status cannot * be changed. */ -#define EC_FLASH_PROTECT_RO_NOW (1 << 1) +#define EC_FLASH_PROTECT_RO_NOW BIT(1) /* Entire flash code protected now, until reboot. */ -#define EC_FLASH_PROTECT_ALL_NOW (1 << 2) +#define EC_FLASH_PROTECT_ALL_NOW BIT(2) /* Flash write protect GPIO is asserted now */ -#define EC_FLASH_PROTECT_GPIO_ASSERTED (1 << 3) +#define EC_FLASH_PROTECT_GPIO_ASSERTED BIT(3) /* Error - at least one bank of flash is stuck locked, and cannot be unlocked */ -#define EC_FLASH_PROTECT_ERROR_STUCK (1 << 4) +#define EC_FLASH_PROTECT_ERROR_STUCK BIT(4) /* * Error - flash protection is in inconsistent state. At least one bank of * flash which should be protected is not protected.
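A minimal sketch of the v1 asynchronous erase sequence described above; ec_flash_erase_v1() stands in for whatever transport sends EC_CMD_FLASH_ERASE version 1 and returns the 16-bit result code, and is assumed here rather than defined by this header:

static int ec_erase_async(uint32_t offset, uint32_t size)
{
	struct ec_params_flash_erase_v1 p = {
		.cmd = FLASH_ERASE_SECTOR_ASYNC,
		.params = { .offset = offset, .size = size },
	};
	uint16_t res = ec_flash_erase_v1(&p);	/* assumed transport helper */

	if (res != EC_RES_SUCCESS)
		return -1;

	/* The erase is only queued; poll until the EC reports a result. */
	do {
		p.cmd = FLASH_ERASE_GET_RESULT;
		res = ec_flash_erase_v1(&p);
	} while (res == EC_RES_BUSY);

	return res == EC_RES_SUCCESS ? 0 : -1;
}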
Usually fixed by * re-requesting the desired flags, or by a hard reset if that fails. */ -#define EC_FLASH_PROTECT_ERROR_INCONSISTENT (1 << 5) -/* Entile flash code protected when the EC boots */ -#define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6) +#define EC_FLASH_PROTECT_ERROR_INCONSISTENT BIT(5) +/* Entire flash code protected when the EC boots */ +#define EC_FLASH_PROTECT_ALL_AT_BOOT BIT(6) +/* RW flash code protected when the EC boots */ +#define EC_FLASH_PROTECT_RW_AT_BOOT BIT(7) +/* RW flash code protected now. */ +#define EC_FLASH_PROTECT_RW_NOW BIT(8) +/* Rollback information flash region protected when the EC boots */ +#define EC_FLASH_PROTECT_ROLLBACK_AT_BOOT BIT(9) +/* Rollback information flash region protected now */ +#define EC_FLASH_PROTECT_ROLLBACK_NOW BIT(10) + /** * struct ec_params_flash_protect - Parameters for the flash protect command. @@ -1044,7 +1559,7 @@ struct ec_params_flash_erase { struct ec_params_flash_protect { uint32_t mask; uint32_t flags; -} __packed; +} __ec_align4; /** * struct ec_response_flash_protect - Response to the flash protect command. @@ -1059,7 +1574,7 @@ struct ec_response_flash_protect { uint32_t flags; uint32_t valid_flags; uint32_t writable_flags; -} __packed; +} __ec_align4; /* * Note: commands 0x14 - 0x19 version 0 were old commands to get/set flash @@ -1067,22 +1582,37 @@ struct ec_response_flash_protect { */ /* Get the region offset/size */ -#define EC_CMD_FLASH_REGION_INFO 0x16 +#define EC_CMD_FLASH_REGION_INFO 0x0016 #define EC_VER_FLASH_REGION_INFO 1 enum ec_flash_region { /* Region which holds read-only EC image */ EC_FLASH_REGION_RO = 0, - /* Region which holds rewritable EC image */ - EC_FLASH_REGION_RW, + /* + * Region which holds active RW image. 'Active' is different from + * 'running'. Active means 'scheduled-to-run'. Since RO image always + * scheduled to run, active/non-active applies only to RW images (for + * the same reason 'update' applies only to RW images. It's a state of + * an image on a flash. Running image can be RO, RW_A, RW_B but active + * image can only be RW_A or RW_B. In recovery mode, an active RW image + * doesn't enter 'running' state but it's still active on a flash. + */ + EC_FLASH_REGION_ACTIVE, /* * Region which should be write-protected in the factory (a superset of * EC_FLASH_REGION_RO) */ EC_FLASH_REGION_WP_RO, + /* Region which holds updatable (non-active) RW image */ + EC_FLASH_REGION_UPDATE, /* Number of regions */ EC_FLASH_REGION_COUNT, }; +/* + * 'RW' is vague if there are multiple RW images; we mean the active one, + * so the old constant is deprecated. 
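A minimal sketch of querying the current protection state with mask=0, as the EC_CMD_FLASH_PROTECT comment above describes; ec_flash_protect() is an assumed transport helper, not part of this header:

static int ec_ro_protected_now(void)
{
	struct ec_params_flash_protect p = { .mask = 0, .flags = 0 };
	struct ec_response_flash_protect r;

	if (ec_flash_protect(&p, &r) != 0)	/* assumed transport helper */
		return -1;

	/* mask = 0 only queries; r.flags reports the current state. */
	return !!(r.flags & EC_FLASH_PROTECT_RO_NOW);
}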
+ */ +#define EC_FLASH_REGION_RW EC_FLASH_REGION_ACTIVE /** * struct ec_params_flash_region_info - Parameters for the flash region info @@ -1091,15 +1621,15 @@ enum ec_flash_region { */ struct ec_params_flash_region_info { uint32_t region; -} __packed; +} __ec_align4; struct ec_response_flash_region_info { uint32_t offset; uint32_t size; -} __packed; +} __ec_align4; /* Read/write VbNvContext */ -#define EC_CMD_VBNV_CONTEXT 0x17 +#define EC_CMD_VBNV_CONTEXT 0x0017 #define EC_VER_VBNV_CONTEXT 1 #define EC_VBNV_BLOCK_SIZE 16 @@ -1111,52 +1641,99 @@ enum ec_vbnvcontext_op { struct ec_params_vbnvcontext { uint32_t op; uint8_t block[EC_VBNV_BLOCK_SIZE]; -} __packed; +} __ec_align4; struct ec_response_vbnvcontext { uint8_t block[EC_VBNV_BLOCK_SIZE]; -} __packed; +} __ec_align4; + + +/* Get SPI flash information */ +#define EC_CMD_FLASH_SPI_INFO 0x0018 + +struct ec_response_flash_spi_info { + /* JEDEC info from command 0x9F (manufacturer, memory type, size) */ + uint8_t jedec[3]; + + /* Pad byte; currently always contains 0 */ + uint8_t reserved0; + + /* Manufacturer / device ID from command 0x90 */ + uint8_t mfr_dev_id[2]; + + /* Status registers from command 0x05 and 0x35 */ + uint8_t sr1, sr2; +} __ec_align1; + + +/* Select flash during flash operations */ +#define EC_CMD_FLASH_SELECT 0x0019 + +/** + * struct ec_params_flash_select - Parameters for the flash select command. + * @select: 1 to select flash, 0 to deselect flash + */ +struct ec_params_flash_select { + uint8_t select; +} __ec_align4; + /*****************************************************************************/ /* PWM commands */ /* Get fan target RPM */ -#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x20 +#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x0020 struct ec_response_pwm_get_fan_rpm { uint32_t rpm; -} __packed; +} __ec_align4; /* Set target fan RPM */ -#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x21 +#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x0021 -struct ec_params_pwm_set_fan_target_rpm { +/* Version 0 of input params */ +struct ec_params_pwm_set_fan_target_rpm_v0 { uint32_t rpm; -} __packed; +} __ec_align4; + +/* Version 1 of input params */ +struct ec_params_pwm_set_fan_target_rpm_v1 { + uint32_t rpm; + uint8_t fan_idx; +} __ec_align_size1; /* Get keyboard backlight */ -#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x22 +/* OBSOLETE - Use EC_CMD_PWM_SET_DUTY */ +#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x0022 struct ec_response_pwm_get_keyboard_backlight { uint8_t percent; uint8_t enabled; -} __packed; +} __ec_align1; /* Set keyboard backlight */ -#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x23 +/* OBSOLETE - Use EC_CMD_PWM_SET_DUTY */ +#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x0023 struct ec_params_pwm_set_keyboard_backlight { uint8_t percent; -} __packed; +} __ec_align1; /* Set target fan PWM duty cycle */ -#define EC_CMD_PWM_SET_FAN_DUTY 0x24 +#define EC_CMD_PWM_SET_FAN_DUTY 0x0024 -struct ec_params_pwm_set_fan_duty { +/* Version 0 of input params */ +struct ec_params_pwm_set_fan_duty_v0 { uint32_t percent; -} __packed; +} __ec_align4; -#define EC_CMD_PWM_SET_DUTY 0x25 +/* Version 1 of input params */ +struct ec_params_pwm_set_fan_duty_v1 { + uint32_t percent; + uint8_t fan_idx; +} __ec_align_size1; + +#define EC_CMD_PWM_SET_DUTY 0x0025 /* 16 bit duty cycle, 0xffff = 100% */ #define EC_PWM_MAX_DUTY 0xffff @@ -1174,18 +1751,18 @@ struct ec_params_pwm_set_duty { uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ uint8_t pwm_type; /* ec_pwm_type */ uint8_t index; /* Type-specific index, or 0 if unique */ -} __packed; +} __ec_align4; 
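A minimal sketch of filling struct ec_params_pwm_set_duty, scaling a percentage onto the 16-bit duty range where EC_PWM_MAX_DUTY is 100% (the helper name is illustrative, not part of this header):

static void ec_fill_pwm_duty(struct ec_params_pwm_set_duty *p,
			     unsigned int percent, uint8_t pwm_type,
			     uint8_t index)
{
	p->duty = (uint16_t)(percent * EC_PWM_MAX_DUTY / 100);
	p->pwm_type = pwm_type;
	p->index = index;
}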
-#define EC_CMD_PWM_GET_DUTY 0x26 +#define EC_CMD_PWM_GET_DUTY 0x0026 struct ec_params_pwm_get_duty { uint8_t pwm_type; /* ec_pwm_type */ uint8_t index; /* Type-specific index, or 0 if unique */ -} __packed; +} __ec_align1; struct ec_response_pwm_get_duty { uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */ -} __packed; +} __ec_align2; /*****************************************************************************/ /* @@ -1194,11 +1771,11 @@ struct ec_response_pwm_get_duty { * into a subcommand. We'll make separate structs for subcommands with * different input args, so that we know how much to expect. */ -#define EC_CMD_LIGHTBAR_CMD 0x28 +#define EC_CMD_LIGHTBAR_CMD 0x0028 struct rgb_s { uint8_t r, g, b; -}; +} __ec_todo_unpacked; #define LB_BATTERY_LEVELS 4 @@ -1238,7 +1815,7 @@ struct lightbar_params_v0 { /* Color palette */ struct rgb_s color[8]; /* 0-3 are Google colors */ -} __packed; +} __ec_todo_packed; struct lightbar_params_v1 { /* Timing */ @@ -1251,7 +1828,10 @@ struct lightbar_params_v1 { int32_t s3_sleep_for; int32_t s3_ramp_up; int32_t s3_ramp_down; + int32_t s5_ramp_up; + int32_t s5_ramp_down; int32_t tap_tick_delay; + int32_t tap_gate_delay; int32_t tap_display_time; /* Tap-for-battery params */ @@ -1279,84 +1859,182 @@ struct lightbar_params_v1 { uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ + /* s5: single color pulse on inhibited power-up */ + uint8_t s5_idx; + /* Color palette */ struct rgb_s color[8]; /* 0-3 are Google colors */ -} __packed; +} __ec_todo_packed; -/* Lightbar program */ +/* Lightbar command params v2 + * crbug.com/467716 + * + * lightbar_parms_v1 was too big for i2c, therefore in v2, we split them up by + * logical groups to make it more manageable ( < 120 bytes). + * + * NOTE: Each of these groups must be less than 120 bytes. + */ + +struct lightbar_params_v2_timing { + /* Timing */ + int32_t google_ramp_up; + int32_t google_ramp_down; + int32_t s3s0_ramp_up; + int32_t s0_tick_delay[2]; /* AC=0/1 */ + int32_t s0a_tick_delay[2]; /* AC=0/1 */ + int32_t s0s3_ramp_down; + int32_t s3_sleep_for; + int32_t s3_ramp_up; + int32_t s3_ramp_down; + int32_t s5_ramp_up; + int32_t s5_ramp_down; + int32_t tap_tick_delay; + int32_t tap_gate_delay; + int32_t tap_display_time; +} __ec_todo_packed; + +struct lightbar_params_v2_tap { + /* Tap-for-battery params */ + uint8_t tap_pct_red; + uint8_t tap_pct_green; + uint8_t tap_seg_min_on; + uint8_t tap_seg_max_on; + uint8_t tap_seg_osc; + uint8_t tap_idx[3]; +} __ec_todo_packed; + +struct lightbar_params_v2_oscillation { + /* Oscillation */ + uint8_t osc_min[2]; /* AC=0/1 */ + uint8_t osc_max[2]; /* AC=0/1 */ + uint8_t w_ofs[2]; /* AC=0/1 */ +} __ec_todo_packed; + +struct lightbar_params_v2_brightness { + /* Brightness limits based on the backlight and AC. */ + uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */ + uint8_t bright_bl_on_min[2]; /* AC=0/1 */ + uint8_t bright_bl_on_max[2]; /* AC=0/1 */ +} __ec_todo_packed; + +struct lightbar_params_v2_thresholds { + /* Battery level thresholds */ + uint8_t battery_threshold[LB_BATTERY_LEVELS - 1]; +} __ec_todo_packed; + +struct lightbar_params_v2_colors { + /* Map [AC][battery_level] to color index */ + uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */ + uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */ + + /* s5: single color pulse on inhibited power-up */ + uint8_t s5_idx; + + /* Color palette */ + struct rgb_s color[8]; /* 0-3 are Google colors */ +} __ec_todo_packed; + +/* Lightbar program. 
*/ #define EC_LB_PROG_LEN 192 struct lightbar_program { uint8_t size; uint8_t data[EC_LB_PROG_LEN]; -}; +} __ec_todo_unpacked; struct ec_params_lightbar { uint8_t cmd; /* Command (see enum lightbar_command) */ union { - struct { - /* no args */ - } dump, off, on, init, get_seq, get_params_v0, get_params_v1, - version, get_brightness, get_demo, suspend, resume; + /* + * The following commands have no args: + * + * dump, off, on, init, get_seq, get_params_v0, get_params_v1, + * version, get_brightness, get_demo, suspend, resume, + * get_params_v2_timing, get_params_v2_tap, get_params_v2_osc, + * get_params_v2_bright, get_params_v2_thlds, + * get_params_v2_colors + * + * Don't use an empty struct, because C++ hates that. + */ - struct { + struct __ec_todo_unpacked { uint8_t num; } set_brightness, seq, demo; - struct { + struct __ec_todo_unpacked { uint8_t ctrl, reg, value; } reg; - struct { + struct __ec_todo_unpacked { uint8_t led, red, green, blue; } set_rgb; - struct { + struct __ec_todo_unpacked { uint8_t led; } get_rgb; - struct { + struct __ec_todo_unpacked { uint8_t enable; } manual_suspend_ctrl; struct lightbar_params_v0 set_params_v0; struct lightbar_params_v1 set_params_v1; + + struct lightbar_params_v2_timing set_v2par_timing; + struct lightbar_params_v2_tap set_v2par_tap; + struct lightbar_params_v2_oscillation set_v2par_osc; + struct lightbar_params_v2_brightness set_v2par_bright; + struct lightbar_params_v2_thresholds set_v2par_thlds; + struct lightbar_params_v2_colors set_v2par_colors; + struct lightbar_program set_program; }; -} __packed; +} __ec_todo_packed; struct ec_response_lightbar { union { - struct { - struct { + struct __ec_todo_unpacked { + struct __ec_todo_unpacked { uint8_t reg; uint8_t ic0; uint8_t ic1; } vals[23]; } dump; - struct { + struct __ec_todo_unpacked { uint8_t num; } get_seq, get_brightness, get_demo; struct lightbar_params_v0 get_params_v0; struct lightbar_params_v1 get_params_v1; - struct { + + struct lightbar_params_v2_timing get_params_v2_timing; + struct lightbar_params_v2_tap get_params_v2_tap; + struct lightbar_params_v2_oscillation get_params_v2_osc; + struct lightbar_params_v2_brightness get_params_v2_bright; + struct lightbar_params_v2_thresholds get_params_v2_thlds; + struct lightbar_params_v2_colors get_params_v2_colors; + + struct __ec_todo_unpacked { uint32_t num; uint32_t flags; } version; - struct { + struct __ec_todo_unpacked { uint8_t red, green, blue; } get_rgb; - struct { - /* no return params */ - } off, on, init, set_brightness, seq, reg, set_rgb, - demo, set_params_v0, set_params_v1, - set_program, manual_suspend_ctrl, suspend, resume; + /* + * The following commands have no response: + * + * off, on, init, set_brightness, seq, reg, set_rgb, demo, + * set_params_v0, set_params_v1, set_program, + * manual_suspend_ctrl, suspend, resume, set_v2par_timing, + * set_v2par_tap, set_v2par_osc, set_v2par_bright, + * set_v2par_thlds, set_v2par_colors + */ }; -} __packed; +} __ec_todo_packed; /* Lightbar commands */ enum lightbar_command { @@ -1382,13 +2060,25 @@ enum lightbar_command { LIGHTBAR_CMD_MANUAL_SUSPEND_CTRL = 19, LIGHTBAR_CMD_SUSPEND = 20, LIGHTBAR_CMD_RESUME = 21, + LIGHTBAR_CMD_GET_PARAMS_V2_TIMING = 22, + LIGHTBAR_CMD_SET_PARAMS_V2_TIMING = 23, + LIGHTBAR_CMD_GET_PARAMS_V2_TAP = 24, + LIGHTBAR_CMD_SET_PARAMS_V2_TAP = 25, + LIGHTBAR_CMD_GET_PARAMS_V2_OSCILLATION = 26, + LIGHTBAR_CMD_SET_PARAMS_V2_OSCILLATION = 27, + LIGHTBAR_CMD_GET_PARAMS_V2_BRIGHTNESS = 28, + LIGHTBAR_CMD_SET_PARAMS_V2_BRIGHTNESS = 29, + 
LIGHTBAR_CMD_GET_PARAMS_V2_THRESHOLDS = 30, + LIGHTBAR_CMD_SET_PARAMS_V2_THRESHOLDS = 31, + LIGHTBAR_CMD_GET_PARAMS_V2_COLORS = 32, + LIGHTBAR_CMD_SET_PARAMS_V2_COLORS = 33, LIGHTBAR_NUM_CMDS }; /*****************************************************************************/ /* LED control commands */ -#define EC_CMD_LED_CONTROL 0x29 +#define EC_CMD_LED_CONTROL 0x0029 enum ec_led_id { /* LED to indicate battery state of charge */ @@ -1400,13 +2090,21 @@ enum ec_led_id { EC_LED_ID_POWER_LED, /* LED on power adapter or its plug */ EC_LED_ID_ADAPTER_LED, + /* LED to indicate left side */ + EC_LED_ID_LEFT_LED, + /* LED to indicate right side */ + EC_LED_ID_RIGHT_LED, + /* LED to indicate recovery mode with HW_REINIT */ + EC_LED_ID_RECOVERY_HW_REINIT_LED, + /* LED to indicate sysrq debug mode. */ + EC_LED_ID_SYSRQ_DEBUG_LED, EC_LED_ID_COUNT }; /* LED control flags */ -#define EC_LED_FLAGS_QUERY (1 << 0) /* Query LED capability only */ -#define EC_LED_FLAGS_AUTO (1 << 1) /* Switch LED back to automatic control */ +#define EC_LED_FLAGS_QUERY BIT(0) /* Query LED capability only */ +#define EC_LED_FLAGS_AUTO BIT(1) /* Switch LED back to automatic control */ enum ec_led_colors { EC_LED_COLOR_RED = 0, @@ -1414,6 +2112,7 @@ enum ec_led_colors { EC_LED_COLOR_BLUE, EC_LED_COLOR_YELLOW, EC_LED_COLOR_WHITE, + EC_LED_COLOR_AMBER, EC_LED_COLOR_COUNT }; @@ -1423,7 +2122,7 @@ struct ec_params_led_control { uint8_t flags; /* Control flags */ uint8_t brightness[EC_LED_COLOR_COUNT]; -} __packed; +} __ec_align1; struct ec_response_led_control { /* @@ -1434,7 +2133,7 @@ struct ec_response_led_control { * Other values means the LED is control by PWM. */ uint8_t brightness_range[EC_LED_COLOR_COUNT]; -} __packed; +} __ec_align1; /*****************************************************************************/ /* Verified boot commands */ @@ -1445,7 +2144,7 @@ struct ec_response_led_control { */ /* Verified boot hash command */ -#define EC_CMD_VBOOT_HASH 0x2A +#define EC_CMD_VBOOT_HASH 0x002A struct ec_params_vboot_hash { uint8_t cmd; /* enum ec_vboot_hash_cmd */ @@ -1455,7 +2154,7 @@ struct ec_params_vboot_hash { uint32_t offset; /* Offset in flash to hash */ uint32_t size; /* Number of bytes to hash */ uint8_t nonce_data[64]; /* Nonce data; ignored if nonce_size=0 */ -} __packed; +} __ec_align4; struct ec_response_vboot_hash { uint8_t status; /* enum ec_vboot_hash_status */ @@ -1465,7 +2164,7 @@ struct ec_response_vboot_hash { uint32_t offset; /* Offset in flash which was hashed */ uint32_t size; /* Number of bytes hashed */ uint8_t hash_digest[64]; /* Hash digest data */ -} __packed; +} __ec_align4; enum ec_vboot_hash_cmd { EC_VBOOT_HASH_GET = 0, /* Get current hash status */ @@ -1489,15 +2188,22 @@ enum ec_vboot_hash_status { * If one of these is specified, the EC will automatically update offset and * size to the correct values for the specified image (RO or RW). */ -#define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe -#define EC_VBOOT_HASH_OFFSET_RW 0xfffffffd +#define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe +#define EC_VBOOT_HASH_OFFSET_ACTIVE 0xfffffffd +#define EC_VBOOT_HASH_OFFSET_UPDATE 0xfffffffc + +/* + * 'RW' is vague if there are multiple RW images; we mean the active one, + * so the old constant is deprecated. + */ +#define EC_VBOOT_HASH_OFFSET_RW EC_VBOOT_HASH_OFFSET_ACTIVE /*****************************************************************************/ /* * Motion sense commands. We'll make separate structs for sub-commands with * different input args, so that we know how much to expect. 
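A minimal sketch of the query flow for EC_CMD_LED_CONTROL: send EC_LED_FLAGS_QUERY and read back the per-color brightness range. ec_led_control() is an assumed transport helper, and the led_id member used below sits in the part of struct ec_params_led_control not quoted in this hunk:

static int ec_led_color_range(enum ec_led_id led, enum ec_led_colors color)
{
	struct ec_params_led_control p = {
		.led_id = led,			/* field outside this hunk */
		.flags = EC_LED_FLAGS_QUERY,	/* query capability only */
	};
	struct ec_response_led_control r;

	if (ec_led_control(&p, &r) != 0)	/* assumed transport helper */
		return -1;

	return r.brightness_range[color];
}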
*/ -#define EC_CMD_MOTION_SENSE_CMD 0x2B +#define EC_CMD_MOTION_SENSE_CMD 0x002B /* Motion sense commands */ enum motionsense_command { @@ -1516,7 +2222,13 @@ enum motionsense_command { /* * EC Rate command is a setter/getter command for the EC sampling rate - * of all motion sensors in milliseconds. + * in milliseconds. + * It is per sensor, the EC run sample task at the minimum of all + * sensors EC_RATE. + * For sensors without hardware FIFO, EC_RATE should be equals to 1/ODR + * to collect all the sensor samples. + * For sensor with hardware FIFO, EC_RATE is used as the maximal delay + * to process of all motion sensors in milliseconds. */ MOTIONSENSE_CMD_EC_RATE = 2, @@ -1547,32 +2259,76 @@ enum motionsense_command { MOTIONSENSE_CMD_DATA = 6, /* - * Perform low level calibration.. On sensors that support it, ask to - * do offset calibration. + * Return sensor fifo info. + */ + MOTIONSENSE_CMD_FIFO_INFO = 7, + + /* + * Insert a flush element in the fifo and return sensor fifo info. + * The host can use that element to synchronize its operation. + */ + MOTIONSENSE_CMD_FIFO_FLUSH = 8, + + /* + * Return a portion of the fifo. + */ + MOTIONSENSE_CMD_FIFO_READ = 9, + + /* + * Perform low level calibration. + * On sensors that support it, ask to do offset calibration. */ MOTIONSENSE_CMD_PERFORM_CALIB = 10, /* - * Sensor Offset command is a setter/getter command for the offset used - * for calibration. The offsets can be calculated by the host, or via + * Sensor Offset command is a setter/getter command for the offset + * used for calibration. + * The offsets can be calculated by the host, or via * PERFORM_CALIB command. */ MOTIONSENSE_CMD_SENSOR_OFFSET = 11, - /* Number of motionsense sub-commands. */ - MOTIONSENSE_NUM_CMDS -}; + /* + * List available activities for a MOTION sensor. + * Indicates if they are enabled or disabled. + */ + MOTIONSENSE_CMD_LIST_ACTIVITIES = 12, -enum motionsensor_id { - EC_MOTION_SENSOR_ACCEL_BASE = 0, - EC_MOTION_SENSOR_ACCEL_LID = 1, - EC_MOTION_SENSOR_GYRO = 2, + /* + * Activity management + * Enable/Disable activity recognition. + */ + MOTIONSENSE_CMD_SET_ACTIVITY = 13, /* - * Note, if more sensors are added and this count changes, the padding - * in ec_response_motion_sense dump command must be modified. + * Lid Angle */ - EC_MOTION_SENSOR_COUNT = 3 + MOTIONSENSE_CMD_LID_ANGLE = 14, + + /* + * Allow the FIFO to trigger interrupt via MKBP events. + * By default the FIFO does not send interrupt to process the FIFO + * until the AP is ready or it is coming from a wakeup sensor. + */ + MOTIONSENSE_CMD_FIFO_INT_ENABLE = 15, + + /* + * Spoof the readings of the sensors. The spoofed readings can be set + * to arbitrary values, or will lock to the last read actual values. + */ + MOTIONSENSE_CMD_SPOOF = 16, + + /* Set lid angle for tablet mode detection. */ + MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE = 17, + + /* + * Sensor Scale command is a setter/getter command for the calibration + * scale. + */ + MOTIONSENSE_CMD_SENSOR_SCALE = 18, + + /* Number of motionsense sub-commands. */ + MOTIONSENSE_NUM_CMDS }; /* List of motion sensor types. */ @@ -1584,6 +2340,7 @@ enum motionsensor_type { MOTIONSENSE_TYPE_LIGHT = 4, MOTIONSENSE_TYPE_ACTIVITY = 5, MOTIONSENSE_TYPE_BARO = 6, + MOTIONSENSE_TYPE_SYNC = 7, MOTIONSENSE_TYPE_MAX, }; @@ -1591,19 +2348,116 @@ enum motionsensor_type { enum motionsensor_location { MOTIONSENSE_LOC_BASE = 0, MOTIONSENSE_LOC_LID = 1, + MOTIONSENSE_LOC_CAMERA = 2, MOTIONSENSE_LOC_MAX, }; /* List of motion sensor chips. 
*/ enum motionsensor_chip { MOTIONSENSE_CHIP_KXCJ9 = 0, + MOTIONSENSE_CHIP_LSM6DS0 = 1, + MOTIONSENSE_CHIP_BMI160 = 2, + MOTIONSENSE_CHIP_SI1141 = 3, + MOTIONSENSE_CHIP_SI1142 = 4, + MOTIONSENSE_CHIP_SI1143 = 5, + MOTIONSENSE_CHIP_KX022 = 6, + MOTIONSENSE_CHIP_L3GD20H = 7, + MOTIONSENSE_CHIP_BMA255 = 8, + MOTIONSENSE_CHIP_BMP280 = 9, + MOTIONSENSE_CHIP_OPT3001 = 10, + MOTIONSENSE_CHIP_BH1730 = 11, + MOTIONSENSE_CHIP_GPIO = 12, + MOTIONSENSE_CHIP_LIS2DH = 13, + MOTIONSENSE_CHIP_LSM6DSM = 14, + MOTIONSENSE_CHIP_LIS2DE = 15, + MOTIONSENSE_CHIP_LIS2MDL = 16, + MOTIONSENSE_CHIP_LSM6DS3 = 17, + MOTIONSENSE_CHIP_LSM6DSO = 18, + MOTIONSENSE_CHIP_LNG2DM = 19, + MOTIONSENSE_CHIP_MAX, }; +/* List of orientation positions */ +enum motionsensor_orientation { + MOTIONSENSE_ORIENTATION_LANDSCAPE = 0, + MOTIONSENSE_ORIENTATION_PORTRAIT = 1, + MOTIONSENSE_ORIENTATION_UPSIDE_DOWN_PORTRAIT = 2, + MOTIONSENSE_ORIENTATION_UPSIDE_DOWN_LANDSCAPE = 3, + MOTIONSENSE_ORIENTATION_UNKNOWN = 4, +}; + +struct ec_response_motion_sensor_data { + /* Flags for each sensor. */ + uint8_t flags; + /* Sensor number the data comes from. */ + uint8_t sensor_num; + /* Each sensor is up to 3-axis. */ + union { + int16_t data[3]; + struct __ec_todo_packed { + uint16_t reserved; + uint32_t timestamp; + }; + struct __ec_todo_unpacked { + uint8_t activity; /* motionsensor_activity */ + uint8_t state; + int16_t add_info[2]; + }; + }; +} __ec_todo_packed; + +/* Note: used in ec_response_get_next_data */ +struct ec_response_motion_sense_fifo_info { + /* Size of the fifo */ + uint16_t size; + /* Amount of space used in the fifo */ + uint16_t count; + /* Timestamp recorded in us. + * aka accurate timestamp when host event was triggered. + */ + uint32_t timestamp; + /* Total amount of vector lost */ + uint16_t total_lost; + /* Lost events since the last fifo_info, per sensors */ + uint16_t lost[0]; +} __ec_todo_packed; + +struct ec_response_motion_sense_fifo_data { + uint32_t number_data; + struct ec_response_motion_sensor_data data[0]; +} __ec_todo_packed; + +/* List supported activity recognition */ +enum motionsensor_activity { + MOTIONSENSE_ACTIVITY_RESERVED = 0, + MOTIONSENSE_ACTIVITY_SIG_MOTION = 1, + MOTIONSENSE_ACTIVITY_DOUBLE_TAP = 2, + MOTIONSENSE_ACTIVITY_ORIENTATION = 3, +}; + +struct ec_motion_sense_activity { + uint8_t sensor_num; + uint8_t activity; /* one of enum motionsensor_activity */ + uint8_t enable; /* 1: enable, 0: disable */ + uint8_t reserved; + uint16_t parameters[3]; /* activity dependent parameters */ +} __ec_todo_unpacked; + /* Module flag masks used for the dump sub-command. */ -#define MOTIONSENSE_MODULE_FLAG_ACTIVE (1<<0) +#define MOTIONSENSE_MODULE_FLAG_ACTIVE BIT(0) /* Sensor flag masks used for the dump sub-command. */ -#define MOTIONSENSE_SENSOR_FLAG_PRESENT (1<<0) +#define MOTIONSENSE_SENSOR_FLAG_PRESENT BIT(0) + +/* + * Flush entry for synchronization. + * data contains time stamp + */ +#define MOTIONSENSE_SENSOR_FLAG_FLUSH BIT(0) +#define MOTIONSENSE_SENSOR_FLAG_TIMESTAMP BIT(1) +#define MOTIONSENSE_SENSOR_FLAG_WAKEUP BIT(2) +#define MOTIONSENSE_SENSOR_FLAG_TABLET_MODE BIT(3) +#define MOTIONSENSE_SENSOR_FLAG_ODR BIT(4) /* * Send this value for the data element to only perform a read. 
If you @@ -1614,48 +2468,79 @@ enum motionsensor_chip { #define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000 +/* MOTIONSENSE_CMD_SENSOR_OFFSET subcommand flag */ /* Set Calibration information */ -#define MOTION_SENSE_SET_OFFSET 1 +#define MOTION_SENSE_SET_OFFSET BIT(0) -struct ec_response_motion_sensor_data { - /* Flags for each sensor. */ - uint8_t flags; - /* Sensor number the data comes from */ - uint8_t sensor_num; - /* Each sensor is up to 3-axis. */ - union { - int16_t data[3]; - struct { - uint16_t rsvd; - uint32_t timestamp; - } __packed; - struct { - uint8_t activity; /* motionsensor_activity */ - uint8_t state; - int16_t add_info[2]; - }; - }; -} __packed; +/* Default Scale value, factor 1. */ +#define MOTION_SENSE_DEFAULT_SCALE BIT(15) + +#define LID_ANGLE_UNRELIABLE 500 + +enum motionsense_spoof_mode { + /* Disable spoof mode. */ + MOTIONSENSE_SPOOF_MODE_DISABLE = 0, + + /* Enable spoof mode, but use provided component values. */ + MOTIONSENSE_SPOOF_MODE_CUSTOM, + + /* Enable spoof mode, but use the current sensor values. */ + MOTIONSENSE_SPOOF_MODE_LOCK_CURRENT, + + /* Query the current spoof mode status for the sensor. */ + MOTIONSENSE_SPOOF_MODE_QUERY, +}; struct ec_params_motion_sense { uint8_t cmd; union { /* Used for MOTIONSENSE_CMD_DUMP. */ - struct { - /* no args */ + struct __ec_todo_unpacked { + /* + * Maximal number of sensor the host is expecting. + * 0 means the host is only interested in the number + * of sensors controlled by the EC. + */ + uint8_t max_sensor_count; } dump; /* - * Used for MOTIONSENSE_CMD_EC_RATE and - * MOTIONSENSE_CMD_KB_WAKE_ANGLE. + * Used for MOTIONSENSE_CMD_KB_WAKE_ANGLE. */ - struct { - /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ + struct __ec_todo_unpacked { + /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. + * kb_wake_angle: angle to wakup AP. + */ int16_t data; - } ec_rate, kb_wake_angle; + } kb_wake_angle; + + /* + * Used for MOTIONSENSE_CMD_INFO, MOTIONSENSE_CMD_DATA + * and MOTIONSENSE_CMD_PERFORM_CALIB. + */ + struct __ec_todo_unpacked { + uint8_t sensor_num; + } info, info_3, data, fifo_flush, perform_calib, + list_activities; + + /* + * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR + * and MOTIONSENSE_CMD_SENSOR_RANGE. + */ + struct __ec_todo_unpacked { + uint8_t sensor_num; + + /* Rounding flag, true for round-up, false for down. */ + uint8_t roundup; + + uint16_t reserved; + + /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ + int32_t data; + } ec_rate, sensor_odr, sensor_range; /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ - struct { + struct __ec_todo_packed { uint8_t sensor_num; /* @@ -1681,36 +2566,102 @@ struct ec_params_motion_sense { * Compass: 1/16 uT */ int16_t offset[3]; - } __packed sensor_offset; + } sensor_offset; - /* Used for MOTIONSENSE_CMD_INFO. */ - struct { + /* Used for MOTIONSENSE_CMD_SENSOR_SCALE */ + struct __ec_todo_packed { uint8_t sensor_num; - } info; - /* - * Used for MOTIONSENSE_CMD_SENSOR_ODR and - * MOTIONSENSE_CMD_SENSOR_RANGE. - */ - struct { - /* Should be element of enum motionsensor_id. */ - uint8_t sensor_num; + /* + * bit 0: If set (MOTION_SENSE_SET_OFFSET), set + * the calibration information in the EC. + * If unset, just retrieve calibration information. + */ + uint16_t flags; - /* Rounding flag, true for round-up, false for down. */ - uint8_t roundup; + /* + * Temperature at calibration, in units of 0.01 C + * 0x8000: invalid / unknown. 
+ * 0x0: 0C + * 0x7fff: +327.67C + */ + int16_t temp; - uint16_t reserved; + /* + * Scale for calibration: + * By default scale is 1, it is encoded on 16bits: + * 1 = BIT(15) + * ~2 = 0xFFFF + * ~0 = 0. + */ + uint16_t scale[3]; + } sensor_scale; - /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */ - int32_t data; - } sensor_odr, sensor_range; + + /* Used for MOTIONSENSE_CMD_FIFO_INFO */ + /* (no params) */ + + /* Used for MOTIONSENSE_CMD_FIFO_READ */ + struct __ec_todo_unpacked { + /* + * Number of expected vector to return. + * EC may return less or 0 if none available. + */ + uint32_t max_data_vector; + } fifo_read; + + struct ec_motion_sense_activity set_activity; + + /* Used for MOTIONSENSE_CMD_LID_ANGLE */ + /* (no params) */ + + /* Used for MOTIONSENSE_CMD_FIFO_INT_ENABLE */ + struct __ec_todo_unpacked { + /* + * 1: enable, 0 disable fifo, + * EC_MOTION_SENSE_NO_VALUE return value. + */ + int8_t enable; + } fifo_int_enable; + + /* Used for MOTIONSENSE_CMD_SPOOF */ + struct __ec_todo_packed { + uint8_t sensor_id; + + /* See enum motionsense_spoof_mode. */ + uint8_t spoof_enable; + + /* Ignored, used for alignment. */ + uint8_t reserved; + + /* Individual component values to spoof. */ + int16_t components[3]; + } spoof; + + /* Used for MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE. */ + struct __ec_todo_unpacked { + /* + * Lid angle threshold for switching between tablet and + * clamshell mode. + */ + int16_t lid_angle; + + /* + * Hysteresis degree to prevent fluctuations between + * clamshell and tablet mode if lid angle keeps + * changing around the threshold. Lid motion driver will + * use lid_angle + hys_degree to trigger tablet mode and + * lid_angle - hys_degree to trigger clamshell mode. + */ + int16_t hys_degree; + } tablet_mode_threshold; }; -} __packed; +} __ec_todo_packed; struct ec_response_motion_sense { union { - /* Used for MOTIONSENSE_CMD_DUMP. */ - struct { + /* Used for MOTIONSENSE_CMD_DUMP */ + struct __ec_todo_unpacked { /* Flags representing the motion sensor module. */ uint8_t module_flags; @@ -1725,7 +2676,7 @@ struct ec_response_motion_sense { } dump; /* Used for MOTIONSENSE_CMD_INFO. */ - struct { + struct __ec_todo_unpacked { /* Should be element of enum motionsensor_type. */ uint8_t type; @@ -1736,37 +2687,129 @@ struct ec_response_motion_sense { uint8_t chip; } info; + /* Used for MOTIONSENSE_CMD_INFO version 3 */ + struct __ec_todo_unpacked { + /* Should be element of enum motionsensor_type. */ + uint8_t type; + + /* Should be element of enum motionsensor_location. */ + uint8_t location; + + /* Should be element of enum motionsensor_chip. */ + uint8_t chip; + + /* Minimum sensor sampling frequency */ + uint32_t min_frequency; + + /* Maximum sensor sampling frequency */ + uint32_t max_frequency; + + /* Max number of sensor events that could be in fifo */ + uint32_t fifo_max_event_count; + } info_3; + /* Used for MOTIONSENSE_CMD_DATA */ struct ec_response_motion_sensor_data data; /* * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR, - * MOTIONSENSE_CMD_SENSOR_RANGE, and - * MOTIONSENSE_CMD_KB_WAKE_ANGLE. + * MOTIONSENSE_CMD_SENSOR_RANGE, + * MOTIONSENSE_CMD_KB_WAKE_ANGLE, + * MOTIONSENSE_CMD_FIFO_INT_ENABLE and + * MOTIONSENSE_CMD_SPOOF. */ - struct { + struct __ec_todo_unpacked { /* Current value of the parameter queried. 
*/ int32_t ret; - } ec_rate, sensor_odr, sensor_range, kb_wake_angle; + } ec_rate, sensor_odr, sensor_range, kb_wake_angle, + fifo_int_enable, spoof; - /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */ - struct { + /* + * Used for MOTIONSENSE_CMD_SENSOR_OFFSET, + * PERFORM_CALIB. + */ + struct __ec_todo_unpacked { int16_t temp; int16_t offset[3]; } sensor_offset, perform_calib; + + /* Used for MOTIONSENSE_CMD_SENSOR_SCALE */ + struct __ec_todo_unpacked { + int16_t temp; + uint16_t scale[3]; + } sensor_scale; + + struct ec_response_motion_sense_fifo_info fifo_info, fifo_flush; + + struct ec_response_motion_sense_fifo_data fifo_read; + + struct __ec_todo_packed { + uint16_t reserved; + uint32_t enabled; + uint32_t disabled; + } list_activities; + + /* No params for set activity */ + + /* Used for MOTIONSENSE_CMD_LID_ANGLE */ + struct __ec_todo_unpacked { + /* + * Angle between 0 and 360 degree if available, + * LID_ANGLE_UNRELIABLE otherwise. + */ + uint16_t value; + } lid_angle; + + /* Used for MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE. */ + struct __ec_todo_unpacked { + /* + * Lid angle threshold for switching between tablet and + * clamshell mode. + */ + uint16_t lid_angle; + + /* Hysteresis degree. */ + uint16_t hys_degree; + } tablet_mode_threshold; + }; -} __packed; +} __ec_todo_packed; + +/*****************************************************************************/ +/* Force lid open command */ + +/* Make lid event always open */ +#define EC_CMD_FORCE_LID_OPEN 0x002C + +struct ec_params_force_lid_open { + uint8_t enabled; +} __ec_align1; + +/*****************************************************************************/ +/* Configure the behavior of the power button */ +#define EC_CMD_CONFIG_POWER_BUTTON 0x002D + +enum ec_config_power_button_flags { + /* Enable/Disable power button pulses for x86 devices */ + EC_POWER_BUTTON_ENABLE_PULSE = BIT(0), +}; + +struct ec_params_config_power_button { + /* See enum ec_config_power_button_flags */ + uint8_t flags; +} __ec_align1; /*****************************************************************************/ /* USB charging control commands */ /* Set USB port charging mode */ -#define EC_CMD_USB_CHARGE_SET_MODE 0x30 +#define EC_CMD_USB_CHARGE_SET_MODE 0x0030 struct ec_params_usb_charge_set_mode { uint8_t usb_port_id; - uint8_t mode; -} __packed; + uint8_t mode:7; + uint8_t inhibit_charge:1; +} __ec_align1; /*****************************************************************************/ /* Persistent storage for host */ @@ -1775,35 +2818,35 @@ struct ec_params_usb_charge_set_mode { #define EC_PSTORE_SIZE_MAX 64 /* Get persistent storage info */ -#define EC_CMD_PSTORE_INFO 0x40 +#define EC_CMD_PSTORE_INFO 0x0040 struct ec_response_pstore_info { /* Persistent storage size, in bytes */ uint32_t pstore_size; /* Access size; read/write offset and size must be a multiple of this */ uint32_t access_size; -} __packed; +} __ec_align4; /* * Read persistent storage * * Response is params.size bytes of data. 
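A minimal sketch of reading the whole persistent store in access_size-aligned chunks, as the EC_CMD_PSTORE_INFO comment above requires; ec_pstore_read() is an assumed transport helper, and buf is assumed to hold at least pstore_size bytes:

static int ec_pstore_read_all(const struct ec_response_pstore_info *info,
			      uint8_t *buf)
{
	struct ec_params_pstore_read p;

	/* Offset and size of each read must be multiples of access_size. */
	for (p.offset = 0; p.offset < info->pstore_size;
	     p.offset += info->access_size) {
		p.size = info->access_size;
		if (ec_pstore_read(&p, buf + p.offset) != 0)
			return -1;
	}
	return 0;
}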
*/ -#define EC_CMD_PSTORE_READ 0x41 +#define EC_CMD_PSTORE_READ 0x0041 struct ec_params_pstore_read { uint32_t offset; /* Byte offset to read */ uint32_t size; /* Size to read in bytes */ -} __packed; +} __ec_align4; /* Write persistent storage */ -#define EC_CMD_PSTORE_WRITE 0x42 +#define EC_CMD_PSTORE_WRITE 0x0042 struct ec_params_pstore_write { uint32_t offset; /* Byte offset to write */ uint32_t size; /* Size to write in bytes */ uint8_t data[EC_PSTORE_SIZE_MAX]; -} __packed; +} __ec_align4; /*****************************************************************************/ /* Real-time clock */ @@ -1811,21 +2854,21 @@ struct ec_params_pstore_write { /* RTC params and response structures */ struct ec_params_rtc { uint32_t time; -} __packed; +} __ec_align4; struct ec_response_rtc { uint32_t time; -} __packed; +} __ec_align4; /* These use ec_response_rtc */ -#define EC_CMD_RTC_GET_VALUE 0x44 -#define EC_CMD_RTC_GET_ALARM 0x45 +#define EC_CMD_RTC_GET_VALUE 0x0044 +#define EC_CMD_RTC_GET_ALARM 0x0045 /* These all use ec_params_rtc */ -#define EC_CMD_RTC_SET_VALUE 0x46 -#define EC_CMD_RTC_SET_ALARM 0x47 +#define EC_CMD_RTC_SET_VALUE 0x0046 +#define EC_CMD_RTC_SET_ALARM 0x0047 -/* Pass as param to SET_ALARM to clear the current alarm */ +/* Pass as time param to SET_ALARM to clear the current alarm */ #define EC_RTC_ALARM_CLEAR 0 /*****************************************************************************/ @@ -1835,8 +2878,8 @@ struct ec_response_rtc { #define EC_PORT80_SIZE_MAX 32 /* Get last port80 code from previous boot */ -#define EC_CMD_PORT80_LAST_BOOT 0x48 -#define EC_CMD_PORT80_READ 0x48 +#define EC_CMD_PORT80_LAST_BOOT 0x0048 +#define EC_CMD_PORT80_READ 0x0048 enum ec_port80_subcmd { EC_PORT80_GET_INFO = 0, @@ -1846,29 +2889,72 @@ enum ec_port80_subcmd { struct ec_params_port80_read { uint16_t subcmd; union { - struct { + struct __ec_todo_unpacked { uint32_t offset; uint32_t num_entries; } read_buffer; }; -} __packed; +} __ec_todo_packed; struct ec_response_port80_read { union { - struct { + struct __ec_todo_unpacked { uint32_t writes; uint32_t history_size; uint32_t last_boot; } get_info; - struct { + struct __ec_todo_unpacked { uint16_t codes[EC_PORT80_SIZE_MAX]; } data; }; -} __packed; +} __ec_todo_packed; struct ec_response_port80_last_boot { uint16_t code; -} __packed; +} __ec_align2; + +/*****************************************************************************/ +/* Temporary secure storage for host verified boot use */ + +/* Number of bytes in a vstore slot */ +#define EC_VSTORE_SLOT_SIZE 64 + +/* Maximum number of vstore slots */ +#define EC_VSTORE_SLOT_MAX 32 + +/* Get persistent storage info */ +#define EC_CMD_VSTORE_INFO 0x0049 +struct ec_response_vstore_info { + /* Indicates which slots are locked */ + uint32_t slot_locked; + /* Total number of slots available */ + uint8_t slot_count; +} __ec_align_size1; + +/* + * Read temporary secure storage + * + * Response is EC_VSTORE_SLOT_SIZE bytes of data. + */ +#define EC_CMD_VSTORE_READ 0x004A + +struct ec_params_vstore_read { + uint8_t slot; /* Slot to read from */ +} __ec_align1; + +struct ec_response_vstore_read { + uint8_t data[EC_VSTORE_SLOT_SIZE]; +} __ec_align1; + +/* + * Write temporary secure storage and lock it. + */ +#define EC_CMD_VSTORE_WRITE 0x004B + +struct ec_params_vstore_write { + uint8_t slot; /* Slot to write to */ + uint8_t data[EC_VSTORE_SLOT_SIZE]; +} __ec_align1; /*****************************************************************************/ /* Thermal engine commands. 
Note that there are two implementations. We'll @@ -1877,8 +2963,8 @@ struct ec_response_port80_last_boot { * Version 1 separates the CPU thermal limits from the fan control. */ -#define EC_CMD_THERMAL_SET_THRESHOLD 0x50 -#define EC_CMD_THERMAL_GET_THRESHOLD 0x51 +#define EC_CMD_THERMAL_SET_THRESHOLD 0x0050 +#define EC_CMD_THERMAL_GET_THRESHOLD 0x0051 /* The version 0 structs are opaque. You have to know what they are for * the get/set commands to make any sense. @@ -1889,17 +2975,17 @@ struct ec_params_thermal_set_threshold { uint8_t sensor_type; uint8_t threshold_id; uint16_t value; -} __packed; +} __ec_align2; /* Version 0 - get */ struct ec_params_thermal_get_threshold { uint8_t sensor_type; uint8_t threshold_id; -} __packed; +} __ec_align1; struct ec_response_thermal_get_threshold { uint16_t value; -} __packed; +} __ec_align2; /* The version 1 structs are visible. */ @@ -1911,71 +2997,124 @@ enum ec_temp_thresholds { EC_TEMP_THRESH_COUNT }; -/* Thermal configuration for one temperature sensor. Temps are in degrees K. +/* + * Thermal configuration for one temperature sensor. Temps are in degrees K. * Zero values will be silently ignored by the thermal task. + * + * Set 'temp_host' value allows thermal task to trigger some event with 1 degree + * hysteresis. + * For example, + * temp_host[EC_TEMP_THRESH_HIGH] = 300 K + * temp_host_release[EC_TEMP_THRESH_HIGH] = 0 K + * EC will throttle ap when temperature >= 301 K, and release throttling when + * temperature <= 299 K. + * + * Set 'temp_host_release' value allows thermal task has a custom hysteresis. + * For example, + * temp_host[EC_TEMP_THRESH_HIGH] = 300 K + * temp_host_release[EC_TEMP_THRESH_HIGH] = 295 K + * EC will throttle ap when temperature >= 301 K, and release throttling when + * temperature <= 294 K. + * + * Note that this structure is a sub-structure of + * ec_params_thermal_set_threshold_v1, but maintains its alignment there. */ struct ec_thermal_config { uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */ + uint32_t temp_host_release[EC_TEMP_THRESH_COUNT]; /* release levels */ uint32_t temp_fan_off; /* no active cooling needed */ uint32_t temp_fan_max; /* max active cooling needed */ -} __packed; +} __ec_align4; /* Version 1 - get config for one sensor. */ struct ec_params_thermal_get_threshold_v1 { uint32_t sensor_num; -} __packed; +} __ec_align4; /* This returns a struct ec_thermal_config */ -/* Version 1 - set config for one sensor. - * Use read-modify-write for best results! */ +/* + * Version 1 - set config for one sensor. + * Use read-modify-write for best results! + */ struct ec_params_thermal_set_threshold_v1 { uint32_t sensor_num; struct ec_thermal_config cfg; -} __packed; +} __ec_align4; /* This returns no data */ /****************************************************************************/ /* Toggle automatic fan control */ -#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x52 +#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x0052 + +/* Version 1 of input params */ +struct ec_params_auto_fan_ctrl_v1 { + uint8_t fan_idx; +} __ec_align1; -/* Get TMP006 calibration data */ -#define EC_CMD_TMP006_GET_CALIBRATION 0x53 +/* Get/Set TMP006 calibration data */ +#define EC_CMD_TMP006_GET_CALIBRATION 0x0053 +#define EC_CMD_TMP006_SET_CALIBRATION 0x0054 + +/* + * The original TMP006 calibration only needed four params, but now we need + * more. Since the algorithm is nothing but magic numbers anyway, we'll leave + * the params opaque. The v1 "get" response will include the algorithm number + * and how many params it requires. 
That way we can change the EC code without + * needing to update this file. We can also use a different algorithm on each + * sensor. + */ +/* This is the same struct for both v0 and v1. */ struct ec_params_tmp006_get_calibration { uint8_t index; -} __packed; +} __ec_align1; -struct ec_response_tmp006_get_calibration { +/* Version 0 */ +struct ec_response_tmp006_get_calibration_v0 { float s0; float b0; float b1; float b2; -} __packed; - -/* Set TMP006 calibration data */ -#define EC_CMD_TMP006_SET_CALIBRATION 0x54 +} __ec_align4; -struct ec_params_tmp006_set_calibration { +struct ec_params_tmp006_set_calibration_v0 { uint8_t index; - uint8_t reserved[3]; /* Reserved; set 0 */ + uint8_t reserved[3]; float s0; float b0; float b1; float b2; -} __packed; +} __ec_align4; + +/* Version 1 */ +struct ec_response_tmp006_get_calibration_v1 { + uint8_t algorithm; + uint8_t num_params; + uint8_t reserved[2]; + float val[0]; +} __ec_align4; + +struct ec_params_tmp006_set_calibration_v1 { + uint8_t index; + uint8_t algorithm; + uint8_t num_params; + uint8_t reserved; + float val[0]; +} __ec_align4; + /* Read raw TMP006 data */ -#define EC_CMD_TMP006_GET_RAW 0x55 +#define EC_CMD_TMP006_GET_RAW 0x0055 struct ec_params_tmp006_get_raw { uint8_t index; -} __packed; +} __ec_align1; struct ec_response_tmp006_get_raw { int32_t t; /* In 1/100 K */ int32_t v; /* In nV */ -}; +} __ec_align4; /*****************************************************************************/ /* MKBP - Matrix KeyBoard Protocol */ @@ -1990,24 +3129,24 @@ struct ec_response_tmp006_get_raw { * to obtain the instantaneous state, use EC_CMD_MKBP_INFO with the type * EC_MKBP_INFO_CURRENT and event EC_MKBP_EVENT_KEY_MATRIX. */ -#define EC_CMD_MKBP_STATE 0x60 +#define EC_CMD_MKBP_STATE 0x0060 /* * Provide information about various MKBP things. See enum ec_mkbp_info_type. */ -#define EC_CMD_MKBP_INFO 0x61 +#define EC_CMD_MKBP_INFO 0x0061 struct ec_response_mkbp_info { uint32_t rows; uint32_t cols; /* Formerly "switches", which was 0. 
*/ uint8_t reserved; -} __packed; +} __ec_align_size1; struct ec_params_mkbp_info { uint8_t info_type; uint8_t event_type; -} __packed; +} __ec_align1; enum ec_mkbp_info_type { /* @@ -2049,17 +3188,28 @@ enum ec_mkbp_info_type { }; /* Simulate key press */ -#define EC_CMD_MKBP_SIMULATE_KEY 0x62 +#define EC_CMD_MKBP_SIMULATE_KEY 0x0062 struct ec_params_mkbp_simulate_key { uint8_t col; uint8_t row; uint8_t pressed; -} __packed; +} __ec_align1; + +#define EC_CMD_GET_KEYBOARD_ID 0x0063 + +struct ec_response_keyboard_id { + uint32_t keyboard_id; +} __ec_align4; + +enum keyboard_id { + KEYBOARD_ID_UNSUPPORTED = 0, + KEYBOARD_ID_UNREADABLE = 0xffffffff, +}; /* Configure keyboard scanning */ -#define EC_CMD_MKBP_SET_CONFIG 0x64 -#define EC_CMD_MKBP_GET_CONFIG 0x65 +#define EC_CMD_MKBP_SET_CONFIG 0x0064 +#define EC_CMD_MKBP_GET_CONFIG 0x0065 /* flags */ enum mkbp_config_flags { @@ -2067,16 +3217,21 @@ enum mkbp_config_flags { }; enum mkbp_config_valid { - EC_MKBP_VALID_SCAN_PERIOD = 1 << 0, - EC_MKBP_VALID_POLL_TIMEOUT = 1 << 1, - EC_MKBP_VALID_MIN_POST_SCAN_DELAY = 1 << 3, - EC_MKBP_VALID_OUTPUT_SETTLE = 1 << 4, - EC_MKBP_VALID_DEBOUNCE_DOWN = 1 << 5, - EC_MKBP_VALID_DEBOUNCE_UP = 1 << 6, - EC_MKBP_VALID_FIFO_MAX_DEPTH = 1 << 7, + EC_MKBP_VALID_SCAN_PERIOD = BIT(0), + EC_MKBP_VALID_POLL_TIMEOUT = BIT(1), + EC_MKBP_VALID_MIN_POST_SCAN_DELAY = BIT(3), + EC_MKBP_VALID_OUTPUT_SETTLE = BIT(4), + EC_MKBP_VALID_DEBOUNCE_DOWN = BIT(5), + EC_MKBP_VALID_DEBOUNCE_UP = BIT(6), + EC_MKBP_VALID_FIFO_MAX_DEPTH = BIT(7), }; -/* Configuration for our key scanning algorithm */ +/* + * Configuration for our key scanning algorithm. + * + * Note that this is used as a sub-structure of + * ec_{params/response}_mkbp_get_config. + */ struct ec_mkbp_config { uint32_t valid_mask; /* valid fields */ uint8_t flags; /* some flags (enum mkbp_config_flags) */ @@ -2096,18 +3251,18 @@ struct ec_mkbp_config { uint16_t debounce_up_us; /* time for debounce on key up */ /* maximum depth to allow for fifo (0 = no keyscan output) */ uint8_t fifo_max_depth; -} __packed; +} __ec_align_size1; struct ec_params_mkbp_set_config { struct ec_mkbp_config config; -} __packed; +} __ec_align_size1; struct ec_response_mkbp_get_config { struct ec_mkbp_config config; -} __packed; +} __ec_align_size1; /* Run the key scan emulation */ -#define EC_CMD_KEYSCAN_SEQ_CTRL 0x66 +#define EC_CMD_KEYSCAN_SEQ_CTRL 0x0066 enum ec_keyscan_seq_cmd { EC_KEYSCAN_SEQ_STATUS = 0, /* Get status information */ @@ -2122,23 +3277,23 @@ enum ec_collect_flags { * Indicates this scan was processed by the EC. Due to timing, some * scans may be skipped. */ - EC_KEYSCAN_SEQ_FLAG_DONE = 1 << 0, + EC_KEYSCAN_SEQ_FLAG_DONE = BIT(0), }; struct ec_collect_item { uint8_t flags; /* some flags (enum ec_collect_flags) */ -}; +} __ec_align1; struct ec_params_keyscan_seq_ctrl { uint8_t cmd; /* Command to send (enum ec_keyscan_seq_cmd) */ union { - struct { + struct __ec_align1 { uint8_t active; /* still active */ uint8_t num_items; /* number of items */ /* Current item being presented */ uint8_t cur_item; } status; - struct { + struct __ec_todo_unpacked { /* * Absolute time for this scan, measured from the * start of the sequence. 
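For illustration, a minimal host-side sketch of the read-modify-write pattern implied by EC_CMD_MKBP_GET_CONFIG / EC_CMD_MKBP_SET_CONFIG and the valid_mask bits above; it is not part of this patch. ec_cmd() is a hypothetical transport helper standing in for however the caller actually issues host commands (in the kernel, cros_ec_cmd_xfer_status() plays that role), and the 20 ms debounce value is arbitrary.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: send one host command, read back its response. */
int ec_cmd(uint16_t command, uint8_t version,
	   const void *outdata, size_t outsize,
	   void *indata, size_t insize);

/* Raise the key-up debounce time, leaving every other setting untouched. */
static int example_set_debounce_up(void)
{
	struct ec_response_mkbp_get_config cur;
	struct ec_params_mkbp_set_config set;
	int ret;

	ret = ec_cmd(EC_CMD_MKBP_GET_CONFIG, 0, NULL, 0, &cur, sizeof(cur));
	if (ret)
		return ret;

	set.config = cur.config;
	set.config.debounce_up_us = 20000;
	/* Only the fields flagged in valid_mask are meant to be applied. */
	set.config.valid_mask = EC_MKBP_VALID_DEBOUNCE_UP;

	return ec_cmd(EC_CMD_MKBP_SET_CONFIG, 0, &set, sizeof(set), NULL, 0);
}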
@@ -2146,29 +3301,40 @@ struct ec_params_keyscan_seq_ctrl { uint32_t time_us; uint8_t scan[0]; /* keyscan data */ } add; - struct { + struct __ec_align1 { uint8_t start_item; /* First item to return */ uint8_t num_items; /* Number of items to return */ } collect; }; -} __packed; +} __ec_todo_packed; struct ec_result_keyscan_seq_ctrl { union { - struct { + struct __ec_todo_unpacked { uint8_t num_items; /* Number of items */ /* Data for each item */ struct ec_collect_item item[0]; } collect; }; -} __packed; +} __ec_todo_packed; /* - * Command for retrieving the next pending MKBP event from the EC device + * Get the next pending MKBP event. * - * The device replies with UNAVAILABLE if there aren't any pending events. + * Returns EC_RES_UNAVAILABLE if there is no event pending. */ -#define EC_CMD_GET_NEXT_EVENT 0x67 +#define EC_CMD_GET_NEXT_EVENT 0x0067 + +#define EC_MKBP_HAS_MORE_EVENTS_SHIFT 7 + +/* + * We use the most significant bit of the event type to indicate to the host + * that the EC has more MKBP events available to provide. + */ +#define EC_MKBP_HAS_MORE_EVENTS BIT(EC_MKBP_HAS_MORE_EVENTS_SHIFT) + +/* The mask to apply to get the raw event type */ +#define EC_MKBP_EVENT_TYPE_MASK (BIT(EC_MKBP_HAS_MORE_EVENTS_SHIFT) - 1) enum ec_mkbp_event { /* Keyboard matrix changed. The event data is the new matrix state. */ @@ -2186,9 +3352,21 @@ enum ec_mkbp_event { /* The state of the switches have changed. */ EC_MKBP_EVENT_SWITCH = 4, - /* EC sent a sysrq command */ + /* New Fingerprint sensor event, the event data is fp_events bitmap. */ + EC_MKBP_EVENT_FINGERPRINT = 5, + + /* + * Sysrq event: send emulated sysrq. The event data is sysrq, + * corresponding to the key to be pressed. + */ EC_MKBP_EVENT_SYSRQ = 6, + /* + * New 64-bit host event. + * The event data is 8 bytes of host event flags. 
+ */ + EC_MKBP_EVENT_HOST_EVENT64 = 7, + /* Notify the AP that something happened on CEC */ EC_MKBP_EVENT_CEC_EVENT = 8, @@ -2198,65 +3376,140 @@ enum ec_mkbp_event { /* Number of MKBP events */ EC_MKBP_EVENT_COUNT, }; +BUILD_ASSERT(EC_MKBP_EVENT_COUNT <= EC_MKBP_EVENT_TYPE_MASK); -union ec_response_get_next_data { - uint8_t key_matrix[13]; +union __ec_align_offset1 ec_response_get_next_data { + uint8_t key_matrix[13]; /* Unaligned */ - uint32_t host_event; + uint32_t host_event; + uint64_t host_event64; + + struct __ec_todo_unpacked { + /* For aligning the fifo_info */ + uint8_t reserved[3]; + struct ec_response_motion_sense_fifo_info info; + } sensor_fifo; + + uint32_t buttons; + + uint32_t switches; + + uint32_t fp_events; - uint32_t buttons; - uint32_t switches; - uint32_t sysrq; -} __packed; + uint32_t sysrq; -union ec_response_get_next_data_v1 { + /* CEC events from enum mkbp_cec_event */ + uint32_t cec_events; +}; + +union __ec_align_offset1 ec_response_get_next_data_v1 { uint8_t key_matrix[16]; + + /* Unaligned */ uint32_t host_event; + uint64_t host_event64; + + struct __ec_todo_unpacked { + /* For aligning the fifo_info */ + uint8_t reserved[3]; + struct ec_response_motion_sense_fifo_info info; + } sensor_fifo; + uint32_t buttons; + uint32_t switches; + + uint32_t fp_events; + uint32_t sysrq; + + /* CEC events from enum mkbp_cec_event */ uint32_t cec_events; + uint8_t cec_message[16]; -} __packed; +}; +BUILD_ASSERT(sizeof(union ec_response_get_next_data_v1) == 16); struct ec_response_get_next_event { uint8_t event_type; /* Followed by event data if any */ union ec_response_get_next_data data; -} __packed; +} __ec_align1; struct ec_response_get_next_event_v1 { uint8_t event_type; /* Followed by event data if any */ union ec_response_get_next_data_v1 data; -} __packed; +} __ec_align1; /* Bit indices for buttons and switches.*/ /* Buttons */ #define EC_MKBP_POWER_BUTTON 0 #define EC_MKBP_VOL_UP 1 #define EC_MKBP_VOL_DOWN 2 +#define EC_MKBP_RECOVERY 3 /* Switches */ #define EC_MKBP_LID_OPEN 0 #define EC_MKBP_TABLET_MODE 1 #define EC_MKBP_BASE_ATTACHED 2 +/* Run keyboard factory test scanning */ +#define EC_CMD_KEYBOARD_FACTORY_TEST 0x0068 + +struct ec_response_keyboard_factory_test { + uint16_t shorted; /* Keyboard pins are shorted */ +} __ec_align2; + +/* Fingerprint events in 'fp_events' for EC_MKBP_EVENT_FINGERPRINT */ +#define EC_MKBP_FP_RAW_EVENT(fp_events) ((fp_events) & 0x00FFFFFF) +#define EC_MKBP_FP_ERRCODE(fp_events) ((fp_events) & 0x0000000F) +#define EC_MKBP_FP_ENROLL_PROGRESS_OFFSET 4 +#define EC_MKBP_FP_ENROLL_PROGRESS(fpe) (((fpe) & 0x00000FF0) \ + >> EC_MKBP_FP_ENROLL_PROGRESS_OFFSET) +#define EC_MKBP_FP_MATCH_IDX_OFFSET 12 +#define EC_MKBP_FP_MATCH_IDX_MASK 0x0000F000 +#define EC_MKBP_FP_MATCH_IDX(fpe) (((fpe) & EC_MKBP_FP_MATCH_IDX_MASK) \ + >> EC_MKBP_FP_MATCH_IDX_OFFSET) +#define EC_MKBP_FP_ENROLL BIT(27) +#define EC_MKBP_FP_MATCH BIT(28) +#define EC_MKBP_FP_FINGER_DOWN BIT(29) +#define EC_MKBP_FP_FINGER_UP BIT(30) +#define EC_MKBP_FP_IMAGE_READY BIT(31) +/* code given by EC_MKBP_FP_ERRCODE() when EC_MKBP_FP_ENROLL is set */ +#define EC_MKBP_FP_ERR_ENROLL_OK 0 +#define EC_MKBP_FP_ERR_ENROLL_LOW_QUALITY 1 +#define EC_MKBP_FP_ERR_ENROLL_IMMOBILE 2 +#define EC_MKBP_FP_ERR_ENROLL_LOW_COVERAGE 3 +#define EC_MKBP_FP_ERR_ENROLL_INTERNAL 5 +/* Can be used to detect if image was usable for enrollment or not. 
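 *
 * For illustration only (not part of this header): a host that has read an
 * EC_MKBP_EVENT_FINGERPRINT event into a struct ec_response_get_next_event_v1
 * named evt could decode the payload along these lines,
 *
 *	uint32_t fpe = evt.data.fp_events;
 *
 *	if ((fpe & EC_MKBP_FP_ENROLL) &&
 *	    !(EC_MKBP_FP_ERRCODE(fpe) & EC_MKBP_FP_ERR_ENROLL_PROBLEM_MASK))
 *		progress = EC_MKBP_FP_ENROLL_PROGRESS(fpe);
 *
 * with progress being whatever enrollment counter the caller keeps.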
*/ +#define EC_MKBP_FP_ERR_ENROLL_PROBLEM_MASK 1 +/* code given by EC_MKBP_FP_ERRCODE() when EC_MKBP_FP_MATCH is set */ +#define EC_MKBP_FP_ERR_MATCH_NO 0 +#define EC_MKBP_FP_ERR_MATCH_NO_INTERNAL 6 +#define EC_MKBP_FP_ERR_MATCH_NO_TEMPLATES 7 +#define EC_MKBP_FP_ERR_MATCH_NO_LOW_QUALITY 2 +#define EC_MKBP_FP_ERR_MATCH_NO_LOW_COVERAGE 4 +#define EC_MKBP_FP_ERR_MATCH_YES 1 +#define EC_MKBP_FP_ERR_MATCH_YES_UPDATED 3 +#define EC_MKBP_FP_ERR_MATCH_YES_UPDATE_FAILED 5 + + /*****************************************************************************/ /* Temperature sensor commands */ /* Read temperature sensor info */ -#define EC_CMD_TEMP_SENSOR_GET_INFO 0x70 +#define EC_CMD_TEMP_SENSOR_GET_INFO 0x0070 struct ec_params_temp_sensor_get_info { uint8_t id; -} __packed; +} __ec_align1; struct ec_response_temp_sensor_get_info { char sensor_name[32]; uint8_t sensor_type; -} __packed; +} __ec_align1; /*****************************************************************************/ @@ -2269,49 +3522,131 @@ struct ec_response_temp_sensor_get_info { /*****************************************************************************/ /* Host event commands */ + +/* Obsolete. New implementation should use EC_CMD_HOST_EVENT instead */ /* * Host event mask params and response structures, shared by all of the host * event commands below. */ struct ec_params_host_event_mask { uint32_t mask; -} __packed; +} __ec_align4; struct ec_response_host_event_mask { uint32_t mask; -} __packed; +} __ec_align4; /* These all use ec_response_host_event_mask */ -#define EC_CMD_HOST_EVENT_GET_B 0x87 -#define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x88 -#define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x89 -#define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x8d +#define EC_CMD_HOST_EVENT_GET_B 0x0087 +#define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x0088 +#define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x0089 +#define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x008D /* These all use ec_params_host_event_mask */ -#define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x8a -#define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x8b -#define EC_CMD_HOST_EVENT_CLEAR 0x8c -#define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x8e -#define EC_CMD_HOST_EVENT_CLEAR_B 0x8f +#define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x008A +#define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x008B +#define EC_CMD_HOST_EVENT_CLEAR 0x008C +#define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x008E +#define EC_CMD_HOST_EVENT_CLEAR_B 0x008F + +/* + * Unified host event programming interface - Should be used by newer versions + * of BIOS/OS to program host events and masks + */ + +struct ec_params_host_event { + + /* Action requested by host - one of enum ec_host_event_action. */ + uint8_t action; + + /* + * Mask type that the host requested the action on - one of + * enum ec_host_event_mask_type. + */ + uint8_t mask_type; + + /* Set to 0, ignore on read */ + uint16_t reserved; + + /* Value to be used in case of set operations. */ + uint64_t value; +} __ec_align4; + +/* + * Response structure returned by EC_CMD_HOST_EVENT. + * Update the value on a GET request. Set to 0 on GET/CLEAR + */ + +struct ec_response_host_event { + + /* Mask value in case of get operation */ + uint64_t value; +} __ec_align4; + +enum ec_host_event_action { + /* + * params.value is ignored. 
Value of mask_type populated + * in response.value + */ + EC_HOST_EVENT_GET, + + /* Bits in params.value are set */ + EC_HOST_EVENT_SET, + + /* Bits in params.value are cleared */ + EC_HOST_EVENT_CLEAR, +}; + +enum ec_host_event_mask_type { + + /* Main host event copy */ + EC_HOST_EVENT_MAIN, + + /* Copy B of host events */ + EC_HOST_EVENT_B, + + /* SCI Mask */ + EC_HOST_EVENT_SCI_MASK, + + /* SMI Mask */ + EC_HOST_EVENT_SMI_MASK, + + /* Mask of events that should be always reported in hostevents */ + EC_HOST_EVENT_ALWAYS_REPORT_MASK, + + /* Active wake mask */ + EC_HOST_EVENT_ACTIVE_WAKE_MASK, + + /* Lazy wake mask for S0ix */ + EC_HOST_EVENT_LAZY_WAKE_MASK_S0IX, + + /* Lazy wake mask for S3 */ + EC_HOST_EVENT_LAZY_WAKE_MASK_S3, + + /* Lazy wake mask for S5 */ + EC_HOST_EVENT_LAZY_WAKE_MASK_S5, +}; + +#define EC_CMD_HOST_EVENT 0x00A4 /*****************************************************************************/ /* Switch commands */ /* Enable/disable LCD backlight */ -#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x90 +#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x0090 struct ec_params_switch_enable_backlight { uint8_t enabled; -} __packed; +} __ec_align1; /* Enable/disable WLAN/Bluetooth */ -#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x91 +#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x0091 #define EC_VER_SWITCH_ENABLE_WIRELESS 1 /* Version 0 params; no response */ struct ec_params_switch_enable_wireless_v0 { uint8_t enabled; -} __packed; +} __ec_align1; /* Version 1 params */ struct ec_params_switch_enable_wireless_v1 { @@ -2330,7 +3665,7 @@ struct ec_params_switch_enable_wireless_v1 { /* Which flags to copy from suspend_flags */ uint8_t suspend_mask; -} __packed; +} __ec_align1; /* Version 1 response */ struct ec_response_switch_enable_wireless_v1 { @@ -2339,55 +3674,56 @@ struct ec_response_switch_enable_wireless_v1 { /* Flags to leave enabled in S3 */ uint8_t suspend_flags; -} __packed; +} __ec_align1; /*****************************************************************************/ /* GPIO commands. Only available on EC if write protect has been disabled. */ /* Set GPIO output value */ -#define EC_CMD_GPIO_SET 0x92 +#define EC_CMD_GPIO_SET 0x0092 struct ec_params_gpio_set { char name[32]; uint8_t val; -} __packed; +} __ec_align1; /* Get GPIO value */ -#define EC_CMD_GPIO_GET 0x93 +#define EC_CMD_GPIO_GET 0x0093 /* Version 0 of input params and response */ struct ec_params_gpio_get { char name[32]; -} __packed; +} __ec_align1; + struct ec_response_gpio_get { uint8_t val; -} __packed; +} __ec_align1; /* Version 1 of input params and response */ struct ec_params_gpio_get_v1 { uint8_t subcmd; union { - struct { + struct __ec_align1 { char name[32]; } get_value_by_name; - struct { + struct __ec_align1 { uint8_t index; } get_info; }; -} __packed; +} __ec_align1; struct ec_response_gpio_get_v1 { union { - struct { + struct __ec_align1 { uint8_t val; } get_value_by_name, get_count; - struct { + struct __ec_todo_unpacked { uint8_t val; char name[32]; uint32_t flags; } get_info; }; -} __packed; +} __ec_todo_packed; enum gpio_get_subcmd { EC_GPIO_GET_BY_NAME = 0, @@ -2399,25 +3735,28 @@ enum gpio_get_subcmd { /* I2C commands. Only available when flash write protect is unlocked. */ /* - * TODO(crosbug.com/p/23570): These commands are deprecated, and will be - * removed soon. Use EC_CMD_I2C_XFER instead. + * CAUTION: These commands are deprecated, and are not supported anymore in EC + * builds >= 8398.0.0 (see crosbug.com/p/23570). + * + * Use EC_CMD_I2C_PASSTHRU instead. 
*/ /* Read I2C bus */ -#define EC_CMD_I2C_READ 0x94 +#define EC_CMD_I2C_READ 0x0094 struct ec_params_i2c_read { uint16_t addr; /* 8-bit address (7-bit shifted << 1) */ uint8_t read_size; /* Either 8 or 16. */ uint8_t port; uint8_t offset; -} __packed; +} __ec_align_size1; + struct ec_response_i2c_read { uint16_t data; -} __packed; +} __ec_align2; /* Write I2C bus */ -#define EC_CMD_I2C_WRITE 0x95 +#define EC_CMD_I2C_WRITE 0x0095 struct ec_params_i2c_write { uint16_t data; @@ -2425,7 +3764,7 @@ struct ec_params_i2c_write { uint8_t write_size; /* Either 8 or 16. */ uint8_t port; uint8_t offset; -} __packed; +} __ec_align_size1; /*****************************************************************************/ /* Charge state commands. Only available when flash write protect unlocked. */ @@ -2433,7 +3772,7 @@ struct ec_params_i2c_write { /* Force charge state machine to stop charging the battery or force it to * discharge the battery. */ -#define EC_CMD_CHARGE_CONTROL 0x96 +#define EC_CMD_CHARGE_CONTROL 0x0096 #define EC_VER_CHARGE_CONTROL 1 enum ec_charge_control_mode { @@ -2444,13 +3783,12 @@ enum ec_charge_control_mode { struct ec_params_charge_control { uint32_t mode; /* enum charge_control_mode */ -} __packed; +} __ec_align4; /*****************************************************************************/ -/* Console commands. Only available when flash write protect is unlocked. */ /* Snapshot console output buffer for use by EC_CMD_CONSOLE_READ. */ -#define EC_CMD_CONSOLE_SNAPSHOT 0x97 +#define EC_CMD_CONSOLE_SNAPSHOT 0x0097 /* * Read data from the saved snapshot. If the subcmd parameter is @@ -2464,7 +3802,7 @@ struct ec_params_charge_control { * Response is null-terminated string. Empty string, if there is no more * remaining output. */ -#define EC_CMD_CONSOLE_READ 0x98 +#define EC_CMD_CONSOLE_READ 0x0098 enum ec_console_read_subcmd { CONSOLE_READ_NEXT = 0, @@ -2473,7 +3811,7 @@ enum ec_console_read_subcmd { struct ec_params_console_read_v1 { uint8_t subcmd; /* enum ec_console_read_subcmd */ -} __packed; +} __ec_align1; /*****************************************************************************/ @@ -2484,14 +3822,13 @@ struct ec_params_console_read_v1 { * EC_RES_SUCCESS if the command was successful. * EC_RES_ERROR if the cut off command failed. */ +#define EC_CMD_BATTERY_CUT_OFF 0x0099 -#define EC_CMD_BATTERY_CUT_OFF 0x99 - -#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0) +#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN BIT(0) struct ec_params_battery_cutoff { uint8_t flags; -} __packed; +} __ec_align1; /*****************************************************************************/ /* USB port mux control. */ @@ -2499,11 +3836,11 @@ struct ec_params_battery_cutoff { /* * Switch USB mux or return to automatic switching. */ -#define EC_CMD_USB_MUX 0x9a +#define EC_CMD_USB_MUX 0x009A struct ec_params_usb_mux { uint8_t mux; -} __packed; +} __ec_align1; /*****************************************************************************/ /* LDOs / FETs control. */ @@ -2516,25 +3853,25 @@ enum ec_ldo_state { /* * Switch on/off a LDO. */ -#define EC_CMD_LDO_SET 0x9b +#define EC_CMD_LDO_SET 0x009B struct ec_params_ldo_set { uint8_t index; uint8_t state; -} __packed; +} __ec_align1; /* * Get LDO state. */ -#define EC_CMD_LDO_GET 0x9c +#define EC_CMD_LDO_GET 0x009C struct ec_params_ldo_get { uint8_t index; -} __packed; +} __ec_align1; struct ec_response_ldo_get { uint8_t state; -} __packed; +} __ec_align1; /*****************************************************************************/ /* Power info. 
*/ @@ -2542,7 +3879,7 @@ struct ec_response_ldo_get { /* * Get power info. */ -#define EC_CMD_POWER_INFO 0x9d +#define EC_CMD_POWER_INFO 0x009D struct ec_response_power_info { uint32_t usb_dev_type; @@ -2550,21 +3887,21 @@ struct ec_response_power_info { uint16_t voltage_system; uint16_t current_system; uint16_t usb_current_limit; -} __packed; +} __ec_align4; /*****************************************************************************/ /* I2C passthru command */ -#define EC_CMD_I2C_PASSTHRU 0x9e +#define EC_CMD_I2C_PASSTHRU 0x009E /* Read data; if not present, message is a write */ -#define EC_I2C_FLAG_READ (1 << 15) +#define EC_I2C_FLAG_READ BIT(15) /* Mask for address */ #define EC_I2C_ADDR_MASK 0x3ff -#define EC_I2C_STATUS_NAK (1 << 0) /* Transfer was not acknowledged */ -#define EC_I2C_STATUS_TIMEOUT (1 << 1) /* Timeout during transfer */ +#define EC_I2C_STATUS_NAK BIT(0) /* Transfer was not acknowledged */ +#define EC_I2C_STATUS_TIMEOUT BIT(1) /* Timeout during transfer */ /* Any error */ #define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT) @@ -2572,49 +3909,49 @@ struct ec_response_power_info { struct ec_params_i2c_passthru_msg { uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */ uint16_t len; /* Number of bytes to read or write */ -} __packed; +} __ec_align2; struct ec_params_i2c_passthru { uint8_t port; /* I2C port number */ uint8_t num_msgs; /* Number of messages */ struct ec_params_i2c_passthru_msg msg[]; /* Data to write for all messages is concatenated here */ -} __packed; +} __ec_align2; struct ec_response_i2c_passthru { uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) */ uint8_t num_msgs; /* Number of messages processed */ uint8_t data[]; /* Data read by messages concatenated here */ -} __packed; +} __ec_align1; /*****************************************************************************/ /* Power button hang detect */ -#define EC_CMD_HANG_DETECT 0x9f +#define EC_CMD_HANG_DETECT 0x009F /* Reasons to start hang detection timer */ /* Power button pressed */ -#define EC_HANG_START_ON_POWER_PRESS (1 << 0) +#define EC_HANG_START_ON_POWER_PRESS BIT(0) /* Lid closed */ -#define EC_HANG_START_ON_LID_CLOSE (1 << 1) +#define EC_HANG_START_ON_LID_CLOSE BIT(1) /* Lid opened */ -#define EC_HANG_START_ON_LID_OPEN (1 << 2) +#define EC_HANG_START_ON_LID_OPEN BIT(2) /* Start of AP S3->S0 transition (booting or resuming from suspend) */ -#define EC_HANG_START_ON_RESUME (1 << 3) +#define EC_HANG_START_ON_RESUME BIT(3) /* Reasons to cancel hang detection */ /* Power button released */ -#define EC_HANG_STOP_ON_POWER_RELEASE (1 << 8) +#define EC_HANG_STOP_ON_POWER_RELEASE BIT(8) /* Any host command from AP received */ -#define EC_HANG_STOP_ON_HOST_COMMAND (1 << 9) +#define EC_HANG_STOP_ON_HOST_COMMAND BIT(9) /* Stop on end of AP S0->S3 transition (suspending or shutting down) */ -#define EC_HANG_STOP_ON_SUSPEND (1 << 10) +#define EC_HANG_STOP_ON_SUSPEND BIT(10) /* * If this flag is set, all the other fields are ignored, and the hang detect @@ -2622,14 +3959,14 @@ struct ec_response_i2c_passthru { * without reconfiguring any of the other hang detect settings. Note that * you must previously have configured the timeouts. */ -#define EC_HANG_START_NOW (1 << 30) +#define EC_HANG_START_NOW BIT(30) /* * If this flag is set, all the other fields are ignored (including * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer * without reconfiguring any of the other hang detect settings. 
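 *
 * For illustration only (not part of this header): to stop the timer without
 * touching the programmed timeouts, the AP would send EC_CMD_HANG_DETECT with
 *
 *	struct ec_params_hang_detect req = { .flags = EC_HANG_STOP_NOW };
 *
 * and the remaining fields, left at zero, are ignored.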
*/ -#define EC_HANG_STOP_NOW (1 << 31) +#define EC_HANG_STOP_NOW BIT(31) struct ec_params_hang_detect { /* Flags; see EC_HANG_* */ @@ -2640,7 +3977,7 @@ struct ec_params_hang_detect { /* Timeout in msec before generating warm reboot, if enabled */ uint16_t warm_reboot_timeout_msec; -} __packed; +} __ec_align4; /*****************************************************************************/ /* Commands for battery charging */ @@ -2649,7 +3986,7 @@ struct ec_params_hang_detect { * This is the single catch-all host command to exchange data regarding the * charge state machine (v2 and up). */ -#define EC_CMD_CHARGE_STATE 0xa0 +#define EC_CMD_CHARGE_STATE 0x00A0 /* Subcommands for this host command */ enum charge_state_command { @@ -2669,6 +4006,11 @@ enum charge_state_params { CS_PARAM_CHG_INPUT_CURRENT, /* charger input current limit */ CS_PARAM_CHG_STATUS, /* charger-specific status */ CS_PARAM_CHG_OPTION, /* charger-specific options */ + CS_PARAM_LIMIT_POWER, /* + * Check if power is limited due to + * low battery and / or a weak external + * charger. READ ONLY. + */ /* How many so far? */ CS_NUM_BASE_PARAMS, @@ -2676,30 +4018,39 @@ enum charge_state_params { CS_PARAM_CUSTOM_PROFILE_MIN = 0x10000, CS_PARAM_CUSTOM_PROFILE_MAX = 0x1ffff, + /* Range for CONFIG_CHARGE_STATE_DEBUG params */ + CS_PARAM_DEBUG_MIN = 0x20000, + CS_PARAM_DEBUG_CTL_MODE = 0x20000, + CS_PARAM_DEBUG_MANUAL_MODE, + CS_PARAM_DEBUG_SEEMS_DEAD, + CS_PARAM_DEBUG_SEEMS_DISCONNECTED, + CS_PARAM_DEBUG_BATT_REMOVED, + CS_PARAM_DEBUG_MANUAL_CURRENT, + CS_PARAM_DEBUG_MANUAL_VOLTAGE, + CS_PARAM_DEBUG_MAX = 0x2ffff, + /* Other custom param ranges go here... */ }; struct ec_params_charge_state { uint8_t cmd; /* enum charge_state_command */ union { - struct { - /* no args */ - } get_state; + /* get_state has no args */ - struct { + struct __ec_todo_unpacked { uint32_t param; /* enum charge_state_param */ } get_param; - struct { + struct __ec_todo_unpacked { uint32_t param; /* param to set */ uint32_t value; /* value to set */ } set_param; }; -} __packed; +} __ec_todo_packed; struct ec_response_charge_state { union { - struct { + struct __ec_align4 { int ac; int chg_voltage; int chg_current; @@ -2707,24 +4058,23 @@ struct ec_response_charge_state { int batt_state_of_charge; } get_state; - struct { + struct __ec_align4 { uint32_t value; } get_param; - struct { - /* no return values */ - } set_param; + + /* set_param returns no args */ }; -} __packed; +} __ec_align4; /* * Set maximum battery charging current. */ -#define EC_CMD_CHARGE_CURRENT_LIMIT 0xa1 +#define EC_CMD_CHARGE_CURRENT_LIMIT 0x00A1 struct ec_params_current_limit { uint32_t limit; /* in mA */ -} __packed; +} __ec_align4; /* * Set maximum external voltage / current. @@ -2735,23 +4085,69 @@ struct ec_params_current_limit { struct ec_params_external_power_limit_v1 { uint16_t current_lim; /* in mA, or EC_POWER_LIMIT_NONE to clear limit */ uint16_t voltage_lim; /* in mV, or EC_POWER_LIMIT_NONE to clear limit */ -} __packed; +} __ec_align2; #define EC_POWER_LIMIT_NONE 0xffff +/* + * Set maximum voltage & current of a dedicated charge port + */ +#define EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT 0x00A3 + +struct ec_params_dedicated_charger_limit { + uint16_t current_lim; /* in mA */ + uint16_t voltage_lim; /* in mV */ +} __ec_align2; + +/*****************************************************************************/ +/* Hibernate/Deep Sleep Commands */ + +/* Set the delay before going into hibernation. 
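 *
 * For illustration only (not part of this header): because a zero 'seconds'
 * value leaves the setting untouched, the current delay can be read back with
 *
 *	struct ec_params_hibernation_delay req = { .seconds = 0 };
 *
 * after which the response reports time_g3, time_remaining and
 * hibernate_delay.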
*/ +#define EC_CMD_HIBERNATION_DELAY 0x00A8 + +struct ec_params_hibernation_delay { + /* + * Seconds to wait in G3 before hibernate. Pass in 0 to read the + * current settings without changing them. + */ + uint32_t seconds; +} __ec_align4; + +struct ec_response_hibernation_delay { + /* + * The current time in seconds in which the system has been in the G3 + * state. This value is reset if the EC transitions out of G3. + */ + uint32_t time_g3; + + /* + * The current time remaining in seconds until the EC should hibernate. + * This value is also reset if the EC transitions out of G3. + */ + uint32_t time_remaining; + + /* + * The current time in seconds that the EC should wait in G3 before + * hibernating. + */ + uint32_t hibernate_delay; +} __ec_align4; + /* Inform the EC when entering a sleep state */ -#define EC_CMD_HOST_SLEEP_EVENT 0xa9 +#define EC_CMD_HOST_SLEEP_EVENT 0x00A9 enum host_sleep_event { HOST_SLEEP_EVENT_S3_SUSPEND = 1, HOST_SLEEP_EVENT_S3_RESUME = 2, HOST_SLEEP_EVENT_S0IX_SUSPEND = 3, - HOST_SLEEP_EVENT_S0IX_RESUME = 4 + HOST_SLEEP_EVENT_S0IX_RESUME = 4, + /* S3 suspend with additional enabled wake sources */ + HOST_SLEEP_EVENT_S3_WAKEABLE_SUSPEND = 5, }; struct ec_params_host_sleep_event { uint8_t sleep_event; -} __packed; +} __ec_align1; /* * Use a default timeout value (CONFIG_SLEEP_TIMEOUT_MS) for detecting sleep @@ -2782,7 +4178,7 @@ struct ec_params_host_sleep_event_v1 { /* No parameters for non-suspend messages. */ }; -} __packed; +} __ec_align2; /* A timeout occurred when this bit is set */ #define EC_HOST_RESUME_SLEEP_TIMEOUT 0x80000000 @@ -2808,42 +4204,72 @@ struct ec_response_host_sleep_event_v1 { /* No response fields for non-resume messages. */ }; -} __packed; +} __ec_align4; + +/*****************************************************************************/ +/* Device events */ +#define EC_CMD_DEVICE_EVENT 0x00AA + +enum ec_device_event { + EC_DEVICE_EVENT_TRACKPAD, + EC_DEVICE_EVENT_DSP, + EC_DEVICE_EVENT_WIFI, +}; + +enum ec_device_event_param { + /* Get and clear pending device events */ + EC_DEVICE_EVENT_PARAM_GET_CURRENT_EVENTS, + /* Get device event mask */ + EC_DEVICE_EVENT_PARAM_GET_ENABLED_EVENTS, + /* Set device event mask */ + EC_DEVICE_EVENT_PARAM_SET_ENABLED_EVENTS, +}; + +#define EC_DEVICE_EVENT_MASK(event_code) BIT(event_code % 32) + +struct ec_params_device_event { + uint32_t event_mask; + uint8_t param; +} __ec_align_size1; + +struct ec_response_device_event { + uint32_t event_mask; +} __ec_align4; /*****************************************************************************/ /* Smart battery pass-through */ /* Get / Set 16-bit smart battery registers */ -#define EC_CMD_SB_READ_WORD 0xb0 -#define EC_CMD_SB_WRITE_WORD 0xb1 +#define EC_CMD_SB_READ_WORD 0x00B0 +#define EC_CMD_SB_WRITE_WORD 0x00B1 /* Get / Set string smart battery parameters * formatted as SMBUS "block". 
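 *
 * For illustration only (not part of this header): reading the pack's device
 * name (SBS register 0x21) would send EC_CMD_SB_READ_BLOCK with
 *
 *	struct ec_params_sb_rd req = { .reg = 0x21 };
 *
 * and the EC returns up to 32 bytes of string data in
 * ec_response_sb_rd_block.data.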
*/ -#define EC_CMD_SB_READ_BLOCK 0xb2 -#define EC_CMD_SB_WRITE_BLOCK 0xb3 +#define EC_CMD_SB_READ_BLOCK 0x00B2 +#define EC_CMD_SB_WRITE_BLOCK 0x00B3 struct ec_params_sb_rd { uint8_t reg; -} __packed; +} __ec_align1; struct ec_response_sb_rd_word { uint16_t value; -} __packed; +} __ec_align2; struct ec_params_sb_wr_word { uint8_t reg; uint16_t value; -} __packed; +} __ec_align1; struct ec_response_sb_rd_block { uint8_t data[32]; -} __packed; +} __ec_align1; struct ec_params_sb_wr_block { uint8_t reg; uint16_t data[32]; -} __packed; +} __ec_align1; /*****************************************************************************/ /* Battery vendor parameters @@ -2854,7 +4280,7 @@ struct ec_params_sb_wr_block { * requested value. */ -#define EC_CMD_BATTERY_VENDOR_PARAM 0xb4 +#define EC_CMD_BATTERY_VENDOR_PARAM 0x00B4 enum ec_battery_vendor_param_mode { BATTERY_VENDOR_PARAM_MODE_GET = 0, @@ -2865,16 +4291,187 @@ struct ec_params_battery_vendor_param { uint32_t param; uint32_t value; uint8_t mode; -} __packed; +} __ec_align_size1; struct ec_response_battery_vendor_param { uint32_t value; -} __packed; +} __ec_align4; + +/*****************************************************************************/ +/* + * Smart Battery Firmware Update Commands + */ +#define EC_CMD_SB_FW_UPDATE 0x00B5 + +enum ec_sb_fw_update_subcmd { + EC_SB_FW_UPDATE_PREPARE = 0x0, + EC_SB_FW_UPDATE_INFO = 0x1, /*query sb info */ + EC_SB_FW_UPDATE_BEGIN = 0x2, /*check if protected */ + EC_SB_FW_UPDATE_WRITE = 0x3, /*check if protected */ + EC_SB_FW_UPDATE_END = 0x4, + EC_SB_FW_UPDATE_STATUS = 0x5, + EC_SB_FW_UPDATE_PROTECT = 0x6, + EC_SB_FW_UPDATE_MAX = 0x7, +}; + +#define SB_FW_UPDATE_CMD_WRITE_BLOCK_SIZE 32 +#define SB_FW_UPDATE_CMD_STATUS_SIZE 2 +#define SB_FW_UPDATE_CMD_INFO_SIZE 8 + +struct ec_sb_fw_update_header { + uint16_t subcmd; /* enum ec_sb_fw_update_subcmd */ + uint16_t fw_id; /* firmware id */ +} __ec_align4; + +struct ec_params_sb_fw_update { + struct ec_sb_fw_update_header hdr; + union { + /* EC_SB_FW_UPDATE_PREPARE = 0x0 */ + /* EC_SB_FW_UPDATE_INFO = 0x1 */ + /* EC_SB_FW_UPDATE_BEGIN = 0x2 */ + /* EC_SB_FW_UPDATE_END = 0x4 */ + /* EC_SB_FW_UPDATE_STATUS = 0x5 */ + /* EC_SB_FW_UPDATE_PROTECT = 0x6 */ + /* Those have no args */ + + /* EC_SB_FW_UPDATE_WRITE = 0x3 */ + struct __ec_align4 { + uint8_t data[SB_FW_UPDATE_CMD_WRITE_BLOCK_SIZE]; + } write; + }; +} __ec_align4; + +struct ec_response_sb_fw_update { + union { + /* EC_SB_FW_UPDATE_INFO = 0x1 */ + struct __ec_align1 { + uint8_t data[SB_FW_UPDATE_CMD_INFO_SIZE]; + } info; + + /* EC_SB_FW_UPDATE_STATUS = 0x5 */ + struct __ec_align1 { + uint8_t data[SB_FW_UPDATE_CMD_STATUS_SIZE]; + } status; + }; +} __ec_align1; + +/* + * Entering Verified Boot Mode Command + * Default mode is VBOOT_MODE_NORMAL if EC did not receive this command. + * Valid Modes are: normal, developer, and recovery. + */ +#define EC_CMD_ENTERING_MODE 0x00B6 + +struct ec_params_entering_mode { + int vboot_mode; +} __ec_align4; + +#define VBOOT_MODE_NORMAL 0 +#define VBOOT_MODE_DEVELOPER 1 +#define VBOOT_MODE_RECOVERY 2 /*****************************************************************************/ +/* + * I2C passthru protection command: Protects I2C tunnels against access on + * certain addresses (board-specific). 
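 *
 * For illustration only (not part of this header): a host would typically
 * query EC_CMD_I2C_PASSTHRU_PROTECT_STATUS first and then, before handing the
 * bus to less trusted software, lock it down with
 *
 *	struct ec_params_i2c_passthru_protect req = {
 *		.subcmd = EC_CMD_I2C_PASSTHRU_PROTECT_ENABLE,
 *		.port   = 0,
 *	};
 *
 * No disable subcommand is defined, so the protection presumably stays in
 * force until the EC next reboots.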
+ */ +#define EC_CMD_I2C_PASSTHRU_PROTECT 0x00B7 + +enum ec_i2c_passthru_protect_subcmd { + EC_CMD_I2C_PASSTHRU_PROTECT_STATUS = 0x0, + EC_CMD_I2C_PASSTHRU_PROTECT_ENABLE = 0x1, +}; + +struct ec_params_i2c_passthru_protect { + uint8_t subcmd; + uint8_t port; /* I2C port number */ +} __ec_align1; + +struct ec_response_i2c_passthru_protect { + uint8_t status; /* Status flags (0: unlocked, 1: locked) */ +} __ec_align1; + + +/*****************************************************************************/ +/* + * HDMI CEC commands + * + * These commands are for sending and receiving message via HDMI CEC + */ + +#define MAX_CEC_MSG_LEN 16 + +/* CEC message from the AP to be written on the CEC bus */ +#define EC_CMD_CEC_WRITE_MSG 0x00B8 + +/** + * struct ec_params_cec_write - Message to write to the CEC bus + * @msg: message content to write to the CEC bus + */ +struct ec_params_cec_write { + uint8_t msg[MAX_CEC_MSG_LEN]; +} __ec_align1; + +/* Set various CEC parameters */ +#define EC_CMD_CEC_SET 0x00BA + +/** + * struct ec_params_cec_set - CEC parameters set + * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS + * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC + * or 1 to enable CEC functionality, in case cmd is + * CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical + * address between 0 and 15 or 0xff to unregister + */ +struct ec_params_cec_set { + uint8_t cmd; /* enum cec_command */ + uint8_t val; +} __ec_align1; + +/* Read various CEC parameters */ +#define EC_CMD_CEC_GET 0x00BB + +/** + * struct ec_params_cec_get - CEC parameters get + * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS + */ +struct ec_params_cec_get { + uint8_t cmd; /* enum cec_command */ +} __ec_align1; + +/** + * struct ec_response_cec_get - CEC parameters get response + * @val: in case cmd was CEC_CMD_ENABLE, this field will 0 if CEC is + * disabled or 1 if CEC functionality is enabled, + * in case cmd was CEC_CMD_LOGICAL_ADDRESS, this will encode the + * configured logical address between 0 and 15 or 0xff if unregistered + */ +struct ec_response_cec_get { + uint8_t val; +} __ec_align1; + +/* CEC parameters command */ +enum cec_command { + /* CEC reading, writing and events enable */ + CEC_CMD_ENABLE, + /* CEC logical address */ + CEC_CMD_LOGICAL_ADDRESS, +}; + +/* Events from CEC to AP */ +enum mkbp_cec_event { + /* Outgoing message was acknowledged by a follower */ + EC_MKBP_CEC_SEND_OK = BIT(0), + /* Outgoing message was not acknowledged */ + EC_MKBP_CEC_SEND_FAILED = BIT(1), +}; + +/*****************************************************************************/ + /* Commands for I2S recording on audio codec. */ #define EC_CMD_CODEC_I2S 0x00BC +#define EC_WOV_I2S_SAMPLE_RATE 48000 enum ec_codec_i2s_subcmd { EC_CODEC_SET_SAMPLE_DEPTH = 0x0, @@ -2884,6 +4481,7 @@ enum ec_codec_i2s_subcmd { EC_CODEC_I2S_SET_CONFIG = 0x4, EC_CODEC_I2S_SET_TDM_CONFIG = 0x5, EC_CODEC_I2S_SET_BCLK = 0x6, + EC_CODEC_I2S_SUBCMD_COUNT = 0x7, }; enum ec_sample_depth_value { @@ -2900,10 +4498,23 @@ enum ec_i2s_config { EC_DAI_FMT_PCM_TDM = 5, }; -struct ec_param_codec_i2s { - /* - * enum ec_codec_i2s_subcmd - */ +/* + * For subcommand EC_CODEC_GET_GAIN. 
+ */ +struct __ec_align1 ec_codec_i2s_gain { + uint8_t left; + uint8_t right; +}; + +struct __ec_todo_unpacked ec_param_codec_i2s_tdm { + int16_t ch0_delay; /* 0 to 496 */ + int16_t ch1_delay; /* -1 to 496 */ + uint8_t adjacent_to_ch0; + uint8_t adjacent_to_ch1; +}; + +struct __ec_todo_packed ec_param_codec_i2s { + /* enum ec_codec_i2s_subcmd */ uint8_t cmd; union { /* @@ -2916,10 +4527,7 @@ struct ec_param_codec_i2s { * EC_CODEC_SET_GAIN * Value should be 0~43 for both channels. */ - struct ec_param_codec_i2s_set_gain { - uint8_t left; - uint8_t right; - } __packed gain; + struct ec_codec_i2s_gain gain; /* * EC_CODEC_I2S_ENABLE @@ -2928,7 +4536,7 @@ struct ec_param_codec_i2s { uint8_t i2s_enable; /* - * EC_CODEC_I2S_SET_COFNIG + * EC_CODEC_I2S_SET_CONFIG * Value should be one of ec_i2s_config. */ uint8_t i2s_config; @@ -2937,33 +4545,15 @@ struct ec_param_codec_i2s { * EC_CODEC_I2S_SET_TDM_CONFIG * Value should be one of ec_i2s_config. */ - struct ec_param_codec_i2s_tdm { - /* - * 0 to 496 - */ - int16_t ch0_delay; - /* - * -1 to 496 - */ - int16_t ch1_delay; - uint8_t adjacent_to_ch0; - uint8_t adjacent_to_ch1; - } __packed tdm_param; + struct ec_param_codec_i2s_tdm tdm_param; /* * EC_CODEC_I2S_SET_BCLK */ uint32_t bclk; }; -} __packed; +}; -/* - * For subcommand EC_CODEC_GET_GAIN. - */ -struct ec_response_codec_gain { - uint8_t left; - uint8_t right; -} __packed; /*****************************************************************************/ /* System commands */ @@ -2972,27 +4562,29 @@ struct ec_response_codec_gain { * TODO(crosbug.com/p/23747): This is a confusing name, since it doesn't * necessarily reboot the EC. Rename to "image" or something similar? */ -#define EC_CMD_REBOOT_EC 0xd2 +#define EC_CMD_REBOOT_EC 0x00D2 /* Command */ enum ec_reboot_cmd { EC_REBOOT_CANCEL = 0, /* Cancel a pending reboot */ EC_REBOOT_JUMP_RO = 1, /* Jump to RO without rebooting */ - EC_REBOOT_JUMP_RW = 2, /* Jump to RW without rebooting */ + EC_REBOOT_JUMP_RW = 2, /* Jump to active RW without rebooting */ /* (command 3 was jump to RW-B) */ EC_REBOOT_COLD = 4, /* Cold-reboot */ EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */ - EC_REBOOT_HIBERNATE = 6 /* Hibernate EC */ + EC_REBOOT_HIBERNATE = 6, /* Hibernate EC */ + EC_REBOOT_HIBERNATE_CLEAR_AP_OFF = 7, /* and clears AP_OFF flag */ }; /* Flags for ec_params_reboot_ec.reboot_flags */ -#define EC_REBOOT_FLAG_RESERVED0 (1 << 0) /* Was recovery request */ -#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN (1 << 1) /* Reboot after AP shutdown */ +#define EC_REBOOT_FLAG_RESERVED0 BIT(0) /* Was recovery request */ +#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN BIT(1) /* Reboot after AP shutdown */ +#define EC_REBOOT_FLAG_SWITCH_RW_SLOT BIT(2) /* Switch RW slot */ struct ec_params_reboot_ec { uint8_t cmd; /* enum ec_reboot_cmd */ uint8_t flags; /* See EC_REBOOT_FLAG_* */ -} __packed; +} __ec_align1; /* * Get information on last EC panic. @@ -3000,196 +4592,7 @@ struct ec_params_reboot_ec { * Returns variable-length platform-dependent panic information. See panic.h * for details. */ -#define EC_CMD_GET_PANIC_INFO 0xd3 - -/*****************************************************************************/ -/* - * ACPI commands - * - * These are valid ONLY on the ACPI command/data port. - */ - -/* - * ACPI Read Embedded Controller - * - * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). 
- * - * Use the following sequence: - * - * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD - * - Wait for EC_LPC_CMDR_PENDING bit to clear - * - Write address to EC_LPC_ADDR_ACPI_DATA - * - Wait for EC_LPC_CMDR_DATA bit to set - * - Read value from EC_LPC_ADDR_ACPI_DATA - */ -#define EC_CMD_ACPI_READ 0x80 - -/* - * ACPI Write Embedded Controller - * - * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*). - * - * Use the following sequence: - * - * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD - * - Wait for EC_LPC_CMDR_PENDING bit to clear - * - Write address to EC_LPC_ADDR_ACPI_DATA - * - Wait for EC_LPC_CMDR_PENDING bit to clear - * - Write value to EC_LPC_ADDR_ACPI_DATA - */ -#define EC_CMD_ACPI_WRITE 0x81 - -/* - * ACPI Query Embedded Controller - * - * This clears the lowest-order bit in the currently pending host events, and - * sets the result code to the 1-based index of the bit (event 0x00000001 = 1, - * event 0x80000000 = 32), or 0 if no event was pending. - */ -#define EC_CMD_ACPI_QUERY_EVENT 0x84 - -/* Valid addresses in ACPI memory space, for read/write commands */ - -/* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */ -#define EC_ACPI_MEM_VERSION 0x00 -/* - * Test location; writing value here updates test compliment byte to (0xff - - * value). - */ -#define EC_ACPI_MEM_TEST 0x01 -/* Test compliment; writes here are ignored. */ -#define EC_ACPI_MEM_TEST_COMPLIMENT 0x02 - -/* Keyboard backlight brightness percent (0 - 100) */ -#define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03 -/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */ -#define EC_ACPI_MEM_FAN_DUTY 0x04 - -/* - * DPTF temp thresholds. Any of the EC's temp sensors can have up to two - * independent thresholds attached to them. The current value of the ID - * register determines which sensor is affected by the THRESHOLD and COMMIT - * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme - * as the memory-mapped sensors. The COMMIT register applies those settings. - * - * The spec does not mandate any way to read back the threshold settings - * themselves, but when a threshold is crossed the AP needs a way to determine - * which sensor(s) are responsible. Each reading of the ID register clears and - * returns one sensor ID that has crossed one of its threshold (in either - * direction) since the last read. A value of 0xFF means "no new thresholds - * have tripped". Setting or enabling the thresholds for a sensor will clear - * the unread event count for that sensor. - */ -#define EC_ACPI_MEM_TEMP_ID 0x05 -#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06 -#define EC_ACPI_MEM_TEMP_COMMIT 0x07 -/* - * Here are the bits for the COMMIT register: - * bit 0 selects the threshold index for the chosen sensor (0/1) - * bit 1 enables/disables the selected threshold (0 = off, 1 = on) - * Each write to the commit register affects one threshold. 
- */ -#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK (1 << 0) -#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK (1 << 1) -/* - * Example: - * - * Set the thresholds for sensor 2 to 50 C and 60 C: - * write 2 to [0x05] -- select temp sensor 2 - * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET - * write 0x2 to [0x07] -- enable threshold 0 with this value - * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET - * write 0x3 to [0x07] -- enable threshold 1 with this value - * - * Disable the 60 C threshold, leaving the 50 C threshold unchanged: - * write 2 to [0x05] -- select temp sensor 2 - * write 0x1 to [0x07] -- disable threshold 1 - */ - -/* DPTF battery charging current limit */ -#define EC_ACPI_MEM_CHARGING_LIMIT 0x08 - -/* Charging limit is specified in 64 mA steps */ -#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64 -/* Value to disable DPTF battery charging limit */ -#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff - -/* Current version of ACPI memory address space */ -#define EC_ACPI_MEM_VERSION_CURRENT 1 - - -/*****************************************************************************/ -/* - * HDMI CEC commands - * - * These commands are for sending and receiving message via HDMI CEC - */ -#define EC_MAX_CEC_MSG_LEN 16 - -/* CEC message from the AP to be written on the CEC bus */ -#define EC_CMD_CEC_WRITE_MSG 0x00B8 - -/** - * struct ec_params_cec_write - Message to write to the CEC bus - * @msg: message content to write to the CEC bus - */ -struct ec_params_cec_write { - uint8_t msg[EC_MAX_CEC_MSG_LEN]; -} __packed; - -/* Set various CEC parameters */ -#define EC_CMD_CEC_SET 0x00BA - -/** - * struct ec_params_cec_set - CEC parameters set - * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS - * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC - * or 1 to enable CEC functionality, in case cmd is CEC_CMD_LOGICAL_ADDRESS, - * this field encodes the requested logical address between 0 and 15 - * or 0xff to unregister - */ -struct ec_params_cec_set { - uint8_t cmd; /* enum cec_command */ - uint8_t val; -} __packed; - -/* Read various CEC parameters */ -#define EC_CMD_CEC_GET 0x00BB - -/** - * struct ec_params_cec_get - CEC parameters get - * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS - */ -struct ec_params_cec_get { - uint8_t cmd; /* enum cec_command */ -} __packed; - -/** - * struct ec_response_cec_get - CEC parameters get response - * @val: in case cmd was CEC_CMD_ENABLE, this field will 0 if CEC is - * disabled or 1 if CEC functionality is enabled, - * in case cmd was CEC_CMD_LOGICAL_ADDRESS, this will encode the - * configured logical address between 0 and 15 or 0xff if unregistered - */ -struct ec_response_cec_get { - uint8_t val; -} __packed; - -/* CEC parameters command */ -enum ec_cec_command { - /* CEC reading, writing and events enable */ - CEC_CMD_ENABLE, - /* CEC logical address */ - CEC_CMD_LOGICAL_ADDRESS, -}; - -/* Events from CEC to AP */ -enum mkbp_cec_event { - /* Outgoing message was acknowledged by a follower */ - EC_MKBP_CEC_SEND_OK = BIT(0), - /* Outgoing message was not acknowledged */ - EC_MKBP_CEC_SEND_FAILED = BIT(1), -}; +#define EC_CMD_GET_PANIC_INFO 0x00D3 /*****************************************************************************/ /* @@ -3208,7 +4611,7 @@ enum mkbp_cec_event { * * Use EC_CMD_REBOOT_EC to reboot the EC more politely. 
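 *
 * For illustration only (not part of this header): a "polite" cold reset that
 * waits for the AP to shut down first would instead send EC_CMD_REBOOT_EC
 * with
 *
 *	struct ec_params_reboot_ec req = {
 *		.cmd   = EC_REBOOT_COLD,
 *		.flags = EC_REBOOT_FLAG_ON_AP_SHUTDOWN,
 *	};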
*/ -#define EC_CMD_REBOOT 0xd1 /* Think "die" */ +#define EC_CMD_REBOOT 0x00D1 /* Think "die" */ /* * Resend last response (not supported on LPC). @@ -3217,7 +4620,7 @@ enum mkbp_cec_event { * there was no previous command, or the previous command's response was too * big to save. */ -#define EC_CMD_RESEND_RESPONSE 0xdb +#define EC_CMD_RESEND_RESPONSE 0x00DB /* * This header byte on a command indicate version 0. Any header byte less @@ -3229,9 +4632,7 @@ enum mkbp_cec_event { * * The old EC interface must not use commands 0xdc or higher. */ -#define EC_CMD_VERSION0 0xdc - -#endif /* !__ACPI__ */ +#define EC_CMD_VERSION0 0x00DC /*****************************************************************************/ /* @@ -3241,21 +4642,56 @@ enum mkbp_cec_event { */ /* EC to PD MCU exchange status command */ -#define EC_CMD_PD_EXCHANGE_STATUS 0x100 +#define EC_CMD_PD_EXCHANGE_STATUS 0x0100 +#define EC_VER_PD_EXCHANGE_STATUS 2 + +enum pd_charge_state { + PD_CHARGE_NO_CHANGE = 0, /* Don't change charge state */ + PD_CHARGE_NONE, /* No charging allowed */ + PD_CHARGE_5V, /* 5V charging only */ + PD_CHARGE_MAX /* Charge at max voltage */ +}; /* Status of EC being sent to PD */ +#define EC_STATUS_HIBERNATING BIT(0) + struct ec_params_pd_status { - int8_t batt_soc; /* battery state of charge */ -} __packed; + uint8_t status; /* EC status */ + int8_t batt_soc; /* battery state of charge */ + uint8_t charge_state; /* charging state (from enum pd_charge_state) */ +} __ec_align1; /* Status of PD being sent back to EC */ +#define PD_STATUS_HOST_EVENT BIT(0) /* Forward host event to AP */ +#define PD_STATUS_IN_RW BIT(1) /* Running RW image */ +#define PD_STATUS_JUMPED_TO_IMAGE BIT(2) /* Current image was jumped to */ +#define PD_STATUS_TCPC_ALERT_0 BIT(3) /* Alert active in port 0 TCPC */ +#define PD_STATUS_TCPC_ALERT_1 BIT(4) /* Alert active in port 1 TCPC */ +#define PD_STATUS_TCPC_ALERT_2 BIT(5) /* Alert active in port 2 TCPC */ +#define PD_STATUS_TCPC_ALERT_3 BIT(6) /* Alert active in port 3 TCPC */ +#define PD_STATUS_EC_INT_ACTIVE (PD_STATUS_TCPC_ALERT_0 | \ + PD_STATUS_TCPC_ALERT_1 | \ + PD_STATUS_HOST_EVENT) struct ec_response_pd_status { - int8_t status; /* PD MCU status */ - uint32_t curr_lim_ma; /* input current limit */ -} __packed; + uint32_t curr_lim_ma; /* input current limit */ + uint16_t status; /* PD MCU status */ + int8_t active_charge_port; /* active charging port */ +} __ec_align_size1; + +/* AP to PD MCU host event status command, cleared on read */ +#define EC_CMD_PD_HOST_EVENT_STATUS 0x0104 + +/* PD MCU host event status bits */ +#define PD_EVENT_UPDATE_DEVICE BIT(0) +#define PD_EVENT_POWER_CHANGE BIT(1) +#define PD_EVENT_IDENTITY_RECEIVED BIT(2) +#define PD_EVENT_DATA_SWAP BIT(3) +struct ec_response_host_event_status { + uint32_t status; /* PD MCU host event status */ +} __ec_align4; /* Set USB type-C port role and muxes */ -#define EC_CMD_USB_PD_CONTROL 0x101 +#define EC_CMD_USB_PD_CONTROL 0x0101 enum usb_pd_control_role { USB_PD_CTRL_ROLE_NO_CHANGE = 0, @@ -3263,6 +4699,8 @@ enum usb_pd_control_role { USB_PD_CTRL_ROLE_TOGGLE_OFF = 2, USB_PD_CTRL_ROLE_FORCE_SINK = 3, USB_PD_CTRL_ROLE_FORCE_SOURCE = 4, + USB_PD_CTRL_ROLE_FREEZE = 5, + USB_PD_CTRL_ROLE_COUNT }; enum usb_pd_control_mux { @@ -3272,6 +4710,7 @@ enum usb_pd_control_mux { USB_PD_CTRL_MUX_DP = 3, USB_PD_CTRL_MUX_DOCK = 4, USB_PD_CTRL_MUX_AUTO = 5, + USB_PD_CTRL_MUX_COUNT }; enum usb_pd_control_swap { @@ -3287,11 +4726,11 @@ struct ec_params_usb_pd_control { uint8_t role; uint8_t mux; uint8_t swap; -} __packed; +} __ec_align1; 
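For illustration, a rough sketch of querying one port's state with EC_CMD_USB_PD_CONTROL, reusing the hypothetical ec_cmd() helper from the MKBP configuration sketch earlier; it is not part of this patch. Role, mux and swap are all left at their zero "no change" values, and command version 1 is used so the reply is the string-state response defined just below.

/* Ask the EC for the PD state of port 0 without changing anything. */
static int example_query_pd_port0(void)
{
	struct ec_params_usb_pd_control p = {
		.port = 0,
		.role = USB_PD_CTRL_ROLE_NO_CHANGE,
		.mux  = 0,	/* no mux change requested */
		.swap = 0,	/* no swap requested */
	};
	struct ec_response_usb_pd_control_v1 r;

	return ec_cmd(EC_CMD_USB_PD_CONTROL, 1, &p, sizeof(p), &r, sizeof(r));
}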
-#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */ -#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */ -#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */ +#define PD_CTRL_RESP_ENABLED_COMMS BIT(0) /* Communication enabled */ +#define PD_CTRL_RESP_ENABLED_CONNECTED BIT(1) /* Device connected */ +#define PD_CTRL_RESP_ENABLED_PD_CAPABLE BIT(2) /* Partner is PD capable */ #define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */ #define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */ @@ -3301,28 +4740,54 @@ struct ec_params_usb_pd_control { #define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */ #define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powerd */ +struct ec_response_usb_pd_control { + uint8_t enabled; + uint8_t role; + uint8_t polarity; + uint8_t state; +} __ec_align1; + struct ec_response_usb_pd_control_v1 { uint8_t enabled; uint8_t role; uint8_t polarity; char state[32]; -} __packed; +} __ec_align1; + +/* Values representing usbc PD CC state */ +#define USBC_PD_CC_NONE 0 /* No accessory connected */ +#define USBC_PD_CC_NO_UFP 1 /* No UFP accessory connected */ +#define USBC_PD_CC_AUDIO_ACC 2 /* Audio accessory connected */ +#define USBC_PD_CC_DEBUG_ACC 3 /* Debug accessory connected */ +#define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */ +#define USBC_PD_CC_DFP_ATTACHED 5 /* DPF attached to usbc */ + +struct ec_response_usb_pd_control_v2 { + uint8_t enabled; + uint8_t role; + uint8_t polarity; + char state[32]; + uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */ + uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ + /* CL:1500994 Current cable type */ + uint8_t reserved_cable_type; +} __ec_align1; -#define EC_CMD_USB_PD_PORTS 0x102 +#define EC_CMD_USB_PD_PORTS 0x0102 /* Maximum number of PD ports on a device, num_ports will be <= this */ #define EC_USB_PD_MAX_PORTS 8 struct ec_response_usb_pd_ports { uint8_t num_ports; -} __packed; +} __ec_align1; -#define EC_CMD_USB_PD_POWER_INFO 0x103 +#define EC_CMD_USB_PD_POWER_INFO 0x0103 #define PD_POWER_CHARGING_PORT 0xff struct ec_params_usb_pd_power_info { uint8_t port; -} __packed; +} __ec_align1; enum usb_chg_type { USB_CHG_TYPE_NONE, @@ -3335,6 +4800,7 @@ enum usb_chg_type { USB_CHG_TYPE_OTHER, USB_CHG_TYPE_VBUS, USB_CHG_TYPE_UNKNOWN, + USB_CHG_TYPE_DEDICATED, }; enum usb_power_roles { USB_PD_PORT_POWER_DISCONNECTED, @@ -3348,7 +4814,7 @@ struct usb_chg_measures { uint16_t voltage_now; uint16_t current_max; uint16_t current_lim; -} __packed; +} __ec_align2; struct ec_response_usb_pd_power_info { uint8_t role; @@ -3357,11 +4823,8 @@ struct ec_response_usb_pd_power_info { uint8_t reserved1; struct usb_chg_measures meas; uint32_t max_power; -} __packed; +} __ec_align4; -struct ec_params_usb_pd_info_request { - uint8_t port; -} __packed; /* * This command will return the number of USB PD charge port + the number @@ -3371,7 +4834,47 @@ struct ec_params_usb_pd_info_request { #define EC_CMD_CHARGE_PORT_COUNT 0x0105 struct ec_response_charge_port_count { uint8_t port_count; -} __packed; +} __ec_align1; + +/* Write USB-PD device FW */ +#define EC_CMD_USB_PD_FW_UPDATE 0x0110 + +enum usb_pd_fw_update_cmds { + USB_PD_FW_REBOOT, + USB_PD_FW_FLASH_ERASE, + USB_PD_FW_FLASH_WRITE, + USB_PD_FW_ERASE_SIG, +}; + +struct ec_params_usb_pd_fw_update { + uint16_t dev_id; + uint8_t cmd; + uint8_t port; + uint32_t size; /* Size to write in bytes */ + /* Followed by data to write */ +} __ec_align4; + +/* Write USB-PD Accessory RW_HASH table 
entry */ +#define EC_CMD_USB_PD_RW_HASH_ENTRY 0x0111 +/* RW hash is first 20 bytes of SHA-256 of RW section */ +#define PD_RW_HASH_SIZE 20 +struct ec_params_usb_pd_rw_hash_entry { + uint16_t dev_id; + uint8_t dev_rw_hash[PD_RW_HASH_SIZE]; + uint8_t reserved; /* + * For alignment of current_image + * TODO(rspangler) but it's not aligned! + * Should have been reserved[2]. + */ + uint32_t current_image; /* One of ec_current_image */ +} __ec_align1; + +/* Read USB-PD Accessory info */ +#define EC_CMD_USB_PD_DEV_INFO 0x0112 + +struct ec_params_usb_pd_info_request { + uint8_t port; +} __ec_align1; /* Read USB-PD Device discovery info */ #define EC_CMD_USB_PD_DISCOVERY 0x0113 @@ -3379,7 +4882,7 @@ struct ec_params_usb_pd_discovery_entry { uint16_t vid; /* USB-IF VID */ uint16_t pid; /* USB-IF PID */ uint8_t ptype; /* product type (hub,periph,cable,ama) */ -} __packed; +} __ec_align_size1; /* Override default charge behavior */ #define EC_CMD_PD_CHARGE_PORT_OVERRIDE 0x0114 @@ -3393,9 +4896,13 @@ enum usb_pd_override_ports { struct ec_params_charge_port_override { int16_t override_port; /* Override port# */ -} __packed; +} __ec_align2; -/* Read (and delete) one entry of PD event log */ +/* + * Read (and delete) one entry of PD event log. + * TODO(crbug.com/751742): Make this host command more generic to accommodate + * future non-PD logs that use the same internal EC event_log. + */ #define EC_CMD_PD_GET_LOG_ENTRY 0x0115 struct ec_response_pd_log { @@ -3404,7 +4911,7 @@ struct ec_response_pd_log { uint8_t size_port; /* [7:5] port number [4:0] payload size in bytes */ uint16_t data; /* type-defined data payload */ uint8_t payload[0]; /* optional additional data payload: 0..16 bytes */ -} __packed; +} __ec_align4; /* The timestamp is the microsecond counter shifted to get about a ms. */ #define PD_LOG_TIMESTAMP_SHIFT 10 /* 1 LSB = 1024us */ @@ -3470,35 +4977,710 @@ struct mcdp_version { uint8_t major; uint8_t minor; uint16_t build; -} __packed; +} __ec_align4; struct mcdp_info { uint8_t family[2]; uint8_t chipid[2]; struct mcdp_version irom; struct mcdp_version fw; -} __packed; +} __ec_align4; /* struct mcdp_info field decoding */ #define MCDP_CHIPID(chipid) ((chipid[0] << 8) | chipid[1]) #define MCDP_FAMILY(family) ((family[0] << 8) | family[1]) +/* Get/Set USB-PD Alternate mode info */ +#define EC_CMD_USB_PD_GET_AMODE 0x0116 +struct ec_params_usb_pd_get_mode_request { + uint16_t svid_idx; /* SVID index to get */ + uint8_t port; /* port */ +} __ec_align_size1; + +struct ec_params_usb_pd_get_mode_response { + uint16_t svid; /* SVID */ + uint16_t opos; /* Object Position */ + uint32_t vdo[6]; /* Mode VDOs */ +} __ec_align4; + +#define EC_CMD_USB_PD_SET_AMODE 0x0117 + +enum pd_mode_cmd { + PD_EXIT_MODE = 0, + PD_ENTER_MODE = 1, + /* Not a command. Do NOT remove. 
*/ + PD_MODE_CMD_COUNT, +}; + +struct ec_params_usb_pd_set_mode_request { + uint32_t cmd; /* enum pd_mode_cmd */ + uint16_t svid; /* SVID to set */ + uint8_t opos; /* Object Position */ + uint8_t port; /* port */ +} __ec_align4; + +/* Ask the PD MCU to record a log of a requested type */ +#define EC_CMD_PD_WRITE_LOG_ENTRY 0x0118 + +struct ec_params_pd_write_log_entry { + uint8_t type; /* event type : see PD_EVENT_xx above */ + uint8_t port; /* port#, or 0 for events unrelated to a given port */ +} __ec_align1; + + +/* Control USB-PD chip */ +#define EC_CMD_PD_CONTROL 0x0119 + +enum ec_pd_control_cmd { + PD_SUSPEND = 0, /* Suspend the PD chip (EC: stop talking to PD) */ + PD_RESUME, /* Resume the PD chip (EC: start talking to PD) */ + PD_RESET, /* Force reset the PD chip */ + PD_CONTROL_DISABLE, /* Disable further calls to this command */ + PD_CHIP_ON, /* Power on the PD chip */ +}; + +struct ec_params_pd_control { + uint8_t chip; /* chip id */ + uint8_t subcmd; +} __ec_align1; + /* Get info about USB-C SS muxes */ -#define EC_CMD_USB_PD_MUX_INFO 0x11a +#define EC_CMD_USB_PD_MUX_INFO 0x011A struct ec_params_usb_pd_mux_info { uint8_t port; /* USB-C port number */ -} __packed; +} __ec_align1; /* Flags representing mux state */ -#define USB_PD_MUX_USB_ENABLED (1 << 0) -#define USB_PD_MUX_DP_ENABLED (1 << 1) -#define USB_PD_MUX_POLARITY_INVERTED (1 << 2) -#define USB_PD_MUX_HPD_IRQ (1 << 3) +#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ +#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ +#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ +#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ +#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ struct ec_response_usb_pd_mux_info { uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */ -} __packed; +} __ec_align1; + +#define EC_CMD_PD_CHIP_INFO 0x011B + +struct ec_params_pd_chip_info { + uint8_t port; /* USB-C port number */ + uint8_t renew; /* Force renewal */ +} __ec_align1; + +struct ec_response_pd_chip_info { + uint16_t vendor_id; + uint16_t product_id; + uint16_t device_id; + union { + uint8_t fw_version_string[8]; + uint64_t fw_version_number; + }; +} __ec_align2; + +struct ec_response_pd_chip_info_v1 { + uint16_t vendor_id; + uint16_t product_id; + uint16_t device_id; + union { + uint8_t fw_version_string[8]; + uint64_t fw_version_number; + }; + union { + uint8_t min_req_fw_version_string[8]; + uint64_t min_req_fw_version_number; + }; +} __ec_align2; + +/* Run RW signature verification and get status */ +#define EC_CMD_RWSIG_CHECK_STATUS 0x011C + +struct ec_response_rwsig_check_status { + uint32_t status; +} __ec_align4; + +/* For controlling RWSIG task */ +#define EC_CMD_RWSIG_ACTION 0x011D + +enum rwsig_action { + RWSIG_ACTION_ABORT = 0, /* Abort RWSIG and prevent jumping */ + RWSIG_ACTION_CONTINUE = 1, /* Jump to RW immediately */ +}; + +struct ec_params_rwsig_action { + uint32_t action; +} __ec_align4; + +/* Run verification on a slot */ +#define EC_CMD_EFS_VERIFY 0x011E + +struct ec_params_efs_verify { + uint8_t region; /* enum ec_flash_region */ +} __ec_align1; + +/* + * Retrieve info from Cros Board Info store. Response is based on the data + * type. Integers return a uint32. Strings return a string, using the response + * size to determine how big it is. + */ +#define EC_CMD_GET_CROS_BOARD_INFO 0x011F +/* + * Write info into Cros Board Info on EEPROM. Write fails if the board has + * hardware write-protect enabled. 
+ */ +#define EC_CMD_SET_CROS_BOARD_INFO 0x0120 + +enum cbi_data_tag { + CBI_TAG_BOARD_VERSION = 0, /* uint32_t or smaller */ + CBI_TAG_OEM_ID = 1, /* uint32_t or smaller */ + CBI_TAG_SKU_ID = 2, /* uint32_t or smaller */ + CBI_TAG_DRAM_PART_NUM = 3, /* variable length ascii, nul terminated. */ + CBI_TAG_OEM_NAME = 4, /* variable length ascii, nul terminated. */ + CBI_TAG_MODEL_ID = 5, /* uint32_t or smaller */ + CBI_TAG_COUNT, +}; + +/* + * Flags to control read operation + * + * RELOAD: Invalidate cache and read data from EEPROM. Useful to verify + * write was successful without reboot. + */ +#define CBI_GET_RELOAD BIT(0) + +struct ec_params_get_cbi { + uint32_t tag; /* enum cbi_data_tag */ + uint32_t flag; /* CBI_GET_* */ +} __ec_align4; + +/* + * Flags to control write behavior. + * + * NO_SYNC: Makes EC update data in RAM but skip writing to EEPROM. It's + * useful when writing multiple fields in a row. + * INIT: Need to be set when creating a new CBI from scratch. All fields + * will be initialized to zero first. + */ +#define CBI_SET_NO_SYNC BIT(0) +#define CBI_SET_INIT BIT(1) + +struct ec_params_set_cbi { + uint32_t tag; /* enum cbi_data_tag */ + uint32_t flag; /* CBI_SET_* */ + uint32_t size; /* Data size */ + uint8_t data[]; /* For string and raw data */ +} __ec_align1; + +/* + * Information about resets of the AP by the EC and the EC's own uptime. + */ +#define EC_CMD_GET_UPTIME_INFO 0x0121 + +struct ec_response_uptime_info { + /* + * Number of milliseconds since the last EC boot. Sysjump resets + * typically do not restart the EC's time_since_boot epoch. + * + * WARNING: The EC's sense of time is much less accurate than the AP's + * sense of time, in both phase and frequency. This timebase is similar + * to CLOCK_MONOTONIC_RAW, but with 1% or more frequency error. + */ + uint32_t time_since_ec_boot_ms; + + /* + * Number of times the AP was reset by the EC since the last EC boot. + * Note that the AP may be held in reset by the EC during the initial + * boot sequence, such that the very first AP boot may count as more + * than one here. + */ + uint32_t ap_resets_since_ec_boot; + + /* + * The set of flags which describe the EC's most recent reset. See + * include/system.h RESET_FLAG_* for details. + */ + uint32_t ec_reset_flags; + + /* Empty log entries have both the cause and timestamp set to zero. */ + struct ap_reset_log_entry { + /* + * See include/chipset.h: enum chipset_{reset,shutdown}_reason + * for details. + */ + uint16_t reset_cause; + + /* Reserved for protocol growth. */ + uint16_t reserved; + + /* + * The time of the reset's assertion, in milliseconds since the + * last EC boot, in the same epoch as time_since_ec_boot_ms. + * Set to zero if the log entry is empty. + */ + uint32_t reset_time_ms; + } recent_ap_reset[4]; +} __ec_align4; + +/* + * Add entropy to the device secret (stored in the rollback region). + * + * Depending on the chip, the operation may take a long time (e.g. to erase + * flash), so the commands are asynchronous. + */ +#define EC_CMD_ADD_ENTROPY 0x0122 + +enum add_entropy_action { + /* Add entropy to the current secret. */ + ADD_ENTROPY_ASYNC = 0, + /* + * Add entropy, and also make sure that the previous secret is erased. + * (this can be implemented by adding entropy multiple times until + * all rollback blocks have been overwritten). + */ + ADD_ENTROPY_RESET_ASYNC = 1, + /* Read back result from the previous operation. 
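For example, a host that has just written the SKU ID can verify it straight from EEPROM by setting CBI_GET_RELOAD in the request; this is only a sketch of filling the parameters (the helper name is invented), and for integer tags the response is a uint32 whose width is given by the response size.

static void fill_cbi_sku_request(struct ec_params_get_cbi *p)
{
	p->tag = CBI_TAG_SKU_ID;
	p->flag = CBI_GET_RELOAD;	/* bypass the cache, read from EEPROM */
}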
*/ + ADD_ENTROPY_GET_RESULT = 2, +}; + +struct ec_params_rollback_add_entropy { + uint8_t action; +} __ec_align1; + +/* + * Perform a single read of a given ADC channel. + */ +#define EC_CMD_ADC_READ 0x0123 + +struct ec_params_adc_read { + uint8_t adc_channel; +} __ec_align1; + +struct ec_response_adc_read { + int32_t adc_value; +} __ec_align4; + +/* + * Read back rollback info + */ +#define EC_CMD_ROLLBACK_INFO 0x0124 + +struct ec_response_rollback_info { + int32_t id; /* Incrementing number to indicate which region to use. */ + int32_t rollback_min_version; + int32_t rw_rollback_version; +} __ec_align4; + + +/* Issue AP reset */ +#define EC_CMD_AP_RESET 0x0125 + +/*****************************************************************************/ +/* The command range 0x200-0x2FF is reserved for Rotor. */ + +/*****************************************************************************/ +/* + * Reserve a range of host commands for the CR51 firmware. + */ +#define EC_CMD_CR51_BASE 0x0300 +#define EC_CMD_CR51_LAST 0x03FF + +/*****************************************************************************/ +/* Fingerprint MCU commands: range 0x0400-0x040x */ + +/* Fingerprint SPI sensor passthru command: prototyping ONLY */ +#define EC_CMD_FP_PASSTHRU 0x0400 + +#define EC_FP_FLAG_NOT_COMPLETE 0x1 + +struct ec_params_fp_passthru { + uint16_t len; /* Number of bytes to write then read */ + uint16_t flags; /* EC_FP_FLAG_xxx */ + uint8_t data[]; /* Data to send */ +} __ec_align2; + +/* Configure the Fingerprint MCU behavior */ +#define EC_CMD_FP_MODE 0x0402 + +/* Put the sensor in its lowest power mode */ +#define FP_MODE_DEEPSLEEP BIT(0) +/* Wait to see a finger on the sensor */ +#define FP_MODE_FINGER_DOWN BIT(1) +/* Poll until the finger has left the sensor */ +#define FP_MODE_FINGER_UP BIT(2) +/* Capture the current finger image */ +#define FP_MODE_CAPTURE BIT(3) +/* Finger enrollment session on-going */ +#define FP_MODE_ENROLL_SESSION BIT(4) +/* Enroll the current finger image */ +#define FP_MODE_ENROLL_IMAGE BIT(5) +/* Try to match the current finger image */ +#define FP_MODE_MATCH BIT(6) +/* Reset and re-initialize the sensor. */ +#define FP_MODE_RESET_SENSOR BIT(7) +/* special value: don't change anything just read back current mode */ +#define FP_MODE_DONT_CHANGE BIT(31) + +#define FP_VALID_MODES (FP_MODE_DEEPSLEEP | \ + FP_MODE_FINGER_DOWN | \ + FP_MODE_FINGER_UP | \ + FP_MODE_CAPTURE | \ + FP_MODE_ENROLL_SESSION | \ + FP_MODE_ENROLL_IMAGE | \ + FP_MODE_MATCH | \ + FP_MODE_RESET_SENSOR | \ + FP_MODE_DONT_CHANGE) + +/* Capture types defined in bits [30..28] */ +#define FP_MODE_CAPTURE_TYPE_SHIFT 28 +#define FP_MODE_CAPTURE_TYPE_MASK (0x7 << FP_MODE_CAPTURE_TYPE_SHIFT) +/* + * This enum must remain ordered, if you add new values you must ensure that + * FP_CAPTURE_TYPE_MAX is still the last one. + */ +enum fp_capture_type { + /* Full blown vendor-defined capture (produces 'frame_size' bytes) */ + FP_CAPTURE_VENDOR_FORMAT = 0, + /* Simple raw image capture (produces width x height x bpp bits) */ + FP_CAPTURE_SIMPLE_IMAGE = 1, + /* Self test pattern (e.g. checkerboard) */ + FP_CAPTURE_PATTERN0 = 2, + /* Self test pattern (e.g. 
inverted checkerboard) */ + FP_CAPTURE_PATTERN1 = 3, + /* Capture for Quality test with fixed contrast */ + FP_CAPTURE_QUALITY_TEST = 4, + /* Capture for pixel reset value test */ + FP_CAPTURE_RESET_TEST = 5, + FP_CAPTURE_TYPE_MAX, +}; +/* Extracts the capture type from the sensor 'mode' word */ +#define FP_CAPTURE_TYPE(mode) (((mode) & FP_MODE_CAPTURE_TYPE_MASK) \ + >> FP_MODE_CAPTURE_TYPE_SHIFT) + +struct ec_params_fp_mode { + uint32_t mode; /* as defined by FP_MODE_ constants */ +} __ec_align4; + +struct ec_response_fp_mode { + uint32_t mode; /* as defined by FP_MODE_ constants */ +} __ec_align4; + +/* Retrieve Fingerprint sensor information */ +#define EC_CMD_FP_INFO 0x0403 + +/* Number of dead pixels detected on the last maintenance */ +#define FP_ERROR_DEAD_PIXELS(errors) ((errors) & 0x3FF) +/* Unknown number of dead pixels detected on the last maintenance */ +#define FP_ERROR_DEAD_PIXELS_UNKNOWN (0x3FF) +/* No interrupt from the sensor */ +#define FP_ERROR_NO_IRQ BIT(12) +/* SPI communication error */ +#define FP_ERROR_SPI_COMM BIT(13) +/* Invalid sensor Hardware ID */ +#define FP_ERROR_BAD_HWID BIT(14) +/* Sensor initialization failed */ +#define FP_ERROR_INIT_FAIL BIT(15) + +struct ec_response_fp_info_v0 { + /* Sensor identification */ + uint32_t vendor_id; + uint32_t product_id; + uint32_t model_id; + uint32_t version; + /* Image frame characteristics */ + uint32_t frame_size; + uint32_t pixel_format; /* using V4L2_PIX_FMT_ */ + uint16_t width; + uint16_t height; + uint16_t bpp; + uint16_t errors; /* see FP_ERROR_ flags above */ +} __ec_align4; + +struct ec_response_fp_info { + /* Sensor identification */ + uint32_t vendor_id; + uint32_t product_id; + uint32_t model_id; + uint32_t version; + /* Image frame characteristics */ + uint32_t frame_size; + uint32_t pixel_format; /* using V4L2_PIX_FMT_ */ + uint16_t width; + uint16_t height; + uint16_t bpp; + uint16_t errors; /* see FP_ERROR_ flags above */ + /* Template/finger current information */ + uint32_t template_size; /* max template size in bytes */ + uint16_t template_max; /* maximum number of fingers/templates */ + uint16_t template_valid; /* number of valid fingers/templates */ + uint32_t template_dirty; /* bitmap of templates with MCU side changes */ + uint32_t template_version; /* version of the template format */ +} __ec_align4; + +/* Get the last captured finger frame or a template content */ +#define EC_CMD_FP_FRAME 0x0404 + +/* constants defining the 'offset' field which also contains the frame index */ +#define FP_FRAME_INDEX_SHIFT 28 +/* Frame buffer where the captured image is stored */ +#define FP_FRAME_INDEX_RAW_IMAGE 0 +/* First frame buffer holding a template */ +#define FP_FRAME_INDEX_TEMPLATE 1 +#define FP_FRAME_GET_BUFFER_INDEX(offset) ((offset) >> FP_FRAME_INDEX_SHIFT) +#define FP_FRAME_OFFSET_MASK 0x0FFFFFFF + +/* Version of the format of the encrypted templates. */ +#define FP_TEMPLATE_FORMAT_VERSION 3 + +/* Constants for encryption parameters */ +#define FP_CONTEXT_NONCE_BYTES 12 +#define FP_CONTEXT_USERID_WORDS (32 / sizeof(uint32_t)) +#define FP_CONTEXT_TAG_BYTES 16 +#define FP_CONTEXT_SALT_BYTES 16 +#define FP_CONTEXT_TPM_BYTES 32 + +struct ec_fp_template_encryption_metadata { + /* + * Version of the structure format (N=3). + */ + uint16_t struct_version; + /* Reserved bytes, set to 0. */ + uint16_t reserved; + /* + * The salt is *only* ever used for key derivation. The nonce is unique, + * a different one is used for every message. 
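The capture type rides in bits [30..28] of the same mode word that carries the FP_MODE_* flags; a minimal sketch of composing a capture request (the function name is invented) is shown below, and FP_CAPTURE_TYPE() recovers the type from a mode word read back with FP_MODE_DONT_CHANGE.

static uint32_t fp_simple_capture_mode(void)
{
	/* Request one raw image (width x height x bpp) from the sensor. */
	return FP_MODE_CAPTURE |
	       ((uint32_t)FP_CAPTURE_SIMPLE_IMAGE << FP_MODE_CAPTURE_TYPE_SHIFT);
}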
+ */ + uint8_t nonce[FP_CONTEXT_NONCE_BYTES]; + uint8_t salt[FP_CONTEXT_SALT_BYTES]; + uint8_t tag[FP_CONTEXT_TAG_BYTES]; +}; + +struct ec_params_fp_frame { + /* + * The offset contains the template index or FP_FRAME_INDEX_RAW_IMAGE + * in the high nibble, and the real offset within the frame in + * FP_FRAME_OFFSET_MASK. + */ + uint32_t offset; + uint32_t size; +} __ec_align4; + +/* Load a template into the MCU */ +#define EC_CMD_FP_TEMPLATE 0x0405 + +/* Flag in the 'size' field indicating that the full template has been sent */ +#define FP_TEMPLATE_COMMIT 0x80000000 + +struct ec_params_fp_template { + uint32_t offset; + uint32_t size; + uint8_t data[]; +} __ec_align4; + +/* Clear the current fingerprint user context and set a new one */ +#define EC_CMD_FP_CONTEXT 0x0406 + +struct ec_params_fp_context { + uint32_t userid[FP_CONTEXT_USERID_WORDS]; +} __ec_align4; + +#define EC_CMD_FP_STATS 0x0407 + +#define FPSTATS_CAPTURE_INV BIT(0) +#define FPSTATS_MATCHING_INV BIT(1) + +struct ec_response_fp_stats { + uint32_t capture_time_us; + uint32_t matching_time_us; + uint32_t overall_time_us; + struct { + uint32_t lo; + uint32_t hi; + } overall_t0; + uint8_t timestamps_invalid; + int8_t template_matched; +} __ec_align2; + +#define EC_CMD_FP_SEED 0x0408 +struct ec_params_fp_seed { + /* + * Version of the structure format (N=3). + */ + uint16_t struct_version; + /* Reserved bytes, set to 0. */ + uint16_t reserved; + /* Seed from the TPM. */ + uint8_t seed[FP_CONTEXT_TPM_BYTES]; +} __ec_align4; + +#define EC_CMD_FP_ENC_STATUS 0x0409 + +/* FP TPM seed has been set or not */ +#define FP_ENC_STATUS_SEED_SET BIT(0) + +struct ec_response_fp_encryption_status { + /* Used bits in encryption engine status */ + uint32_t valid_flags; + /* Encryption engine status */ + uint32_t status; +} __ec_align4; + +/*****************************************************************************/ +/* Touchpad MCU commands: range 0x0500-0x05FF */ + +/* Perform touchpad self test */ +#define EC_CMD_TP_SELF_TEST 0x0500 + +/* Get number of frame types, and the size of each type */ +#define EC_CMD_TP_FRAME_INFO 0x0501 + +struct ec_response_tp_frame_info { + uint32_t n_frames; + uint32_t frame_sizes[0]; +} __ec_align4; + +/* Create a snapshot of current frame readings */ +#define EC_CMD_TP_FRAME_SNAPSHOT 0x0502 + +/* Read the frame */ +#define EC_CMD_TP_FRAME_GET 0x0503 + +struct ec_params_tp_frame_get { + uint32_t frame_index; + uint32_t offset; + uint32_t size; +} __ec_align4; + +/*****************************************************************************/ +/* EC-EC communication commands: range 0x0600-0x06FF */ + +#define EC_COMM_TEXT_MAX 8 + +/* + * Get battery static information, i.e. information that never changes, or + * very infrequently. + */ +#define EC_CMD_BATTERY_GET_STATIC 0x0600 + +/** + * struct ec_params_battery_static_info - Battery static info parameters + * @index: Battery index. 
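Since the high nibble of ec_params_fp_frame.offset selects the buffer and the low 28 bits are the byte offset within it, chunked reads of a frame or template can build the field as sketched below; the helper name and the example sizes are not part of the header.

static uint32_t fp_frame_offset(uint32_t buffer_index, uint32_t byte_offset)
{
	return (buffer_index << FP_FRAME_INDEX_SHIFT) |
	       (byte_offset & FP_FRAME_OFFSET_MASK);
}

/*
 * First 4 KB of the raw capture buffer:
 *	.offset = fp_frame_offset(FP_FRAME_INDEX_RAW_IMAGE, 0), .size = 4096
 * Second template, starting at byte 1024:
 *	.offset = fp_frame_offset(FP_FRAME_INDEX_TEMPLATE + 1, 1024)
 */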
+ */ +struct ec_params_battery_static_info { + uint8_t index; +} __ec_align_size1; + +/** + * struct ec_response_battery_static_info - Battery static info response + * @design_capacity: Battery Design Capacity (mAh) + * @design_voltage: Battery Design Voltage (mV) + * @manufacturer: Battery Manufacturer String + * @model: Battery Model Number String + * @serial: Battery Serial Number String + * @type: Battery Type String + * @cycle_count: Battery Cycle Count + */ +struct ec_response_battery_static_info { + uint16_t design_capacity; + uint16_t design_voltage; + char manufacturer[EC_COMM_TEXT_MAX]; + char model[EC_COMM_TEXT_MAX]; + char serial[EC_COMM_TEXT_MAX]; + char type[EC_COMM_TEXT_MAX]; + /* TODO(crbug.com/795991): Consider moving to dynamic structure. */ + uint32_t cycle_count; +} __ec_align4; + +/* + * Get battery dynamic information, i.e. information that is likely to change + * every time it is read. + */ +#define EC_CMD_BATTERY_GET_DYNAMIC 0x0601 + +/** + * struct ec_params_battery_dynamic_info - Battery dynamic info parameters + * @index: Battery index. + */ +struct ec_params_battery_dynamic_info { + uint8_t index; +} __ec_align_size1; + +/** + * struct ec_response_battery_dynamic_info - Battery dynamic info response + * @actual_voltage: Battery voltage (mV) + * @actual_current: Battery current (mA); negative=discharging + * @remaining_capacity: Remaining capacity (mAh) + * @full_capacity: Capacity (mAh, might change occasionally) + * @flags: Flags, see EC_BATT_FLAG_* + * @desired_voltage: Charging voltage desired by battery (mV) + * @desired_current: Charging current desired by battery (mA) + */ +struct ec_response_battery_dynamic_info { + int16_t actual_voltage; + int16_t actual_current; + int16_t remaining_capacity; + int16_t full_capacity; + int16_t flags; + int16_t desired_voltage; + int16_t desired_current; +} __ec_align2; + +/* + * Control charger chip. Used to control charger chip on the slave. + */ +#define EC_CMD_CHARGER_CONTROL 0x0602 + +/** + * struct ec_params_charger_control - Charger control parameters + * @max_current: Charger current (mA). Positive to allow base to draw up to + * max_current and (possibly) charge battery, negative to request current + * from base (OTG). + * @otg_voltage: Voltage (mV) to use in OTG mode, ignored if max_current is + * >= 0. + * @allow_charging: Allow base battery charging (only makes sense if + * max_current > 0). + */ +struct ec_params_charger_control { + int16_t max_current; + uint16_t otg_voltage; + uint8_t allow_charging; +} __ec_align_size1; + +/*****************************************************************************/ +/* + * Reserve a range of host commands for board-specific, experimental, or + * special purpose features. These can be (re)used without updating this file. + * + * CAUTION: Don't go nuts with this. Shipping products should document ALL + * their EC commands for easier development, testing, debugging, and support. + * + * All commands MUST be #defined to be 4-digit UPPER CASE hex values + * (e.g., 0x00AB, not 0xab) for CONFIG_HOSTCMD_SECTION_SORTED to work. 
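As an illustration of the EC_CMD_CHARGER_CONTROL semantics described above, two example parameter blocks are sketched here; the values are arbitrary examples, not recommendations.

static const struct ec_params_charger_control example_allow_charge = {
	.max_current = 3000,	/* base may draw up to 3 A and charge its battery */
	.otg_voltage = 0,	/* ignored because max_current >= 0 */
	.allow_charging = 1,
};

static const struct ec_params_charger_control example_request_otg = {
	.max_current = -500,	/* request 500 mA from the base (OTG) */
	.otg_voltage = 5000,	/* supply 5 V while sourcing */
	.allow_charging = 0,	/* only meaningful when max_current > 0 */
};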
+ * + * In your experimental code, you may want to do something like this: + * + * #define EC_CMD_MAGIC_FOO 0x0000 + * #define EC_CMD_MAGIC_BAR 0x0001 + * #define EC_CMD_MAGIC_HEY 0x0002 + * + * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_FOO, magic_foo_handler, + * EC_VER_MASK(0); + * + * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_BAR, magic_bar_handler, + * EC_VER_MASK(0); + * + * DECLARE_PRIVATE_HOST_COMMAND(EC_CMD_MAGIC_HEY, magic_hey_handler, + * EC_VER_MASK(0); + */ +#define EC_CMD_BOARD_SPECIFIC_BASE 0x3E00 +#define EC_CMD_BOARD_SPECIFIC_LAST 0x3FFF + +/* + * Given the private host command offset, calculate the true private host + * command value. + */ +#define EC_PRIVATE_HOST_COMMAND_VALUE(command) \ + (EC_CMD_BOARD_SPECIFIC_BASE + (command)) /*****************************************************************************/ /* @@ -3538,4 +5720,6 @@ struct ec_response_usb_pd_mux_info { #define EC_LPC_ADDR_OLD_PARAM EC_HOST_CMD_REGION1 #define EC_OLD_PARAM_SIZE EC_HOST_CMD_REGION_SIZE + + #endif /* __CROS_EC_COMMANDS_H */ diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h new file mode 100644 index 000000000000..eab7036cda09 --- /dev/null +++ b/include/linux/platform_data/cros_ec_proto.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ChromeOS Embedded Controller protocol interface. + * + * Copyright (C) 2012 Google, Inc + */ + +#ifndef __LINUX_CROS_EC_PROTO_H +#define __LINUX_CROS_EC_PROTO_H + +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/notifier.h> + +#include <linux/platform_data/cros_ec_commands.h> + +#define CROS_EC_DEV_NAME "cros_ec" +#define CROS_EC_DEV_FP_NAME "cros_fp" +#define CROS_EC_DEV_ISH_NAME "cros_ish" +#define CROS_EC_DEV_PD_NAME "cros_pd" +#define CROS_EC_DEV_SCP_NAME "cros_scp" +#define CROS_EC_DEV_TP_NAME "cros_tp" + +/* + * The EC is unresponsive for a time after a reboot command. Add a + * simple delay to make sure that the bus stays locked. + */ +#define EC_REBOOT_DELAY_MS 50 + +/* + * Max bus-specific overhead incurred by request/responses. + * I2C requires 1 additional byte for requests. + * I2C requires 2 additional bytes for responses. + * SPI requires up to 32 additional bytes for responses. + */ +#define EC_PROTO_VERSION_UNKNOWN 0 +#define EC_MAX_REQUEST_OVERHEAD 1 +#define EC_MAX_RESPONSE_OVERHEAD 32 + +/* + * Command interface between EC and AP, for LPC, I2C and SPI interfaces. + */ +enum { + EC_MSG_TX_HEADER_BYTES = 3, + EC_MSG_TX_TRAILER_BYTES = 1, + EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES + + EC_MSG_TX_TRAILER_BYTES, + EC_MSG_RX_PROTO_BYTES = 3, + + /* Max length of messages for proto 2*/ + EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + + EC_MSG_TX_PROTO_BYTES, + + EC_MAX_MSG_BYTES = 64 * 1024, +}; + +/** + * struct cros_ec_command - Information about a ChromeOS EC command. + * @version: Command version number (often 0). + * @command: Command to send (EC_CMD_...). + * @outsize: Outgoing length in bytes. + * @insize: Max number of bytes to accept from the EC. + * @result: EC's response to the command (separate from communication failure). + * @data: Where to put the incoming data from EC and outgoing data to EC. + */ +struct cros_ec_command { + uint32_t version; + uint32_t command; + uint32_t outsize; + uint32_t insize; + uint32_t result; + uint8_t data[0]; +}; + +/** + * struct cros_ec_device - Information about a ChromeOS EC device. + * @phys_name: Name of physical comms layer (e.g. 'i2c-4'). 
+ * @dev: Device pointer for physical comms device + * @was_wake_device: True if this device was set to wake the system from + * sleep at the last suspend. + * @cros_class: The class structure for this device. + * @cmd_readmem: Direct read of the EC memory-mapped region, if supported. + * @offset: Is within EC_LPC_ADDR_MEMMAP region. + * @bytes: Number of bytes to read. Zero means "read a string" (including + * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be + * read. Caller must ensure that the buffer is large enough for the + * result when reading a string. + * @max_request: Max size of message requested. + * @max_response: Max size of message response. + * @max_passthru: Max size of passthru message. + * @proto_version: The protocol version used for this device. + * @priv: Private data. + * @irq: Interrupt to use. + * @id: Device id. + * @din: Input buffer (for data from EC). This buffer will always be + * dword-aligned and include enough space for up to 7 word-alignment + * bytes also, so we can ensure that the body of the message is always + * dword-aligned (64-bit). We use this alignment to keep ARM and x86 + * happy. Probably word alignment would be OK, there might be a small + * performance advantage to using dword. + * @dout: Output buffer (for data to EC). This buffer will always be + * dword-aligned and include enough space for up to 7 word-alignment + * bytes also, so we can ensure that the body of the message is always + * dword-aligned (64-bit). We use this alignment to keep ARM and x86 + * happy. Probably word alignment would be OK, there might be a small + * performance advantage to using dword. + * @din_size: Size of din buffer to allocate (zero to use static din). + * @dout_size: Size of dout buffer to allocate (zero to use static dout). + * @wake_enabled: True if this device can wake the system from sleep. + * @suspended: True if this device had been suspended. + * @cmd_xfer: Send command to EC and get response. + * Returns the number of bytes received if the communication + * succeeded, but that doesn't mean the EC was happy with the + * command. The caller should check msg.result for the EC's result + * code. + * @pkt_xfer: Send packet to EC and get response. + * @lock: One transaction at a time. + * @mkbp_event_supported: True if this EC supports the MKBP event protocol. + * @host_sleep_v1: True if this EC supports the sleep v1 command. + * @event_notifier: Interrupt event notifier for transport devices. + * @event_data: Raw payload transferred with the MKBP event. + * @event_size: Size in bytes of the event data. + * @host_event_wake_mask: Mask of host events that cause wake from suspend. + * @ec: The platform_device used by the mfd driver to interface with the + * main EC. + * @pd: The platform_device used by the mfd driver to interface with the + * PD behind an EC. 
+ */ +struct cros_ec_device { + /* These are used by other drivers that want to talk to the EC */ + const char *phys_name; + struct device *dev; + bool was_wake_device; + struct class *cros_class; + int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset, + unsigned int bytes, void *dest); + + /* These are used to implement the platform-specific interface */ + u16 max_request; + u16 max_response; + u16 max_passthru; + u16 proto_version; + void *priv; + int irq; + u8 *din; + u8 *dout; + int din_size; + int dout_size; + bool wake_enabled; + bool suspended; + int (*cmd_xfer)(struct cros_ec_device *ec, + struct cros_ec_command *msg); + int (*pkt_xfer)(struct cros_ec_device *ec, + struct cros_ec_command *msg); + struct mutex lock; + bool mkbp_event_supported; + bool host_sleep_v1; + struct blocking_notifier_head event_notifier; + + struct ec_response_get_next_event_v1 event_data; + int event_size; + u32 host_event_wake_mask; + u32 last_resume_result; + + /* The platform devices used by the mfd driver */ + struct platform_device *ec; + struct platform_device *pd; +}; + +/** + * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information. + * @sensor_num: Id of the sensor, as reported by the EC. + */ +struct cros_ec_sensor_platform { + u8 sensor_num; +}; + +/** + * struct cros_ec_platform - ChromeOS EC platform information. + * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...) + * used in /dev/ and sysfs. + * @cmd_offset: Offset to apply for each command. Set when + * registering a device behind another one. + */ +struct cros_ec_platform { + const char *ec_name; + u16 cmd_offset; +}; + +/** + * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device. + * @ec_dev: Device to suspend. + * + * This can be called by drivers to handle a suspend event. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_suspend(struct cros_ec_device *ec_dev); + +/** + * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device. + * @ec_dev: Device to resume. + * + * This can be called by drivers to handle a resume event. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_resume(struct cros_ec_device *ec_dev); + +/** + * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer. + * @ec_dev: Device to register. + * @msg: Message to write. + * + * This is intended to be used by all ChromeOS EC drivers, but at present + * only SPI uses it. Once LPC uses the same protocol it can start using it. + * I2C could use it now, with a refactor of the existing code. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_check_result() - Check ec_msg->result. + * @ec_dev: EC device. + * @msg: Message to check. + * + * This is used by ChromeOS EC drivers to check the ec_msg->result for + * errors and to warn about them. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_check_result(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC. + * @ec_dev: EC device. + * @msg: Message to write. + * + * Call this to send a command to the ChromeOS EC. This should be used + * instead of calling the EC's cmd_xfer() callback directly. + * + * Return: 0 on success or negative error code. 
+ */ +int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC. + * @ec_dev: EC device. + * @msg: Message to write. + * + * This function is identical to cros_ec_cmd_xfer, except it returns success + * status only if both the command was transmitted successfully and the EC + * replied with success status. It's not necessary to check msg->result when + * using this function. + * + * Return: The number of bytes transferred on success or negative error code. + */ +int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** + * cros_ec_register() - Register a new ChromeOS EC, using the provided info. + * @ec_dev: Device to register. + * + * Before calling this, allocate a pointer to a new device and then fill + * in all the fields up to the --private-- marker. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_register(struct cros_ec_device *ec_dev); + +/** + * cros_ec_unregister() - Remove a ChromeOS EC. + * @ec_dev: Device to unregister. + * + * Call this to deregister a ChromeOS EC, then clean up any private data. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_unregister(struct cros_ec_device *ec_dev); + +/** + * cros_ec_query_all() - Query the protocol version supported by the + * ChromeOS EC. + * @ec_dev: Device to register. + * + * Return: 0 on success or negative error code. + */ +int cros_ec_query_all(struct cros_ec_device *ec_dev); + +/** + * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC. + * @ec_dev: Device to fetch event from. + * @wake_event: Pointer to a bool set to true upon return if the event might be + * treated as a wake event. Ignored if null. + * + * Return: negative error code on errors; 0 for no data; or else number of + * bytes received (i.e., an event was retrieved successfully). Event types are + * written out to @ec_dev->event_data.event_type on success. + */ +int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); + +/** + * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC. + * @ec_dev: Device to fetch event from. + * + * When MKBP is supported, when the EC raises an interrupt, we collect the + * events raised and call the functions in the ec notifier. This function + * is a helper to know which events are raised. + * + * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*. + */ +u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); + +#endif /* __LINUX_CROS_EC_PROTO_H */ diff --git a/include/linux/platform_data/db8500_thermal.h b/include/linux/platform_data/db8500_thermal.h deleted file mode 100644 index 55e55750a165..000000000000 --- a/include/linux/platform_data/db8500_thermal.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * db8500_thermal.h - DB8500 Thermal Management Implementation - * - * Copyright (C) 2012 ST-Ericsson - * Copyright (C) 2012 Linaro Ltd. 
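Putting the two headers together, a minimal sketch of a consumer issuing one host command through cros_ec_cmd_xfer_status() might look as follows; it assumes the usual <linux/slab.h> and <linux/string.h> helpers, reuses EC_CMD_USB_PD_MUX_INFO and its parameter/response structures from cros_ec_commands.h earlier in this patch, and the function name is invented.

static int example_read_mux_info(struct cros_ec_device *ec_dev, uint8_t port,
				 uint8_t *flags)
{
	struct ec_params_usb_pd_mux_info params = { .port = port };
	struct ec_response_usb_pd_mux_info *resp;
	struct cros_ec_command *msg;
	int ret;

	/* One allocation covers the header plus the larger of the payloads. */
	msg = kzalloc(sizeof(*msg) + max(sizeof(params), sizeof(*resp)),
		      GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->version = 0;
	msg->command = EC_CMD_USB_PD_MUX_INFO;
	msg->outsize = sizeof(params);
	msg->insize = sizeof(*resp);
	memcpy(msg->data, &params, sizeof(params));

	ret = cros_ec_cmd_xfer_status(ec_dev, msg);
	if (ret >= 0) {
		resp = (struct ec_response_usb_pd_mux_info *)msg->data;
		*flags = resp->flags;
	}

	kfree(msg);
	return ret < 0 ? ret : 0;
}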
- * - * Author: Hongbo Zhang <hongbo.zhang@linaro.com> - */ - -#ifndef _DB8500_THERMAL_H_ -#define _DB8500_THERMAL_H_ - -#include <linux/thermal.h> - -#define COOLING_DEV_MAX 8 - -struct db8500_trip_point { - unsigned long temp; - enum thermal_trip_type type; - char cdev_name[COOLING_DEV_MAX][THERMAL_NAME_LENGTH]; -}; - -struct db8500_thsens_platform_data { - struct db8500_trip_point trip_points[THERMAL_MAX_TRIPS]; - int num_trips; -}; - -#endif /* _DB8500_THERMAL_H_ */ diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h index 6eaa53cef0bd..30e676b36b24 100644 --- a/include/linux/platform_data/dma-imx-sdma.h +++ b/include/linux/platform_data/dma-imx-sdma.h @@ -51,7 +51,10 @@ struct sdma_script_start_addrs { /* End of v2 array */ s32 zcanfd_2_mcu_addr; s32 zqspi_2_mcu_addr; + s32 mcu_2_ecspi_addr; /* End of v3 array */ + s32 mcu_2_zqspi_addr; + /* End of v4 array */ }; /** diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 80f9be858bd0..281adbb26e6b 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -52,7 +52,6 @@ struct imx_dma_data { int dma_request2; /* secondary DMA request line */ enum sdma_peripheral_type peripheral_type; int priority; - struct device_node *of_node; }; static inline int imx_dma_is_ipu(struct dma_chan *chan) diff --git a/include/linux/platform_data/dma-iop32x.h b/include/linux/platform_data/dma-iop32x.h new file mode 100644 index 000000000000..ac83cff89549 --- /dev/null +++ b/include/linux/platform_data/dma-iop32x.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2006, Intel Corporation. + */ +#ifndef IOP_ADMA_H +#define IOP_ADMA_H +#include <linux/types.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> + +#define IOP_ADMA_SLOT_SIZE 32 +#define IOP_ADMA_THRESHOLD 4 +#ifdef DEBUG +#define IOP_PARANOIA 1 +#else +#define IOP_PARANOIA 0 +#endif +#define iop_paranoia(x) BUG_ON(IOP_PARANOIA && (x)) + +#define DMA0_ID 0 +#define DMA1_ID 1 +#define AAU_ID 2 + +/** + * struct iop_adma_device - internal representation of an ADMA device + * @pdev: Platform device + * @id: HW ADMA Device selector + * @dma_desc_pool: base of DMA descriptor region (DMA address) + * @dma_desc_pool_virt: base of DMA descriptor region (CPU address) + * @common: embedded struct dma_device + */ +struct iop_adma_device { + struct platform_device *pdev; + int id; + dma_addr_t dma_desc_pool; + void *dma_desc_pool_virt; + struct dma_device common; +}; + +/** + * struct iop_adma_chan - internal representation of an ADMA device + * @pending: allows batching of hardware operations + * @lock: serializes enqueue/dequeue operations to the slot pool + * @mmr_base: memory mapped register base + * @chain: device chain view of the descriptors + * @device: parent device + * @common: common dmaengine channel object members + * @last_used: place holder for allocation to continue from where it left off + * @all_slots: complete domain of slots usable by the channel + * @slots_allocated: records the actual size of the descriptor slot pool + * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs + */ +struct iop_adma_chan { + int pending; + spinlock_t lock; /* protects the descriptor slot pool */ + void __iomem *mmr_base; + struct list_head chain; + struct iop_adma_device *device; + struct dma_chan common; + struct iop_adma_desc_slot *last_used; + struct list_head all_slots; + int slots_allocated; + struct tasklet_struct 
irq_tasklet; +}; + +/** + * struct iop_adma_desc_slot - IOP-ADMA software descriptor + * @slot_node: node on the iop_adma_chan.all_slots list + * @chain_node: node on the op_adma_chan.chain list + * @hw_desc: virtual address of the hardware descriptor chain + * @phys: hardware address of the hardware descriptor chain + * @group_head: first operation in a transaction + * @slot_cnt: total slots used in an transaction (group of operations) + * @slots_per_op: number of slots per operation + * @idx: pool index + * @tx_list: list of descriptors that are associated with one operation + * @async_tx: support for the async_tx api + * @group_list: list of slots that make up a multi-descriptor transaction + * for example transfer lengths larger than the supported hw max + * @xor_check_result: result of zero sum + * @crc32_result: result crc calculation + */ +struct iop_adma_desc_slot { + struct list_head slot_node; + struct list_head chain_node; + void *hw_desc; + struct iop_adma_desc_slot *group_head; + u16 slot_cnt; + u16 slots_per_op; + u16 idx; + struct list_head tx_list; + struct dma_async_tx_descriptor async_tx; + union { + u32 *xor_check_result; + u32 *crc32_result; + u32 *pq_check_result; + }; +}; + +struct iop_adma_platform_data { + int hw_id; + dma_cap_mask_t cap_mask; + size_t pool_size; +}; + +#define to_iop_sw_desc(addr_hw_desc) \ + container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc) +#define iop_hw_desc_slot_idx(hw_desc, idx) \ + ( (void *) (((unsigned long) hw_desc) + ((idx) << 5)) ) +#endif diff --git a/include/linux/platform_data/dwc3-omap.h b/include/linux/platform_data/dwc3-omap.h deleted file mode 100644 index 1d36ca874cc8..000000000000 --- a/include/linux/platform_data/dwc3-omap.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * dwc3-omap.h - OMAP Specific Glue layer, header. - * - * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com - * All rights reserved. - * - * Author: Felipe Balbi <balbi@ti.com> - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions, and the following disclaimer, - * without modification. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The names of the above-listed copyright holders may not be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * ALTERNATIVELY, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2, as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS - * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -enum dwc3_omap_utmi_mode { - DWC3_OMAP_UTMI_MODE_UNKNOWN = 0, - DWC3_OMAP_UTMI_MODE_HW, - DWC3_OMAP_UTMI_MODE_SW, -}; diff --git a/include/linux/platform_data/eth-netx.h b/include/linux/platform_data/eth-netx.h deleted file mode 100644 index a3a6322668d8..000000000000 --- a/include/linux/platform_data/eth-netx.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - */ - -#ifndef __ETH_NETX_H -#define __ETH_NETX_H - -struct netxeth_platform_data { - unsigned int xcno; /* number of xmac/xpec engine this eth uses */ -}; - -#endif diff --git a/include/linux/platform_data/fsa9480.h b/include/linux/platform_data/fsa9480.h deleted file mode 100644 index dea8d84448ec..000000000000 --- a/include/linux/platform_data/fsa9480.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2010 Samsung Electronics - * Minkyu Kang <mk7.kang@samsung.com> - */ - -#ifndef _FSA9480_H_ -#define _FSA9480_H_ - -#define FSA9480_ATTACHED 1 -#define FSA9480_DETACHED 0 - -struct fsa9480_platform_data { - void (*cfg_gpio) (void); - void (*usb_cb) (u8 attached); - void (*uart_cb) (u8 attached); - void (*charger_cb) (u8 attached); - void (*jig_cb) (u8 attached); - void (*reset_cb) (void); - void (*usb_power) (u8 on); - int wakeup; -}; - -#endif /* _FSA9480_H_ */ diff --git a/include/linux/platform_data/gpio-htc-egpio.h b/include/linux/platform_data/gpio-htc-egpio.h index 9a3e78082883..eaefba0b6465 100644 --- a/include/linux/platform_data/gpio-htc-egpio.h +++ b/include/linux/platform_data/gpio-htc-egpio.h @@ -50,7 +50,4 @@ struct htc_egpio_platform_data { int num_chips; }; -/* Determine the wakeup irq, to be called during early resume */ -extern int htc_egpio_get_wakeup_irq(struct device *dev); - #endif diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index 17edc43201d2..8b30b14b47d3 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -186,7 +186,7 @@ struct omap_gpio_platform_data { bool is_mpuio; /* whether the bank is of type MPUIO */ u32 non_wakeup_gpios; - struct omap_gpio_reg_offs *regs; + const struct omap_gpio_reg_offs *regs; /* Return context loss count due to PM states changing */ int (*get_context_loss_count)(struct device *dev); diff --git a/include/linux/platform_data/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h index 9f6ca406505b..5e4c2c272a73 100644 --- a/include/linux/platform_data/i2c-mux-gpio.h +++ b/include/linux/platform_data/i2c-mux-gpio.h @@ -19,10 +19,6 @@ * position * @n_values: Number of multiplexer positions (busses to instantiate) * @classes: Optional I2C auto-detection classes - * @gpio_chip: Optional GPIO chip name; if set, GPIO pin numbers are given - * relative to the base GPIO number of that chip - * @gpios: Array of GPIO numbers used to control MUX - * @n_gpios: Number of GPIOs used to control MUX * 
@idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used */ struct i2c_mux_gpio_platform_data { @@ -31,9 +27,6 @@ struct i2c_mux_gpio_platform_data { const unsigned *values; int n_values; const unsigned *classes; - char *gpio_chip; - const unsigned *gpios; - int n_gpios; unsigned idle; }; diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index 44d913a7580c..8474a0208b34 100644 --- a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h @@ -13,4 +13,8 @@ struct iommu_platform_data { const char *reset_name; int (*assert_reset)(struct platform_device *pdev, const char *name); int (*deassert_reset)(struct platform_device *pdev, const char *name); + int (*device_enable)(struct platform_device *pdev); + int (*device_idle)(struct platform_device *pdev); + int (*set_pwrdm_constraint)(struct platform_device *pdev, bool request, + u8 *pwrst); }; diff --git a/include/linux/platform_data/keypad-w90p910.h b/include/linux/platform_data/keypad-w90p910.h deleted file mode 100644 index 206ca4ecd93f..000000000000 --- a/include/linux/platform_data/keypad-w90p910.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_ARCH_W90P910_KEYPAD_H -#define __ASM_ARCH_W90P910_KEYPAD_H - -#include <linux/input/matrix_keypad.h> - -extern void mfp_set_groupi(struct device *dev); - -struct w90p910_keypad_platform_data { - const struct matrix_keymap_data *keymap_data; - - unsigned int prescale; - unsigned int debounce; -}; - -#endif /* __ASM_ARCH_W90P910_KEYPAD_H */ diff --git a/include/linux/platform_data/leds-kirkwood-netxbig.h b/include/linux/platform_data/leds-kirkwood-netxbig.h deleted file mode 100644 index 3c85a735c380..000000000000 --- a/include/linux/platform_data/leds-kirkwood-netxbig.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Platform data structure for netxbig LED driver - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
- */ - -#ifndef __LEDS_KIRKWOOD_NETXBIG_H -#define __LEDS_KIRKWOOD_NETXBIG_H - -struct netxbig_gpio_ext { - unsigned *addr; - int num_addr; - unsigned *data; - int num_data; - unsigned enable; -}; - -enum netxbig_led_mode { - NETXBIG_LED_OFF, - NETXBIG_LED_ON, - NETXBIG_LED_SATA, - NETXBIG_LED_TIMER1, - NETXBIG_LED_TIMER2, - NETXBIG_LED_MODE_NUM, -}; - -#define NETXBIG_LED_INVALID_MODE NETXBIG_LED_MODE_NUM - -struct netxbig_led_timer { - unsigned long delay_on; - unsigned long delay_off; - enum netxbig_led_mode mode; -}; - -struct netxbig_led { - const char *name; - const char *default_trigger; - int mode_addr; - int *mode_val; - int bright_addr; - int bright_max; -}; - -struct netxbig_led_platform_data { - struct netxbig_gpio_ext *gpio_ext; - struct netxbig_led_timer *timer; - int num_timer; - struct netxbig_led *leds; - int num_leds; -}; - -#endif /* __LEDS_KIRKWOOD_NETXBIG_H */ diff --git a/include/linux/platform_data/media/mmp-camera.h b/include/linux/platform_data/media/mmp-camera.h index d2d3a443eedf..53adaab64f28 100644 --- a/include/linux/platform_data/media/mmp-camera.h +++ b/include/linux/platform_data/media/mmp-camera.h @@ -12,11 +12,7 @@ enum dphy3_algo { }; struct mmp_camera_platform_data { - struct platform_device *i2c_device; - int sensor_power_gpio; - int sensor_reset_gpio; enum v4l2_mbus_type bus_type; - int mclk_min; /* The minimal value of MCLK */ int mclk_src; /* which clock source the MCLK derives from */ int mclk_div; /* Clock Divider Value for MCLK */ /* diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h deleted file mode 100644 index 97827ad468e2..000000000000 --- a/include/linux/platform_data/nxp-nci.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Generic platform data for the NXP NCI NFC chips. - * - * Copyright (C) 2014 NXP Semiconductors All rights reserved. - * - * Authors: Clément Perrochaud <clement.perrochaud@nxp.com> - */ - -#ifndef _NXP_NCI_H_ -#define _NXP_NCI_H_ - -struct nxp_nci_nfc_platform_data { - unsigned int gpio_en; - unsigned int gpio_fw; - unsigned int irq; -}; - -#endif /* _NXP_NCI_H_ */ diff --git a/include/linux/platform_data/pinctrl-single.h b/include/linux/platform_data/pinctrl-single.h index 1cf36fdf9510..7473d3c4cabf 100644 --- a/include/linux/platform_data/pinctrl-single.h +++ b/include/linux/platform_data/pinctrl-single.h @@ -1,4 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _PINCTRL_SINGLE_H +#define _PINCTRL_SINGLE_H + /** * irq: optional wake-up interrupt * rearm: optional soc specific rearm function @@ -11,3 +15,5 @@ struct pcs_pdata { int irq; void (*rearm)(void); }; + +#endif /* _PINCTRL_SINGLE_H */ diff --git a/include/linux/platform_data/sc18is602.h b/include/linux/platform_data/sc18is602.h index e066d3b0d6d8..0e91489edfe6 100644 --- a/include/linux/platform_data/sc18is602.h +++ b/include/linux/platform_data/sc18is602.h @@ -4,7 +4,7 @@ * * Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net> * - * For further information, see the Documentation/spi/spi-sc18is602 file. + * For further information, see the Documentation/spi/spi-sc18is602.rst file. 
*/ /** diff --git a/include/linux/platform_data/sgi-w1.h b/include/linux/platform_data/sgi-w1.h new file mode 100644 index 000000000000..e28c8a90ff84 --- /dev/null +++ b/include/linux/platform_data/sgi-w1.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * SGI One-Wire (W1) IP + */ + +#ifndef PLATFORM_DATA_SGI_W1_H +#define PLATFORM_DATA_SGI_W1_H + +struct sgi_w1_platform_data { + char dev_id[64]; +}; + +#endif /* PLATFORM_DATA_SGI_W1_H */ diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h index 617a75336d56..f0e6d6483e62 100644 --- a/include/linux/platform_data/spi-mt65xx.h +++ b/include/linux/platform_data/spi-mt65xx.h @@ -11,8 +11,6 @@ /* Board specific platform_data */ struct mtk_chip_config { - u32 tx_mlsb; - u32 rx_mlsb; u32 cs_pol; u32 sample_sel; }; diff --git a/include/linux/platform_data/spi-nuc900.h b/include/linux/platform_data/spi-nuc900.h deleted file mode 100644 index ca3510877000..000000000000 --- a/include/linux/platform_data/spi-nuc900.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2009 Nuvoton technology corporation. - * - * Wan ZongShun <mcuos.com@gmail.com> - */ - -#ifndef __SPI_NUC900_H -#define __SPI_NUC900_H - -extern void mfp_set_groupg(struct device *dev, const char *subname); - -struct nuc900_spi_info { - unsigned int num_cs; - unsigned int lsb; - unsigned int txneg; - unsigned int rxneg; - unsigned int divider; - unsigned int sleep; - unsigned int txnum; - unsigned int txbitlen; - int bus_num; -}; - -struct nuc900_spi_chip { - unsigned char bits_per_word; -}; - -#endif /* __SPI_NUC900_H */ diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 9256c0305968..b5b7a3423ca8 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + #ifndef __TI_SYSC_DATA_H__ #define __TI_SYSC_DATA_H__ @@ -19,6 +21,7 @@ enum ti_sysc_module_type { struct ti_sysc_cookie { void *data; + void *clkdm; }; /** @@ -46,6 +49,11 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_SGX BIT(18) +#define SYSC_MODULE_QUIRK_HDQ1W BIT(17) +#define SYSC_MODULE_QUIRK_I2C BIT(16) +#define SYSC_MODULE_QUIRK_WDT BIT(15) +#define SYSS_QUIRK_RESETDONE_INVERTED BIT(14) #define SYSC_QUIRK_SWSUP_MSTANDBY BIT(13) #define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12) #define SYSC_QUIRK_SWSUP_SIDLE BIT(11) @@ -65,7 +73,7 @@ struct sysc_regbits { /** * struct sysc_capabilities - capabilities for an interconnect target module - * + * @type: sysc type identifier for the module * @sysc_mask: bitmask of supported SYSCONFIG register bits * @regbits: bitmask of SYSCONFIG register bits * @mod_quirks: bitmask of module specific quirks @@ -80,8 +88,9 @@ struct sysc_capabilities { /** * struct sysc_config - configuration for an interconnect target module * @sysc_val: configured value for sysc register + * @syss_mask: configured mask value for SYSSTATUS register * @midlemodes: bitmask of supported master idle modes - * @sidlemodes: bitmask of supported master idle modes + * @sidlemodes: bitmask of supported slave idle modes * @srst_udelay: optional delay needed after OCP soft reset * @quirks: bitmask of enabled quirks */ @@ -125,9 +134,16 @@ struct ti_sysc_module_data { }; struct device; +struct clk; struct ti_sysc_platform_data { struct of_dev_auxdata *auxdata; + int (*init_clockdomain)(struct device *dev, struct clk *fck, + struct clk *ick, struct ti_sysc_cookie 
*cookie); + void (*clkdm_deny_idle)(struct device *dev, + const struct ti_sysc_cookie *cookie); + void (*clkdm_allow_idle)(struct device *dev, + const struct ti_sysc_cookie *cookie); int (*init_module)(struct device *dev, const struct ti_sysc_module_data *data, struct ti_sysc_cookie *cookie); diff --git a/include/linux/platform_data/video-clcd-versatile.h b/include/linux/platform_data/video-clcd-versatile.h deleted file mode 100644 index 305ebaec3afd..000000000000 --- a/include/linux/platform_data/video-clcd-versatile.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef PLAT_CLCD_H -#define PLAT_CLCD_H - -#ifdef CONFIG_PLAT_VERSATILE_CLCD -struct clcd_panel *versatile_clcd_get_panel(const char *); -int versatile_clcd_setup_dma(struct clcd_fb *, unsigned long); -int versatile_clcd_mmap_dma(struct clcd_fb *, struct vm_area_struct *); -void versatile_clcd_remove_dma(struct clcd_fb *); -#else -static inline struct clcd_panel *versatile_clcd_get_panel(const char *s) -{ - return NULL; -} -static inline int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize) -{ - return -ENODEV; -} -static inline int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vm) -{ - return -ENODEV; -} -static inline void versatile_clcd_remove_dma(struct clcd_fb *fb) -{ -} -#endif - -#endif diff --git a/include/linux/platform_data/video-nuc900fb.h b/include/linux/platform_data/video-nuc900fb.h deleted file mode 100644 index 3da504460c91..000000000000 --- a/include/linux/platform_data/video-nuc900fb.h +++ /dev/null @@ -1,79 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* linux/include/asm/arch-nuc900/fb.h - * - * Copyright (c) 2008 Nuvoton technology corporation - * All rights reserved. - * - * Changelog: - * - * 2008/08/26 vincen.zswan modify this file for LCD. 
- */ - -#ifndef __ASM_ARM_FB_H -#define __ASM_ARM_FB_H - - - -/* LCD Controller Hardware Desc */ -struct nuc900fb_hw { - unsigned int lcd_dccs; - unsigned int lcd_device_ctrl; - unsigned int lcd_mpulcd_cmd; - unsigned int lcd_int_cs; - unsigned int lcd_crtc_size; - unsigned int lcd_crtc_dend; - unsigned int lcd_crtc_hr; - unsigned int lcd_crtc_hsync; - unsigned int lcd_crtc_vr; - unsigned int lcd_va_baddr0; - unsigned int lcd_va_baddr1; - unsigned int lcd_va_fbctrl; - unsigned int lcd_va_scale; - unsigned int lcd_va_test; - unsigned int lcd_va_win; - unsigned int lcd_va_stuff; -}; - -/* LCD Display Description */ -struct nuc900fb_display { - /* LCD Image type */ - unsigned type; - - /* LCD Screen Size */ - unsigned short width; - unsigned short height; - - /* LCD Screen Info */ - unsigned short xres; - unsigned short yres; - unsigned short bpp; - - unsigned long pixclock; - unsigned short left_margin; - unsigned short right_margin; - unsigned short hsync_len; - unsigned short upper_margin; - unsigned short lower_margin; - unsigned short vsync_len; - - /* hardware special register value */ - unsigned int dccs; - unsigned int devctl; - unsigned int fbctrl; - unsigned int scale; -}; - -struct nuc900fb_mach_info { - struct nuc900fb_display *displays; - unsigned num_displays; - unsigned default_display; - /* GPIO Setting Info */ - unsigned gpio_dir; - unsigned gpio_dir_mask; - unsigned gpio_data; - unsigned gpio_data_mask; -}; - -extern void __init nuc900_fb_set_platdata(struct nuc900fb_mach_info *); - -#endif /* __ASM_ARM_FB_H */ diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h index 1ff224793c99..ad03b586a095 100644 --- a/include/linux/platform_data/wilco-ec.h +++ b/include/linux/platform_data/wilco-ec.h @@ -13,12 +13,9 @@ /* Message flags for using the mailbox() interface */ #define WILCO_EC_FLAG_NO_RESPONSE BIT(0) /* EC does not respond */ -#define WILCO_EC_FLAG_EXTENDED_DATA BIT(1) /* EC returns 256 data bytes */ /* Normal commands have a maximum 32 bytes of data */ #define EC_MAILBOX_DATA_SIZE 32 -/* Extended commands have 256 bytes of response data */ -#define EC_MAILBOX_DATA_SIZE_EXTENDED 256 /** * struct wilco_ec_device - Wilco Embedded Controller handle. @@ -32,6 +29,7 @@ * @data_size: Size of the data buffer used for EC communication. * @debugfs_pdev: The child platform_device used by the debugfs sub-driver. * @rtc_pdev: The child platform_device used by the RTC sub-driver. + * @telem_pdev: The child platform_device used by the telemetry sub-driver. */ struct wilco_ec_device { struct device *dev; @@ -43,6 +41,7 @@ struct wilco_ec_device { size_t data_size; struct platform_device *debugfs_pdev; struct platform_device *rtc_pdev; + struct platform_device *telem_pdev; }; /** @@ -85,14 +84,12 @@ struct wilco_ec_response { * enum wilco_ec_msg_type - Message type to select a set of command codes. * @WILCO_EC_MSG_LEGACY: Legacy EC messages for standard EC behavior. * @WILCO_EC_MSG_PROPERTY: Get/Set/Sync EC controlled NVRAM property. - * @WILCO_EC_MSG_TELEMETRY_SHORT: 32 bytes of telemetry data provided by the EC. - * @WILCO_EC_MSG_TELEMETRY_LONG: 256 bytes of telemetry data provided by the EC. + * @WILCO_EC_MSG_TELEMETRY: Request telemetry data from the EC. 
*/ enum wilco_ec_msg_type { WILCO_EC_MSG_LEGACY = 0x00f0, WILCO_EC_MSG_PROPERTY = 0x00f2, - WILCO_EC_MSG_TELEMETRY_SHORT = 0x00f5, - WILCO_EC_MSG_TELEMETRY_LONG = 0x00f6, + WILCO_EC_MSG_TELEMETRY = 0x00f5, }; /** @@ -123,4 +120,87 @@ struct wilco_ec_message { */ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg); +/* + * A Property is typically a data item that is stored to NVRAM + * by the EC. Each of these data items has an index associated + * with it, known as the Property ID (PID). Properties may have + * variable lengths, up to a max of WILCO_EC_PROPERTY_MAX_SIZE + * bytes. Properties can be simple integers, or they may be more + * complex binary data. + */ + +#define WILCO_EC_PROPERTY_MAX_SIZE 4 + +/** + * struct ec_property_set_msg - Message to get or set a property. + * @property_id: Which property to get or set. + * @length: Number of bytes of |data| that are used. + * @data: Actual property data. + */ +struct wilco_ec_property_msg { + u32 property_id; + int length; + u8 data[WILCO_EC_PROPERTY_MAX_SIZE]; +}; + +/** + * wilco_ec_get_property() - Retrieve a property from the EC. + * @ec: Embedded Controller device. + * @prop_msg: Message for request and response. + * + * The property_id field of |prop_msg| should be filled before calling this + * function. The result will be stored in the data and length fields. + * + * Return: 0 on success, negative error code on failure. + */ +int wilco_ec_get_property(struct wilco_ec_device *ec, + struct wilco_ec_property_msg *prop_msg); + +/** + * wilco_ec_set_property() - Store a property on the EC. + * @ec: Embedded Controller device. + * @prop_msg: Message for request and response. + * + * The property_id, length, and data fields of |prop_msg| should be + * filled before calling this function. + * + * Return: 0 on success, negative error code on failure. + */ +int wilco_ec_set_property(struct wilco_ec_device *ec, + struct wilco_ec_property_msg *prop_msg); + +/** + * wilco_ec_get_byte_property() - Retrieve a byte-size property from the EC. + * @ec: Embedded Controller device. + * @property_id: Which property to retrieve. + * @val: The result value, will be filled by this function. + * + * Return: 0 on success, negative error code on failure. + */ +int wilco_ec_get_byte_property(struct wilco_ec_device *ec, u32 property_id, + u8 *val); + +/** + * wilco_ec_get_byte_property() - Store a byte-size property on the EC. + * @ec: Embedded Controller device. + * @property_id: Which property to store. + * @val: Value to store. + * + * Return: 0 on success, negative error code on failure. + */ +int wilco_ec_set_byte_property(struct wilco_ec_device *ec, u32 property_id, + u8 val); + +/** + * wilco_ec_add_sysfs() - Create sysfs entries + * @ec: Wilco EC device + * + * wilco_ec_remove_sysfs() needs to be called afterwards + * to perform the necessary cleanup. + * + * Return: 0 on success or negative error code on failure. + */ +int wilco_ec_add_sysfs(struct wilco_ec_device *ec); +void wilco_ec_remove_sysfs(struct wilco_ec_device *ec); + #endif /* WILCO_EC_H */ diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index bfba245636a7..60249e22e844 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -12,14 +12,14 @@ #define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? 
(Resol) */ #define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */ #define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */ -#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* FaN? */ +#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* Atk Generic FuNction */ #define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */ #define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */ #define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */ #define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */ #define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */ -#define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */ -#define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/ +#define ASUS_WMI_METHODID_DCTS 0x53544344 /* Device status (DCTS) */ +#define ASUS_WMI_METHODID_DSTS 0x53545344 /* Device status (DSTS) */ #define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */ #define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */ #define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */ @@ -57,6 +57,7 @@ #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */ #define ASUS_WMI_DEVID_LIGHTBAR 0x00050025 +#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018 /* Misc */ #define ASUS_WMI_DEVID_CAMERA 0x00060013 @@ -71,7 +72,8 @@ /* Fan, Thermal */ #define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011 -#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 +#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */ +#define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013 /* Power */ #define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012 @@ -79,6 +81,9 @@ /* Deep S3 / Resume on LID open */ #define ASUS_WMI_DEVID_LID_RESUME 0x00120031 +/* Maximum charging percentage */ +#define ASUS_WMI_DEVID_RSOC 0x00120057 + /* DSTS masks */ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 diff --git a/include/linux/platform_data/xilinx-ll-temac.h b/include/linux/platform_data/xilinx-ll-temac.h index 368530f98176..f4a68136afa6 100644 --- a/include/linux/platform_data/xilinx-ll-temac.h +++ b/include/linux/platform_data/xilinx-ll-temac.h @@ -4,6 +4,7 @@ #include <linux/if_ether.h> #include <linux/phy.h> +#include <linux/spinlock.h> struct ll_temac_platform_data { bool txcsum; /* Enable/disable TX checksum */ @@ -21,7 +22,7 @@ struct ll_temac_platform_data { * TEMAC IP block, the same mutex should be passed here, as * they share the same DCR bus bridge. */ - struct mutex *indirect_mutex; + spinlock_t *indirect_lock; /* DMA channel control setup */ u8 tx_irq_timeout; /* TX Interrupt Delay Time-out */ u8 tx_irq_count; /* TX Interrupt Coalescing Threshold Count */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index beb25f277889..f2688404d1cd 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -4,7 +4,7 @@ * * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> * - * See Documentation/driver-model/ for more information. + * See Documentation/driver-api/driver-model/ for more information. 
*/ #ifndef _PLATFORM_DEVICE_H_ @@ -24,6 +24,7 @@ struct platform_device { int id; bool id_auto; struct device dev; + u64 dma_mask; u32 num_resources; struct resource *resource; @@ -48,18 +49,23 @@ extern void platform_device_unregister(struct platform_device *); extern struct bus_type platform_bus_type; extern struct device platform_bus; -extern void arch_setup_pdev_archdata(struct platform_device *); extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); +extern struct device * +platform_find_device_by_driver(struct device *start, + const struct device_driver *drv); extern void __iomem * devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index); extern int platform_get_irq(struct platform_device *, unsigned int); +extern int platform_get_irq_optional(struct platform_device *, unsigned int); extern int platform_irq_count(struct platform_device *); extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, const char *); extern int platform_get_irq_byname(struct platform_device *, const char *); +extern int platform_get_irq_byname_optional(struct platform_device *dev, + const char *name); extern int platform_add_devices(struct platform_device **, int); struct platform_device_info { diff --git a/include/linux/pm.h b/include/linux/pm.h index 345d74a727e3..4c441be03079 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -271,7 +271,7 @@ typedef struct pm_message { * actions to be performed by a device driver's callbacks generally depend on * the platform and subsystem the device belongs to. * - * Refer to Documentation/power/runtime_pm.txt for more information about the + * Refer to Documentation/power/runtime_pm.rst for more information about the * role of the @runtime_suspend(), @runtime_resume() and @runtime_idle() * callbacks in device runtime power management. 
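A rough probe() sketch showing the new platform_get_irq_optional() next to devm_platform_ioremap_resource(); the driver, resource index, and polling fallback are hypothetical, the point being that a missing optional interrupt is not treated as an error.

static int foo_probe(struct platform_device *pdev)
{
        void __iomem *base;
        int irq;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* The second interrupt is optional on this hypothetical device. */
        irq = platform_get_irq_optional(pdev, 1);
        if (irq < 0)
                irq = 0;        /* no IRQ wired up: fall back to polling */

        return 0;
}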
*/ @@ -712,8 +712,6 @@ struct dev_pm_domain { extern void device_pm_lock(void); extern void dpm_resume_start(pm_message_t state); extern void dpm_resume_end(pm_message_t state); -extern void dpm_noirq_resume_devices(pm_message_t state); -extern void dpm_noirq_end(void); extern void dpm_resume_noirq(pm_message_t state); extern void dpm_resume_early(pm_message_t state); extern void dpm_resume(pm_message_t state); @@ -722,8 +720,6 @@ extern void dpm_complete(pm_message_t state); extern void device_pm_unlock(void); extern int dpm_suspend_end(pm_message_t state); extern int dpm_suspend_start(pm_message_t state); -extern void dpm_noirq_begin(void); -extern int dpm_noirq_suspend_devices(pm_message_t state); extern int dpm_suspend_noirq(pm_message_t state); extern int dpm_suspend_late(pm_message_t state); extern int dpm_suspend(pm_message_t state); @@ -760,7 +756,6 @@ extern int pm_generic_poweroff_late(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); -extern void dev_pm_skip_next_resume_phases(struct device *dev); extern bool dev_pm_may_skip_resume(struct device *dev); extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 91d9bf497071..baf02ff91a31 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -197,9 +197,9 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev); int pm_genpd_remove_device(struct device *dev); int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *new_subdomain); + struct generic_pm_domain *subdomain); int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *target); + struct generic_pm_domain *subdomain); int pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); int pm_genpd_remove(struct generic_pm_domain *genpd); @@ -226,12 +226,12 @@ static inline int pm_genpd_remove_device(struct device *dev) return -ENOSYS; } static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *new_sd) + struct generic_pm_domain *subdomain) { return -ENOSYS; } static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, - struct generic_pm_domain *target) + struct generic_pm_domain *subdomain) { return -ENOSYS; } @@ -282,8 +282,8 @@ int of_genpd_add_provider_onecell(struct device_node *np, struct genpd_onecell_data *data); void of_genpd_del_provider(struct device_node *np); int of_genpd_add_device(struct of_phandle_args *args, struct device *dev); -int of_genpd_add_subdomain(struct of_phandle_args *parent, - struct of_phandle_args *new_subdomain); +int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, + struct of_phandle_args *subdomain_spec); struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int of_genpd_parse_idle_states(struct device_node *dn, struct genpd_power_state **states, int *n); @@ -316,8 +316,8 @@ static inline int of_genpd_add_device(struct of_phandle_args *args, return -ENODEV; } -static inline int of_genpd_add_subdomain(struct of_phandle_args *parent, - struct of_phandle_args *new_subdomain) +static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, + struct of_phandle_args *subdomain_spec) { return -ENODEV; } diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 
5f3a1ee9c4c2..b8197ab014f2 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -96,6 +96,8 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev); struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available); +struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, + unsigned int level); struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); @@ -128,8 +130,8 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); -struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index); -void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev); +struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); +void dev_pm_opp_detach_genpd(struct opp_table *opp_table); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); @@ -200,6 +202,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, return ERR_PTR(-ENOTSUPP); } +static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, + unsigned int level) +{ + return ERR_PTR(-ENOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { @@ -292,12 +300,12 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} -static inline struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index) +static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs) { return ERR_PTR(-ENOTSUPP); } -static inline void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev) {} +static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate) { diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 6ea1ae373d77..ebf5ef17cc2a 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -13,9 +13,6 @@ enum { PM_QOS_RESERVED = 0, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_NETWORK_LATENCY, - PM_QOS_NETWORK_THROUGHPUT, - PM_QOS_MEMORY_BANDWIDTH, /* insert new class ID */ PM_QOS_NUM_CLASSES, @@ -33,9 +30,6 @@ enum pm_qos_flags_status { #define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC) #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) -#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) -#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 -#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0 #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS @@ -139,16 +133,18 @@ s32 pm_qos_read_value(struct 
pm_qos_constraints *c); #ifdef CONFIG_PM enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); -s32 __dev_pm_qos_read_value(struct device *dev); -s32 dev_pm_qos_read_value(struct device *dev); +s32 __dev_pm_qos_resume_latency(struct device *dev); +s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type); int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value); int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); int dev_pm_qos_add_notifier(struct device *dev, - struct notifier_block *notifier); + struct notifier_block *notifier, + enum dev_pm_qos_req_type type); int dev_pm_qos_remove_notifier(struct device *dev, - struct notifier_block *notifier); + struct notifier_block *notifier, + enum dev_pm_qos_req_type type); void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, @@ -174,7 +170,7 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev) return dev->power.qos->flags_req->data.flr.flags; } -static inline s32 dev_pm_qos_raw_read_value(struct device *dev) +static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) { return IS_ERR_OR_NULL(dev->power.qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT : @@ -187,10 +183,20 @@ static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) { return PM_QOS_FLAGS_UNDEFINED; } -static inline s32 __dev_pm_qos_read_value(struct device *dev) - { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } -static inline s32 dev_pm_qos_read_value(struct device *dev) +static inline s32 __dev_pm_qos_resume_latency(struct device *dev) { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } +static inline s32 dev_pm_qos_read_value(struct device *dev, + enum dev_pm_qos_req_type type) +{ + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; + default: + WARN_ON(1); + return 0; + } +} + static inline int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, @@ -202,10 +208,12 @@ static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) { return 0; } static inline int dev_pm_qos_add_notifier(struct device *dev, - struct notifier_block *notifier) + struct notifier_block *notifier, + enum dev_pm_qos_req_type type) { return 0; } static inline int dev_pm_qos_remove_notifier(struct device *dev, - struct notifier_block *notifier) + struct notifier_block *notifier, + enum dev_pm_qos_req_type type) { return 0; } static inline void dev_pm_qos_constraints_init(struct device *dev) { @@ -241,10 +249,54 @@ static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } -static inline s32 dev_pm_qos_raw_read_value(struct device *dev) +static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } #endif +#define FREQ_QOS_MIN_DEFAULT_VALUE 0 +#define FREQ_QOS_MAX_DEFAULT_VALUE (-1) + +enum freq_qos_req_type { + FREQ_QOS_MIN = 1, + 
FREQ_QOS_MAX, +}; + +struct freq_constraints { + struct pm_qos_constraints min_freq; + struct blocking_notifier_head min_freq_notifiers; + struct pm_qos_constraints max_freq; + struct blocking_notifier_head max_freq_notifiers; +}; + +struct freq_qos_request { + enum freq_qos_req_type type; + struct plist_node pnode; + struct freq_constraints *qos; +}; + +static inline int freq_qos_request_active(struct freq_qos_request *req) +{ + return !IS_ERR_OR_NULL(req->qos); +} + +void freq_constraints_init(struct freq_constraints *qos); + +s32 freq_qos_read_value(struct freq_constraints *qos, + enum freq_qos_req_type type); + +int freq_qos_add_request(struct freq_constraints *qos, + struct freq_qos_request *req, + enum freq_qos_req_type type, s32 value); +int freq_qos_update_request(struct freq_qos_request *req, s32 new_value); +int freq_qos_remove_request(struct freq_qos_request *req); + +int freq_qos_add_notifier(struct freq_constraints *qos, + enum freq_qos_req_type type, + struct notifier_block *notifier); +int freq_qos_remove_notifier(struct freq_constraints *qos, + enum freq_qos_req_type type, + struct notifier_block *notifier); + #endif diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index ce57771fab9b..661efa029c96 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -21,6 +21,7 @@ struct wake_irq; * struct wakeup_source - Representation of wakeup sources * * @name: Name of the wakeup source + * @id: Wakeup source id * @entry: Wakeup source list entry * @lock: Wakeup source lock * @wakeirq: Optional device specific wakeirq @@ -35,11 +36,13 @@ struct wake_irq; * @relax_count: Number of times the wakeup source was deactivated. * @expire_count: Number of times the wakeup source's timeout has expired. * @wakeup_count: Number of times the wakeup source might abort suspend. + * @dev: Struct device for sysfs statistics about the wakeup source. * @active: Status of the wakeup source. - * @has_timeout: The wakeup source has been activated with a timeout. + * @autosleep_enabled: Autosleep is active, so update @prevent_sleep_time. 
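The freq_qos interface declared above follows the familiar add/update/remove request pattern; a minimal sketch, assuming a hypothetical caller and illustrative kHz values:

static struct freq_constraints foo_constraints;
static struct freq_qos_request foo_max_req;

static int foo_qos_init(void)
{
        freq_constraints_init(&foo_constraints);

        /* Cap the frequency at an illustrative 1.2 GHz (value in kHz). */
        return freq_qos_add_request(&foo_constraints, &foo_max_req,
                                    FREQ_QOS_MAX, 1200000);
}

static void foo_qos_throttle(void)
{
        if (freq_qos_request_active(&foo_max_req))
                freq_qos_update_request(&foo_max_req, 800000);
}

static void foo_qos_exit(void)
{
        freq_qos_remove_request(&foo_max_req);
}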
*/ struct wakeup_source { const char *name; + int id; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; @@ -55,6 +58,7 @@ struct wakeup_source { unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; + struct device *dev; bool active:1; bool autosleep_enabled:1; }; @@ -81,12 +85,12 @@ static inline void device_set_wakeup_path(struct device *dev) } /* drivers/base/power/wakeup.c */ -extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); extern struct wakeup_source *wakeup_source_create(const char *name); extern void wakeup_source_destroy(struct wakeup_source *ws); extern void wakeup_source_add(struct wakeup_source *ws); extern void wakeup_source_remove(struct wakeup_source *ws); -extern struct wakeup_source *wakeup_source_register(const char *name); +extern struct wakeup_source *wakeup_source_register(struct device *dev, + const char *name); extern void wakeup_source_unregister(struct wakeup_source *ws); extern int device_wakeup_enable(struct device *dev); extern int device_wakeup_disable(struct device *dev); @@ -112,9 +116,6 @@ static inline bool device_can_wakeup(struct device *dev) return dev->power.can_wakeup; } -static inline void wakeup_source_prepare(struct wakeup_source *ws, - const char *name) {} - static inline struct wakeup_source *wakeup_source_create(const char *name) { return NULL; @@ -126,7 +127,8 @@ static inline void wakeup_source_add(struct wakeup_source *ws) {} static inline void wakeup_source_remove(struct wakeup_source *ws) {} -static inline struct wakeup_source *wakeup_source_register(const char *name) +static inline struct wakeup_source *wakeup_source_register(struct device *dev, + const char *name) { return NULL; } @@ -181,13 +183,6 @@ static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec, #endif /* !CONFIG_PM_SLEEP */ -static inline void wakeup_source_init(struct wakeup_source *ws, - const char *name) -{ - wakeup_source_prepare(ws, name); - wakeup_source_add(ws); -} - static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) { return pm_wakeup_ws_event(ws, msec, false); diff --git a/include/linux/poison.h b/include/linux/poison.h index d6d980a681c7..df34330b4e34 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -21,7 +21,7 @@ * non-initialized list entries. 
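With wakeup_source_prepare() and the wakeup_source_init() wrapper removed, a driver that previously embedded a wakeup_source would switch to registration, which now takes the owning device so the statistics appear under that device in sysfs. A hedged sketch with placeholder names:

struct foo_priv {
        struct wakeup_source *ws;
};

static int foo_wakeup_init(struct device *dev, struct foo_priv *priv)
{
        priv->ws = wakeup_source_register(dev, "foo_event");
        return priv->ws ? 0 : -ENOMEM;
}

static void foo_handle_event(struct foo_priv *priv)
{
        __pm_stay_awake(priv->ws);      /* hold off suspend while working */
        /* ... process the wake event ... */
        __pm_relax(priv->ws);
}

static void foo_wakeup_exit(struct foo_priv *priv)
{
        wakeup_source_unregister(priv->ws);
}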
*/ #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) -#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) +#define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA) /********** include/linux/timer.h **********/ /* diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index b20798fc5191..3d10c84a97a9 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -4,18 +4,11 @@ #include <linux/spinlock.h> #include <linux/list.h> -#include <linux/sched.h> -#include <linux/timex.h> #include <linux/alarmtimer.h> +#include <linux/timerqueue.h> -struct siginfo; - -struct cpu_timer_list { - struct list_head entry; - u64 expires; - struct task_struct *task; - int firing; -}; +struct kernel_siginfo; +struct task_struct; /* * Bit fields within a clockid: @@ -63,6 +56,115 @@ static inline int clockid_to_fd(const clockid_t clk) return ~(clk >> 3); } +#ifdef CONFIG_POSIX_TIMERS + +/** + * cpu_timer - Posix CPU timer representation for k_itimer + * @node: timerqueue node to queue in the task/sig + * @head: timerqueue head on which this timer is queued + * @task: Pointer to target task + * @elist: List head for the expiry list + * @firing: Timer is currently firing + */ +struct cpu_timer { + struct timerqueue_node node; + struct timerqueue_head *head; + struct task_struct *task; + struct list_head elist; + int firing; +}; + +static inline bool cpu_timer_enqueue(struct timerqueue_head *head, + struct cpu_timer *ctmr) +{ + ctmr->head = head; + return timerqueue_add(head, &ctmr->node); +} + +static inline void cpu_timer_dequeue(struct cpu_timer *ctmr) +{ + if (ctmr->head) { + timerqueue_del(ctmr->head, &ctmr->node); + ctmr->head = NULL; + } +} + +static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr) +{ + return ctmr->node.expires; +} + +static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp) +{ + ctmr->node.expires = exp; +} + +/** + * posix_cputimer_base - Container per posix CPU clock + * @nextevt: Earliest-expiration cache + * @tqhead: timerqueue head for cpu_timers + */ +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +/** + * posix_cputimers - Container for posix CPU timer related data + * @bases: Base container for posix CPU clocks + * @timers_active: Timers are queued. + * @expiry_active: Timer expiry is active. 
Used for + * process wide timers to avoid multiple + * task trying to handle expiry concurrently + * + * Used in task_struct and signal_struct + */ +struct posix_cputimers { + struct posix_cputimer_base bases[CPUCLOCK_MAX]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +static inline void posix_cputimers_init(struct posix_cputimers *pct) +{ + memset(pct, 0, sizeof(*pct)); + pct->bases[0].nextevt = U64_MAX; + pct->bases[1].nextevt = U64_MAX; + pct->bases[2].nextevt = U64_MAX; +} + +void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit); + +static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct, + u64 runtime) +{ + pct->bases[CPUCLOCK_SCHED].nextevt = runtime; +} + +/* Init task static initializer */ +#define INIT_CPU_TIMERBASE(b) { \ + .nextevt = U64_MAX, \ +} + +#define INIT_CPU_TIMERBASES(b) { \ + INIT_CPU_TIMERBASE(b[0]), \ + INIT_CPU_TIMERBASE(b[1]), \ + INIT_CPU_TIMERBASE(b[2]), \ +} + +#define INIT_CPU_TIMERS(s) \ + .posix_cputimers = { \ + .bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \ + }, +#else +struct posix_cputimers { }; +struct cpu_timer { }; +#define INIT_CPU_TIMERS(s) +static inline void posix_cputimers_init(struct posix_cputimers *pct) { } +static inline void posix_cputimers_group_init(struct posix_cputimers *pct, + u64 cpu_limit) { } +#endif + #define REQUEUE_PENDING 1 /** @@ -85,7 +187,8 @@ static inline int clockid_to_fd(const clockid_t clk) * @it_process: The task to wakeup on clock_nanosleep (CPU timers) * @sigq: Pointer to preallocated sigqueue * @it: Union representing the various posix timer type - * internals. Also used for rcu freeing the timer. + * internals. + * @rcu: RCU head for freeing the timer. */ struct k_itimer { struct list_head list; @@ -110,15 +213,15 @@ struct k_itimer { struct { struct hrtimer timer; } real; - struct cpu_timer_list cpu; + struct cpu_timer cpu; struct { struct alarm alarmtimer; } alarm; - struct rcu_head rcu; } it; + struct rcu_head rcu; }; -void run_posix_cpu_timers(struct task_struct *task); +void run_posix_cpu_timers(void); void posix_cpu_timers_exit(struct task_struct *task); void posix_cpu_timers_exit_group(struct task_struct *task); void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 6f348b3ee2e0..28413f737e7d 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -128,6 +128,8 @@ enum power_supply_property { POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD, /* in percents! */ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD, /* in percents! 
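The cpu_timer helpers above are private to the posix CPU timer code; purely as an illustration of the intended pattern (not the actual kernel/time/posix-cpu-timers.c logic), arming and disarming a timer against a timerqueue might look like:

static void foo_arm_cpu_timer(struct timerqueue_head *head,
                              struct cpu_timer *ctmr, u64 expires)
{
        timerqueue_init(&ctmr->node);
        cpu_timer_setexpires(ctmr, expires);

        /* Returns true when this timer becomes the earliest on @head. */
        if (cpu_timer_enqueue(head, ctmr)) {
                /* the real code would refresh its cached next-expiry here */
        }
}

static void foo_disarm_cpu_timer(struct cpu_timer *ctmr)
{
        cpu_timer_dequeue(ctmr);
}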
*/ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, + POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT, + POWER_SUPPLY_PROP_INPUT_POWER_LIMIT, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, POWER_SUPPLY_PROP_ENERGY_FULL, @@ -480,4 +482,17 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp) return 0; } +#ifdef CONFIG_POWER_SUPPLY_HWMON +int power_supply_add_hwmon_sysfs(struct power_supply *psy); +void power_supply_remove_hwmon_sysfs(struct power_supply *psy); +#else +static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy) +{ + return 0; +} + +static inline +void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {} +#endif + #endif /* __LINUX_POWER_SUPPLY_H__ */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index dd92b1a93919..bbb68dba37cc 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -182,7 +182,7 @@ do { \ #define preemptible() (preempt_count() == 0 && !irqs_disabled()) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION #define preempt_enable() \ do { \ barrier(); \ @@ -203,7 +203,7 @@ do { \ __preempt_schedule(); \ } while (0) -#else /* !CONFIG_PREEMPT */ +#else /* !CONFIG_PREEMPTION */ #define preempt_enable() \ do { \ barrier(); \ @@ -217,7 +217,7 @@ do { \ } while (0) #define preempt_check_resched() do { } while (0) -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ #define preempt_disable_notrace() \ do { \ diff --git a/include/linux/printk.h b/include/linux/printk.h index cefd374c47b1..c09d67edda3a 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -488,13 +488,6 @@ extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, extern void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii); -#if defined(CONFIG_DYNAMIC_DEBUG) -#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \ - dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true) -#else -extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, - const void *buf, size_t len); -#endif /* defined(CONFIG_DYNAMIC_DEBUG) */ #else static inline void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, @@ -526,4 +519,19 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type, } #endif +/** + * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params + * @prefix_str: string to prefix each line with; + * caller supplies trailing spaces for alignment if desired + * @prefix_type: controls whether prefix of an offset, address, or none + * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) + * @buf: data blob to dump + * @len: number of bytes in the @buf + * + * Calls print_hex_dump(), with log level of KERN_DEBUG, + * rowsize of 16, groupsize of 1, and ASCII output included. 
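The shorthand keeps the historical print_hex_dump_bytes() calling convention, so callers are unaffected whether or not dynamic debug is built in; a small hedged example with an arbitrary prefix and buffer:

static void foo_dump_reply(const void *buf, size_t len)
{
        /* KERN_DEBUG hex+ASCII dump, 16 bytes per row, 1-byte groups. */
        print_hex_dump_bytes("foo reply: ", DUMP_PREFIX_OFFSET, buf, len);
}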
+ */ +#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \ + print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true) + #endif diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 52a283ba0465..a705aa2d03f9 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -75,6 +75,15 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo void *data); extern struct pid *tgid_pidfd_to_pid(const struct file *file); +#ifdef CONFIG_PROC_PID_ARCH_STATUS +/* + * The architecture which selects CONFIG_PROC_PID_ARCH_STATUS must + * provide proc_pid_arch_status() definition. + */ +int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task); +#endif /* CONFIG_PROC_PID_ARCH_STATUS */ + #else /* CONFIG_PROC_FS */ static inline void proc_root_init(void) diff --git a/include/linux/processor.h b/include/linux/processor.h index dbc952eec869..dc78bdc7079a 100644 --- a/include/linux/processor.h +++ b/include/linux/processor.h @@ -32,15 +32,6 @@ #define spin_cpu_relax() cpu_relax() #endif -/* - * spin_cpu_yield may be called to yield (undirected) to the hypervisor if - * necessary. This should be used if the wait is expected to take longer - * than context switch overhead, but we can't sleep or do a directed yield. - */ -#ifndef spin_cpu_yield -#define spin_cpu_yield() cpu_relax_yield() -#endif - #ifndef spin_end #define spin_end() #endif diff --git a/include/linux/property.h b/include/linux/property.h index e9caa290cda5..9b3d4ca3a73a 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -76,6 +76,10 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, unsigned int nargs, unsigned int index, struct fwnode_reference_args *args); +struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode, + const char *name, + unsigned int index); + struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_get_next_parent( struct fwnode_handle *fwnode); @@ -141,6 +145,26 @@ static inline int device_property_read_u64(struct device *dev, return device_property_read_u64_array(dev, propname, val, 1); } +static inline int device_property_count_u8(struct device *dev, const char *propname) +{ + return device_property_read_u8_array(dev, propname, NULL, 0); +} + +static inline int device_property_count_u16(struct device *dev, const char *propname) +{ + return device_property_read_u16_array(dev, propname, NULL, 0); +} + +static inline int device_property_count_u32(struct device *dev, const char *propname) +{ + return device_property_read_u32_array(dev, propname, NULL, 0); +} + +static inline int device_property_count_u64(struct device *dev, const char *propname) +{ + return device_property_read_u64_array(dev, propname, NULL, 0); +} + static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode, const char *propname) { @@ -171,6 +195,30 @@ static inline int fwnode_property_read_u64(const struct fwnode_handle *fwnode, return fwnode_property_read_u64_array(fwnode, propname, val, 1); } +static inline int fwnode_property_count_u8(const struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_read_u8_array(fwnode, propname, NULL, 0); +} + +static inline int fwnode_property_count_u16(const struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_read_u16_array(fwnode, propname, NULL, 0); +} + +static inline int 
fwnode_property_count_u32(const struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_read_u32_array(fwnode, propname, NULL, 0); +} + +static inline int fwnode_property_count_u64(const struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_read_u64_array(fwnode, propname, NULL, 0); +} + /** * struct property_entry - "Built-in" device property representation. * @name: Name of the property. @@ -329,7 +377,58 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, /* -------------------------------------------------------------------------- */ /* Software fwnode support - when HW description is incomplete or missing */ +struct software_node; + +/** + * struct software_node_ref_args - Reference with additional arguments + * @node: Reference to a software node + * @nargs: Number of elements in @args array + * @args: Integer arguments + */ +struct software_node_ref_args { + const struct software_node *node; + unsigned int nargs; + u64 args[NR_FWNODE_REFERENCE_ARGS]; +}; + +/** + * struct software_node_reference - Named software node reference property + * @name: Name of the property + * @nrefs: Number of elements in @refs array + * @refs: Array of references with optional arguments + */ +struct software_node_reference { + const char *name; + unsigned int nrefs; + const struct software_node_ref_args *refs; +}; + +/** + * struct software_node - Software node description + * @name: Name of the software node + * @parent: Parent of the software node + * @properties: Array of device properties + * @references: Array of software node reference properties + */ +struct software_node { + const char *name; + const struct software_node *parent; + const struct property_entry *properties; + const struct software_node_reference *references; +}; + bool is_software_node(const struct fwnode_handle *fwnode); +const struct software_node *to_software_node(struct fwnode_handle *fwnode); +struct fwnode_handle *software_node_fwnode(const struct software_node *node); + +const struct software_node * +software_node_find_by_name(const struct software_node *parent, + const char *name); + +int software_node_register_nodes(const struct software_node *nodes); +void software_node_unregister_nodes(const struct software_node *nodes); + +int software_node_register(const struct software_node *node); int software_node_notify(struct device *dev, unsigned long action); diff --git a/include/linux/psci.h b/include/linux/psci.h index a8a15613c157..e2bacc6fd2f2 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -15,8 +15,8 @@ bool psci_tos_resident_on(int cpu); -int psci_cpu_init_idle(unsigned int cpu); -int psci_cpu_suspend_enter(unsigned long index); +int psci_cpu_suspend_enter(u32 state); +bool psci_power_state_is_valid(u32 state); enum psci_conduit { PSCI_CONDUIT_NONE, diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h new file mode 100644 index 000000000000..eceda1d1407a --- /dev/null +++ b/include/linux/pseudo_fs.h @@ -0,0 +1,16 @@ +#ifndef __LINUX_PSEUDO_FS__ +#define __LINUX_PSEUDO_FS__ + +#include <linux/fs_context.h> + +struct pseudo_fs_context { + const struct super_operations *ops; + const struct xattr_handler **xattr; + const struct dentry_operations *dops; + unsigned long magic; +}; + +struct pseudo_fs_context *init_pseudo(struct fs_context *fc, + unsigned long magic); + +#endif diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 28eb9c792522..93cc4f1d444a 100644 --- 
a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -213,6 +213,14 @@ extern void ptp_clock_event(struct ptp_clock *ptp, extern int ptp_clock_index(struct ptp_clock *ptp); /** + * scaled_ppm_to_ppb() - convert scaled ppm to ppb + * + * @ppm: Parts per million, but with a 16 bit binary fractional field + */ + +extern s32 scaled_ppm_to_ppb(long ppm); + +/** * ptp_find_pin() - obtain the pin index of a given auxiliary function * * @ptp: The clock obtained from ptp_clock_register(). diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index d5084ebd9f03..2a9df80ea887 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -355,7 +355,7 @@ static inline void user_single_step_report(struct pt_regs *regs) info.si_code = SI_USER; info.si_pid = 0; info.si_uid = 0; - force_sig_info(info.si_signo, &info, current); + force_sig_info(&info); } #endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h index eaa5c6e3fc9f..b2c9c460947d 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -262,7 +262,7 @@ struct pwm_ops { int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_capture *result, unsigned long timeout); int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, - struct pwm_state *state); + const struct pwm_state *state); void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state); struct module *owner; @@ -316,7 +316,7 @@ struct pwm_capture { /* PWM user APIs */ struct pwm_device *pwm_request(int pwm_id, const char *label); void pwm_free(struct pwm_device *pwm); -int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state); +int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state); int pwm_adjust_config(struct pwm_device *pwm); /** @@ -405,12 +405,16 @@ struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args); struct pwm_device *pwm_get(struct device *dev, const char *con_id); -struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id); +struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np, + const char *con_id); void pwm_put(struct pwm_device *pwm); struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id); struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, const char *con_id); +struct pwm_device *devm_fwnode_pwm_get(struct device *dev, + struct fwnode_handle *fwnode, + const char *con_id); void devm_pwm_put(struct device *dev, struct pwm_device *pwm); #else static inline struct pwm_device *pwm_request(int pwm_id, const char *label) @@ -493,7 +497,8 @@ static inline struct pwm_device *pwm_get(struct device *dev, return ERR_PTR(-ENODEV); } -static inline struct pwm_device *of_pwm_get(struct device_node *np, +static inline struct pwm_device *of_pwm_get(struct device *dev, + struct device_node *np, const char *con_id) { return ERR_PTR(-ENODEV); @@ -516,6 +521,13 @@ static inline struct pwm_device *devm_of_pwm_get(struct device *dev, return ERR_PTR(-ENODEV); } +static inline struct pwm_device * +devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode, + const char *con_id) +{ + return ERR_PTR(-ENODEV); +} + static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) { } diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 3f12cc77fb58..2d5eff506e13 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -49,8 +49,9 @@ extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, 
extern int qcom_scm_pas_auth_and_reset(u32 peripheral); extern int qcom_scm_pas_shutdown(u32 peripheral); extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, - unsigned int *src, struct qcom_scm_vmperm *newvm, - int dest_cnt); + unsigned int *src, + const struct qcom_scm_vmperm *newvm, + unsigned int dest_cnt); extern void qcom_scm_cpu_power_down(u32 flags); extern u32 qcom_scm_get_version(void); extern int qcom_scm_set_remote_state(u32 state, u32 id); @@ -87,8 +88,8 @@ qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; } static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; } static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, unsigned int *src, - struct qcom_scm_vmperm *newvm, - int dest_cnt) { return -ENODEV; } + const struct qcom_scm_vmperm *newvm, + unsigned int dest_cnt) { return -ENODEV; } static inline void qcom_scm_cpu_power_down(u32 flags) {} static inline u32 qcom_scm_get_version(void) { return 0; } static inline u32 diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 48841e5dab90..b5db1ee96d78 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -689,7 +689,7 @@ enum qed_link_mode_bits { QED_LM_40000baseLR4_Full_BIT = BIT(9), QED_LM_50000baseKR2_Full_BIT = BIT(10), QED_LM_100000baseKR4_Full_BIT = BIT(11), - QED_LM_2500baseX_Full_BIT = BIT(12), + QED_LM_TP_BIT = BIT(12), QED_LM_Backplane_BIT = BIT(13), QED_LM_1000baseKX_Full_BIT = BIT(14), QED_LM_10000baseKX4_Full_BIT = BIT(15), @@ -804,6 +804,7 @@ enum qed_nvm_flash_cmd { QED_NVM_FLASH_CMD_FILE_DATA = 0x2, QED_NVM_FLASH_CMD_FILE_START = 0x3, QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4, + QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5, QED_NVM_FLASH_CMD_NVM_MAX, }; @@ -907,7 +908,8 @@ struct qed_common_ops { u32 (*sb_release)(struct qed_dev *cdev, struct qed_sb_info *sb_info, - u16 sb_id); + u16 sb_id, + enum qed_sb_type type); void (*simd_handler_config)(struct qed_dev *cdev, void *token, @@ -1123,6 +1125,41 @@ struct qed_common_ops { */ int (*read_module_eeprom)(struct qed_dev *cdev, char *buf, u8 dev_addr, u32 offset, u32 len); + +/** + * @brief get_affin_hwfn_idx + * + * @param cdev + */ + u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev); + +/** + * @brief read_nvm_cfg - Read NVM config attribute value. + * @param cdev + * @param buf - buffer + * @param cmd - NVM CFG command id + * @param entity_id - Entity id + * + */ + int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd, + u32 entity_id); +/** + * @brief read_nvm_cfg - Read NVM config attribute value. + * @param cdev + * @param cmd - NVM CFG command id + * + * @return config id length, 0 on error. + */ + int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd); + +/** + * @brief set_grc_config - Configure value for grc config id. 
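To illustrate the const-qualified pwm_apply_state() and the new devm_fwnode_pwm_get() lookup from the pwm.h hunk above, a hedged consumer fragment; the period and duty cycle are arbitrary example values:

static int foo_enable_backlight(struct device *dev,
                                struct fwnode_handle *fwnode)
{
        struct pwm_device *pwm;
        struct pwm_state state;

        pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
        if (IS_ERR(pwm))
                return PTR_ERR(pwm);

        pwm_get_state(pwm, &state);
        state.period = 1000000;         /* 1 ms, illustrative */
        state.duty_cycle = 500000;      /* 50%, illustrative */
        state.enabled = true;

        /* The state is only read here, hence the const parameter. */
        return pwm_apply_state(pwm, &state);
}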
+ * @param cdev + * @param cfg_id - grc config id + * @param val - grc config value + * + */ + int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val); }; #define MASK_FIELD(_name, _value) \ diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index d15f8e4815e3..74efca15fde7 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -225,7 +225,7 @@ struct qed_rdma_start_in_params { struct qed_rdma_add_user_out_params { u16 dpi; - u64 dpi_addr; + void __iomem *dpi_addr; u64 dpi_phys_addr; u32 dpi_size; u16 wid_count; @@ -670,6 +670,8 @@ struct qed_rdma_ops { int (*ll2_set_mac_filter)(struct qed_dev *cdev, u8 *old_mac_address, u8 *new_mac_address); + int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset); + int (*iwarp_connect)(void *rdma_cxt, struct qed_iwarp_connect_in *iparams, struct qed_iwarp_connect_out *oparams); diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h deleted file mode 100644 index 034982c98c8b..000000000000 --- a/include/linux/quicklist.h +++ /dev/null @@ -1,94 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef LINUX_QUICKLIST_H -#define LINUX_QUICKLIST_H -/* - * Fast allocations and disposal of pages. Pages must be in the condition - * as needed after allocation when they are freed. Per cpu lists of pages - * are kept that only contain node local pages. - * - * (C) 2007, SGI. Christoph Lameter <cl@linux.com> - */ -#include <linux/kernel.h> -#include <linux/gfp.h> -#include <linux/percpu.h> - -#ifdef CONFIG_QUICKLIST - -struct quicklist { - void *page; - int nr_pages; -}; - -DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; - -/* - * The two key functions quicklist_alloc and quicklist_free are inline so - * that they may be custom compiled for the platform. - * Specifying a NULL ctor can remove constructor support. Specifying - * a constant quicklist allows the determination of the exact address - * in the per cpu area. - * - * The fast patch in quicklist_alloc touched only a per cpu cacheline and - * the first cacheline of the page itself. There is minmal overhead involved. 
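The pseudo_fs.h helper introduced earlier in this series of hunks reduces an in-kernel pseudo filesystem to filling in a pseudo_fs_context; a minimal sketch in which the magic number, filesystem name, and super_operations are all hypothetical:

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/pseudo_fs.h>

#define FOOFS_MAGIC 0x464f4f21          /* hypothetical */

static const struct super_operations foofs_sops = {
        .statfs = simple_statfs,
};

static int foofs_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, FOOFS_MAGIC);

        if (!ctx)
                return -ENOMEM;
        ctx->ops = &foofs_sops;         /* optional override */
        return 0;
}

static struct file_system_type foofs_type = {
        .name            = "foofs",
        .init_fs_context = foofs_init_fs_context,
        .kill_sb         = kill_anon_super,
};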
- */ -static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *)) -{ - struct quicklist *q; - void **p = NULL; - - q =&get_cpu_var(quicklist)[nr]; - p = q->page; - if (likely(p)) { - q->page = p[0]; - p[0] = NULL; - q->nr_pages--; - } - put_cpu_var(quicklist); - if (likely(p)) - return p; - - p = (void *)__get_free_page(flags | __GFP_ZERO); - if (ctor && p) - ctor(p); - return p; -} - -static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p, - struct page *page) -{ - struct quicklist *q; - - q = &get_cpu_var(quicklist)[nr]; - *(void **)p = q->page; - q->page = p; - q->nr_pages++; - put_cpu_var(quicklist); -} - -static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp) -{ - __quicklist_free(nr, dtor, pp, virt_to_page(pp)); -} - -static inline void quicklist_free_page(int nr, void (*dtor)(void *), - struct page *page) -{ - __quicklist_free(nr, dtor, page_address(page), page); -} - -void quicklist_trim(int nr, void (*dtor)(void *), - unsigned long min_pages, unsigned long max_free); - -unsigned long quicklist_total_size(void); - -#else - -static inline unsigned long quicklist_total_size(void) -{ - return 0; -} - -#endif - -#endif /* LINUX_QUICKLIST_H */ - diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index dc905a4ff8d7..185d94829701 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) /* i_mutex must being held */ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) { - return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) || + return (ia->ia_valid & ATTR_SIZE) || (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) || (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid)); } diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index 5ef7d54caac2..b806a0ff6554 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -4,8 +4,7 @@ struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir, umode_t mode, dev_t dev); -extern struct dentry *ramfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data); +extern int ramfs_init_fs_context(struct fs_context *fc); #ifdef CONFIG_MMU static inline int @@ -17,10 +16,8 @@ ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); #endif +extern const struct fs_parameter_description ramfs_fs_parameters; extern const struct file_operations ramfs_file_operations; extern const struct vm_operations_struct generic_file_vm_ops; -extern int __init init_ramfs_fs(void); - -int ramfs_fill_super(struct super_block *sb, void *data, int silent); #endif diff --git a/include/linux/random.h b/include/linux/random.h index 1f7dced2bba6..f189c927fdea 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -19,6 +19,7 @@ struct random_ready_callback { }; extern void add_device_randomness(const void *, unsigned int); +extern void add_bootloader_randomness(const void *, unsigned int); #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) static inline void add_latent_entropy(void) diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index e6337fce08f2..1fd61a9af45c 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -32,25 +32,9 @@ struct rb_root { struct rb_node *rb_node; }; -/* - * Leftmost-cached rbtrees. 
- * - * We do not cache the rightmost node based on footprint - * size vs number of potential users that could benefit - * from O(1) rb_last(). Just not worth it, users that want - * this feature can always implement the logic explicitly. - * Furthermore, users that want to cache both pointers may - * find it a bit asymmetric, but that's ok. - */ -struct rb_root_cached { - struct rb_root rb_root; - struct rb_node *rb_leftmost; -}; - #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) #define RB_ROOT (struct rb_root) { NULL, } -#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) @@ -72,12 +56,6 @@ extern struct rb_node *rb_prev(const struct rb_node *); extern struct rb_node *rb_first(const struct rb_root *); extern struct rb_node *rb_last(const struct rb_root *); -extern void rb_insert_color_cached(struct rb_node *, - struct rb_root_cached *, bool); -extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *); -/* Same as rb_first(), but O(1) */ -#define rb_first_cached(root) (root)->rb_leftmost - /* Postorder iteration - always visit the parent after its children */ extern struct rb_node *rb_first_postorder(const struct rb_root *); extern struct rb_node *rb_next_postorder(const struct rb_node *); @@ -87,8 +65,6 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, struct rb_root *root); -extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, - struct rb_root_cached *root); static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link) @@ -136,4 +112,50 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent typeof(*pos), field); 1; }); \ pos = n) +/* + * Leftmost-cached rbtrees. + * + * We do not cache the rightmost node based on footprint + * size vs number of potential users that could benefit + * from O(1) rb_last(). Just not worth it, users that want + * this feature can always implement the logic explicitly. + * Furthermore, users that want to cache both pointers may + * find it a bit asymmetric, but that's ok. 
+ */ +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } + +/* Same as rb_first(), but O(1) */ +#define rb_first_cached(root) (root)->rb_leftmost + +static inline void rb_insert_color_cached(struct rb_node *node, + struct rb_root_cached *root, + bool leftmost) +{ + if (leftmost) + root->rb_leftmost = node; + rb_insert_color(node, &root->rb_root); +} + +static inline void rb_erase_cached(struct rb_node *node, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == node) + root->rb_leftmost = rb_next(node); + rb_erase(node, &root->rb_root); +} + +static inline void rb_replace_node_cached(struct rb_node *victim, + struct rb_node *new, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == victim) + root->rb_leftmost = new; + rb_replace_node(victim, new, &root->rb_root); +} + #endif /* _LINUX_RBTREE_H */ diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 0f902ccb48b0..fdd421b8d9ae 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -30,10 +30,9 @@ struct rb_augment_callbacks { void (*rotate)(struct rb_node *old, struct rb_node *new); }; -extern void __rb_insert_augmented(struct rb_node *node, - struct rb_root *root, - bool newleft, struct rb_node **leftmost, +extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); + /* * Fixup the rbtree and update the augmented information when rebalancing. * @@ -48,7 +47,7 @@ static inline void rb_insert_augmented(struct rb_node *node, struct rb_root *root, const struct rb_augment_callbacks *augment) { - __rb_insert_augmented(node, root, false, NULL, augment->rotate); + __rb_insert_augmented(node, root, augment->rotate); } static inline void @@ -56,45 +55,92 @@ rb_insert_augmented_cached(struct rb_node *node, struct rb_root_cached *root, bool newleft, const struct rb_augment_callbacks *augment) { - __rb_insert_augmented(node, &root->rb_root, - newleft, &root->rb_leftmost, augment->rotate); + if (newleft) + root->rb_leftmost = node; + rb_insert_augmented(node, &root->rb_root, augment); } -#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ - rbtype, rbaugmented, rbcompute) \ +/* + * Template for declaring augmented rbtree callbacks (generic case) + * + * RBSTATIC: 'static' or empty + * RBNAME: name of the rb_augment_callbacks structure + * RBSTRUCT: struct type of the tree nodes + * RBFIELD: name of struct rb_node field within RBSTRUCT + * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree + * RBCOMPUTE: name of function that recomputes the RBAUGMENTED data + */ + +#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \ + RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE) \ static inline void \ -rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \ +RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \ { \ while (rb != stop) { \ - rbstruct *node = rb_entry(rb, rbstruct, rbfield); \ - rbtype augmented = rbcompute(node); \ - if (node->rbaugmented == augmented) \ + RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \ + if (RBCOMPUTE(node, true)) \ break; \ - node->rbaugmented = augmented; \ - rb = rb_parent(&node->rbfield); \ + rb = rb_parent(&node->RBFIELD); \ } \ } \ static inline void \ -rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ +RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ { \ - rbstruct *old = 
rb_entry(rb_old, rbstruct, rbfield); \ - rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ - new->rbaugmented = old->rbaugmented; \ + RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ + RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ + new->RBAUGMENTED = old->RBAUGMENTED; \ } \ static void \ -rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ +RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ { \ - rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ - rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ - new->rbaugmented = old->rbaugmented; \ - old->rbaugmented = rbcompute(old); \ + RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \ + RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \ + new->RBAUGMENTED = old->RBAUGMENTED; \ + RBCOMPUTE(old, false); \ } \ -rbstatic const struct rb_augment_callbacks rbname = { \ - .propagate = rbname ## _propagate, \ - .copy = rbname ## _copy, \ - .rotate = rbname ## _rotate \ +RBSTATIC const struct rb_augment_callbacks RBNAME = { \ + .propagate = RBNAME ## _propagate, \ + .copy = RBNAME ## _copy, \ + .rotate = RBNAME ## _rotate \ }; +/* + * Template for declaring augmented rbtree callbacks, + * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes. + * + * RBSTATIC: 'static' or empty + * RBNAME: name of the rb_augment_callbacks structure + * RBSTRUCT: struct type of the tree nodes + * RBFIELD: name of struct rb_node field within RBSTRUCT + * RBTYPE: type of the RBAUGMENTED field + * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree + * RBCOMPUTE: name of function that returns the per-node RBTYPE scalar + */ + +#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, \ + RBTYPE, RBAUGMENTED, RBCOMPUTE) \ +static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit) \ +{ \ + RBSTRUCT *child; \ + RBTYPE max = RBCOMPUTE(node); \ + if (node->RBFIELD.rb_left) { \ + child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \ + if (child->RBAUGMENTED > max) \ + max = child->RBAUGMENTED; \ + } \ + if (node->RBFIELD.rb_right) { \ + child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \ + if (child->RBAUGMENTED > max) \ + max = child->RBAUGMENTED; \ + } \ + if (exit && node->RBAUGMENTED == max) \ + return true; \ + node->RBAUGMENTED = max; \ + return false; \ +} \ +RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \ + RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max) + #define RB_RED 0 #define RB_BLACK 1 @@ -150,7 +196,6 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, static __always_inline struct rb_node * __rb_erase_augmented(struct rb_node *node, struct rb_root *root, - struct rb_node **leftmost, const struct rb_augment_callbacks *augment) { struct rb_node *child = node->rb_right; @@ -158,9 +203,6 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, struct rb_node *parent, *rebalance; unsigned long pc; - if (leftmost && node == *leftmost) - *leftmost = rb_next(node); - if (!tmp) { /* * Case 1: node to erase has no more than 1 child (easy!) 
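RB_DECLARE_CALLBACKS_MAX above derives the propagate/copy/rotate callbacks from a single per-node scalar; an interval-tree-style sketch, with every name invented for the example:

struct foo_interval {
        struct rb_node rb;
        u64 start;
        u64 last;
        u64 subtree_last;       /* max of ->last over this node's subtree */
};

static inline u64 foo_interval_last(struct foo_interval *node)
{
        return node->last;
}

RB_DECLARE_CALLBACKS_MAX(static, foo_augment_cb,
                         struct foo_interval, rb,
                         u64, subtree_last, foo_interval_last)

/*
 * Insert and erase then go through rb_insert_augmented(&node->rb, &root,
 * &foo_augment_cb) and rb_erase_augmented(&node->rb, &root,
 * &foo_augment_cb), keeping subtree_last valid for range queries.
 */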
@@ -260,8 +302,7 @@ static __always_inline void rb_erase_augmented(struct rb_node *node, struct rb_root *root, const struct rb_augment_callbacks *augment) { - struct rb_node *rebalance = __rb_erase_augmented(node, root, - NULL, augment); + struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); if (rebalance) __rb_erase_color(rebalance, root, augment->rotate); } @@ -270,11 +311,9 @@ static __always_inline void rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root, const struct rb_augment_callbacks *augment) { - struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root, - &root->rb_leftmost, - augment); - if (rebalance) - __rb_erase_color(rebalance, &root->rb_root, augment->rotate); + if (root->rb_leftmost == node) + root->rb_leftmost = rb_next(node); + rb_erase_augmented(node, &root->rb_root, augment); } #endif /* _LINUX_RBTREE_AUGMENTED_H */ diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 87404cb015f1..646759042333 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -14,6 +14,9 @@ #ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H #define __INCLUDE_LINUX_RCU_SEGCBLIST_H +#include <linux/types.h> +#include <linux/atomic.h> + /* Simple unsegmented callback lists. */ struct rcu_cblist { struct rcu_head *head; @@ -65,8 +68,14 @@ struct rcu_segcblist { struct rcu_head *head; struct rcu_head **tails[RCU_CBLIST_NSEGS]; unsigned long gp_seq[RCU_CBLIST_NSEGS]; +#ifdef CONFIG_RCU_NOCB_CPU + atomic_long_t len; +#else long len; +#endif long len_lazy; + u8 enabled; + u8 offloaded; }; #define RCU_SEGCBLIST_INITIALIZER(n) \ diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h index 6fc53a1345b3..0027d4c8087c 100644 --- a/include/linux/rcu_sync.h +++ b/include/linux/rcu_sync.h @@ -13,62 +13,42 @@ #include <linux/wait.h> #include <linux/rcupdate.h> -enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC }; - /* Structure to mediate between updaters and fastpath-using readers. */ struct rcu_sync { int gp_state; int gp_count; wait_queue_head_t gp_wait; - int cb_state; struct rcu_head cb_head; - - enum rcu_sync_type gp_type; }; -extern void rcu_sync_lockdep_assert(struct rcu_sync *); - /** * rcu_sync_is_idle() - Are readers permitted to use their fastpaths? * @rsp: Pointer to rcu_sync structure to use for synchronization * - * Returns true if readers are permitted to use their fastpaths. - * Must be invoked within an RCU read-side critical section whose - * flavor matches that of the rcu_sync struture. + * Returns true if readers are permitted to use their fastpaths. Must be + * invoked within some flavor of RCU read-side critical section. 
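With the single-flavor rcu_sync above, the reader/writer pattern (as used by percpu-rwsem) reduces to the following illustrative fragment; the fast and slow paths and all names are hypothetical:

static DEFINE_RCU_SYNC(foo_rss);

static void foo_reader(void)
{
        rcu_read_lock();
        if (rcu_sync_is_idle(&foo_rss)) {
                /* writers are quiescent: take the lock-free fast path */
        } else {
                /* a writer is (or was recently) active: take the slow path */
        }
        rcu_read_unlock();
}

static void foo_writer_section(void)
{
        rcu_sync_enter(&foo_rss);       /* force readers onto the slow path */
        /* ... exclusive work ... */
        rcu_sync_exit(&foo_rss);
}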
*/ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp) { -#ifdef CONFIG_PROVE_RCU - rcu_sync_lockdep_assert(rsp); -#endif - return !rsp->gp_state; /* GP_IDLE */ + RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(), + "suspicious rcu_sync_is_idle() usage"); + return !READ_ONCE(rsp->gp_state); /* GP_IDLE */ } -extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type); +extern void rcu_sync_init(struct rcu_sync *); extern void rcu_sync_enter_start(struct rcu_sync *); extern void rcu_sync_enter(struct rcu_sync *); extern void rcu_sync_exit(struct rcu_sync *); extern void rcu_sync_dtor(struct rcu_sync *); -#define __RCU_SYNC_INITIALIZER(name, type) { \ +#define __RCU_SYNC_INITIALIZER(name) { \ .gp_state = 0, \ .gp_count = 0, \ .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \ - .cb_state = 0, \ - .gp_type = type, \ } -#define __DEFINE_RCU_SYNC(name, type) \ - struct rcu_sync_struct name = __RCU_SYNC_INITIALIZER(name, type) - -#define DEFINE_RCU_SYNC(name) \ - __DEFINE_RCU_SYNC(name, RCU_SYNC) - -#define DEFINE_RCU_SCHED_SYNC(name) \ - __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC) - -#define DEFINE_RCU_BH_SYNC(name) \ - __DEFINE_RCU_SYNC(name, RCU_BH_SYNC) +#define DEFINE_RCU_SYNC(name) \ + struct rcu_sync name = __RCU_SYNC_INITIALIZER(name) #endif /* _LINUX_RCU_SYNC_H_ */ diff --git a/include/linux/rculist.h b/include/linux/rculist.h index e91ec9ddcd30..4158b7212936 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -41,6 +41,24 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) /* + * Check during list traversal that we are within an RCU reader + */ + +#define check_arg_count_one(dummy) + +#ifdef CONFIG_PROVE_RCU_LIST +#define __list_check_rcu(dummy, cond, extra...) \ + ({ \ + check_arg_count_one(extra); \ + RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(), \ + "RCU-list traversed in non-reader section!"); \ + }) +#else +#define __list_check_rcu(dummy, cond, extra...) \ + ({ check_arg_count_one(extra); }) +#endif + +/* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know @@ -343,14 +361,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. + * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ -#define list_for_each_entry_rcu(pos, head, member) \ - for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ - &pos->member != (head); \ +#define list_for_each_entry_rcu(pos, head, member, cond...) \ + for (__list_check_rcu(dummy, ## cond, 0), \ + pos = list_entry_rcu((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** @@ -616,13 +636,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. + * @cond: optional lockdep expression if called from non-RCU protection. 
* * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ -#define hlist_for_each_entry_rcu(pos, head, member) \ - for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ +#define hlist_for_each_entry_rcu(pos, head, member, cond...) \ + for (__list_check_rcu(dummy, ## cond, 0), \ + pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ @@ -642,10 +664,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * not do any RCU debugging or tracing. */ #define hlist_for_each_entry_rcu_notrace(pos, head, member) \ - for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\ + for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ - pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\ + pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index b25d20822e75..75a2eded7aa2 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -221,6 +221,7 @@ int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); int rcu_read_lock_bh_held(void); int rcu_read_lock_sched_held(void); +int rcu_read_lock_any_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -241,6 +242,12 @@ static inline int rcu_read_lock_sched_held(void) { return !preemptible(); } + +static inline int rcu_read_lock_any_held(void) +{ + return !preemptible(); +} + #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #ifdef CONFIG_PROVE_RCU @@ -365,16 +372,15 @@ static inline void rcu_preempt_sleep_check(void) { } * other macros that it invokes. */ #define rcu_assign_pointer(p, v) \ -({ \ +do { \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ - rcu_check_sparse(p, __rcu); \ + rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ else \ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ - _r_a_p__v; \ -}) +} while (0) /** * rcu_swap_protected() - swap an RCU and a regular pointer @@ -477,7 +483,7 @@ static inline void rcu_preempt_sleep_check(void) { } * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). */ -#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) +#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu) /** * rcu_dereference_protected() - fetch RCU pointer when updates prevented @@ -579,14 +585,14 @@ static inline void rcu_preempt_sleep_check(void) { } * * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), * it is illegal to block while in an RCU read-side critical section. - * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT + * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION * kernel builds, RCU read-side critical sections may be preempted, * but explicit blocking is illegal. Finally, in preemptible RCU * implementations in real-time (with -rt patchset) kernel builds, RCU * read-side critical sections may be preempted and they may also block, but * only when acquiring spinlocks that are subject to priority inheritance. 
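/*
 * Illustrative sketch, not part of the patch: rcu_assign_pointer() is
 * now a statement rather than an expression, so it cannot be used as
 * an rvalue, but the usual publish/read pattern is unchanged.  All
 * my_* names are hypothetical.
 */
struct my_conf {
	int threshold;
};

static struct my_conf __rcu *my_conf_p;
static DEFINE_MUTEX(my_conf_mutex);

static void my_publish(struct my_conf *new_conf)	/* my_conf_mutex held */
{
	struct my_conf *old;

	old = rcu_dereference_protected(my_conf_p,
					lockdep_is_held(&my_conf_mutex));
	rcu_assign_pointer(my_conf_p, new_conf);
	synchronize_rcu();
	kfree(old);
}

static int my_read_threshold(void)
{
	struct my_conf *c;
	int val = 0;

	rcu_read_lock();
	c = rcu_dereference(my_conf_p);
	if (c)
		val = c->threshold;
	rcu_read_unlock();
	return val;
}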
*/ -static inline void rcu_read_lock(void) +static __always_inline void rcu_read_lock(void) { __rcu_read_lock(); __acquire(RCU); @@ -803,7 +809,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /** * kfree_rcu() - kfree an object after a grace period. * @ptr: pointer to kfree - * @rcu_head: the name of the struct rcu_head within the type of @ptr. + * @rhf: the name of the struct rcu_head within the type of @ptr. * * Many rcu callbacks functions just call kfree() on the base structure. * These functions are trivial, but their size adds up, and furthermore @@ -826,9 +832,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. */ -#define kfree_rcu(ptr, rcu_head) \ - __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) - +#define kfree_rcu(ptr, rhf) \ +do { \ + typeof (ptr) ___p = (ptr); \ + \ + if (___p) \ + __kfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ +} while (0) /* * Place this after a lock-acquisition primitive to guarantee that diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 8e727f57d814..9bf1dfe7781f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -12,7 +12,7 @@ #ifndef __LINUX_TINY_H #define __LINUX_TINY_H -#include <linux/ktime.h> +#include <asm/param.h> /* for HZ */ /* Never flag non-existent other CPUs! */ static inline bool rcu_eqs_special_set(int cpu) { return false; } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 735601ac27d3..18b1ed9864b0 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -53,7 +53,7 @@ void rcu_scheduler_starting(void); extern int rcu_scheduler_active __read_mostly; void rcu_end_inkernel_boot(void); bool rcu_is_watching(void); -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION void rcu_all_qs(void); #endif diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 563290fc194f..75c97e4bbc57 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -6,16 +6,11 @@ /* * rcuwait provides a way of blocking and waking up a single - * task in an rcu-safe manner; where it is forbidden to use - * after exit_notify(). task_struct is not properly rcu protected, - * unless dealing with rcu-aware lists, ie: find_task_by_*(). + * task in an rcu-safe manner. * - * Alternatively we have task_rcu_dereference(), but the return - * semantics have different implications which would break the - * wakeup side. The only time @task is non-nil is when a user is - * blocked (or checking if it needs to) on a condition, and reset - * as soon as we know that the condition has succeeded and are - * awoken. + * The only time @task is non-nil is when a user is blocked (or + * checking if it needs to) on a condition, and reset as soon as we + * know that the condition has succeeded and are awoken. */ struct rcuwait { struct task_struct __rcu *task; @@ -37,13 +32,6 @@ extern void rcuwait_wake_up(struct rcuwait *w); */ #define rcuwait_wait_event(w, condition) \ ({ \ - /* \ - * Complain if we are called after do_exit()/exit_notify(), \ - * as we cannot rely on the rcu critical region for the \ - * wakeup side. 
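/*
 * Illustrative sketch, not part of the patch: kfree_rcu() now
 * evaluates its pointer argument only once and is a no-op for NULL,
 * so callers no longer need a separate NULL check.  struct my_obj is
 * hypothetical.
 */
struct my_obj {
	int payload;
	struct rcu_head rcu;
};

static void my_release(struct my_obj *obj)
{
	kfree_rcu(obj, rcu);	/* safe even if obj == NULL */
}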
\ - */ \ - WARN_ON(current->exit_state); \ - \ rcu_assign_pointer((w)->task, current); \ for (;;) { \ /* \ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index d3dea823af8e..dfe493ac692d 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -22,6 +22,7 @@ struct module; struct clk; struct device; struct i2c_client; +struct i3c_device; struct irq_domain; struct slim_device; struct spi_device; @@ -109,7 +110,7 @@ struct reg_sequence { * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.txt). + * is used (see Documentation/timers/timers-howto.rst). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read @@ -151,7 +152,7 @@ struct reg_sequence { * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.txt). + * is used (see Documentation/timers/timers-howto.rst). * @timeout_us: Timeout in us, 0 means never timeout * * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read @@ -621,6 +622,10 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); +struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); /* * Wrapper for regmap_init macros to include a unique lockdep key and name * for each call. No-op if CONFIG_LOCKDEP is not set. @@ -979,6 +984,21 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); #define devm_regmap_init_slimbus(slimbus, config) \ __regmap_lockdep_wrapper(__devm_regmap_init_slimbus, #config, \ slimbus, config) + +/** + * devm_regmap_init_i3c() - Initialise managed register map + * + * @i3c: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. 
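/*
 * Illustrative sketch, not part of the patch: a hypothetical I3C
 * device driver creating its register map with the new
 * devm_regmap_init_i3c() wrapper.  The register layout and my_* names
 * are made up for the example.
 */
static const struct regmap_config my_i3c_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7f,
};

static int my_i3c_probe(struct i3c_device *i3cdev)
{
	struct regmap *map;

	map = devm_regmap_init_i3c(i3cdev, &my_i3c_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_write(map, 0x00, 0x01);	/* hypothetical enable bit */
}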
+ */ +#define devm_regmap_init_i3c(i3c, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_i3c, #config, \ + i3c, config) + int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk); void regmap_mmio_detach_clk(struct regmap *map); void regmap_exit(struct regmap *map); diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 815983419375..337a46391527 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -281,6 +281,12 @@ void devm_regulator_unregister_notifier(struct regulator *regulator, void *regulator_get_drvdata(struct regulator *regulator); void regulator_set_drvdata(struct regulator *regulator, void *data); +/* misc helpers */ + +void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, + const char *const *supply_names, + unsigned int num_supplies); + #else /* @@ -580,6 +586,13 @@ static inline int regulator_list_voltage(struct regulator *regulator, unsigned s return -EINVAL; } +static inline void +regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, + const char *const *supply_names, + unsigned int num_supplies) +{ +} + #endif static inline int regulator_set_voltage_triplet(struct regulator *regulator, diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h new file mode 100644 index 000000000000..0212d6255e4e --- /dev/null +++ b/include/linux/regulator/coupler.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * coupler.h -- SoC Regulator support, coupler API. + * + * Regulator Coupler Interface. + */ + +#ifndef __LINUX_REGULATOR_COUPLER_H_ +#define __LINUX_REGULATOR_COUPLER_H_ + +#include <linux/kernel.h> +#include <linux/suspend.h> + +struct regulator_coupler; +struct regulator_dev; + +/** + * struct regulator_coupler - customized regulator's coupler + * + * Regulator's coupler allows to customize coupling algorithm. + * + * @list: couplers list entry + * @attach_regulator: Callback invoked on creation of a coupled regulator, + * couples are unresolved at this point. The callee should + * check that it could handle the regulator and return 0 on + * success, -errno on failure and 1 if given regulator is + * not suitable for this coupler (case of having multiple + * regulators in a system). Callback shall be implemented. + * @detach_regulator: Callback invoked on destruction of a coupled regulator. + * This callback is optional and could be NULL. + * @balance_voltage: Callback invoked when voltage of a coupled regulator is + * changing. Called with all of the coupled rdev's being held + * under "consumer lock". The callee should perform voltage + * balancing, changing voltage of the coupled regulators as + * needed. It's up to the coupler to verify the voltage + * before changing it in hardware, i.e. coupler should + * check consumer's min/max and etc. This callback is + * optional and could be NULL, in which case a generic + * voltage balancer will be used. 
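/*
 * Illustrative sketch, not part of the patch: a minimal coupler that
 * declines every regulator (returning 1 from attach_regulator), so the
 * generic voltage balancer keeps handling them.  All my_* names are
 * hypothetical.
 */
static int my_coupler_attach(struct regulator_coupler *coupler,
			     struct regulator_dev *rdev)
{
	/* 0 = handled, -errno = failure, 1 = not ours. */
	return 1;
}

static struct regulator_coupler my_coupler = {
	.attach_regulator = my_coupler_attach,
};

static int __init my_coupler_init(void)
{
	return regulator_coupler_register(&my_coupler);
}
arch_initcall(my_coupler_init);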
+ */ +struct regulator_coupler { + struct list_head list; + + int (*attach_regulator)(struct regulator_coupler *coupler, + struct regulator_dev *rdev); + int (*detach_regulator)(struct regulator_coupler *coupler, + struct regulator_dev *rdev); + int (*balance_voltage)(struct regulator_coupler *coupler, + struct regulator_dev *rdev, + suspend_state_t state); +}; + +#ifdef CONFIG_REGULATOR +int regulator_coupler_register(struct regulator_coupler *coupler); +const char *rdev_get_name(struct regulator_dev *rdev); +int regulator_check_consumers(struct regulator_dev *rdev, + int *min_uV, int *max_uV, + suspend_state_t state); +int regulator_check_voltage(struct regulator_dev *rdev, + int *min_uV, int *max_uV); +int regulator_get_voltage_rdev(struct regulator_dev *rdev); +int regulator_set_voltage_rdev(struct regulator_dev *rdev, + int min_uV, int max_uV, + suspend_state_t state); +#else +static inline int regulator_coupler_register(struct regulator_coupler *coupler) +{ + return 0; +} +static inline const char *rdev_get_name(struct regulator_dev *rdev) +{ + return NULL; +} +static inline int regulator_check_consumers(struct regulator_dev *rdev, + int *min_uV, int *max_uV, + suspend_state_t state) +{ + return -EINVAL; +} +static inline int regulator_check_voltage(struct regulator_dev *rdev, + int *min_uV, int *max_uV) +{ + return -EINVAL; +} +static inline int regulator_get_voltage_rdev(struct regulator_dev *rdev) +{ + return -EINVAL; +} +static inline int regulator_set_voltage_rdev(struct regulator_dev *rdev, + int min_uV, int max_uV, + suspend_state_t state) +{ + return -EINVAL; +} +#endif + +#endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index d45ab52c91c9..9a911bb5fb61 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -12,8 +12,6 @@ #ifndef __LINUX_REGULATOR_DRIVER_H_ #define __LINUX_REGULATOR_DRIVER_H_ -#define MAX_COUPLED 2 - #include <linux/device.h> #include <linux/notifier.h> #include <linux/regulator/consumer.h> @@ -283,6 +281,11 @@ enum regulator_type { * @vsel_range_mask: Mask for register bitfield used for range selector * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ * @vsel_mask: Mask for register bitfield used for selector + * @vsel_step: Specify the resolution of selector stepping when setting + * voltage. If 0, then no stepping is done (requested selector is + * set directly), if >0 then the regulator API will ramp the + * voltage up/down gradually each time increasing/decreasing the + * selector by the specified step value. * @csel_reg: Register for current limit selector using regmap set_current_limit * @csel_mask: Mask for register bitfield used for current limit selector * @apply_reg: Register for initiate voltage change on the output when @@ -357,6 +360,7 @@ struct regulator_desc { unsigned int vsel_range_mask; unsigned int vsel_reg; unsigned int vsel_mask; + unsigned int vsel_step; unsigned int csel_reg; unsigned int csel_mask; unsigned int apply_reg; @@ -423,7 +427,8 @@ struct regulator_config { * incremented. 
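/*
 * Illustrative sketch, not part of the patch: a regulator descriptor
 * using the new @vsel_step field described above, so the core steps
 * the voltage selector gradually.  Register addresses and my_* names
 * are made up for the example.
 */
static const struct regulator_ops my_buck_ops = {
	.list_voltage		= regulator_list_voltage_linear,
	.map_voltage		= regulator_map_voltage_linear,
	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
};

static const struct regulator_desc my_buck_desc = {
	.name		= "my-buck",
	.ops		= &my_buck_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.n_voltages	= 128,
	.min_uV		= 600000,
	.uV_step	= 6250,
	.vsel_reg	= 0x02,
	.vsel_mask	= 0x7f,
	.vsel_step	= 1,	/* ramp the selector one step at a time */
};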
*/ struct coupling_desc { - struct regulator_dev *coupled_rdevs[MAX_COUPLED]; + struct regulator_dev **coupled_rdevs; + struct regulator_coupler *coupler; int n_resolved; int n_coupled; }; @@ -549,4 +554,5 @@ void regulator_unlock(struct regulator_dev *rdev); */ int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc, unsigned int selector); + #endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 5539efa76d26..a84cc8879c3e 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -153,7 +153,7 @@ struct regulation_constraints { int system_load; /* used for coupled regulators */ - int max_spread; + u32 *max_spread; /* used for changing voltage in steps */ int max_uV_step; diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h index ebd99d2e62ad..8712c091abf0 100644 --- a/include/linux/regulator/max8952.h +++ b/include/linux/regulator/max8952.h @@ -105,9 +105,6 @@ enum { #define MAX8952_NUM_DVS_MODE 4 struct max8952_platform_data { - int gpio_vid0; - int gpio_vid1; - u32 default_mode; u32 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */ diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h new file mode 100644 index 000000000000..1cc304946d09 --- /dev/null +++ b/include/linux/regulator/mt6358-regulator.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. + */ + +#ifndef __LINUX_REGULATOR_MT6358_H +#define __LINUX_REGULATOR_MT6358_H + +enum { + MT6358_ID_VDRAM1 = 0, + MT6358_ID_VCORE, + MT6358_ID_VPA, + MT6358_ID_VPROC11, + MT6358_ID_VPROC12, + MT6358_ID_VGPU, + MT6358_ID_VS2, + MT6358_ID_VMODEM, + MT6358_ID_VS1, + MT6358_ID_VDRAM2 = 9, + MT6358_ID_VSIM1, + MT6358_ID_VIBR, + MT6358_ID_VRF12, + MT6358_ID_VIO18, + MT6358_ID_VUSB, + MT6358_ID_VCAMIO, + MT6358_ID_VCAMD, + MT6358_ID_VCN18, + MT6358_ID_VFE28, + MT6358_ID_VSRAM_PROC11, + MT6358_ID_VCN28, + MT6358_ID_VSRAM_OTHERS, + MT6358_ID_VSRAM_GPU, + MT6358_ID_VXO22, + MT6358_ID_VEFUSE, + MT6358_ID_VAUX18, + MT6358_ID_VMCH, + MT6358_ID_VBIF28, + MT6358_ID_VSRAM_PROC12, + MT6358_ID_VCAMA1, + MT6358_ID_VEMC, + MT6358_ID_VIO28, + MT6358_ID_VA12, + MT6358_ID_VRF18, + MT6358_ID_VCN33_BT, + MT6358_ID_VCN33_WIFI, + MT6358_ID_VCAMA2, + MT6358_ID_VMC, + MT6358_ID_VLDO28, + MT6358_ID_VAUD28, + MT6358_ID_VSIM2, + MT6358_ID_RG_MAX, +}; + +#define MT6358_MAX_REGULATOR MT6358_ID_RG_MAX + +#endif /* __LINUX_REGULATOR_MT6358_H */ diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 04d04709f2bd..16ad66683ad0 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -100,7 +100,9 @@ struct fw_rsc_hdr { * the remote processor will be writing logs. * @RSC_VDEV: declare support for a virtio device, and serve as its * virtio header. - * @RSC_LAST: just keep this one at the end + * @RSC_LAST: just keep this one at the end of standard resources + * @RSC_VENDOR_START: start of the vendor specific resource types range + * @RSC_VENDOR_END: end of the vendor specific resource types range * * For more details regarding a specific resource type, please see its * dedicated structure below. @@ -111,11 +113,13 @@ struct fw_rsc_hdr { * please update it as needed. 
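/*
 * Illustrative sketch, not part of the patch: a platform driver
 * claiming a vendor resource entry through the handle_rsc hook and the
 * vendor type range added below.  MY_RSC_WDT and the my_* names are
 * hypothetical; mandatory ops such as .start/.stop/.kick are omitted.
 */
#define MY_RSC_WDT	(RSC_VENDOR_START + 1)

static int my_rproc_handle_rsc(struct rproc *rproc, u32 rsc_type,
			       void *rsc, int offset, int avail)
{
	if (rsc_type != MY_RSC_WDT)
		return RSC_IGNORED;		/* let the core skip it */

	/* ... parse the vendor-specific entry pointed to by @rsc ... */
	return RSC_HANDLED;
}

static const struct rproc_ops my_rproc_ops = {
	.handle_rsc = my_rproc_handle_rsc,
	/* .start, .stop, .kick, ... */
};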
*/ enum fw_resource_type { - RSC_CARVEOUT = 0, - RSC_DEVMEM = 1, - RSC_TRACE = 2, - RSC_VDEV = 3, - RSC_LAST = 4, + RSC_CARVEOUT = 0, + RSC_DEVMEM = 1, + RSC_TRACE = 2, + RSC_VDEV = 3, + RSC_LAST = 4, + RSC_VENDOR_START = 128, + RSC_VENDOR_END = 512, }; #define FW_RSC_ADDR_ANY (-1) @@ -340,12 +344,26 @@ struct rproc_mem_entry { struct firmware; /** + * enum rsc_handling_status - return status of rproc_ops handle_rsc hook + * @RSC_HANDLED: resource was handled + * @RSC_IGNORED: resource was ignored + */ +enum rsc_handling_status { + RSC_HANDLED = 0, + RSC_IGNORED = 1, +}; + +/** * struct rproc_ops - platform-specific device handlers * @start: power on the device and boot it * @stop: power off the device * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. resource table) + * @handle_rsc: optional platform hook to handle vendor resources. Should return + * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a + * negative value on error + * @load_rsc_table: load resource table from firmware image * @find_loaded_rsc_table: find the loaded resouce table * @load: load firmware to memory, where the remote processor * expects to find it @@ -358,6 +376,8 @@ struct rproc_ops { void (*kick)(struct rproc *rproc, int vqid); void * (*da_to_va)(struct rproc *rproc, u64 da, int len); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); + int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc, + int offset, int avail); struct resource_table *(*find_loaded_rsc_table)( struct rproc *rproc, const struct firmware *fw); int (*load)(struct rproc *rproc, const struct firmware *fw); diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 9f8bc06d4136..beb9a9da1699 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -352,37 +352,38 @@ static inline void rht_unlock(struct bucket_table *tbl, static inline struct rhash_head __rcu *__rht_ptr( struct rhash_lock_head *const *bkt) { - return (struct rhash_head __rcu *)((unsigned long)*bkt & ~BIT(0)); + return (struct rhash_head __rcu *) + ((unsigned long)*bkt & ~BIT(0) ?: + (unsigned long)RHT_NULLS_MARKER(bkt)); } /* * Where 'bkt' is a bucket and might be locked: - * rht_ptr() dereferences that pointer and clears the lock bit. + * rht_ptr_rcu() dereferences that pointer and clears the lock bit. + * rht_ptr() dereferences in a context where the bucket is locked. * rht_ptr_exclusive() dereferences in a context where exclusive * access is guaranteed, such as when destroying the table. 
*/ +static inline struct rhash_head *rht_ptr_rcu( + struct rhash_lock_head *const *bkt) +{ + struct rhash_head __rcu *p = __rht_ptr(bkt); + + return rcu_dereference(p); +} + static inline struct rhash_head *rht_ptr( struct rhash_lock_head *const *bkt, struct bucket_table *tbl, unsigned int hash) { - struct rhash_head __rcu *p = __rht_ptr(bkt); - - if (!p) - return RHT_NULLS_MARKER(bkt); - - return rht_dereference_bucket_rcu(p, tbl, hash); + return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash); } static inline struct rhash_head *rht_ptr_exclusive( struct rhash_lock_head *const *bkt) { - struct rhash_head __rcu *p = __rht_ptr(bkt); - - if (!p) - return RHT_NULLS_MARKER(bkt); - - return rcu_dereference_protected(p, 1); + return rcu_dereference_protected(__rht_ptr(bkt), 1); } static inline void rht_assign_locked(struct rhash_lock_head **bkt, @@ -509,7 +510,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, */ #define rht_for_each_rcu(pos, tbl, hash) \ for (({barrier(); }), \ - pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash); \ + pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \ !rht_is_a_nulls(pos); \ pos = rcu_dereference_raw(pos->next)) @@ -546,8 +547,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl, */ #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ rht_for_each_entry_rcu_from(tpos, pos, \ - rht_ptr(rht_bucket(tbl, hash), \ - tbl, hash), \ + rht_ptr_rcu(rht_bucket(tbl, hash)), \ tbl, hash, member) /** @@ -603,7 +603,7 @@ restart: hash = rht_key_hashfn(ht, tbl, key, params); bkt = rht_bucket(tbl, hash); do { - rht_for_each_rcu_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { + rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) { if (params.obj_cmpfn ? params.obj_cmpfn(&arg, rht_obj(ht, he)) : rhashtable_compare(&arg, rht_obj(ht, he))) diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h index bab671b0782f..4e78651371ba 100644 --- a/include/linux/root_dev.h +++ b/include/linux/root_dev.h @@ -8,6 +8,7 @@ enum { Root_NFS = MKDEV(UNNAMED_MAJOR, 255), + Root_CIFS = MKDEV(UNNAMED_MAJOR, 254), Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0), Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1), Root_FD0 = MKDEV(FLOPPY_MAJOR, 0), diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 2ea18a3def04..00d6054687dd 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -34,26 +34,30 @@ */ struct rw_semaphore { atomic_long_t count; -#ifdef CONFIG_RWSEM_SPIN_ON_OWNER /* - * Write owner. Used as a speculative check to see - * if the owner is running on the cpu. + * Write owner or one of the read owners as well flags regarding + * the current state of the rwsem. Can be used as a speculative + * check to see if the write owner is running on the cpu. */ - struct task_struct *owner; + atomic_long_t owner; +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* spinner MCS lock */ #endif raw_spinlock_t wait_lock; struct list_head wait_list; +#ifdef CONFIG_DEBUG_RWSEMS + void *magic; +#endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif }; /* - * Setting bit 1 of the owner field but not bit 0 will indicate + * Setting all bits of the owner field except bit 0 will indicate * that the rwsem is writer-owned with an unknown owner. 
*/ -#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L) +#define RWSEM_OWNER_UNKNOWN (-2L) /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) @@ -72,17 +76,25 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) # define __RWSEM_DEP_MAP_INIT(lockname) #endif +#ifdef CONFIG_DEBUG_RWSEMS +# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname +#else +# define __DEBUG_RWSEM_INITIALIZER(lockname) +#endif + #ifdef CONFIG_RWSEM_SPIN_ON_OWNER -#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL +#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED #else #define __RWSEM_OPT_INIT(lockname) #endif #define __RWSEM_INITIALIZER(name) \ { __RWSEM_INIT_COUNT(name), \ + .owner = ATOMIC_LONG_INIT(0), \ .wait_list = LIST_HEAD_INIT((name).wait_list), \ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ __RWSEM_OPT_INIT(name) \ + __DEBUG_RWSEM_INITIALIZER(name) \ __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ @@ -158,7 +170,7 @@ extern void downgrade_write(struct rw_semaphore *sem); * static then another method for expressing nested locking is * the explicit definition of lock class keys and the use of * lockdep_set_class() at lock initialization time. - * See Documentation/locking/lockdep-design.txt for more details.) + * See Documentation/locking/lockdep-design.rst for more details.) */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 30a9a55c28ba..6eec50fb36c8 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -266,10 +266,11 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents, typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); typedef void (sg_free_fn)(struct scatterlist *, unsigned int); -void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *); +void __sg_free_table(struct sg_table *, unsigned int, unsigned int, + sg_free_fn *); void sg_free_table(struct sg_table *); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, - struct scatterlist *, gfp_t, sg_alloc_fn *); + struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, @@ -331,9 +332,11 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, #endif #ifdef CONFIG_SG_POOL -void sg_free_table_chained(struct sg_table *table, bool first_chunk); +void sg_free_table_chained(struct sg_table *table, + unsigned nents_first_chunk); int sg_alloc_table_chained(struct sg_table *table, int nents, - struct scatterlist *first_chunk); + struct scatterlist *first_chunk, + unsigned nents_first_chunk); #endif /* diff --git a/include/linux/sched.h b/include/linux/sched.h index 11837410690f..67a1d86981a9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -25,9 +25,11 @@ #include <linux/resource.h> #include <linux/latencytop.h> #include <linux/sched/prio.h> +#include <linux/sched/types.h> #include <linux/signal_types.h> #include <linux/mm_types_task.h> #include <linux/task_io_accounting.h> +#include <linux/posix-timers.h> #include <linux/rseq.h> /* task_struct member predeclarations (sorted alphabetically): */ @@ -35,6 +37,7 @@ struct audit_context; struct backing_dev_info; struct 
bio_list; struct blk_plug; +struct capture_control; struct cfs_rq; struct fs_struct; struct futex_pi_state; @@ -47,8 +50,9 @@ struct pid_namespace; struct pipe_inode_info; struct rcu_node; struct reclaim_state; -struct capture_control; struct robust_list_head; +struct root_domain; +struct rq; struct sched_attr; struct sched_param; struct seq_file; @@ -219,6 +223,7 @@ extern long schedule_timeout_uninterruptible(long timeout); extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); +asmlinkage void preempt_schedule_irq(void); extern int __must_check io_schedule_prepare(void); extern void io_schedule_finish(int token); @@ -242,27 +247,6 @@ struct prev_cputime { #endif }; -/** - * struct task_cputime - collected CPU time counts - * @utime: time spent in user mode, in nanoseconds - * @stime: time spent in kernel mode, in nanoseconds - * @sum_exec_runtime: total time spent on the CPU, in nanoseconds - * - * This structure groups together three kinds of CPU time that are tracked for - * threads and thread groups. Most things considering CPU time want to group - * these counts together and treat all three of them in parallel. - */ -struct task_cputime { - u64 utime; - u64 stime; - unsigned long long sum_exec_runtime; -}; - -/* Alternate field names when used on cache expirations: */ -#define virt_exp utime -#define prof_exp stime -#define sched_exp sum_exec_runtime - enum vtime_state { /* Task is sleeping or running in a CPU with VTIME inactive: */ VTIME_INACTIVE = 0, @@ -281,6 +265,23 @@ struct vtime { u64 gtime; }; +/* + * Utilization clamp constraints. + * @UCLAMP_MIN: Minimum utilization + * @UCLAMP_MAX: Maximum utilization + * @UCLAMP_CNT: Utilization clamp constraints count + */ +enum uclamp_id { + UCLAMP_MIN = 0, + UCLAMP_MAX, + UCLAMP_CNT +}; + +#ifdef CONFIG_SMP +extern struct root_domain def_root_domain; +extern struct mutex sched_domains_mutex; +#endif + struct sched_info { #ifdef CONFIG_SCHED_INFO /* Cumulative counters: */ @@ -312,6 +313,10 @@ struct sched_info { # define SCHED_FIXEDPOINT_SHIFT 10 # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) +/* Increase resolution of cpu_capacity calculations */ +# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT +# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) + struct load_weight { unsigned long weight; u32 inv_weight; @@ -560,12 +565,47 @@ struct sched_dl_entity { struct hrtimer inactive_timer; }; +#ifdef CONFIG_UCLAMP_TASK +/* Number of utilization clamp buckets (shorter alias) */ +#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT + +/* + * Utilization clamp for a scheduling entity + * @value: clamp value "assigned" to a se + * @bucket_id: bucket index corresponding to the "assigned" value + * @active: the se is currently refcounted in a rq's bucket + * @user_defined: the requested clamp value comes from user-space + * + * The bucket_id is the index of the clamp bucket matching the clamp value + * which is pre-computed and stored to avoid expensive integer divisions from + * the fast path. + * + * The active bit is set whenever a task has got an "effective" value assigned, + * which can be different from the clamp value "requested" from user-space. + * This allows to know a task is refcounted in the rq's bucket corresponding + * to the "effective" bucket_id. + * + * The user_defined bit is set whenever a task has got a task-specific clamp + * value requested from userspace, i.e. the system defaults apply to this task + * just as a restriction. 
This allows to relax default clamps when a less + * restrictive task-specific value has been requested, thus allowing to + * implement a "nice" semantic. For example, a task running with a 20% + * default boost can still drop its own boosting to 0%. + */ +struct uclamp_se { + unsigned int value : bits_per(SCHED_CAPACITY_SCALE); + unsigned int bucket_id : bits_per(UCLAMP_BUCKETS); + unsigned int active : 1; + unsigned int user_defined : 1; +}; +#endif /* CONFIG_UCLAMP_TASK */ + union rcu_special { struct { u8 blocked; u8 need_qs; u8 exp_hint; /* Hint for performance. */ - u8 pad; /* No garbage from compiler! */ + u8 deferred_qs; } b; /* Bits. */ u32 s; /* Set of bits. */ }; @@ -640,6 +680,13 @@ struct task_struct { #endif struct sched_dl_entity dl; +#ifdef CONFIG_UCLAMP_TASK + /* Clamp values requested for a scheduling entity */ + struct uclamp_se uclamp_req[UCLAMP_CNT]; + /* Effective clamp values used for a scheduling entity */ + struct uclamp_se uclamp[UCLAMP_CNT]; +#endif + #ifdef CONFIG_PREEMPT_NOTIFIERS /* List of struct preempt_notifier: */ struct hlist_head preempt_notifiers; @@ -651,7 +698,8 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; - cpumask_t cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t cpus_mask; #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; @@ -815,10 +863,8 @@ struct task_struct { unsigned long min_flt; unsigned long maj_flt; -#ifdef CONFIG_POSIX_TIMERS - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; -#endif + /* Empty if CONFIG_POSIX_CPUTIMERS=n */ + struct posix_cputimers posix_cputimers; /* Process credentials: */ @@ -831,6 +877,11 @@ struct task_struct { /* Effective (overridable) subjective task credentials (COW): */ const struct cred __rcu *cred; +#ifdef CONFIG_KEYS + /* Cached requested key. */ + struct key *cached_requested_key; +#endif + /* * executable name, excluding path. 
* @@ -908,6 +959,10 @@ struct task_struct { struct mutex_waiter *blocked_on; #endif +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + int non_block_count; +#endif + #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; unsigned long hardirq_enable_ip; @@ -1026,7 +1081,15 @@ struct task_struct { u64 last_sum_exec_runtime; struct callback_head numa_work; - struct numa_group *numa_group; + /* + * This pointer is only modified for current in syscall and + * pagefault context (and for tasks being destroyed), so it can be read + * from any of the following contexts: + * - RCU read-side critical section + * - current->numa_group from everywhere + * - task's runqueue locked, task not running + */ + struct numa_group __rcu *numa_group; /* * numa_faults is an array split into four regions: @@ -1068,7 +1131,10 @@ struct task_struct { struct tlbflush_unmap_batch tlb_ubc; - struct rcu_head rcu; + union { + refcount_t rcu_users; + struct rcu_head rcu; + }; /* Cache last used pipe for splice(): */ struct pipe_inode_info *splice_pipe; @@ -1399,7 +1465,7 @@ extern struct pid *cad_pid; #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ #define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ -#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ @@ -1518,10 +1584,6 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma } #endif -#ifndef cpu_relax_yield -#define cpu_relax_yield() cpu_relax() -#endif - extern int yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); extern int task_prio(const struct task_struct *p); @@ -1697,7 +1759,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) * value indicates whether a reschedule was done in fact. * cond_resched_lock() will drop the spinlock before scheduling, */ -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION extern int _cond_resched(void); #else static inline int _cond_resched(void) { return 0; } @@ -1726,12 +1788,12 @@ static inline void cond_resched_rcu(void) /* * Does a critical section need to be broken due to another - * task waiting?: (technically does not depend on CONFIG_PREEMPT, + * task waiting?: (technically does not depend on CONFIG_PREEMPTION, * but a general need for low latency) */ static inline int spin_needbreak(spinlock_t *lock) { -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION return spin_is_contended(lock); #else return 0; @@ -1781,7 +1843,10 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) * running or not. 
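/*
 * Illustrative sketch, not part of the patch: after the ->cpus_allowed
 * split into ->cpus_mask and the ->cpus_ptr alias shown above, code
 * that only reads a task's affinity goes through ->cpus_ptr.
 * my_task_can_run_on() is a hypothetical helper.
 */
static bool my_task_can_run_on(struct task_struct *p, int cpu)
{
	return cpumask_test_cpu(cpu, p->cpus_ptr);
}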
*/ #ifndef vcpu_is_preempted -# define vcpu_is_preempted(cpu) false +static inline bool vcpu_is_preempted(int cpu) +{ + return false; +} #endif extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); @@ -1919,4 +1984,16 @@ static inline void rseq_syscall(struct pt_regs *regs) #endif +const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq); +char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len); +int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq); + +const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq); +const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq); +const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq); + +int sched_trace_rq_cpu(struct rq *rq); + +const struct cpumask *sched_trace_rd_span(struct root_domain *rd); + #endif diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h index 53f883f5a2fd..6c9f19a33865 100644 --- a/include/linux/sched/cputime.h +++ b/include/linux/sched/cputime.h @@ -61,8 +61,7 @@ extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, * Thread group CPU time accounting. */ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); -void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); - +void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples); /* * The following are functions that support scheduler-internal time accounting. @@ -71,7 +70,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); */ /** - * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running + * get_running_cputimer - return &tsk->signal->cputimer if cputimers are active * * @tsk: Pointer to target task. */ @@ -81,8 +80,11 @@ struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) { struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; - /* Check if cputimer isn't running. This is accessed without locking. */ - if (!READ_ONCE(cputimer->running)) + /* + * Check whether posix CPU timers are active. If not the thread + * group accounting is not active either. Lockless check. 
+ */ + if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active)) return NULL; /* diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 0cb034331cbb..1aff00b65f3c 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -24,3 +24,11 @@ static inline bool dl_time_before(u64 a, u64 b) { return (s64)(a - b) < 0; } + +#ifdef CONFIG_SMP + +struct root_domain; +extern void dl_add_task_root_domain(struct task_struct *p); +extern void dl_clear_root_domain(struct root_domain *rd); + +#endif /* CONFIG_SMP */ diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index b0fb1446fe04..6c8512d3be88 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -19,6 +19,7 @@ enum hk_flags { DECLARE_STATIC_KEY_FALSE(housekeeping_overridden); extern int housekeeping_any_cpu(enum hk_flags flags); extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags); +extern bool housekeeping_enabled(enum hk_flags flags); extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags); extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags); extern void __init housekeeping_init(void); @@ -35,6 +36,11 @@ static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags) return cpu_possible_mask; } +static inline bool housekeeping_enabled(enum hk_flags flags) +{ + return false; +} + static inline void housekeeping_affine(struct task_struct *t, enum hk_flags flags) { } static inline void housekeeping_init(void) { } diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 4a7944078cc3..e6770012db18 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -362,16 +362,16 @@ enum { static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) { + if (current->mm != mm) + return; if (likely(!(atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE))) return; sync_core_before_usermode(); } -static inline void membarrier_execve(struct task_struct *t) -{ - atomic_set(&t->mm->membarrier_state, 0); -} +extern void membarrier_exec_mmap(struct mm_struct *mm); + #else #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS static inline void membarrier_arch_switch_mm(struct mm_struct *prev, @@ -380,7 +380,7 @@ static inline void membarrier_arch_switch_mm(struct mm_struct *prev, { } #endif -static inline void membarrier_execve(struct task_struct *t) +static inline void membarrier_exec_mmap(struct mm_struct *mm) { } static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h index b36f4cf38111..1abe91ff6e4a 100644 --- a/include/linux/sched/nohz.h +++ b/include/linux/sched/nohz.h @@ -7,14 +7,6 @@ */ #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void cpu_load_update_nohz_start(void); -extern void cpu_load_update_nohz_stop(void); -#else -static inline void cpu_load_update_nohz_start(void) { } -static inline void cpu_load_update_nohz_stop(void) { } -#endif - -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) extern void nohz_balance_enter_idle(int cpu); extern int get_nohz_timer_target(void); #else diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h index e7dd04a84ba8..3988762efe15 100644 --- a/include/linux/sched/numa_balancing.h +++ b/include/linux/sched/numa_balancing.h @@ -19,7 +19,7 @@ extern void task_numa_fault(int last_node, int node, int pages, int flags); extern pid_t 
task_numa_group_id(struct task_struct *p); extern void set_numabalancing_state(bool enabled); -extern void task_numa_free(struct task_struct *p); +extern void task_numa_free(struct task_struct *p, bool final); extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, int src_nid, int dst_cpu); #else @@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p) static inline void set_numabalancing_state(bool enabled) { } -static inline void task_numa_free(struct task_struct *p) +static inline void task_numa_free(struct task_struct *p, bool final) { } static inline bool should_numa_migrate_memory(struct task_struct *p, diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 38a0f0785323..88050259c466 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -9,16 +9,17 @@ #include <linux/sched/task.h> #include <linux/cred.h> #include <linux/refcount.h> +#include <linux/posix-timers.h> /* * Types defining task->signal and task->sighand and APIs using them: */ struct sighand_struct { - refcount_t count; - struct k_sigaction action[_NSIG]; spinlock_t siglock; + refcount_t count; wait_queue_head_t signalfd_wqh; + struct k_sigaction action[_NSIG]; }; /* @@ -56,18 +57,12 @@ struct task_cputime_atomic { /** * struct thread_group_cputimer - thread group interval timer counts * @cputime_atomic: atomic thread group interval timers. - * @running: true when there are timers running and - * @cputime_atomic receives updates. - * @checking_timer: true when a thread in the group is in the - * process of checking for thread group timers. * * This structure contains the version of task_cputime, above, that is * used for thread group CPU timer calculations. */ struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; - bool running; - bool checking_timer; }; struct multiprocess_signals { @@ -148,12 +143,9 @@ struct signal_struct { */ struct thread_group_cputimer cputimer; - /* Earliest-expiration cache. */ - struct task_cputime cputime_expires; - - struct list_head cpu_timers[3]; - #endif + /* Empty if CONFIG_POSIX_TIMERS=n */ + struct posix_cputimers posix_cputimers; /* PID/PID hash table linkage. 
*/ struct pid *pids[PIDTYPE_MAX]; @@ -307,16 +299,19 @@ static inline void kernel_signal_stop(void) # define ___ARCH_SI_IA64(_a1, _a2, _a3) #endif -int force_sig_fault(int sig, int code, void __user *addr +int force_sig_fault_to_task(int sig, int code, void __user *addr ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t); +int force_sig_fault(int sig, int code, void __user *addr + ___ARCH_SI_TRAPNO(int trapno) + ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)); int send_sig_fault(int sig, int code, void __user *addr ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t); -int force_sig_mceerr(int code, void __user *, short, struct task_struct *); +int force_sig_mceerr(int code, void __user *, short); int send_sig_mceerr(int code, void __user *, short, struct task_struct *); int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); @@ -325,17 +320,17 @@ int force_sig_pkuerr(void __user *addr, u32 pkey); int force_sig_ptrace_errno_trap(int errno, void __user *addr); extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); -extern void force_sigsegv(int sig, struct task_struct *p); -extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *); +extern void force_sigsegv(int sig); +extern int force_sig_info(struct kernel_siginfo *); extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp); extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid); -extern int kill_pid_info_as_cred(int, struct kernel_siginfo *, struct pid *, +extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *, const struct cred *); extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern __must_check bool do_notify_parent(struct task_struct *, int); extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); -extern void force_sig(int, struct task_struct *); +extern void force_sig(int); extern int send_sig(int, struct task_struct *, int); extern int zap_other_threads(struct task_struct *p); extern struct sigqueue *sigqueue_alloc(void); @@ -417,7 +412,6 @@ void task_join_group_stop(struct task_struct *task); static inline void set_restore_sigmask(void) { set_thread_flag(TIF_RESTORE_SIGMASK); - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); } static inline void clear_tsk_restore_sigmask(struct task_struct *task) @@ -448,7 +442,6 @@ static inline bool test_and_clear_restore_sigmask(void) static inline void set_restore_sigmask(void) { current->restore_sigmask = true; - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); } static inline void clear_tsk_restore_sigmask(struct task_struct *task) { @@ -481,6 +474,16 @@ static inline void restore_saved_sigmask(void) __set_current_blocked(¤t->saved_sigmask); } +extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize); + +static inline void restore_saved_sigmask_unless(bool interrupted) +{ + if (interrupted) + WARN_ON(!test_thread_flag(TIF_SIGPENDING)); + else + restore_saved_sigmask(); +} + static inline sigset_t *sigmask_to_save(void) { sigset_t *res = ¤t->blocked; diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 99ce6d728df7..d4f6215ee03f 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -56,6 +56,11 @@ int sched_proc_update_handler(struct ctl_table 
*table, int write, extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; +#ifdef CONFIG_UCLAMP_TASK +extern unsigned int sysctl_sched_uclamp_util_min; +extern unsigned int sysctl_sched_uclamp_util_max; +#endif + #ifdef CONFIG_CFS_BANDWIDTH extern unsigned int sysctl_sched_cfs_bandwidth_slice; #endif @@ -75,6 +80,12 @@ extern int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); +#ifdef CONFIG_UCLAMP_TASK +extern int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +#endif + extern int sysctl_numa_balancing(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index f1227f2c38a4..4b1c3b664f51 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -8,11 +8,26 @@ */ #include <linux/sched.h> +#include <linux/uaccess.h> struct task_struct; struct rusage; union thread_union; +/* All the bits taken by the old clone syscall. */ +#define CLONE_LEGACY_FLAGS 0xffffffffULL + +struct kernel_clone_args { + u64 flags; + int __user *pidfd; + int __user *child_tid; + int __user *parent_tid; + int exit_signal; + unsigned long stack; + unsigned long stack_size; + unsigned long tls; +}; + /* * This serializes "schedule()" and also protects * the run-queue from deletions/modifications (but @@ -73,7 +88,8 @@ extern void do_group_exit(int); extern void exit_files(struct task_struct *); extern void exit_itimers(struct signal_struct *); -extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long); +extern long _do_fork(struct kernel_clone_args *kargs); +extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); @@ -89,7 +105,11 @@ extern void sched_exec(void); #define sched_exec() {} #endif -#define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0) +static inline struct task_struct *get_task_struct(struct task_struct *t) +{ + refcount_inc(&t->usage); + return t; +} extern void __put_task_struct(struct task_struct *t); @@ -99,7 +119,7 @@ static inline void put_task_struct(struct task_struct *t) __put_task_struct(t); } -struct task_struct *task_rcu_dereference(struct task_struct **ptask); +void put_task_struct_rcu_user(struct task_struct *task); #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT extern int arch_task_struct_size __read_mostly; diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index cfc0a89a7159..f341163fedc9 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -7,12 +7,6 @@ #include <linux/sched/idle.h> /* - * Increase resolution of cpu_capacity calculations - */ -#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT -#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) - -/* * sched-domains (multiprocessor balancing) declarations: */ #ifdef CONFIG_SMP @@ -84,11 +78,6 @@ struct sched_domain { unsigned int busy_factor; /* less balancing by factor if busy */ unsigned int imbalance_pct; /* No balance until over watermark */ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ - unsigned int busy_idx; - unsigned int idle_idx; - unsigned int newidle_idx; - unsigned int wake_idx; - unsigned int forkexec_idx; int nohz_idle; 
/* NOHZ IDLE status */ int flags; /* See SD_* */ @@ -161,6 +150,10 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd) return to_cpumask(sd->span); } +extern void partition_sched_domains_locked(int ndoms_new, + cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new); + extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new); @@ -201,19 +194,17 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl); # define SD_INIT_NAME(type) #endif -#ifndef arch_scale_cpu_capacity -static __always_inline -unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) -{ - return SCHED_CAPACITY_SCALE; -} -#endif - #else /* CONFIG_SMP */ struct sched_domain_attr; static inline void +partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ +} + +static inline void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new) { @@ -224,16 +215,16 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) return true; } +#endif /* !CONFIG_SMP */ + #ifndef arch_scale_cpu_capacity static __always_inline -unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu) +unsigned long arch_scale_cpu_capacity(int cpu) { return SCHED_CAPACITY_SCALE; } #endif -#endif /* !CONFIG_SMP */ - static inline int task_node(const struct task_struct *p) { return cpu_to_node(task_cpu(p)); diff --git a/include/linux/sched/types.h b/include/linux/sched/types.h new file mode 100644 index 000000000000..3c3e049224ae --- /dev/null +++ b/include/linux/sched/types.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_TYPES_H +#define _LINUX_SCHED_TYPES_H + +#include <linux/types.h> + +/** + * struct task_cputime - collected CPU time counts + * @stime: time spent in kernel mode, in nanoseconds + * @utime: time spent in user mode, in nanoseconds + * @sum_exec_runtime: total time spent on the CPU, in nanoseconds + * + * This structure groups together three kinds of CPU time that are tracked for + * threads and thread groups. Most things considering CPU time want to group + * these counts together and treat all three of them in parallel. + */ +struct task_cputime { + u64 stime; + u64 utime; + unsigned long long sum_exec_runtime; +}; + +#endif diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 468d2565a9fe..917d88edb7b9 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -7,8 +7,6 @@ #include <linux/refcount.h> #include <linux/ratelimit.h> -struct key; - /* * Some day this will be a full-fledged user tracking system.. */ @@ -30,18 +28,6 @@ struct user_struct { unsigned long unix_inflight; /* How many files in flight in unix sockets */ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ -#ifdef CONFIG_KEYS - /* - * These pointers can only change from NULL to a non-NULL value once. - * Writes are protected by key_user_keyring_mutex. - * Unlocked readers should use READ_ONCE() unless they know that - * install_user_keyrings() has been called successfully (which sets - * these members to non-NULL values, preventing further modifications). 
- */ - struct key *uid_keyring; /* UID specific keyring */ - struct key *session_keyring; /* UID's default session keyring */ -#endif - /* Hash table maintenance information */ struct hlist_node uidhash_node; kuid_t uid; diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index ad826d2a4557..26a2013ac39c 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -51,6 +51,11 @@ static inline void wake_q_init(struct wake_q_head *head) head->lastp = &head->first; } +static inline bool wake_q_empty(struct wake_q_head *head) +{ + return head->first == WAKE_Q_TAIL; +} + extern void wake_q_add(struct wake_q_head *head, struct task_struct *task); extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task); extern void wake_up_q(struct wake_q_head *head); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 3105055c00a7..881fea47c83d 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-only */ /* * SCMI Message Protocol driver header * @@ -71,7 +71,7 @@ struct scmi_clk_ops { int (*rate_get)(const struct scmi_handle *handle, u32 clk_id, u64 *rate); int (*rate_set)(const struct scmi_handle *handle, u32 clk_id, - u32 config, u64 rate); + u64 rate); int (*enable)(const struct scmi_handle *handle, u32 clk_id); int (*disable)(const struct scmi_handle *handle, u32 clk_id); }; @@ -144,6 +144,9 @@ struct scmi_power_ops { struct scmi_sensor_info { u32 id; u8 type; + s8 scale; + u8 num_trip_points; + bool async; char name[SCMI_MAX_STR_SIZE]; }; @@ -166,9 +169,9 @@ enum scmi_sensor_class { * * @count_get: get the count of sensors provided by SCMI * @info_get: get the information of the specified sensor - * @configuration_set: control notifications on cross-over events for + * @trip_point_notify: control notifications on cross-over events for * the trip-points - * @trip_point_set: selects and configures a trip-point of interest + * @trip_point_config: selects and configures a trip-point of interest * @reading_get: gets the current value of the sensor */ struct scmi_sensor_ops { @@ -176,12 +179,32 @@ struct scmi_sensor_ops { const struct scmi_sensor_info *(*info_get) (const struct scmi_handle *handle, u32 sensor_id); - int (*configuration_set)(const struct scmi_handle *handle, - u32 sensor_id); - int (*trip_point_set)(const struct scmi_handle *handle, u32 sensor_id, - u8 trip_id, u64 trip_value); + int (*trip_point_notify)(const struct scmi_handle *handle, + u32 sensor_id, bool enable); + int (*trip_point_config)(const struct scmi_handle *handle, + u32 sensor_id, u8 trip_id, u64 trip_value); int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, - bool async, u64 *value); + u64 *value); +}; + +/** + * struct scmi_reset_ops - represents the various operations provided + * by SCMI Reset Protocol + * + * @num_domains_get: get the count of reset domains provided by SCMI + * @name_get: gets the name of a reset domain + * @latency_get: gets the reset latency for the specified reset domain + * @reset: resets the specified reset domain + * @assert: explicitly assert reset signal of the specified reset domain + * @deassert: explicitly deassert reset signal of the specified reset domain + */ +struct scmi_reset_ops { + int (*num_domains_get)(const struct scmi_handle *handle); + char *(*name_get)(const struct scmi_handle *handle, u32 domain); + int (*latency_get)(const struct scmi_handle *handle, u32 
domain); + int (*reset)(const struct scmi_handle *handle, u32 domain); + int (*assert)(const struct scmi_handle *handle, u32 domain); + int (*deassert)(const struct scmi_handle *handle, u32 domain); }; /** @@ -193,6 +216,7 @@ struct scmi_sensor_ops { * @perf_ops: pointer to set of performance protocol operations * @clk_ops: pointer to set of clock protocol operations * @sensor_ops: pointer to set of sensor protocol operations + * @reset_ops: pointer to set of reset protocol operations * @perf_priv: pointer to private data structure specific to performance * protocol(for internal use only) * @clk_priv: pointer to private data structure specific to clock @@ -201,6 +225,8 @@ struct scmi_sensor_ops { * protocol(for internal use only) * @sensor_priv: pointer to private data structure specific to sensors * protocol(for internal use only) + * @reset_priv: pointer to private data structure specific to reset + * protocol(for internal use only) */ struct scmi_handle { struct device *dev; @@ -209,11 +235,13 @@ struct scmi_handle { struct scmi_clk_ops *clk_ops; struct scmi_power_ops *power_ops; struct scmi_sensor_ops *sensor_ops; + struct scmi_reset_ops *reset_ops; /* for protocol internal use */ void *perf_priv; void *clk_priv; void *power_priv; void *sensor_priv; + void *reset_priv; }; enum scmi_std_protocol { @@ -223,6 +251,7 @@ enum scmi_std_protocol { SCMI_PROTOCOL_PERF = 0x13, SCMI_PROTOCOL_CLOCK = 0x14, SCMI_PROTOCOL_SENSOR = 0x15, + SCMI_PROTOCOL_RESET = 0x16, }; struct scmi_device { diff --git a/include/linux/security.h b/include/linux/security.h index 659071c2e57c..9df7547afc0c 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -77,6 +77,55 @@ enum lsm_event { LSM_POLICY_CHANGE, }; +/* + * These are reasons that can be passed to the security_locked_down() + * LSM hook. Lockdown reasons that protect kernel integrity (ie, the + * ability for userland to modify kernel code) are placed before + * LOCKDOWN_INTEGRITY_MAX. Lockdown reasons that protect kernel + * confidentiality (ie, the ability for userland to extract + * information from the running kernel that would otherwise be + * restricted) are placed before LOCKDOWN_CONFIDENTIALITY_MAX. + * + * LSM authors should note that the semantics of any given lockdown + * reason are not guaranteed to be stable - the same reason may block + * one set of features in one kernel release, and a slightly different + * set of features in a later kernel release. LSMs that seek to expose + * lockdown policy at any level of granularity other than "none", + * "integrity" or "confidentiality" are responsible for either + * ensuring that they expose a consistent level of functionality to + * userland, or ensuring that userland is aware that this is + * potentially a moving target. It is easy to misuse this information + * in a way that could break userspace. Please be careful not to do + * so. + * + * If you add to this, remember to extend lockdown_reasons in + * security/lockdown/lockdown.c. 
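For orientation, the reset protocol added above surfaces through handle->reset_ops in the same way as the existing clock and sensor ops. A rough usage sketch follows; the function name is invented and error handling is elided:

#include <linux/printk.h>
#include <linux/scmi_protocol.h>

/* Illustrative only: enumerate and cycle every reset domain via the new
 * scmi_reset_ops. */
static void example_scmi_cycle_resets(const struct scmi_handle *handle)
{
	const struct scmi_reset_ops *ops = handle->reset_ops;
	int i, num = ops->num_domains_get(handle);

	for (i = 0; i < num; i++) {
		pr_info("SCMI reset domain %d (%s)\n", i,
			ops->name_get(handle, i));
		ops->reset(handle, i);	/* autonomous reset cycle */
	}
}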
+ */ +enum lockdown_reason { + LOCKDOWN_NONE, + LOCKDOWN_MODULE_SIGNATURE, + LOCKDOWN_DEV_MEM, + LOCKDOWN_EFI_TEST, + LOCKDOWN_KEXEC, + LOCKDOWN_HIBERNATION, + LOCKDOWN_PCI_ACCESS, + LOCKDOWN_IOPORT, + LOCKDOWN_MSR, + LOCKDOWN_ACPI_TABLES, + LOCKDOWN_PCMCIA_CIS, + LOCKDOWN_TIOCSSERIAL, + LOCKDOWN_MODULE_PARAMETERS, + LOCKDOWN_MMIOTRACE, + LOCKDOWN_DEBUGFS, + LOCKDOWN_INTEGRITY_MAX, + LOCKDOWN_KCORE, + LOCKDOWN_KPROBES, + LOCKDOWN_BPF_READ, + LOCKDOWN_PERF, + LOCKDOWN_TRACEFS, + LOCKDOWN_CONFIDENTIALITY_MAX, +}; + /* These functions are in security/commoncap.c */ extern int cap_capable(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts); @@ -189,12 +238,13 @@ static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id) #ifdef CONFIG_SECURITY -int call_lsm_notifier(enum lsm_event event, void *data); -int register_lsm_notifier(struct notifier_block *nb); -int unregister_lsm_notifier(struct notifier_block *nb); +int call_blocking_lsm_notifier(enum lsm_event event, void *data); +int register_blocking_lsm_notifier(struct notifier_block *nb); +int unregister_blocking_lsm_notifier(struct notifier_block *nb); /* prototypes */ extern int security_init(void); +extern int early_security_init(void); /* Security operations */ int security_binder_set_context_mgr(struct task_struct *mgr); @@ -259,7 +309,8 @@ int security_dentry_create_files_as(struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, struct cred *new); - +int security_path_notify(const struct path *path, u64 mask, + unsigned int obj_type); int security_inode_alloc(struct inode *inode); void security_inode_free(struct inode *inode); int security_inode_init_security(struct inode *inode, struct inode *dir, @@ -387,24 +438,24 @@ int security_ismaclabel(const char *name); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); void security_release_secctx(char *secdata, u32 seclen); - void security_inode_invalidate_secctx(struct inode *inode); int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); +int security_locked_down(enum lockdown_reason what); #else /* CONFIG_SECURITY */ -static inline int call_lsm_notifier(enum lsm_event event, void *data) +static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data) { return 0; } -static inline int register_lsm_notifier(struct notifier_block *nb) +static inline int register_blocking_lsm_notifier(struct notifier_block *nb) { return 0; } -static inline int unregister_lsm_notifier(struct notifier_block *nb) +static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb) { return 0; } @@ -423,6 +474,11 @@ static inline int security_init(void) return 0; } +static inline int early_security_init(void) +{ + return 0; +} + static inline int security_binder_set_context_mgr(struct task_struct *mgr) { return 0; @@ -621,6 +677,12 @@ static inline int security_move_mount(const struct path *from_path, return 0; } +static inline int security_path_notify(const struct path *path, u64 mask, + unsigned int obj_type) +{ + return 0; +} + static inline int security_inode_alloc(struct inode *inode) { return 0; @@ -1204,6 +1266,10 @@ static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 { return -EOPNOTSUPP; } +static inline int 
security_locked_down(enum lockdown_reason what) +{ + return 0; +} #endif /* CONFIG_SECURITY */ #ifdef CONFIG_SECURITY_NETWORK diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h index 3e76b6d7d97f..53c28d750a45 100644 --- a/include/linux/sed-opal.h +++ b/include/linux/sed-opal.h @@ -39,6 +39,9 @@ static inline bool is_sed_ioctl(unsigned int cmd) case IOC_OPAL_ENABLE_DISABLE_MBR: case IOC_OPAL_ERASE_LR: case IOC_OPAL_SECURE_ERASE_LR: + case IOC_OPAL_PSID_REVERT_TPR: + case IOC_OPAL_MBR_DONE: + case IOC_OPAL_WRITE_SHADOW_MBR: return true; } return false; diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index a121982af0f5..5998e1f4ff06 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -127,6 +127,7 @@ void seq_put_hex_ll(struct seq_file *m, const char *delimiter, unsigned long long v, unsigned int width); void seq_escape(struct seq_file *m, const char *s, const char *esc); +void seq_escape_mem_ascii(struct seq_file *m, const char *src, size_t isz); void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 5e0b59422a68..bb2bc99388ca 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -110,6 +110,7 @@ struct uart_8250_port { * if no_console_suspend */ unsigned char probe; + struct mctrl_gpios *gpios; #define UART_PROBE_RSA (1 << 0) /* diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 05b179015d6c..2b78cc734719 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -32,7 +32,7 @@ struct device; /* * This structure describes all the operations that can be done on the - * physical hardware. See Documentation/serial/driver.rst for details. + * physical hardware. See Documentation/driver-api/serial/driver.rst for details. */ struct uart_ops { unsigned int (*tx_empty)(struct uart_port *); diff --git a/include/linux/sfp.h b/include/linux/sfp.h index d9d9de3fcf8e..1c35428e98bc 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -464,11 +464,14 @@ enum { struct fwnode_handle; struct ethtool_eeprom; struct ethtool_modinfo; -struct net_device; struct sfp_bus; /** * struct sfp_upstream_ops - upstream operations structure + * @attach: called when the sfp socket driver is bound to the upstream + * (mandatory). + * @detach: called when the sfp socket driver is unbound from the upstream + * (mandatory). * @module_insert: called after a module has been detected to determine * whether the module is supported for the upstream device. * @module_remove: called after the module has been removed. @@ -481,6 +484,8 @@ struct sfp_bus; * been removed. 
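Stepping back to the lockdown hook introduced above: callers gate a sensitive operation on security_locked_down() and fail it when a non-zero value comes back; with CONFIG_SECURITY=n the stub simply returns 0. A hedged sketch, with an invented caller:

#include <linux/security.h>

/* Sketch: refuse raw device-memory style access when the kernel is
 * locked down. */
static int example_open_raw_mem(void)
{
	int ret = security_locked_down(LOCKDOWN_DEV_MEM);

	if (ret)
		return ret;	/* typically -EPERM under lockdown */

	/* ... proceed with the privileged access ... */
	return 0;
}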
*/ struct sfp_upstream_ops { + void (*attach)(void *priv, struct sfp_bus *bus); + void (*detach)(void *priv, struct sfp_bus *bus); int (*module_insert)(void *priv, const struct sfp_eeprom_id *id); void (*module_remove)(void *priv); void (*link_down)(void *priv); @@ -504,7 +509,7 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, void sfp_upstream_start(struct sfp_bus *bus); void sfp_upstream_stop(struct sfp_bus *bus); struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, - struct net_device *ndev, void *upstream, + void *upstream, const struct sfp_upstream_ops *ops); void sfp_unregister_upstream(struct sfp_bus *bus); #else @@ -549,8 +554,7 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus) } static inline struct sfp_bus *sfp_register_upstream( - struct fwnode_handle *fwnode, - struct net_device *ndev, void *upstream, + struct fwnode_handle *fwnode, void *upstream, const struct sfp_upstream_ops *ops) { return (struct sfp_bus *)-1; diff --git a/include/linux/sha256.h b/include/linux/sha256.h deleted file mode 100644 index 26972b9e92db..000000000000 --- a/include/linux/sha256.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2014 Red Hat Inc. - * - * Author: Vivek Goyal <vgoyal@redhat.com> - */ - -#ifndef SHA256_H -#define SHA256_H - -#include <linux/types.h> -#include <crypto/sha.h> - -/* - * Stand-alone implementation of the SHA256 algorithm. It is designed to - * have as little dependencies as possible so it can be used in the - * kexec_file purgatory. In other cases you should use the implementation in - * crypto/. - * - * For details see lib/sha256.c - */ - -extern int sha256_init(struct sha256_state *sctx); -extern int sha256_update(struct sha256_state *sctx, const u8 *input, - unsigned int length); -extern int sha256_final(struct sha256_state *sctx, u8 *hash); - -#endif /* SHA256_H */ diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 20d815a33145..de8e4b71e3ba 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -49,8 +49,9 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) /* * Functions in mm/shmem.c called directly from elsewhere: */ +extern const struct fs_parameter_description shmem_fs_parameters; extern int shmem_init(void); -extern int shmem_fill_super(struct super_block *sb, void *data, int silent); +extern int shmem_init_fs_context(struct fs_context *fc); extern struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 9443cafd1969..0f80123650e2 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -69,7 +69,7 @@ struct shrinker { /* These are for internal use */ struct list_head list; -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_MEMCG /* ID in shrinker_idr */ int id; #endif @@ -81,6 +81,11 @@ struct shrinker { /* Flags */ #define SHRINKER_NUMA_AWARE (1 << 0) #define SHRINKER_MEMCG_AWARE (1 << 1) +/* + * It just makes sense when the shrinker is also MEMCG_AWARE for now, + * non-MEMCG_AWARE shrinker should not have this flag set. 
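Looking back at the sfp_upstream_ops change earlier in this hunk: upstream users must now provide attach/detach callbacks, and sfp_register_upstream() no longer takes a net_device. A minimal sketch of a fictional upstream; all "example_" names are invented:

#include <linux/sfp.h>

struct example_upstream {
	struct sfp_bus *sfp_bus;	/* valid between attach and detach */
};

static void example_sfp_attach(void *priv, struct sfp_bus *bus)
{
	struct example_upstream *up = priv;

	up->sfp_bus = bus;
}

static void example_sfp_detach(void *priv, struct sfp_bus *bus)
{
	struct example_upstream *up = priv;

	up->sfp_bus = NULL;
}

static const struct sfp_upstream_ops example_sfp_ops = {
	.attach = example_sfp_attach,
	.detach = example_sfp_detach,
	/* .module_insert, .link_up, etc. as before */
};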
+ */ +#define SHRINKER_NONSLAB (1 << 2) extern int prealloc_shrinker(struct shrinker *shrinker); extern void register_shrinker_prepared(struct shrinker *shrinker); diff --git a/include/linux/signal.h b/include/linux/signal.h index 78c2bb376954..1a5f88316b08 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -273,10 +273,6 @@ extern int group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern int sigprocmask(int, sigset_t *, sigset_t *); -extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set, - sigset_t *oldset, size_t sigsetsize); -extern void restore_user_sigmask(const void __user *usigmask, - sigset_t *sigsaved, bool interrupted); extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; @@ -286,6 +282,9 @@ extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void exit_signals(struct task_struct *tsk); extern void kernel_sigaction(int, __sighandler_t); +#define SIG_KTHREAD ((__force __sighandler_t)2) +#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3) + static inline void allow_signal(int sig) { /* @@ -293,7 +292,17 @@ static inline void allow_signal(int sig) * know it'll be handled, so that they don't get converted to * SIGKILL or just silently dropped. */ - kernel_sigaction(sig, (__force __sighandler_t)2); + kernel_sigaction(sig, SIG_KTHREAD); +} + +static inline void allow_kernel_signal(int sig) +{ + /* + * Kernel threads handle their own signals. Let the signal code + * know signals sent by the kernel will be handled, so that they + * don't get silently dropped. + */ + kernel_sigaction(sig, SIG_KTHREAD_KERNEL); } static inline void disallow_signal(int sig) diff --git a/include/linux/siox.h b/include/linux/siox.h index a860cb8c1f9d..da7225bf1877 100644 --- a/include/linux/siox.h +++ b/include/linux/siox.h @@ -72,3 +72,13 @@ static inline void siox_driver_unregister(struct siox_driver *sdriver) { return driver_unregister(&sdriver->driver); } + +/* + * module_siox_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_siox_driver(__siox_driver) \ + module_driver(__siox_driver, siox_driver_register, \ + siox_driver_unregister) diff --git a/include/linux/sizes.h b/include/linux/sizes.h index 1cbb4c4d016e..9874f6f67537 100644 --- a/include/linux/sizes.h +++ b/include/linux/sizes.h @@ -44,5 +44,6 @@ #define SZ_2G 0x80000000 #define SZ_4G _AC(0x100000000, ULL) +#define SZ_64T _AC(0x400000000000, ULL) #endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 056f557d5194..64a395c7f689 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -14,6 +14,7 @@ #include <linux/compiler.h> #include <linux/time.h> #include <linux/bug.h> +#include <linux/bvec.h> #include <linux/cache.h> #include <linux/rbtree.h> #include <linux/socket.h> @@ -36,6 +37,9 @@ #include <linux/in6.h> #include <linux/if_packet.h> #include <net/flow.h> +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include <linux/netfilter/nf_conntrack_common.h> +#endif /* The interface for checksum offload between the stack and networking drivers * is as follows... 
@@ -243,12 +247,6 @@ struct bpf_prog; union bpf_attr; struct skb_ext; -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) -struct nf_conntrack { - atomic_t use; -}; -#endif - #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { enum { @@ -278,6 +276,16 @@ struct nf_bridge_info { }; #endif +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) +/* Chain in tc_skb_ext will be used to share the tc chain with + * ovs recirc_id. It will be set to the current chain by tc + * and read by ovs to recirc_id. + */ +struct tc_skb_ext { + __u32 chain; +}; +#endif + struct sk_buff_head { /* These two members must be first. */ struct sk_buff *next; @@ -308,58 +316,45 @@ extern int sysctl_max_skb_frags; */ #define GSO_BY_FRAGS 0xFFFF -typedef struct skb_frag_struct skb_frag_t; - -struct skb_frag_struct { - struct { - struct page *p; - } page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; - __u32 size; -#else - __u16 page_offset; - __u16 size; -#endif -}; +typedef struct bio_vec skb_frag_t; /** - * skb_frag_size - Returns the size of a skb fragment + * skb_frag_size() - Returns the size of a skb fragment * @frag: skb fragment */ static inline unsigned int skb_frag_size(const skb_frag_t *frag) { - return frag->size; + return frag->bv_len; } /** - * skb_frag_size_set - Sets the size of a skb fragment + * skb_frag_size_set() - Sets the size of a skb fragment * @frag: skb fragment * @size: size of fragment */ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) { - frag->size = size; + frag->bv_len = size; } /** - * skb_frag_size_add - Incrementes the size of a skb fragment by %delta + * skb_frag_size_add() - Increments the size of a skb fragment by @delta * @frag: skb fragment * @delta: value to add */ static inline void skb_frag_size_add(skb_frag_t *frag, int delta) { - frag->size += delta; + frag->bv_len += delta; } /** - * skb_frag_size_sub - Decrements the size of a skb fragment by %delta + * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta * @frag: skb fragment * @delta: value to subtract */ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) { - frag->size -= delta; + frag->bv_len -= delta; } /** @@ -379,7 +374,7 @@ static inline bool skb_frag_must_loop(struct page *p) * skb_frag_foreach_page - loop over pages in a fragment * * @f: skb frag to operate on - * @f_off: offset from start of f->page.p + * @f_off: offset from start of f->bv_page * @f_len: length from f_off to loop over * @p: (temp var) current page * @p_off: (temp var) offset from start of current page, @@ -916,7 +911,6 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb) #define SKB_DST_NOREF 1UL #define SKB_DST_PTRMASK ~(SKB_DST_NOREF) -#define SKB_NFCT_PTRMASK ~(7UL) /** * skb_dst - returns skb dst_entry * @skb: buffer @@ -1024,6 +1018,7 @@ static inline bool skb_unref(struct sk_buff *skb) void skb_release_head_state(struct sk_buff *skb); void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); +void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); void consume_skb(struct sk_buff *skb); void __consume_stateless_skb(struct sk_buff *skb); @@ -1059,6 +1054,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, int max_page_order, int *errcode, gfp_t gfp_mask); +struct sk_buff *alloc_skb_for_msg(struct sk_buff *first); /* Layout of fast clones : [skb1][skb2][fclone_ref] */ struct sk_buff_fclones { @@ -1281,7 +1277,7 @@ static inline int 
skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) struct bpf_flow_dissector; bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, - __be16 proto, int nhoff, int hlen); + __be16 proto, int nhoff, int hlen, unsigned int flags); bool __skb_flow_dissect(const struct net *net, const struct sk_buff *skb, @@ -1319,6 +1315,20 @@ skb_flow_dissect_flow_keys_basic(const struct net *net, data, proto, nhoff, hlen, flags); } +void skb_flow_dissect_meta(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + +/* Gets a skb connection tracking info, ctinfo map should be a + * a map of mapsize to translate enum ip_conntrack_info states + * to user states. + */ +void +skb_flow_dissect_ct(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, + u16 *ctinfo_map, + size_t mapsize); void skb_flow_dissect_tunnel_info(const struct sk_buff *skb, struct flow_dissector *flow_dissector, @@ -1344,7 +1354,8 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 return skb->hash; } -__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb); +__u32 skb_get_hash_perturb(const struct sk_buff *skb, + const siphash_key_t *perturb); static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) { @@ -1358,6 +1369,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) to->l4_hash = from->l4_hash; }; +static inline void skb_copy_decrypted(struct sk_buff *to, + const struct sk_buff *from) +{ +#ifdef CONFIG_TLS_DEVICE + to->decrypted = from->decrypted; +#endif +} + #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { @@ -1477,6 +1496,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list) } /** + * skb_queue_empty_lockless - check if a queue is empty + * @list: queue head + * + * Returns true if the queue is empty, false otherwise. + * This variant can be used in lockless contexts. 
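Returning to the skb_frag_t conversion above: the fragment type is now struct bio_vec, so the size accessors simply wrap bv_len. A short illustrative helper, not part of the patch:

#include <linux/skbuff.h>

/* Sketch: grow a fragment by @bytes using only the accessors, then check
 * the result. */
static void example_grow_frag(skb_frag_t *frag, unsigned int bytes)
{
	unsigned int old = skb_frag_size(frag);

	skb_frag_size_add(frag, bytes);
	WARN_ON(skb_frag_size(frag) != old + bytes);
}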
+ */ +static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list) +{ + return READ_ONCE(list->next) == (const struct sk_buff *) list; +} + + +/** * skb_queue_is_last - check if skb is the last entry in the queue * @list: queue head * @skb: buffer @@ -1829,9 +1861,11 @@ static inline void __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list) { - newsk->next = next; - newsk->prev = prev; - next->prev = prev->next = newsk; + /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */ + WRITE_ONCE(newsk->next, next); + WRITE_ONCE(newsk->prev, prev); + WRITE_ONCE(next->prev, newsk); + WRITE_ONCE(prev->next, newsk); list->qlen++; } @@ -1842,11 +1876,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list, struct sk_buff *first = list->next; struct sk_buff *last = list->prev; - first->prev = prev; - prev->next = first; + WRITE_ONCE(first->prev, prev); + WRITE_ONCE(prev->next, first); - last->next = next; - next->prev = last; + WRITE_ONCE(last->next, next); + WRITE_ONCE(next->prev, last); } /** @@ -1987,8 +2021,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) next = skb->next; prev = skb->prev; skb->next = skb->prev = NULL; - next->prev = prev; - prev->next = next; + WRITE_ONCE(next->prev, prev); + WRITE_ONCE(prev->next, next); } /** @@ -2073,8 +2107,8 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, * that not all callers have unique ownership of the page but rely * on page_is_pfmemalloc doing the right thing(tm). */ - frag->page.p = page; - frag->page_offset = off; + frag->bv_page = page; + frag->bv_offset = off; skb_frag_size_set(frag, size); page = compound_head(page); @@ -2854,6 +2888,46 @@ static inline void skb_propagate_pfmemalloc(struct page *page, } /** + * skb_frag_off() - Returns the offset of a skb fragment + * @frag: the paged fragment + */ +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->bv_offset; +} + +/** + * skb_frag_off_add() - Increments the offset of a skb fragment by @delta + * @frag: skb fragment + * @delta: value to add + */ +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->bv_offset += delta; +} + +/** + * skb_frag_off_set() - Sets the offset of a skb fragment + * @frag: skb fragment + * @offset: offset of fragment + */ +static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset) +{ + frag->bv_offset = offset; +} + +/** + * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment + * @fragto: skb fragment where offset is set + * @fragfrom: skb fragment offset is copied from + */ +static inline void skb_frag_off_copy(skb_frag_t *fragto, + const skb_frag_t *fragfrom) +{ + fragto->bv_offset = fragfrom->bv_offset; +} + +/** * skb_frag_page - retrieve the page referred to by a paged fragment * @frag: the paged fragment * @@ -2861,7 +2935,7 @@ static inline void skb_propagate_pfmemalloc(struct page *page, */ static inline struct page *skb_frag_page(const skb_frag_t *frag) { - return frag->page.p; + return frag->bv_page; } /** @@ -2919,7 +2993,7 @@ static inline void skb_frag_unref(struct sk_buff *skb, int f) */ static inline void *skb_frag_address(const skb_frag_t *frag) { - return page_address(skb_frag_page(frag)) + frag->page_offset; + return page_address(skb_frag_page(frag)) + skb_frag_off(frag); } /** @@ -2935,7 +3009,18 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag) if (unlikely(!ptr)) return NULL; - 
return ptr + frag->page_offset; + return ptr + skb_frag_off(frag); +} + +/** + * skb_frag_page_copy() - sets the page in a fragment from another fragment + * @fragto: skb fragment where page is set + * @fragfrom: skb fragment page is copied from + */ +static inline void skb_frag_page_copy(skb_frag_t *fragto, + const skb_frag_t *fragfrom) +{ + fragto->bv_page = fragfrom->bv_page; } /** @@ -2947,7 +3032,7 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag) */ static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) { - frag->page.p = page; + frag->bv_page = page; } /** @@ -2983,7 +3068,7 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev, enum dma_data_direction dir) { return dma_map_page(dev, skb_frag_page(frag), - frag->page_offset + offset, size, dir); + skb_frag_off(frag) + offset, size, dir); } static inline struct sk_buff *pskb_copy(struct sk_buff *skb, @@ -3150,10 +3235,10 @@ static inline bool skb_can_coalesce(struct sk_buff *skb, int i, if (skb_zcopy(skb)) return false; if (i) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; return page == skb_frag_page(frag) && - off == frag->page_offset + skb_frag_size(frag); + off == skb_frag_off(frag) + skb_frag_size(frag); } return false; } @@ -3441,6 +3526,11 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len); int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); int skb_vlan_pop(struct sk_buff *skb); int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); +int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, + int mac_len); +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len); +int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); +int skb_mpls_dec_ttl(struct sk_buff *skb); struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, gfp_t gfp); @@ -3914,18 +4004,16 @@ static inline bool __skb_checksum_convert_check(struct sk_buff *skb) return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid); } -static inline void __skb_checksum_convert(struct sk_buff *skb, - __sum16 check, __wsum pseudo) +static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo) { skb->csum = ~pseudo; skb->ip_summed = CHECKSUM_COMPLETE; } -#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ +#define skb_checksum_try_convert(skb, proto, compute_pseudo) \ do { \ if (__skb_checksum_convert_check(skb)) \ - __skb_checksum_convert(skb, check, \ - compute_pseudo(skb, proto)); \ + __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \ } while (0) static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, @@ -3965,25 +4053,27 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) - return (void *)(skb->_nfct & SKB_NFCT_PTRMASK); + return (void *)(skb->_nfct & NFCT_PTRMASK); #else return NULL; #endif } -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) -void nf_conntrack_destroy(struct nf_conntrack *nfct); -static inline void nf_conntrack_put(struct nf_conntrack *nfct) +static inline unsigned long skb_get_nfct(const struct sk_buff *skb) { - if (nfct && atomic_dec_and_test(&nfct->use)) - nf_conntrack_destroy(nfct); +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + return skb->_nfct; +#else + return 0UL; +#endif } -static inline void nf_conntrack_get(struct 
nf_conntrack *nfct) + +static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) { - if (nfct) - atomic_inc(&nfct->use); -} +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + skb->_nfct = nfct; #endif +} #ifdef CONFIG_SKB_EXTENSIONS enum skb_ext_id { @@ -3993,6 +4083,9 @@ enum skb_ext_id { #ifdef CONFIG_XFRM SKB_EXT_SEC_PATH, #endif +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + TC_SKB_EXT, +#endif SKB_EXT_NUM, /* must be last */ }; @@ -4068,22 +4161,28 @@ static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id) return NULL; } + +static inline void skb_ext_reset(struct sk_buff *skb) +{ + if (unlikely(skb->active_extensions)) { + __skb_ext_put(skb->extensions); + skb->active_extensions = 0; + } +} #else static inline void skb_ext_put(struct sk_buff *skb) {} +static inline void skb_ext_reset(struct sk_buff *skb) {} static inline void skb_ext_del(struct sk_buff *skb, int unused) {} static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {} static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {} #endif /* CONFIG_SKB_EXTENSIONS */ -static inline void nf_reset(struct sk_buff *skb) +static inline void nf_reset_ct(struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb_nfct(skb)); skb->_nfct = 0; #endif -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - skb_ext_del(skb, SKB_EXT_BRIDGE_NF); -#endif } static inline void nf_reset_trace(struct sk_buff *skb) diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 50ced8aba9db..e4b3fb4bb77c 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -354,7 +354,13 @@ static inline void sk_psock_restore_proto(struct sock *sk, sk->sk_write_space = psock->saved_write_space; if (psock->sk_proto) { - sk->sk_prot = psock->sk_proto; + struct inet_connection_sock *icsk = inet_csk(sk); + bool has_ulp = !!icsk->icsk_ulp_data; + + if (has_ulp) + tcp_update_ulp(sk, psock->sk_proto); + else + sk->sk_prot = psock->sk_proto; psock->sk_proto = NULL; } } diff --git a/include/linux/slab.h b/include/linux/slab.h index 9449b19c5f10..4d2a2fa55ed5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -16,6 +16,7 @@ #include <linux/overflow.h> #include <linux/types.h> #include <linux/workqueue.h> +#include <linux/percpu-refcount.h> /* @@ -115,6 +116,10 @@ /* Objects are reclaimable */ #define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U) #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ + +/* Slab deactivation flag */ +#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U) + /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. * @@ -151,8 +156,7 @@ void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); -void memcg_deactivate_kmem_caches(struct mem_cgroup *); -void memcg_destroy_kmem_caches(struct mem_cgroup *); +void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *); /* * Please use this macro to create slab caches. 
Simply specify the @@ -184,6 +188,7 @@ void * __must_check __krealloc(const void *, size_t, gfp_t); void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); void kzfree(const void *); +size_t __ksize(const void *); size_t ksize(const void *); #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR @@ -488,6 +493,10 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) * kmalloc is the normal method of allocating memory * for objects smaller than page size in the kernel. * + * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN + * bytes. For @size of power of two bytes, the alignment is also guaranteed + * to be at least to the size. + * * The @flags argument may be one of the GFP flags defined at * include/linux/gfp.h and described at * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>` @@ -590,67 +599,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) return __kmalloc_node(size, flags, node); } -struct memcg_cache_array { - struct rcu_head rcu; - struct kmem_cache *entries[0]; -}; - -/* - * This is the main placeholder for memcg-related information in kmem caches. - * Both the root cache and the child caches will have it. For the root cache, - * this will hold a dynamically allocated array large enough to hold - * information about the currently limited memcgs in the system. To allow the - * array to be accessed without taking any locks, on relocation we free the old - * version only after a grace period. - * - * Root and child caches hold different metadata. - * - * @root_cache: Common to root and child caches. NULL for root, pointer to - * the root cache for children. - * - * The following fields are specific to root caches. - * - * @memcg_caches: kmemcg ID indexed table of child caches. This table is - * used to index child cachces during allocation and cleared - * early during shutdown. - * - * @root_caches_node: List node for slab_root_caches list. - * - * @children: List of all child caches. While the child caches are also - * reachable through @memcg_caches, a child cache remains on - * this list until it is actually destroyed. - * - * The following fields are specific to child caches. - * - * @memcg: Pointer to the memcg this cache belongs to. - * - * @children_node: List node for @root_cache->children list. - * - * @kmem_caches_node: List node for @memcg->kmem_caches list. 
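On the kmalloc() documentation added above: the guarantee means a power-of-two sized allocation can be relied on for natural alignment. A small sketch of what that buys a caller, purely illustrative:

#include <linux/kernel.h>
#include <linux/slab.h>

static bool example_kmalloc_is_naturally_aligned(void)
{
	void *buf = kmalloc(4096 /* power of two */, GFP_KERNEL);
	bool aligned;

	if (!buf)
		return false;

	/* Per the new documentation, a 4096-byte object is 4096-byte aligned. */
	aligned = IS_ALIGNED((unsigned long)buf, 4096);
	kfree(buf);
	return aligned;
}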
- */ -struct memcg_cache_params { - struct kmem_cache *root_cache; - union { - struct { - struct memcg_cache_array __rcu *memcg_caches; - struct list_head __root_caches_node; - struct list_head children; - bool dying; - }; - struct { - struct mem_cgroup *memcg; - struct list_head children_node; - struct list_head kmem_caches_node; - - void (*deact_fn)(struct kmem_cache *); - union { - struct rcu_head deact_rcu_head; - struct work_struct deact_work; - }; - }; - }; -}; - int memcg_update_all_caches(int num_memcgs); /** diff --git a/include/linux/smp.h b/include/linux/smp.h index a56f08ff3097..6fc856c9eda5 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -35,7 +35,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, /* * Call a function on all processors */ -int on_each_cpu(smp_call_func_t func, void *info, int wait); +void on_each_cpu(smp_call_func_t func, void *info, int wait); /* * Call a function on processors specified by mask, which might include @@ -101,7 +101,7 @@ extern void smp_cpus_done(unsigned int max_cpus); /* * Call a function on all other processors */ -int smp_call_function(smp_call_func_t func, void *info, int wait); +void smp_call_function(smp_call_func_t func, void *info, int wait); void smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait); @@ -144,9 +144,8 @@ static inline void smp_send_stop(void) { } * These macros fold the SMP functionality into a single CPU system */ #define raw_smp_processor_id() 0 -static inline int up_smp_call_function(smp_call_func_t func, void *info) +static inline void up_smp_call_function(smp_call_func_t func, void *info) { - return 0; } #define smp_call_function(func, info, wait) \ (up_smp_call_function(func, info)) @@ -181,29 +180,46 @@ static inline int get_boot_cpu_id(void) #endif /* !SMP */ -/* - * smp_processor_id(): get the current CPU ID. +/** + * raw_processor_id() - get the current (unstable) CPU id + * + * For then you know what you are doing and need an unstable + * CPU id. + */ + +/** + * smp_processor_id() - get the current (stable) CPU id + * + * This is the normal accessor to the CPU id and should be used + * whenever possible. * - * if DEBUG_PREEMPT is enabled then we check whether it is - * used in a preemption-safe way. (smp_processor_id() is safe - * if it's used in a preemption-off critical section, or in - * a thread that is bound to the current CPU.) + * The CPU id is stable when: * - * NOTE: raw_smp_processor_id() is for internal use only - * (smp_processor_id() is the preferred variant), but in rare - * instances it might also be used to turn off false positives - * (i.e. smp_processor_id() use that the debugging code reports but - * which use for some reason is legal). Don't use this to hack around - * the warning message, as your code might not work under PREEMPT. + * - IRQs are disabled; + * - preemption is disabled; + * - the task is CPU affine. + * + * When CONFIG_DEBUG_PREEMPT; we verify these assumption and WARN + * when smp_processor_id() is used when the CPU id is not stable. + */ + +/* + * Allow the architecture to differentiate between a stable and unstable read. + * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a + * regular asm read for the stable. 
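On the smp_processor_id() documentation rewritten above: the CPU id is only meaningful while the task cannot migrate, which is what get_cpu()/put_cpu() guarantee. A brief sketch, with an invented caller:

#include <linux/printk.h>
#include <linux/smp.h>

/* Sketch: bracket the read with get_cpu()/put_cpu() so the CPU id stays
 * stable (preemption is disabled in between). */
static void example_log_current_cpu(void)
{
	int cpu = get_cpu();

	pr_info("running on CPU %d\n", cpu);
	put_cpu();
}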
*/ +#ifndef __smp_processor_id +#define __smp_processor_id(x) raw_smp_processor_id(x) +#endif + #ifdef CONFIG_DEBUG_PREEMPT extern unsigned int debug_smp_processor_id(void); # define smp_processor_id() debug_smp_processor_id() #else -# define smp_processor_id() raw_smp_processor_id() +# define smp_processor_id() __smp_processor_id() #endif -#define get_cpu() ({ preempt_disable(); smp_processor_id(); }) +#define get_cpu() ({ preempt_disable(); __smp_processor_id(); }) #define put_cpu() preempt_enable() /* diff --git a/include/linux/soc/amlogic/meson-canvas.h b/include/linux/soc/amlogic/meson-canvas.h index b4dde2fbeb3f..0cb2a6050d1f 100644 --- a/include/linux/soc/amlogic/meson-canvas.h +++ b/include/linux/soc/amlogic/meson-canvas.h @@ -20,6 +20,7 @@ #define MESON_CANVAS_ENDIAN_SWAP64 0x7 #define MESON_CANVAS_ENDIAN_SWAP128 0xf +struct device; struct meson_canvas; /** diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 54ade13a9b15..9618debb9ceb 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -13,9 +13,6 @@ #define CMDQ_NO_TIMEOUT 0xffffffffu -/** cmdq event maximum */ -#define CMDQ_MAX_EVENT 0x3ff - struct cmdq_pkt; struct cmdq_client { @@ -63,26 +60,26 @@ void cmdq_pkt_destroy(struct cmdq_pkt *pkt); /** * cmdq_pkt_write() - append write command to the CMDQ packet * @pkt: the CMDQ packet - * @value: the specified target register value * @subsys: the CMDQ sub system code * @offset: register offset from CMDQ sub system + * @value: the specified target register value * * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset); +int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value); /** * cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet * @pkt: the CMDQ packet - * @value: the specified target register value * @subsys: the CMDQ sub system code * @offset: register offset from CMDQ sub system + * @value: the specified target register value * @mask: the specified target register mask * * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value, - u32 subsys, u32 offset, u32 mask); +int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, + u16 offset, u32 value, u32 mask); /** * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet @@ -91,7 +88,7 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value, * * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event); +int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); /** * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet @@ -100,7 +97,7 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event); * * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event); +int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); /** * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ diff --git a/include/linux/soc/nxp/lpc32xx-misc.h b/include/linux/soc/nxp/lpc32xx-misc.h new file mode 100644 index 000000000000..699c6f1e3aab --- /dev/null +++ b/include/linux/soc/nxp/lpc32xx-misc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Author: Kevin Wells <kevin.wells@nxp.com> + * + * Copyright (C) 2010 NXP Semiconductors + */ + +#ifndef __SOC_LPC32XX_MISC_H +#define __SOC_LPC32XX_MISC_H + +#include 
<linux/types.h> +#include <linux/phy.h> + +#ifdef CONFIG_ARCH_LPC32XX +extern u32 lpc32xx_return_iram(void __iomem **mapbase, dma_addr_t *dmaaddr); +extern void lpc32xx_set_phy_interface_mode(phy_interface_t mode); +extern void lpc32xx_loopback_set(resource_size_t mapbase, int state); +#else +static inline u32 lpc32xx_return_iram(void __iomem **mapbase, dma_addr_t *dmaaddr) +{ + *mapbase = NULL; + *dmaaddr = 0; + return 0; +} +static inline void lpc32xx_set_phy_interface_mode(phy_interface_t mode) +{ +} +static inline void lpc32xx_loopback_set(resource_size_t mapbase, int state) +{ +} +#endif + +#endif /* __SOC_LPC32XX_MISC_H */ diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h index 944b06aefb0f..e600baec6825 100644 --- a/include/linux/soc/qcom/mdt_loader.h +++ b/include/linux/soc/qcom/mdt_loader.h @@ -21,4 +21,6 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, phys_addr_t *reloc_base); +void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len); + #endif diff --git a/include/linux/soc/samsung/exynos-chipid.h b/include/linux/soc/samsung/exynos-chipid.h new file mode 100644 index 000000000000..8bca6763f99c --- /dev/null +++ b/include/linux/soc/samsung/exynos-chipid.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * Exynos - CHIPID support + */ +#ifndef __LINUX_SOC_EXYNOS_CHIPID_H +#define __LINUX_SOC_EXYNOS_CHIPID_H + +#define EXYNOS_CHIPID_REG_PRO_ID 0x00 +#define EXYNOS_SUBREV_MASK (0xf << 4) +#define EXYNOS_MAINREV_MASK (0xf << 0) +#define EXYNOS_REV_MASK (EXYNOS_SUBREV_MASK | \ + EXYNOS_MAINREV_MASK) +#define EXYNOS_MASK 0xfffff000 + +#define EXYNOS_CHIPID_REG_PKG_ID 0x04 +/* Bit field definitions for EXYNOS_CHIPID_REG_PKG_ID register */ +#define EXYNOS5422_IDS_OFFSET 24 +#define EXYNOS5422_IDS_MASK 0xff +#define EXYNOS5422_USESG_OFFSET 3 +#define EXYNOS5422_USESG_MASK 0x01 +#define EXYNOS5422_SG_OFFSET 0 +#define EXYNOS5422_SG_MASK 0x07 +#define EXYNOS5422_TABLE_OFFSET 8 +#define EXYNOS5422_TABLE_MASK 0x03 +#define EXYNOS5422_SG_A_OFFSET 17 +#define EXYNOS5422_SG_A_MASK 0x0f +#define EXYNOS5422_SG_B_OFFSET 21 +#define EXYNOS5422_SG_B_MASK 0x03 +#define EXYNOS5422_SG_BSIGN_OFFSET 23 +#define EXYNOS5422_SG_BSIGN_MASK 0x01 +#define EXYNOS5422_BIN2_OFFSET 12 +#define EXYNOS5422_BIN2_MASK 0x01 + +#define EXYNOS_CHIPID_REG_LOT_ID 0x14 + +#define EXYNOS_CHIPID_REG_AUX_INFO 0x1c +/* Bit field definitions for EXYNOS_CHIPID_REG_AUX_INFO register */ +#define EXYNOS5422_TMCB_OFFSET 0 +#define EXYNOS5422_TMCB_MASK 0x7f +#define EXYNOS5422_ARM_UP_OFFSET 8 +#define EXYNOS5422_ARM_UP_MASK 0x03 +#define EXYNOS5422_ARM_DN_OFFSET 10 +#define EXYNOS5422_ARM_DN_MASK 0x03 +#define EXYNOS5422_KFC_UP_OFFSET 12 +#define EXYNOS5422_KFC_UP_MASK 0x03 +#define EXYNOS5422_KFC_DN_OFFSET 14 +#define EXYNOS5422_KFC_DN_MASK 0x03 + +#endif /*__LINUX_SOC_EXYNOS_CHIPID_H */ diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 568722a041bf..9531ec823298 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -97,7 +97,10 @@ struct ti_sci_core_ops { */ struct ti_sci_dev_ops { int (*get_device)(const struct ti_sci_handle *handle, u32 id); + int (*get_device_exclusive)(const struct ti_sci_handle *handle, u32 id); int (*idle_device)(const struct ti_sci_handle *handle, u32 
id); + int (*idle_device_exclusive)(const struct ti_sci_handle *handle, + u32 id); int (*put_device)(const struct ti_sci_handle *handle, u32 id); int (*is_valid)(const struct ti_sci_handle *handle, u32 id); int (*get_context_loss_count)(const struct ti_sci_handle *handle, @@ -166,29 +169,29 @@ struct ti_sci_dev_ops { * managed by driver for that purpose. */ struct ti_sci_clk_ops { - int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool needs_ssc, bool can_change_freq, bool enable_input_term); - int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid); - int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid); - int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid); + int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid); + int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool *req_state); - int (*is_on)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*is_on)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool *req_state, bool *current_state); - int (*is_off)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*is_off)(const struct ti_sci_handle *handle, u32 did, u32 cid, bool *req_state, bool *current_state); - int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid, - u8 parent_id); - int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid, - u8 *parent_id); + int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u32 cid, + u32 parent_id); + int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u32 cid, + u32 *parent_id); int (*get_num_parents)(const struct ti_sci_handle *handle, u32 did, - u8 cid, u8 *num_parents); + u32 cid, u32 *num_parents); int (*get_best_match_freq)(const struct ti_sci_handle *handle, u32 did, - u8 cid, u64 min_freq, u64 target_freq, + u32 cid, u64 min_freq, u64 target_freq, u64 max_freq, u64 *match_freq); - int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u32 cid, u64 min_freq, u64 target_freq, u64 max_freq); - int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid, + int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u32 cid, u64 *current_freq); }; @@ -241,12 +244,254 @@ struct ti_sci_rm_irq_ops { u16 global_event, u8 vint_status_bit); }; +/* RA config.addr_lo parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID BIT(0) +/* RA config.addr_hi parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID BIT(1) + /* RA config.count parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID BIT(2) +/* RA config.mode parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_MODE_VALID BIT(3) +/* RA config.size parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID BIT(4) +/* RA config.order_id parameter is valid for RM ring configure TISCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_ORDER_ID_VALID BIT(5) + +#define TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER \ + (TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_MODE_VALID | \ + 
TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID) + +/** + * struct ti_sci_rm_ringacc_ops - Ring Accelerator Management operations + * @config: configure the SoC Navigator Subsystem Ring Accelerator ring + * @get_config: get the SoC Navigator Subsystem Ring Accelerator ring + * configuration + */ +struct ti_sci_rm_ringacc_ops { + int (*config)(const struct ti_sci_handle *handle, + u32 valid_params, u16 nav_id, u16 index, + u32 addr_lo, u32 addr_hi, u32 count, u8 mode, + u8 size, u8 order_id + ); + int (*get_config)(const struct ti_sci_handle *handle, + u32 nav_id, u32 index, u8 *mode, + u32 *addr_lo, u32 *addr_hi, u32 *count, + u8 *size, u8 *order_id); +}; + +/** + * struct ti_sci_rm_psil_ops - PSI-L thread operations + * @pair: pair PSI-L source thread to a destination thread. + * If the src_thread is mapped to UDMA tchan, the corresponding channel's + * TCHAN_THRD_ID register is updated. + * If the dst_thread is mapped to UDMA rchan, the corresponding channel's + * RCHAN_THRD_ID register is updated. + * @unpair: unpair PSI-L source thread from a destination thread. + * If the src_thread is mapped to UDMA tchan, the corresponding channel's + * TCHAN_THRD_ID register is cleared. + * If the dst_thread is mapped to UDMA rchan, the corresponding channel's + * RCHAN_THRD_ID register is cleared. + */ +struct ti_sci_rm_psil_ops { + int (*pair)(const struct ti_sci_handle *handle, u32 nav_id, + u32 src_thread, u32 dst_thread); + int (*unpair)(const struct ti_sci_handle *handle, u32 nav_id, + u32 src_thread, u32 dst_thread); +}; + +/* UDMAP channel types */ +#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR 2 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR_SB 3 /* RX only */ +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR 10 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBVR 11 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR 12 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBVR 13 + +#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST 0 +#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_MONO 2 + +#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES 1 +#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES 2 +#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES 3 + +/* UDMAP TX/RX channel valid_params common declarations */ +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID BIT(0) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID BIT(1) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID BIT(2) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID BIT(3) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID BIT(4) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PRIORITY_VALID BIT(5) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_QOS_VALID BIT(6) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ORDER_ID_VALID BIT(7) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_SCHED_PRIORITY_VALID BIT(8) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID BIT(14) + +/** + * Configures a Navigator Subsystem UDMAP transmit channel + * + * Configures a Navigator Subsystem UDMAP transmit channel registers. 
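As a usage note on the ring accelerator operations above: a client would reach the new config op through its TI SCI handle, passing the combined TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER mask when it supplies everything but the order id. The sketch below assumes the handle exposes the ops table as the existing TI SCI API does; the mode, size and order_id values are placeholders:

#include <linux/kernel.h>	/* lower_32_bits()/upper_32_bits() */
#include <linux/soc/ti/ti_sci_protocol.h>

/* Illustrative only: program one ring through the new ringacc config op. */
static int example_ring_cfg(const struct ti_sci_handle *handle, u16 nav_id,
			    u16 ring, dma_addr_t base, u32 count)
{
	return handle->ops.rm_ring_ops.config(handle,
					TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
					nav_id, ring,
					lower_32_bits(base),
					upper_32_bits(base),
					count, 0, 0, 0);
}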
+ * See @ti_sci_msg_rm_udmap_tx_ch_cfg_req + */ +struct ti_sci_msg_rm_udmap_tx_ch_cfg { + u32 valid_params; +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID BIT(9) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID BIT(10) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID BIT(11) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_CREDIT_COUNT_VALID BIT(12) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FDEPTH_VALID BIT(13) + u16 nav_id; + u16 index; + u8 tx_pause_on_err; + u8 tx_filt_einfo; + u8 tx_filt_pswords; + u8 tx_atype; + u8 tx_chan_type; + u8 tx_supr_tdpkt; + u16 tx_fetch_size; + u8 tx_credit_count; + u16 txcq_qnum; + u8 tx_priority; + u8 tx_qos; + u8 tx_orderid; + u16 fdepth; + u8 tx_sched_priority; + u8 tx_burst_size; +}; + +/** + * Configures a Navigator Subsystem UDMAP receive channel + * + * Configures a Navigator Subsystem UDMAP receive channel registers. + * See @ti_sci_msg_rm_udmap_rx_ch_cfg_req + */ +struct ti_sci_msg_rm_udmap_rx_ch_cfg { + u32 valid_params; +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID BIT(9) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID BIT(10) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID BIT(11) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID BIT(12) + u16 nav_id; + u16 index; + u16 rx_fetch_size; + u16 rxcq_qnum; + u8 rx_priority; + u8 rx_qos; + u8 rx_orderid; + u8 rx_sched_priority; + u16 flowid_start; + u16 flowid_cnt; + u8 rx_pause_on_err; + u8 rx_atype; + u8 rx_chan_type; + u8 rx_ignore_short; + u8 rx_ignore_long; + u8 rx_burst_size; +}; + +/** + * Configures a Navigator Subsystem UDMAP receive flow + * + * Configures a Navigator Subsystem UDMAP receive flow's registers. + * See @tis_ci_msg_rm_udmap_flow_cfg_req + */ +struct ti_sci_msg_rm_udmap_flow_cfg { + u32 valid_params; +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID BIT(0) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID BIT(1) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID BIT(2) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID BIT(3) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SOP_OFFSET_VALID BIT(4) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID BIT(5) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_VALID BIT(6) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_VALID BIT(7) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_VALID BIT(8) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_VALID BIT(9) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID BIT(10) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID BIT(11) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID BIT(12) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID BIT(13) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID BIT(14) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID BIT(15) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID BIT(16) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID BIT(17) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID BIT(18) + u16 nav_id; + u16 flow_index; + u8 rx_einfo_present; + u8 rx_psinfo_present; + u8 rx_error_handling; + u8 rx_desc_type; + u16 rx_sop_offset; + u16 rx_dest_qnum; + u8 rx_src_tag_hi; + u8 rx_src_tag_lo; + u8 rx_dest_tag_hi; + u8 rx_dest_tag_lo; + u8 rx_src_tag_hi_sel; + u8 rx_src_tag_lo_sel; + u8 rx_dest_tag_hi_sel; + u8 rx_dest_tag_lo_sel; + u16 rx_fdq0_sz0_qnum; + u16 rx_fdq1_qnum; + u16 rx_fdq2_qnum; + u16 rx_fdq3_qnum; + u8 rx_ps_location; +}; + +/** + * struct ti_sci_rm_udmap_ops - 
UDMA Management operations + * @tx_ch_cfg: configure SoC Navigator Subsystem UDMA transmit channel. + * @rx_ch_cfg: configure SoC Navigator Subsystem UDMA receive channel. + * @rx_flow_cfg1: configure SoC Navigator Subsystem UDMA receive flow. + */ +struct ti_sci_rm_udmap_ops { + int (*tx_ch_cfg)(const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params); + int (*rx_ch_cfg)(const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params); + int (*rx_flow_cfg)(const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_udmap_flow_cfg *params); +}; + +/** + * struct ti_sci_proc_ops - Processor Control operations + * @request: Request to control a physical processor. The requesting host + * should be in the processor access list + * @release: Relinquish a physical processor control + * @handover: Handover a physical processor control to another host + * in the permitted list + * @set_config: Set base configuration of a processor + * @set_control: Setup limited control flags in specific cases + * @get_status: Get the state of physical processor + * + * NOTE: The following paramteres are generic in nature for all these ops, + * -handle: Pointer to TI SCI handle as retrieved by *ti_sci_get_handle + * -pid: Processor ID + * -hid: Host ID + */ +struct ti_sci_proc_ops { + int (*request)(const struct ti_sci_handle *handle, u8 pid); + int (*release)(const struct ti_sci_handle *handle, u8 pid); + int (*handover)(const struct ti_sci_handle *handle, u8 pid, u8 hid); + int (*set_config)(const struct ti_sci_handle *handle, u8 pid, + u64 boot_vector, u32 cfg_set, u32 cfg_clr); + int (*set_control)(const struct ti_sci_handle *handle, u8 pid, + u32 ctrl_set, u32 ctrl_clr); + int (*get_status)(const struct ti_sci_handle *handle, u8 pid, + u64 *boot_vector, u32 *cfg_flags, u32 *ctrl_flags, + u32 *status_flags); +}; + /** * struct ti_sci_ops - Function support for TI SCI * @dev_ops: Device specific operations * @clk_ops: Clock specific operations * @rm_core_ops: Resource management core operations. * @rm_irq_ops: IRQ management specific operations + * @proc_ops: Processor Control specific operations */ struct ti_sci_ops { struct ti_sci_core_ops core_ops; @@ -254,6 +499,10 @@ struct ti_sci_ops { struct ti_sci_clk_ops clk_ops; struct ti_sci_rm_core_ops rm_core_ops; struct ti_sci_rm_irq_ops rm_irq_ops; + struct ti_sci_rm_ringacc_ops rm_ring_ops; + struct ti_sci_rm_psil_ops rm_psil_ops; + struct ti_sci_rm_udmap_ops rm_udmap_ops; + struct ti_sci_proc_ops proc_ops; }; /** diff --git a/include/linux/socket.h b/include/linux/socket.h index b57cd8bf96e2..4049d9755cf1 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -12,6 +12,7 @@ struct pid; struct cred; +struct socket; #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) @@ -262,7 +263,7 @@ struct ucred { #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. */ -#define SOMAXCONN 128 +#define SOMAXCONN 4096 /* Flags we can use with send/ and recv. 
Added those for 1003.1g not all are supported yet @@ -291,6 +292,9 @@ struct ucred { #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ +#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry + * plain text and require encryption + */ #define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ @@ -374,6 +378,12 @@ extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, bool forbid_cmsg_compat); +extern long __sys_sendmsg_sock(struct socket *sock, + struct user_msghdr __user *msg, + unsigned int flags); +extern long __sys_recvmsg_sock(struct socket *sock, + struct user_msghdr __user *msg, + unsigned int flags); /* helpers which do the actual work for syscalls */ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, diff --git a/include/linux/sort.h b/include/linux/sort.h index 2b99a5dd073d..61b96d0ebc44 100644 --- a/include/linux/sort.h +++ b/include/linux/sort.h @@ -4,6 +4,11 @@ #include <linux/types.h> +void sort_r(void *base, size_t num, size_t size, + int (*cmp)(const void *, const void *, const void *), + void (*swap)(void *, void *, int), + const void *priv); + void sort(void *base, size_t num, size_t size, int (*cmp)(const void *, const void *), void (*swap)(void *, void *, int)); diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 35662d9c2c62..ea787201c3ac 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -4,6 +4,8 @@ #ifndef __SOUNDWIRE_H #define __SOUNDWIRE_H +#include <linux/mod_devicetable.h> + struct sdw_bus; struct sdw_slave; @@ -41,6 +43,31 @@ struct sdw_slave; #define SDW_DAI_ID_RANGE_START 100 #define SDW_DAI_ID_RANGE_END 200 +enum { + SDW_PORT_DIRN_SINK = 0, + SDW_PORT_DIRN_SOURCE, + SDW_PORT_DIRN_MAX, +}; + +/* + * constants for flow control, ports and transport + * + * these are bit masks as devices can have multiple capabilities + */ + +/* + * flow modes for SDW port. These can be isochronous, tx controlled, + * rx controlled or async + */ +#define SDW_PORT_FLOW_MODE_ISOCH 0 +#define SDW_PORT_FLOW_MODE_TX_CNTRL BIT(0) +#define SDW_PORT_FLOW_MODE_RX_CNTRL BIT(1) +#define SDW_PORT_FLOW_MODE_ASYNC GENMASK(1, 0) + +/* sample packaging for block. It can be per port or per channel */ +#define SDW_BLOCK_PACKG_PER_PORT BIT(0) +#define SDW_BLOCK_PACKG_PER_CH BIT(1) + /** * enum sdw_slave_status - Slave status * @SDW_SLAVE_UNATTACHED: Slave is not attached with the bus. @@ -76,6 +103,14 @@ enum sdw_command_response { SDW_CMD_FAIL_OTHER = 4, }; +/* block group count enum */ +enum sdw_dpn_grouping { + SDW_BLK_GRP_CNT_1 = 0, + SDW_BLK_GRP_CNT_2 = 1, + SDW_BLK_GRP_CNT_3 = 2, + SDW_BLK_GRP_CNT_4 = 3, +}; + /** * enum sdw_stream_type: data stream type * @@ -100,6 +135,26 @@ enum sdw_data_direction { SDW_DATA_DIR_TX = 1, }; +/** + * enum sdw_port_data_mode: Data Port mode + * + * @SDW_PORT_DATA_MODE_NORMAL: Normal data mode where audio data is received + * and transmitted. + * @SDW_PORT_DATA_MODE_STATIC_1: Simple test mode which uses static value of + * logic 1. The encoding will result in signal transitions at every bitslot + * owned by this Port + * @SDW_PORT_DATA_MODE_STATIC_0: Simple test mode which uses static value of + * logic 0. 
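
The sort.h hunk above adds sort_r(), which differs from sort() only by threading a caller-supplied priv pointer into the comparison callback; a minimal sketch, with the record type and ordering flag purely illustrative:

#include <linux/sort.h>
#include <linux/types.h>

struct rec {
	int key;
};

static int cmp_rec(const void *a, const void *b, const void *priv)
{
	const struct rec *ra = a, *rb = b;
	int cmp = (ra->key > rb->key) - (ra->key < rb->key);

	return *(const bool *)priv ? -cmp : cmp;	/* flip for descending order */
}

static void sort_records(struct rec *recs, size_t n, bool descending)
{
	/* NULL swap falls back to the generic byte-wise swap, as with sort() */
	sort_r(recs, n, sizeof(*recs), cmp_rec, NULL, &descending);
}
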
The encoding will result in no signal transitions + * @SDW_PORT_DATA_MODE_PRBS: Test mode which uses a PRBS generator to produce + * a pseudo random data pattern that is transferred + */ +enum sdw_port_data_mode { + SDW_PORT_DATA_MODE_NORMAL = 0, + SDW_PORT_DATA_MODE_STATIC_1 = 1, + SDW_PORT_DATA_MODE_STATIC_0 = 2, + SDW_PORT_DATA_MODE_PRBS = 3, +}; + /* * SDW properties, defined in MIPI DisCo spec v1.0 */ @@ -153,10 +208,11 @@ enum sdw_clk_stop_mode { * (inclusive) * @num_words: number of wordlengths supported * @words: wordlengths supported - * @flow_controlled: Slave implementation results in an OK_NotReady + * @BRA_flow_controlled: Slave implementation results in an OK_NotReady * response * @simple_ch_prep_sm: If channel prepare sequence is required - * @device_interrupts: If implementation-defined interrupts are supported + * @imp_def_interrupts: If set, each bit corresponds to support for + * implementation-defined interrupts * * The wordlengths are specified by Spec as max, min AND number of * discrete values, implementation can define based on the wordlengths they @@ -167,9 +223,9 @@ struct sdw_dp0_prop { u32 min_word; u32 num_words; u32 *words; - bool flow_controlled; + bool BRA_flow_controlled; bool simple_ch_prep_sm; - bool device_interrupts; + bool imp_def_interrupts; }; /** @@ -219,7 +275,7 @@ struct sdw_dpn_audio_mode { * @simple_ch_prep_sm: If the port supports simplified channel prepare state * machine * @ch_prep_timeout: Port-specific timeout value, in milliseconds - * @device_interrupts: If set, each bit corresponds to support for + * @imp_def_interrupts: If set, each bit corresponds to support for * implementation-defined interrupts * @max_ch: Maximum channels supported * @min_ch: Minimum channels supported @@ -244,7 +300,7 @@ struct sdw_dpn_prop { u32 max_grouping; bool simple_ch_prep_sm; u32 ch_prep_timeout; - u32 device_interrupts; + u32 imp_def_interrupts; u32 max_ch; u32 min_ch; u32 num_ch; @@ -311,36 +367,36 @@ struct sdw_slave_prop { /** * struct sdw_master_prop - Master properties * @revision: MIPI spec version of the implementation - * @master_count: Number of masters - * @clk_stop_mode: Bitmap for Clock Stop modes supported - * @max_freq: Maximum Bus clock frequency, in Hz + * @clk_stop_modes: Bitmap, bit N set when clock-stop-modeN supported + * @max_clk_freq: Maximum Bus clock frequency, in Hz * @num_clk_gears: Number of clock gears supported * @clk_gears: Clock gears supported - * @num_freq: Number of clock frequencies supported, in Hz - * @freq: Clock frequencies supported, in Hz + * @num_clk_freq: Number of clock frequencies supported, in Hz + * @clk_freq: Clock frequencies supported, in Hz * @default_frame_rate: Controller default Frame rate, in Hz * @default_row: Number of rows * @default_col: Number of columns - * @dynamic_frame: Dynamic frame supported + * @dynamic_frame: Dynamic frame shape supported * @err_threshold: Number of times that software may retry sending a single * command - * @dpn_prop: Data Port N properties + * @mclk_freq: clock reference passed to SoundWire Master, in Hz. 
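
To make the sdw_master_prop renames concrete, a sketch of how a controller driver might fill the reworked fields at probe time (the frequencies, the foo_ names and the use of bus->prop are assumptions for illustration, not taken from this patch):

static u32 foo_clk_freq[] = { 9600000, 4800000, 2400000 };

static void foo_fill_master_prop(struct sdw_bus *bus)
{
	struct sdw_master_prop *prop = &bus->prop;

	prop->max_clk_freq = 9600000;			/* was max_freq */
	prop->num_clk_freq = ARRAY_SIZE(foo_clk_freq);	/* was num_freq */
	prop->clk_freq = foo_clk_freq;			/* was freq */
	prop->clk_stop_modes = BIT(SDW_CLK_STOP_MODE0);	/* now a bitmap */
	prop->mclk_freq = 38400000;			/* new field */
	prop->hw_disabled = false;			/* new field */
}
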
+ * @hw_disabled: if true, the Master is not functional, typically due to pin-mux */ struct sdw_master_prop { u32 revision; - u32 master_count; - enum sdw_clk_stop_mode clk_stop_mode; - u32 max_freq; + u32 clk_stop_modes; + u32 max_clk_freq; u32 num_clk_gears; u32 *clk_gears; - u32 num_freq; - u32 *freq; + u32 num_clk_freq; + u32 *clk_freq; u32 default_frame_rate; u32 default_row; u32 default_col; bool dynamic_frame; u32 err_threshold; - struct sdw_dpn_prop *dpn_prop; + u32 mclk_freq; + bool hw_disabled; }; int sdw_master_read_prop(struct sdw_bus *bus); @@ -488,6 +544,7 @@ struct sdw_slave_ops { * @bus: Bus handle * @ops: Slave callback ops * @prop: Slave properties + * @debugfs: Slave debugfs * @node: node for bus list * @port_ready: Port ready completion flag for each Slave port * @dev_num: Device Number assigned by Bus @@ -499,6 +556,9 @@ struct sdw_slave { struct sdw_bus *bus; const struct sdw_slave_ops *ops; struct sdw_slave_prop prop; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif struct list_head node; struct completion *port_ready; u16 dev_num; @@ -668,6 +728,7 @@ struct sdw_master_ops { * Bit set implies used number, bit clear implies unused number. * @bus_lock: bus lock * @msg_lock: message lock + * @compute_params: points to Bus resource management implementation * @ops: Master callback ops * @port_ops: Master port callback ops * @params: Current bus parameters @@ -675,6 +736,7 @@ struct sdw_master_ops { * @m_rt_list: List of Master instance of all stream(s) running on Bus. This * is used to compute and program bus bandwidth, clock, frame shape, * transport and port parameters + * @debugfs: Bus debugfs * @defer_msg: Defer message * @clk_stop_timeout: Clock stop timeout computed * @bank_switch_timeout: Bank switch timeout computed @@ -689,11 +751,15 @@ struct sdw_bus { DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); struct mutex bus_lock; struct mutex msg_lock; + int (*compute_params)(struct sdw_bus *bus); const struct sdw_master_ops *ops; const struct sdw_master_port_ops *port_ops; struct sdw_bus_params params; struct sdw_master_prop prop; struct list_head m_rt_list; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif struct sdw_defer defer_msg; unsigned int clk_stop_timeout; u32 bank_switch_timeout; @@ -778,7 +844,7 @@ struct sdw_stream_params { * @m_rt_count: Count of Master runtime(s) in this stream */ struct sdw_stream_runtime { - char *name; + const char *name; struct sdw_stream_params params; enum sdw_stream_state state; enum sdw_stream_type type; @@ -786,7 +852,7 @@ struct sdw_stream_runtime { int m_rt_count; }; -struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name); +struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name); void sdw_release_stream(struct sdw_stream_runtime *stream); int sdw_stream_add_master(struct sdw_bus *bus, struct sdw_stream_config *stream_config, diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 4d70da45363d..c9427cb6020b 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -8,6 +8,7 @@ * struct sdw_intel_ops: Intel audio driver callback ops * * @config_stream: configure the stream with the hw_params + * the first argument containing the context is mandatory */ struct sdw_intel_ops { int (*config_stream)(void *arg, void *substream, diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h index 9c756b5a0dfe..aaa7f4267c14 100644 --- a/include/linux/soundwire/sdw_type.h +++ 
b/include/linux/soundwire/sdw_type.h @@ -16,4 +16,15 @@ void sdw_unregister_driver(struct sdw_driver *drv); int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size); +/** + * module_sdw_driver() - Helper macro for registering a Soundwire driver + * @__sdw_driver: soundwire slave driver struct + * + * Helper macro for Soundwire drivers which do not do anything special in + * module init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_sdw_driver(__sdw_driver) \ + module_driver(__sdw_driver, sdw_register_driver, \ + sdw_unregister_driver) #endif /* __SOUNDWIRE_TYPES_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 053abd22ad31..af4f265d0f67 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -109,6 +109,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, * This may be changed by the device's driver, or left at the * default (0) indicating protocol words are eight bit bytes. * The spi_transfer.bits_per_word can override this for each transfer. + * @rt: Make the pump thread real time priority. * @irq: Negative, or the number passed to request_irq() to receive * interrupts from this device. * @controller_state: Controller's runtime state @@ -143,6 +144,7 @@ struct spi_device { u32 max_speed_hz; u8 chip_select; u8 bits_per_word; + bool rt; u32 mode; #define SPI_CPHA 0x01 /* clock phase */ #define SPI_CPOL 0x02 /* clock polarity */ @@ -735,6 +737,9 @@ extern void spi_res_release(struct spi_controller *ctlr, * @bits_per_word: select a bits_per_word other than the device default * for this transfer. If 0 the default (from @spi_device) is used. * @cs_change: affects chipselect after this transfer completes + * @cs_change_delay: delay between cs deassert and assert when + * @cs_change is set and @spi_transfer is not the last in @spi_message + * @cs_change_delay_unit: unit of cs_change_delay * @delay_usecs: microseconds to delay after this transfer before * (optionally) changing the chipselect status, then starting * the next transfer or completing this @spi_message. @@ -742,6 +747,9 @@ extern void spi_res_release(struct spi_controller *ctlr, * (set by bits_per_word) transmission. * @word_delay: clock cycles to inter word delay after each word size * (set by bits_per_word) transmission. + * @effective_speed_hz: the effective SCK-speed that was used to + * transfer this transfer. Set to 0 if the spi bus driver does + * not support it. 
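
A sketch of a transfer using the new cs_change_delay fields to keep the chip deselected for a handful of clock cycles between two transfers of the same message (the helper name, buffer and length are illustrative):

static void foo_fill_xfer(struct spi_transfer *t, const void *buf,
			  unsigned int len)
{
	t->tx_buf = buf;
	t->len = len;
	t->cs_change = 1;				/* toggle CS after this transfer */
	t->cs_change_delay = 10;
	t->cs_change_delay_unit = SPI_DELAY_UNIT_SCK;	/* i.e. 10 SCK cycles */
}
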
* @transfer_list: transfers are sequenced through @spi_message.transfers * @tx_sg: Scatterlist for transmit, currently not for client use * @rx_sg: Scatterlist for receive, currently not for client use @@ -824,9 +832,16 @@ struct spi_transfer { u8 bits_per_word; u8 word_delay_usecs; u16 delay_usecs; + u16 cs_change_delay; + u8 cs_change_delay_unit; +#define SPI_DELAY_UNIT_USECS 0 +#define SPI_DELAY_UNIT_NSECS 1 +#define SPI_DELAY_UNIT_SCK 2 u32 speed_hz; u16 word_delay; + u32 effective_speed_hz; + struct list_head transfer_list; }; @@ -967,6 +982,8 @@ static inline void spi_message_free(struct spi_message *m) kfree(m); } +extern void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold, u8 inactive_dly); + extern int spi_setup(struct spi_device *spi); extern int spi_async(struct spi_device *spi, struct spi_message *message); extern int spi_async_locked(struct spi_device *spi, @@ -997,6 +1014,26 @@ spi_max_transfer_size(struct spi_device *spi) return min(tr_max, msg_max); } +/** + * spi_is_bpw_supported - Check if bits per word is supported + * @spi: SPI device + * @bpw: Bits per word + * + * This function checks to see if the SPI controller supports @bpw. + * + * Returns: + * True if @bpw is supported, false otherwise. + */ +static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw) +{ + u32 bpw_mask = spi->master->bits_per_word_mask; + + if (bpw == 8 || (bpw <= 32 && bpw_mask & SPI_BPW_MASK(bpw))) + return true; + + return false; +} + /*---------------------------------------------------------------------------*/ /* SPI transfer replacement methods which make use of spi_res */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index ed7c4d6b8235..031ce8617df8 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -214,7 +214,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) /* * Define the various spin_lock methods. Note we define these - * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The + * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The * various methods are defined as nops in the case they are not * required. */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 42dfab89e740..b762eaba4cdf 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) /* * If lockdep is enabled then we use the non-preemption spin-ops - * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are + * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are * not re-enabled during lock-acquire (which the preempt-spin-ops do): */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 7f7c8c050f63..9cfcc8a756ae 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -120,9 +120,17 @@ struct srcu_struct { * * See include/linux/percpu-defs.h for the rules on per-CPU variables. 
*/ -#define __DEFINE_SRCU(name, is_static) \ - static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);\ - is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_data) +#ifdef MODULE +# define __DEFINE_SRCU(name, is_static) \ + is_static struct srcu_struct name; \ + struct srcu_struct * const __srcu_struct_##name \ + __section("___srcu_struct_ptrs") = &name +#else +# define __DEFINE_SRCU(name, is_static) \ + static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data); \ + is_static struct srcu_struct name = \ + __SRCU_STRUCT_INIT(name, name##_srcu_data) +#endif #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index f0cfd12cb45e..83bd8cb475d7 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -9,9 +9,9 @@ struct task_struct; struct pt_regs; #ifdef CONFIG_STACKTRACE -void stack_trace_print(unsigned long *trace, unsigned int nr_entries, +void stack_trace_print(const unsigned long *trace, unsigned int nr_entries, int spaces); -int stack_trace_snprint(char *buf, size_t size, unsigned long *entries, +int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, unsigned int nr_entries, int spaces); unsigned int stack_trace_save(unsigned long *store, unsigned int size, unsigned int skipnr); diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 1a0bb622cf10..dc60d03c4b60 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -78,14 +78,10 @@ /* Platfrom data for platform device structure's platform_data field */ struct stmmac_mdio_bus_data { - int (*phy_reset)(void *priv); unsigned int phy_mask; int *irqs; int probed_phy_irq; -#ifdef CONFIG_OF - int reset_gpio, active_low; - u32 delays[3]; -#endif + bool needs_reset; }; struct stmmac_dma_cfg { @@ -135,8 +131,10 @@ struct plat_stmmacenet_data { int bus_id; int phy_addr; int interface; + int phy_interface; struct stmmac_mdio_bus_data *mdio_bus_data; struct device_node *phy_node; + struct device_node *phylink_node; struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; int clk_csr; @@ -171,11 +169,13 @@ struct plat_stmmacenet_data { struct clk *clk_ptp_ref; unsigned int clk_ptp_rate; unsigned int clk_ref_rate; + s32 ptp_max_adj; struct reset_control *stmmac_rst; struct stmmac_axi *axi; int has_gmac4; bool has_sun8i; bool tso_en; + int rss_en; int mac_port_sel_speed; bool en_tx_lpi_clockgating; int has_xgmac; diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 6d3635c86dbe..f9a0c6189852 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -36,6 +36,7 @@ int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); void stop_machine_park(int cpu); void stop_machine_unpark(int cpu); +void stop_machine_yield(const struct cpumask *cpumask); #else /* CONFIG_SMP */ diff --git a/include/linux/string.h b/include/linux/string.h index 4deb11f7976b..b6ccdc2c7f02 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -227,7 +227,26 @@ static inline bool strstarts(const char *str, const char *prefix) } size_t memweight(const void *ptr, size_t bytes); -void memzero_explicit(void *s, size_t count); + +/** + * memzero_explicit - Fill a region of memory (e.g. sensitive + * keying data) with 0s. + * @s: Pointer to the start of the area. + * @count: The size of the area. 
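
A minimal sketch of the intended use: wiping key material that lives on the stack before leaving the function, so the final store cannot be dropped as a dead store (the surrounding function and the use of get_random_bytes() are illustrative):

#include <linux/random.h>
#include <linux/string.h>

static void foo_use_ephemeral_key(void)
{
	u8 key[32];

	get_random_bytes(key, sizeof(key));
	/* ... use the key ... */
	memzero_explicit(key, sizeof(key));	/* survives dead-store elimination */
}
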
+ * + * Note: usually using memset() is just fine (!), but in cases + * where clearing out _local_ data at the end of a scope is + * necessary, memzero_explicit() should be used instead in + * order to prevent the compiler from optimising away zeroing. + * + * memzero_explicit() doesn't need an arch-specific version as + * it just invokes the one of memset() implicitly. + */ +static inline void memzero_explicit(void *s, size_t count) +{ + memset(s, 0, count); + barrier_data(s); +} /** * kbasename - return the last part of a pathname. @@ -474,8 +493,9 @@ static inline void memcpy_and_pad(void *dest, size_t dest_len, * But this can lead to bugs due to typos, or if prefix is a pointer * and not a constant. Instead use str_has_prefix(). * - * Returns: 0 if @str does not start with @prefix - strlen(@prefix) if @str does start with @prefix + * Returns: + * * strlen(@prefix) if @str starts with @prefix + * * 0 if @str does not start with @prefix */ static __always_inline size_t str_has_prefix(const char *str, const char *prefix) { diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index d23c5030901a..c28955132234 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -54,6 +54,9 @@ static inline int string_unescape_any_inplace(char *buf) int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz, unsigned int flags, const char *only); +int string_escape_mem_ascii(const char *src, size_t isz, char *dst, + size_t osz); + static inline int string_escape_mem_any_np(const char *src, size_t isz, char *dst, size_t osz, const char *only) { diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h deleted file mode 100644 index cccc0a665d26..000000000000 --- a/include/linux/sudmac.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Header for the SUDMAC driver - * - * Copyright (C) 2013 Renesas Solutions Corp. 
- */ -#ifndef SUDMAC_H -#define SUDMAC_H - -#include <linux/dmaengine.h> -#include <linux/shdma-base.h> -#include <linux/types.h> - -/* Used by slave DMA clients to request DMA to/from a specific peripheral */ -struct sudmac_slave { - struct shdma_slave shdma_slave; /* Set by the platform */ -}; - -/* - * Supplied by platforms to specify, how a DMA channel has to be configured for - * a certain peripheral - */ -struct sudmac_slave_config { - int slave_id; -}; - -struct sudmac_channel { - unsigned long offset; - unsigned long config; - unsigned long wait; /* The configuable range is 0 to 3 */ - unsigned long dint_end_bit; -}; - -struct sudmac_pdata { - const struct sudmac_slave_config *slave; - int slave_num; - const struct sudmac_channel *channel; - int channel_num; -}; - -/* Definitions for the sudmac_channel.config */ -#define SUDMAC_TX_BUFFER_MODE BIT(0) -#define SUDMAC_RX_END_MODE BIT(1) - -/* Definitions for the sudmac_channel.dint_end_bit */ -#define SUDMAC_DMA_BIT_CH0 BIT(0) -#define SUDMAC_DMA_BIT_CH1 BIT(1) - -#endif diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index d4229a78524a..d796058cdff2 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -43,6 +43,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs); void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs); void xprt_free_bc_rqst(struct rpc_rqst *req); +unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt); /* * Determine if a shared backchannel is in use @@ -63,6 +64,11 @@ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, return 0; } +static inline void xprt_destroy_backchannel(struct rpc_xprt *xprt, + unsigned int max_reqs) +{ +} + static inline bool svc_is_backchannel(const struct svc_rqst *rqstp) { return false; diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index c7f38e897174..f8603724fbee 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -87,6 +87,7 @@ struct cache_detail { int has_died); struct cache_head * (*alloc)(void); + void (*flush)(void); int (*match)(struct cache_head *orig, struct cache_head *new); void (*init)(struct cache_head *orig, struct cache_head *new); void (*update)(struct cache_head *orig, struct cache_head *new); @@ -107,9 +108,9 @@ struct cache_detail { /* fields for communication over channel */ struct list_head queue; - atomic_t readers; /* how many time is /chennel open */ - time_t last_close; /* if no readers, when did last close */ - time_t last_warn; /* when we last warned about no readers */ + atomic_t writers; /* how many time is /channel open */ + time_t last_close; /* if no writers, when did last close */ + time_t last_warn; /* when we last warned about no writers */ union { struct proc_dir_entry *procfs; diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 6e8073140a5d..abc63bd1be2b 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -124,6 +124,7 @@ struct rpc_create_args { u32 prognumber; /* overrides program->number */ u32 version; rpc_authflavor_t authflavor; + u32 nconnect; unsigned long flags; char *client_name; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ @@ -163,6 +164,8 @@ void rpc_shutdown_client(struct rpc_clnt *); void rpc_release_client(struct rpc_clnt *); void rpc_task_release_transport(struct rpc_task *); void rpc_task_release_client(struct rpc_task *); +struct rpc_xprt 
*rpc_task_get_xprt(struct rpc_clnt *clnt, + struct rpc_xprt *xprt); int rpcb_create_local(struct net *); void rpcb_put_local(struct net *); @@ -191,6 +194,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); size_t rpc_max_bc_payload(struct rpc_clnt *); +unsigned int rpc_num_bc_slots(struct rpc_clnt *); void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 1b3751327575..0ee3f7052846 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h @@ -30,7 +30,7 @@ #include <linux/ktime.h> #include <linux/spinlock.h> -#define RPC_IOSTATS_VERS "1.0" +#define RPC_IOSTATS_VERS "1.1" struct rpc_iostats { spinlock_t om_lock; @@ -66,6 +66,11 @@ struct rpc_iostats { ktime_t om_queue, /* queued for xmit */ om_rtt, /* RPC RTT */ om_execute; /* RPC execution */ + /* + * The count of operations that complete with tk_status < 0. + * These statuses usually indicate error conditions. + */ + unsigned long om_error_status; } ____cacheline_aligned; struct rpc_task; diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index d0e451868f02..a6ef35184ef1 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -98,7 +98,6 @@ typedef void (*rpc_action)(struct rpc_task *); struct rpc_call_ops { void (*rpc_call_prepare)(struct rpc_task *, void *); - void (*rpc_call_prepare_transmit)(struct rpc_task *, void *); void (*rpc_call_done)(struct rpc_task *, void *); void (*rpc_count_stats)(struct rpc_task *, void *); void (*rpc_release)(void *); @@ -126,6 +125,7 @@ struct rpc_task_setup { #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ #define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ +#define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */ #define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ #define RPC_TASK_SENT 0x0800 /* message was sent */ @@ -183,8 +183,9 @@ struct rpc_task_setup { #define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW) struct rpc_timer { - struct timer_list timer; struct list_head list; + unsigned long expires; + struct delayed_work dwork; }; /* @@ -241,9 +242,6 @@ void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue, void rpc_sleep_on_priority(struct rpc_wait_queue *, struct rpc_task *, int priority); -void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq, - struct rpc_wait_queue *queue, - struct rpc_task *task); void rpc_wake_up_queued_task(struct rpc_wait_queue *, struct rpc_task *); void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *, diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 981f0d726ad4..40f65888dd38 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -42,6 +42,7 @@ #ifndef SVC_RDMA_H #define SVC_RDMA_H +#include <linux/llist.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/rpc_rdma.h> @@ -107,8 +108,7 @@ struct svcxprt_rdma { struct list_head sc_read_complete_q; struct work_struct sc_work; - spinlock_t sc_recv_lock; - struct list_head sc_recv_ctxts; + struct llist_head sc_recv_ctxts; }; /* sc_flags */ 
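
The svc_rdma.h change above replaces the sc_recv_lock/list_head pair with a lock-free llist, together with the rc_node member added to svc_rdma_recv_ctxt just below; a sketch of the push/pop pattern this enables (function names are illustrative, field names follow the patch, and popping still assumes a single consumer or external serialization):

static void foo_put_ctxt(struct svcxprt_rdma *rdma,
			 struct svc_rdma_recv_ctxt *ctxt)
{
	llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);	/* lock-free push */
}

static struct svc_rdma_recv_ctxt *foo_get_ctxt(struct svcxprt_rdma *rdma)
{
	struct llist_node *node = llist_del_first(&rdma->sc_recv_ctxts);

	return node ? llist_entry(node, struct svc_rdma_recv_ctxt, rc_node) : NULL;
}
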
#define RDMAXPRT_CONN_PENDING 3 @@ -125,6 +125,7 @@ enum { #define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD struct svc_rdma_recv_ctxt { + struct llist_node rc_node; struct list_head rc_list; struct ib_recv_wr rc_recv_wr; struct ib_cqe rc_cqe; @@ -200,7 +201,6 @@ extern struct svc_xprt_class svc_rdma_bc_class; #endif /* svc_rdma.c */ -extern struct workqueue_struct *svc_rdma_wq; extern int svc_rdma_init(void); extern void svc_rdma_cleanup(void); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 9ee3970ba59c..f33e5013bdfb 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -164,6 +164,13 @@ xdr_decode_opaque_fixed(__be32 *p, void *ptr, unsigned int len) return p + XDR_QUADLEN(len); } +static inline void xdr_netobj_dup(struct xdr_netobj *dst, + struct xdr_netobj *src, gfp_t gfp_mask) +{ + dst->data = kmemdup(src->data, src->len, gfp_mask); + dst->len = src->len; +} + /* * Adjust kvec to reflect end of xdr'ed data (RPC client XDR) */ @@ -179,7 +186,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); -extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int); +extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int); extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index a6d9fce7f20e..d783e15ba898 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -158,6 +158,7 @@ struct rpc_xprt_ops { int (*bc_setup)(struct rpc_xprt *xprt, unsigned int min_reqs); size_t (*bc_maxpayload)(struct rpc_xprt *xprt); + unsigned int (*bc_num_slots)(struct rpc_xprt *xprt); void (*bc_free_rqst)(struct rpc_rqst *rqst); void (*bc_destroy)(struct rpc_xprt *xprt, unsigned int max_reqs); @@ -238,6 +239,7 @@ struct rpc_xprt { /* * Send stuff */ + atomic_long_t queuelen; spinlock_t transport_lock; /* lock transport info */ spinlock_t reserve_lock; /* lock slot table */ spinlock_t queue_lock; /* send/receive queue lock */ @@ -250,8 +252,9 @@ struct rpc_xprt { #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct svc_serv *bc_serv; /* The RPC service which will */ /* process the callback */ - int bc_alloc_count; /* Total number of preallocs */ - atomic_t bc_free_slots; + unsigned int bc_alloc_max; + unsigned int bc_alloc_count; /* Total number of preallocs */ + atomic_t bc_slot_count; /* Number of allocated slots */ spinlock_t bc_pa_lock; /* Protects the preallocated * items */ struct list_head bc_pa_list; /* List of preallocated @@ -334,6 +337,9 @@ struct xprt_class { */ struct rpc_xprt *xprt_create_transport(struct xprt_create *args); void xprt_connect(struct rpc_task *task); +unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt); +void xprt_reconnect_backoff(struct rpc_xprt *xprt, + unsigned long init_to); void xprt_reserve(struct rpc_task *task); void xprt_retry_reserve(struct rpc_task *task); int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); @@ -346,6 +352,7 @@ bool xprt_prepare_transmit(struct rpc_task *task); void xprt_request_enqueue_transmit(struct rpc_task *task); void xprt_request_enqueue_receive(struct rpc_task *task); void xprt_request_wait_receive(struct rpc_task 
*task); +void xprt_request_dequeue_xprt(struct rpc_task *task); bool xprt_request_need_retransmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task); void xprt_end_transmit(struct rpc_task *task); diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h index af1257c030d2..c6cce3fbf29d 100644 --- a/include/linux/sunrpc/xprtmultipath.h +++ b/include/linux/sunrpc/xprtmultipath.h @@ -15,6 +15,8 @@ struct rpc_xprt_switch { struct kref xps_kref; unsigned int xps_nxprts; + unsigned int xps_nactive; + atomic_long_t xps_queuelen; struct list_head xps_xprt_list; struct net * xps_net; diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index 86fc38ff0355..16c239e0d6dd 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h @@ -49,9 +49,9 @@ * fully-chunked NFS message (read chunks are the largest). Note only * a single chunk type per message is supported currently. */ -#define RPCRDMA_MIN_SLOT_TABLE (2U) +#define RPCRDMA_MIN_SLOT_TABLE (4U) #define RPCRDMA_DEF_SLOT_TABLE (128U) -#define RPCRDMA_MAX_SLOT_TABLE (256U) +#define RPCRDMA_MAX_SLOT_TABLE (16384U) #define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */ #define RPCRDMA_DEF_INLINE (4096) /* default inline thresh */ diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index b81d0b3e0799..a940de03808d 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -56,10 +56,12 @@ struct sock_xprt { */ unsigned long sock_state; struct delayed_work connect_worker; + struct work_struct error_worker; struct work_struct recv_worker; struct mutex recv_mutex; struct sockaddr_storage srcaddr; unsigned short srcport; + int xprt_err; /* * UDP socket buffer size parameters @@ -84,6 +86,10 @@ struct sock_xprt { #define XPRT_SOCK_CONNECTING 1U #define XPRT_SOCK_DATA_READY (2) #define XPRT_SOCK_UPD_TIMEOUT (3) +#define XPRT_SOCK_WAKE_ERROR (4) +#define XPRT_SOCK_WAKE_WRITE (5) +#define XPRT_SOCK_WAKE_PENDING (6) +#define XPRT_SOCK_WAKE_DISCONNECT (7) #endif /* __KERNEL__ */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index f0d262ad7b78..6fc8843f1c9e 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -190,8 +190,9 @@ struct platform_suspend_ops { struct platform_s2idle_ops { int (*begin)(void); int (*prepare)(void); + int (*prepare_late)(void); void (*wake)(void); - void (*sync)(void); + void (*restore_early)(void); void (*restore)(void); void (*end)(void); }; @@ -304,7 +305,7 @@ static inline bool idle_should_enter_s2idle(void) return unlikely(s2idle_state == S2IDLE_STATE_ENTER); } -extern bool pm_suspend_via_s2idle(void); +extern bool pm_suspend_default_s2idle(void); extern void __init pm_states_init(void); extern void s2idle_set_ops(const struct platform_s2idle_ops *ops); extern void s2idle_wake(void); @@ -336,7 +337,8 @@ static inline void pm_set_suspend_via_firmware(void) {} static inline void pm_set_resume_via_firmware(void) {} static inline bool pm_suspend_via_firmware(void) { return false; } static inline bool pm_resume_via_firmware(void) { return false; } -static inline bool pm_suspend_via_s2idle(void) { return false; } +static inline bool pm_suspend_no_platform(void) { return false; } +static inline bool pm_suspend_default_s2idle(void) { return false; } static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } @@ -448,6 +450,7 @@ extern bool 
system_entering_hibernation(void); extern bool hibernation_available(void); asmlinkage int swsusp_save(void); extern struct pbe *restore_pblist; +int pfn_is_nosave(unsigned long pfn); #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} diff --git a/include/linux/swap.h b/include/linux/swap.h index 4bfb5c4ac108..063c0c1e112b 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -148,7 +148,7 @@ struct zone; * We always assume that blocks are of size PAGE_SIZE. */ struct swap_extent { - struct list_head list; + struct rb_node rb_node; pgoff_t start_page; pgoff_t nr_pages; sector_t start_block; @@ -175,8 +175,9 @@ enum { SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */ SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ + SWP_VALID = (1 << 13), /* swap is valid to be operated on? */ /* add others here before... */ - SWP_SCANNING = (1 << 13), /* refcount in scan_swap_map */ + SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */ }; #define SWAP_CLUSTER_MAX 32UL @@ -247,8 +248,7 @@ struct swap_info_struct { unsigned int cluster_next; /* likely index for next allocation */ unsigned int cluster_nr; /* countdown to next cluster search */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ - struct swap_extent *curr_swap_extent; - struct swap_extent first_swap_extent; + struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ @@ -340,6 +340,7 @@ extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_all(void); extern void rotate_reclaimable_page(struct page *page); extern void deactivate_file_page(struct page *page); +extern void deactivate_page(struct page *page); extern void mark_page_lazyfree(struct page *page); extern void swap_setup(void); @@ -364,6 +365,7 @@ extern int vm_swappiness; extern int remove_mapping(struct address_space *mapping, struct page *page); extern unsigned long vm_total_pages; +extern unsigned long reclaim_pages(struct list_head *page_list); #ifdef CONFIG_NUMA extern int node_reclaim_mode; extern int sysctl_min_unmapped_ratio; @@ -460,7 +462,7 @@ extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); extern int page_swapcount(struct page *); -extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry); +extern int __swap_count(swp_entry_t entry); extern int __swp_swapcount(swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); @@ -470,6 +472,12 @@ extern int try_to_free_swap(struct page *); struct backing_dev_info; extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); extern void exit_swap_address_space(unsigned int type); +extern struct swap_info_struct *get_swap_device(swp_entry_t entry); + +static inline void put_swap_device(struct swap_info_struct *si) +{ + rcu_read_unlock(); +} #else /* CONFIG_SWAP */ @@ -576,7 +584,7 @@ static inline int page_swapcount(struct page *page) return 0; } -static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry) +static inline int 
__swap_count(swp_entry_t entry) { return 0; } diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 4d961668e5fc..877fd239b6ff 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -6,6 +6,8 @@ #include <linux/bug.h> #include <linux/mm_types.h> +#ifdef CONFIG_MMU + /* * swapcache pages are stored in the swapper_space radix tree. We want to * get good packing density in that tree, so the index should be dense in @@ -50,13 +52,11 @@ static inline pgoff_t swp_offset(swp_entry_t entry) return entry.val & SWP_OFFSET_MASK; } -#ifdef CONFIG_MMU /* check whether a pte points to a swap entry */ static inline int is_swap_pte(pte_t pte) { return !pte_none(pte) && !pte_present(pte); } -#endif /* * Convert the arch-dependent pte representation of a swp_entry_t into an @@ -129,12 +129,6 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry) { return pfn_to_page(swp_offset(entry)); } - -vm_fault_t device_private_entry_fault(struct vm_area_struct *vma, - unsigned long addr, - swp_entry_t entry, - unsigned int flags, - pmd_t *pmdp); #else /* CONFIG_DEVICE_PRIVATE */ static inline swp_entry_t make_device_private_entry(struct page *page, bool write) { @@ -164,15 +158,6 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry) { return NULL; } - -static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma, - unsigned long addr, - swp_entry_t entry, - unsigned int flags, - pmd_t *pmdp) -{ - return VM_FAULT_SIGBUS; -} #endif /* CONFIG_DEVICE_PRIVATE */ #ifdef CONFIG_MIGRATION @@ -375,4 +360,5 @@ static inline int non_swap_entry(swp_entry_t entry) } #endif +#endif /* CONFIG_MMU */ #endif /* _LINUX_SWAPOPS_H */ diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 361f62bb4a8e..cde3dc18e21a 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -46,13 +46,17 @@ enum dma_sync_target { extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, - phys_addr_t phys, size_t size, + phys_addr_t phys, + size_t mapping_size, + size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, + size_t mapping_size, + size_t alloc_size, + enum dma_data_direction dir, unsigned long attrs); extern void swiotlb_tbl_sync_single(struct device *hwdev, diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h index b7c70c3e953f..48ceea867dd6 100644 --- a/include/linux/sys_soc.h +++ b/include/linux/sys_soc.h @@ -12,6 +12,7 @@ struct soc_device_attribute { const char *machine; const char *family; const char *revision; + const char *serial_number; const char *soc_id; const void *data; }; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 2bcef4c70183..f7c561c4dcdd 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -68,6 +68,7 @@ struct sigaltstack; struct rseq; union bpf_attr; struct io_uring_params; +struct clone_args; #include <linux/types.h> #include <linux/aio_abi.h> @@ -264,7 +265,7 @@ static inline void addr_limit_user_check(void) if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS), "Invalid address limit on user-mode return")) - force_sig(SIGKILL, current); + force_sig(SIGKILL); #ifdef TIF_FSCHECK clear_thread_flag(TIF_FSCHECK); @@ -850,6 +851,9 @@ asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int __user *, unsigned long); #endif #endif + +asmlinkage long sys_clone3(struct 
clone_args __user *uargs, size_t size); + asmlinkage long sys_execve(const char __user *filename, const char __user *const __user *argv, const char __user *const __user *envp); @@ -927,6 +931,7 @@ asmlinkage long sys_clock_adjtime32(clockid_t which_clock, struct old_timex32 __user *tx); asmlinkage long sys_syncfs(int fd); asmlinkage long sys_setns(int fd, int nstype); +asmlinkage long sys_pidfd_open(pid_t pid, unsigned int flags); asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags); asmlinkage long sys_process_vm_readv(pid_t pid, @@ -1226,8 +1231,8 @@ asmlinkage long sys_ni_syscall(void); * the ksys_xyzyyz() functions prototyped below. */ -int ksys_mount(char __user *dev_name, char __user *dir_name, char __user *type, - unsigned long flags, void __user *data); +int ksys_mount(const char __user *dev_name, const char __user *dir_name, + const char __user *type, unsigned long flags, void __user *data); int ksys_umount(char __user *name, int flags); int ksys_dup(unsigned int fildes); int ksys_chroot(const char __user *filename); @@ -1397,4 +1402,23 @@ static inline unsigned int ksys_personality(unsigned int personality) return old; } +/* for __ARCH_WANT_SYS_IPC */ +long ksys_semtimedop(int semid, struct sembuf __user *tsops, + unsigned int nsops, + const struct __kernel_timespec __user *timeout); +long ksys_semget(key_t key, int nsems, int semflg); +long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); +long ksys_msgget(key_t key, int msgflg); +long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); +long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, + long msgtyp, int msgflg); +long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, + int msgflg); +long ksys_shmget(key_t key, size_t size, int shmflg); +long ksys_shmdt(char __user *shmaddr); +long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); +long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, + unsigned int nsops, + const struct old_timespec32 __user *timeout); + #endif diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index aadd310769d0..6df477329b76 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -37,6 +37,13 @@ struct ctl_table_root; struct ctl_table_header; struct ctl_dir; +/* Keep the same order as in fs/proc/proc_sysctl.c */ +#define SYSCTL_ZERO ((void *)&sysctl_vals[0]) +#define SYSCTL_ONE ((void *)&sysctl_vals[1]) +#define SYSCTL_INT_MAX ((void *)&sysctl_vals[2]) + +extern const int sysctl_vals[]; + typedef int proc_handler (struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 786816cf4aa5..fa7ee503fb76 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -196,6 +196,12 @@ struct bin_attribute { .size = _size, \ } +#define __BIN_ATTR_WO(_name, _size) { \ + .attr = { .name = __stringify(_name), .mode = 0200 }, \ + .write = _name##_write, \ + .size = _size, \ +} + #define __BIN_ATTR_RW(_name, _size) \ __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size) @@ -208,6 +214,9 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR(_name, _mode, _read, \ #define BIN_ATTR_RO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size) +#define BIN_ATTR_WO(_name, _size) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size) + #define BIN_ATTR_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = 
__BIN_ATTR_RW(_name, _size) @@ -268,6 +277,8 @@ int __must_check sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp); int __must_check sysfs_create_groups(struct kobject *kobj, const struct attribute_group **groups); +int __must_check sysfs_update_groups(struct kobject *kobj, + const struct attribute_group **groups); int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_remove_group(struct kobject *kobj, @@ -433,6 +444,12 @@ static inline int sysfs_create_groups(struct kobject *kobj, return 0; } +static inline int sysfs_update_groups(struct kobject *kobj, + const struct attribute_group **groups) +{ + return 0; +} + static inline int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp) { diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 3e2a80cc7b56..96305a64a5a7 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -53,18 +53,4 @@ extern const struct blk_integrity_profile t10_pi_type1_ip; extern const struct blk_integrity_profile t10_pi_type3_crc; extern const struct blk_integrity_profile t10_pi_type3_ip; -#ifdef CONFIG_BLK_DEV_INTEGRITY -extern void t10_pi_prepare(struct request *rq, u8 protection_type); -extern void t10_pi_complete(struct request *rq, u8 protection_type, - unsigned int intervals); -#else -static inline void t10_pi_complete(struct request *rq, u8 protection_type, - unsigned int intervals) -{ -} -static inline void t10_pi_prepare(struct request *rq, u8 protection_type) -{ -} -#endif - #endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 9a478a0cd3a2..668e25a76d69 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -58,12 +58,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb) /* TCP Fast Open Cookie as stored in memory */ struct tcp_fastopen_cookie { - union { - u8 val[TCP_FASTOPEN_COOKIE_MAX]; -#if IS_ENABLED(CONFIG_IPV6) - struct in6_addr addr; -#endif - }; + __le64 val[DIV_ROUND_UP(TCP_FASTOPEN_COOKIE_MAX, sizeof(u64))]; s8 len; bool exp; /* In RFC6994 experimental option format */ }; @@ -245,6 +240,7 @@ struct tcp_sock { syn_smc:1; /* SYN includes SMC */ u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ + u32 tcp_tx_delay; /* delay (in usec) added to TX packets */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ @@ -358,6 +354,8 @@ struct tcp_sock { #define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0 #endif + u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */ + /* Receiver side RTT estimation */ u32 rcv_rtt_last_tsecr; struct { @@ -395,7 +393,7 @@ struct tcp_sock { /* fastopen_rsk points to request_sock that resulted in this big * socket. Used to retransmit SYNACKs etc. 
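
With fastopen_rsk annotated __rcu just below, lockless readers have to go through the RCU accessors rather than a plain load; a sketch of the read side (the helper name is illustrative, and the TCP code proper uses lockdep-checked variants where the socket lock is held):

static bool foo_has_fastopen_req(const struct sock *sk)
{
	bool pending;

	rcu_read_lock();
	pending = rcu_dereference(tcp_sk(sk)->fastopen_rsk) != NULL;
	rcu_read_unlock();

	return pending;
}
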
*/ - struct request_sock *fastopen_rsk; + struct request_sock __rcu *fastopen_rsk; u32 *saved_syn; }; @@ -436,6 +434,7 @@ struct tcp_timewait_sock { u32 tw_last_oow_ack_time; int tw_ts_recent_stamp; + u32 tw_tx_delay; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif @@ -448,8 +447,8 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) static inline bool tcp_passive_fastopen(const struct sock *sk) { - return (sk->sk_state == TCP_SYN_RECV && - tcp_sk(sk)->fastopen_rsk != NULL); + return sk->sk_state == TCP_SYN_RECV && + rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL; } static inline void fastopen_queue_tune(struct sock *sk, int backlog) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 15a4ca5d7099..e45659c75920 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -251,7 +251,7 @@ struct thermal_bind_params { * platform characterization. This value is relative to the * rest of the weights so a cooling device whose weight is * double that of another cooling device is twice as - * effective. See Documentation/thermal/sysfs-api.txt for more + * effective. See Documentation/driver-api/thermal/sysfs-api.rst for more * information. */ int weight; @@ -259,7 +259,7 @@ struct thermal_bind_params { /* * This is a bit mask that gives the binding relation between this * thermal zone and cdev, for a particular trip point. - * See Documentation/thermal/sysfs-api.txt for more information. + * See Documentation/driver-api/thermal/sysfs-api.rst for more information. */ int trip_mask; diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 8d8821b3689a..659a4400517b 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -134,7 +134,7 @@ static inline void copy_overflow(int size, unsigned long count) WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); } -static __always_inline bool +static __always_inline __must_check bool check_copy_size(const void *addr, size_t bytes, bool is_source) { int sz = __compiletime_object_size(addr); diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 2d7e012db03f..ece782ef5466 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -429,6 +429,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc) * @lock: Must be held during ring creation/destruction. Is acquired by * interrupt_work when dispatching interrupts to individual rings. 
* @pdev: Pointer to the PCI device + * @ops: NHI specific optional ops * @iobase: MMIO space of the NHI * @tx_rings: All Tx rings available on this host controller * @rx_rings: All Rx rings available on this host controller @@ -442,6 +443,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc) struct tb_nhi { spinlock_t lock; struct pci_dev *pdev; + const struct tb_nhi_ops *ops; void __iomem *iobase; struct tb_ring **tx_rings; struct tb_ring **rx_rings; diff --git a/include/linux/time64.h b/include/linux/time64.h index a620ee610b9f..19125489ae94 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -30,6 +30,8 @@ struct itimerspec64 { /* Located here for timespec[64]_valid_strict */ #define TIME64_MAX ((s64)~((u64)1 << 63)) +#define TIME64_MIN (-TIME64_MAX - 1) + #define KTIME_MAX ((s64)~((u64)1 << 63)) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 7acb953298a7..84ff2844df2a 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -57,6 +57,7 @@ struct tk_read_base { * @cs_was_changed_seq: The sequence number of clocksource change events * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds + * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset * @cycle_interval: Number of clock cycles in one NTP interval * @xtime_interval: Number of clock shifted nano seconds in one NTP * interval. @@ -84,6 +85,9 @@ struct tk_read_base { * * wall_to_monotonic is no longer the boot time, getboottime must be * used instead. + * + * @monotonic_to_boottime is a timespec64 representation of @offs_boot to + * accelerate the VDSO update for CLOCK_BOOTTIME. 
*/ struct timekeeper { struct tk_read_base tkr_mono; @@ -99,6 +103,7 @@ struct timekeeper { u8 cs_was_changed_seq; ktime_t next_leap_ktime; u64 raw_sec; + struct timespec64 monotonic_to_boot; /* The following members are for timekeeping internal use */ u64 cycle_interval; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index a8ab0f143ac4..b27e2ffa96c1 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -113,6 +113,34 @@ static inline ktime_t ktime_get_coarse_clocktai(void) return ktime_get_coarse_with_offset(TK_OFFS_TAI); } +static inline ktime_t ktime_get_coarse(void) +{ + struct timespec64 ts; + + ktime_get_coarse_ts64(&ts); + return timespec64_to_ktime(ts); +} + +static inline u64 ktime_get_coarse_ns(void) +{ + return ktime_to_ns(ktime_get_coarse()); +} + +static inline u64 ktime_get_coarse_real_ns(void) +{ + return ktime_to_ns(ktime_get_coarse_real()); +} + +static inline u64 ktime_get_coarse_boottime_ns(void) +{ + return ktime_to_ns(ktime_get_coarse_boottime()); +} + +static inline u64 ktime_get_coarse_clocktai_ns(void) +{ + return ktime_to_ns(ktime_get_coarse_clocktai()); +} + /** * ktime_mono_to_real - Convert monotonic time to clock realtime */ @@ -131,12 +159,12 @@ static inline u64 ktime_get_real_ns(void) return ktime_to_ns(ktime_get_real()); } -static inline u64 ktime_get_boot_ns(void) +static inline u64 ktime_get_boottime_ns(void) { return ktime_to_ns(ktime_get_boottime()); } -static inline u64 ktime_get_tai_ns(void) +static inline u64 ktime_get_clocktai_ns(void) { return ktime_to_ns(ktime_get_clocktai()); } diff --git a/include/linux/timer.h b/include/linux/timer.h index 7b066fd38248..1e6650ed066d 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -36,19 +36,30 @@ struct timer_list { #define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) #endif -/* - * A deferrable timer will work normally when the system is busy, but - * will not cause a CPU to come out of idle just to service it; instead, - * the timer will be serviced when the CPU eventually wakes up with a - * subsequent non-deferrable timer. +/** + * @TIMER_DEFERRABLE: A deferrable timer will work normally when the + * system is busy, but will not cause a CPU to come out of idle just + * to service it; instead, the timer will be serviced when the CPU + * eventually wakes up with a subsequent non-deferrable timer. * - * An irqsafe timer is executed with IRQ disabled and it's safe to wait for - * the completion of the running instance from IRQ handlers, for example, - * by calling del_timer_sync(). + * @TIMER_IRQSAFE: An irqsafe timer is executed with IRQ disabled and + * it's safe to wait for the completion of the running instance from + * IRQ handlers, for example, by calling del_timer_sync(). * * Note: The irq disabled callback execution is a special case for * workqueue locking issues. It's not meant for executing random crap * with interrupts disabled. Abuse is monitored! + * + * @TIMER_PINNED: A pinned timer will not be affected by any timer + * placement heuristics (like, NOHZ) and will always expire on the CPU + * on which the timer was enqueued. + * + * Note: Because enqueuing of timers can migrate the timer from one + * CPU to another, pinned timers are not guaranteed to stay on the + * initialy selected CPU. They move to the CPU on which the enqueue + * function is invoked via mod_timer() or add_timer(). If the timer + * should be placed on a particular CPU, then add_timer_on() has to be + * used. 
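
A sketch of the add_timer_on() pattern that note recommends when expiry must happen on one particular CPU (the names and the one-second period are illustrative):

static struct timer_list foo_timer;

static void foo_timer_fn(struct timer_list *t)
{
	pr_info("foo timer fired on CPU%d\n", smp_processor_id());
}

static void foo_start_on_cpu(int cpu)
{
	timer_setup(&foo_timer, foo_timer_fn, TIMER_PINNED);
	foo_timer.expires = jiffies + HZ;
	add_timer_on(&foo_timer, cpu);	/* queue and expire on 'cpu' */
}
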
*/ #define TIMER_CPUMASK 0x0003FFFF #define TIMER_MIGRATING 0x00040000 @@ -172,7 +183,7 @@ extern void add_timer(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) extern int del_timer_sync(struct timer_list *timer); #else # define del_timer_sync(t) del_timer(t) diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h index fd4a6e6ec831..672df7fbf6c1 100644 --- a/include/linux/timeriomem-rng.h +++ b/include/linux/timeriomem-rng.h @@ -5,6 +5,9 @@ * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk> */ +#ifndef _LINUX_TIMERIOMEM_RNG_H +#define _LINUX_TIMERIOMEM_RNG_H + struct timeriomem_rng_data { void __iomem *address; @@ -14,3 +17,5 @@ struct timeriomem_rng_data { /* bits of entropy per 1024 bits read */ unsigned int quality; }; + +#endif /* _LINUX_TIMERIOMEM_RNG_H */ diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index 78b8cc73f12f..93884086f392 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h @@ -12,8 +12,7 @@ struct timerqueue_node { }; struct timerqueue_head { - struct rb_root head; - struct timerqueue_node *next; + struct rb_root_cached rb_root; }; @@ -29,13 +28,14 @@ extern struct timerqueue_node *timerqueue_iterate_next( * * @head: head of timerqueue * - * Returns a pointer to the timer node that has the - * earliest expiration time. + * Returns a pointer to the timer node that has the earliest expiration time. */ static inline struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) { - return head->next; + struct rb_node *leftmost = rb_first_cached(&head->rb_root); + + return rb_entry(leftmost, struct timerqueue_node, node); } static inline void timerqueue_init(struct timerqueue_node *node) @@ -43,9 +43,18 @@ static inline void timerqueue_init(struct timerqueue_node *node) RB_CLEAR_NODE(&node->node); } +static inline bool timerqueue_node_queued(struct timerqueue_node *node) +{ + return !RB_EMPTY_NODE(&node->node); +} + +static inline bool timerqueue_node_expires(struct timerqueue_node *node) +{ + return node->expires; +} + static inline void timerqueue_init_head(struct timerqueue_head *head) { - head->head = RB_ROOT; - head->next = NULL; + head->rb_root = RB_ROOT_CACHED; } #endif /* _LINUX_TIMERQUEUE_H */ diff --git a/include/linux/tnum.h b/include/linux/tnum.h index c7dc2b5902c0..c17af77f3fae 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -5,6 +5,10 @@ * propagate the unknown bits such that the tnum result represents all the * possible results for possible values of the operands. 
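
For reference, the encoding that comment alludes to: a tnum keeps the known bits in @value and marks unknown bits in @mask, so one tnum stands for a whole set of concrete values (the example is illustrative, not from the patch):

#include <linux/tnum.h>

/* value = 0b100, mask = 0b011: bit 2 is known to be 1, bits 1-0 are unknown,
 * so this tnum represents exactly the set { 4, 5, 6, 7 }. */
static const struct tnum foo_example = { .value = 0x4, .mask = 0x3 };
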
*/ + +#ifndef _LINUX_TNUM_H +#define _LINUX_TNUM_H + #include <linux/types.h> struct tnum { @@ -81,3 +85,5 @@ bool tnum_in(struct tnum a, struct tnum b); int tnum_strn(char *str, size_t size, struct tnum a); /* Format a tnum as tristate binary expansion */ int tnum_sbin(char *str, size_t size, struct tnum a); + +#endif /* _LINUX_TNUM_H */ diff --git a/include/linux/topology.h b/include/linux/topology.h index cb0775e1ee4b..eb2fe6edd73c 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -27,6 +27,7 @@ #ifndef _LINUX_TOPOLOGY_H #define _LINUX_TOPOLOGY_H +#include <linux/arch_topology.h> #include <linux/cpumask.h> #include <linux/bitops.h> #include <linux/mmzone.h> @@ -59,6 +60,20 @@ int arch_update_cpu_topology(void); */ #define RECLAIM_DISTANCE 30 #endif + +/* + * The following tunable allows platforms to override the default node + * reclaim distance (RECLAIM_DISTANCE) if remote memory accesses are + * sufficiently fast that the default value actually hurts + * performance. + * + * AMD EPYC machines use this because even though the 2-hop distance + * is 32 (3.2x slower than a local memory access) performance actually + * *improves* if allowed to reclaim memory and load balance tasks + * between NUMA nodes 2-hops apart. + */ +extern int __read_mostly node_reclaim_distance; + #ifndef PENALTY_FOR_NODE_WITH_CPUS #define PENALTY_FOR_NODE_WITH_CPUS (1) #endif @@ -184,6 +199,9 @@ static inline int cpu_to_mem(int cpu) #ifndef topology_physical_package_id #define topology_physical_package_id(cpu) ((void)(cpu), -1) #endif +#ifndef topology_die_id +#define topology_die_id(cpu) ((void)(cpu), -1) +#endif #ifndef topology_core_id #define topology_core_id(cpu) ((void)(cpu), 0) #endif @@ -193,6 +211,9 @@ static inline int cpu_to_mem(int cpu) #ifndef topology_core_cpumask #define topology_core_cpumask(cpu) cpumask_of(cpu) #endif +#ifndef topology_die_cpumask +#define topology_die_cpumask(cpu) cpumask_of(cpu) +#endif #ifdef CONFIG_SCHED_SMT static inline const struct cpumask *cpu_smt_mask(int cpu) diff --git a/include/linux/torture.h b/include/linux/torture.h index 23d80db426d7..6241f59e2d6f 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -66,7 +66,7 @@ int torture_shutdown_init(int ssecs, void (*cleanup)(void)); /* Task stuttering, which forces load/no-load transitions. */ bool stutter_wait(const char *title); -int torture_stutter_init(int s); +int torture_stutter_init(int s, int sgap); /* Initialization and cleanup. 
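 *
 * Editorial sketch, not part of this patch: a torture test module
 * typically brackets its setup with these helpers, roughly
 *
 *	if (!torture_init_begin(torture_type, verbose))
 *		return -EBUSY;
 *	// ... create kthreads, register cleanup callbacks ...
 *	torture_init_end();
 *
 * where torture_type and verbose are the module's own parameters.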
*/ bool torture_init_begin(char *ttype, int v); @@ -86,7 +86,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp); #define torture_stop_kthread(n, tp) \ _torture_stop_kthread("Stopping " #n " task", &(tp)) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION #define torture_preempt_schedule() preempt_schedule() #else #define torture_preempt_schedule() diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 81519f163211..131ea1bad458 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -112,4 +112,164 @@ struct tcg_pcr_event2_head { struct tpm_digest digests[]; } __packed; +struct tcg_algorithm_size { + u16 algorithm_id; + u16 algorithm_size; +}; + +struct tcg_algorithm_info { + u8 signature[16]; + u32 platform_class; + u8 spec_version_minor; + u8 spec_version_major; + u8 spec_errata; + u8 uintn_size; + u32 number_of_algorithms; + struct tcg_algorithm_size digest_sizes[]; +}; + +#ifndef TPM_MEMREMAP +#define TPM_MEMREMAP(start, size) NULL +#endif + +#ifndef TPM_MEMUNMAP +#define TPM_MEMUNMAP(start, size) do{} while(0) +#endif + +/** + * __calc_tpm2_event_size - calculate the size of a TPM2 event log entry + * @event: Pointer to the event whose size should be calculated + * @event_header: Pointer to the initial event containing the digest lengths + * @do_mapping: Whether or not the event needs to be mapped + * + * The TPM2 event log format can contain multiple digests corresponding to + * separate PCR banks, and also contains a variable length of the data that + * was measured. This requires knowledge of how long each digest type is, + * and this information is contained within the first event in the log. + * + * We calculate the length by examining the number of events, and then looking + * at each event in turn to determine how much space is used for events in + * total. Once we've done this we know the offset of the data length field, + * and can calculate the total size of the event. + * + * Return: size of the event on success, 0 on failure + */ + +static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, + struct tcg_pcr_event *event_header, + bool do_mapping) +{ + struct tcg_efi_specid_event_head *efispecid; + struct tcg_event_field *event_field; + void *mapping = NULL; + int mapping_size; + void *marker; + void *marker_start; + u32 halg_size; + size_t size; + u16 halg; + int i; + int j; + u32 count, event_type; + + marker = event; + marker_start = marker; + marker = marker + sizeof(event->pcr_idx) + sizeof(event->event_type) + + sizeof(event->count); + + /* Map the event header */ + if (do_mapping) { + mapping_size = marker - marker_start; + mapping = TPM_MEMREMAP((unsigned long)marker_start, + mapping_size); + if (!mapping) { + size = 0; + goto out; + } + } else { + mapping = marker_start; + } + + event = (struct tcg_pcr_event2_head *)mapping; + /* + * The loop below will unmap these fields if the log is larger than + * one page, so save them here for reference: + */ + count = READ_ONCE(event->count); + event_type = READ_ONCE(event->event_type); + + efispecid = (struct tcg_efi_specid_event_head *)event_header->event; + + /* Check if event is malformed. 
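+	 * (Editorial note: @count is the number of digests carried by this
+	 * event; it can never legitimately exceed the number of algorithms
+	 * declared by the spec ID event in the log header, so a larger
+	 * value is treated as corruption and the size is reported as 0.)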
*/ + if (count > efispecid->num_algs) { + size = 0; + goto out; + } + + for (i = 0; i < count; i++) { + halg_size = sizeof(event->digests[i].alg_id); + + /* Map the digest's algorithm identifier */ + if (do_mapping) { + TPM_MEMUNMAP(mapping, mapping_size); + mapping_size = halg_size; + mapping = TPM_MEMREMAP((unsigned long)marker, + mapping_size); + if (!mapping) { + size = 0; + goto out; + } + } else { + mapping = marker; + } + + memcpy(&halg, mapping, halg_size); + marker = marker + halg_size; + + for (j = 0; j < efispecid->num_algs; j++) { + if (halg == efispecid->digest_sizes[j].alg_id) { + marker += + efispecid->digest_sizes[j].digest_size; + break; + } + } + /* Algorithm without known length. Such event is unparseable. */ + if (j == efispecid->num_algs) { + size = 0; + goto out; + } + } + + /* + * Map the event size - we don't read from the event itself, so + * we don't need to map it + */ + if (do_mapping) { + TPM_MEMUNMAP(mapping, mapping_size); + mapping_size += sizeof(event_field->event_size); + mapping = TPM_MEMREMAP((unsigned long)marker, + mapping_size); + if (!mapping) { + size = 0; + goto out; + } + } else { + mapping = marker; + } + + event_field = (struct tcg_event_field *)mapping; + + marker = marker + sizeof(event_field->event_size) + + event_field->event_size; + size = marker - marker_start; + + if (event_type == 0 && event_field->event_size == 0) + size = 0; + +out: + if (do_mapping) + TPM_MEMUNMAP(mapping, mapping_size); + return size; +} + #endif diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 8a62731673f7..30a8cdcfd4a4 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -142,6 +142,7 @@ enum print_line_t { enum print_line_t trace_handle_return(struct trace_seq *s); void tracing_generic_entry_update(struct trace_entry *entry, + unsigned short type, unsigned long flags, int pc); struct trace_event_file; @@ -317,6 +318,14 @@ trace_event_name(struct trace_event_call *call) return call->name; } +static inline struct list_head * +trace_get_fields(struct trace_event_call *event_call) +{ + if (!event_call->class->get_fields) + return &event_call->class->fields; + return event_call->class->get_fields(event_call); +} + struct trace_array; struct trace_subsystem_dir; @@ -539,6 +548,7 @@ extern int trace_event_get_offsets(struct trace_event_call *call); #define is_signed_type(type) (((type)(-1)) < (type)1) +int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); int trace_set_clr_event(const char *system, const char *event, int set); /* diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 09d678433fc0..36fb3bbed6b2 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -54,13 +54,15 @@ struct linux_binprm; /* * ptrace report for syscall entry and exit looks identical. */ -static inline int ptrace_report_syscall(struct pt_regs *regs) +static inline int ptrace_report_syscall(struct pt_regs *regs, + unsigned long message) { int ptrace = current->ptrace; if (!(ptrace & PT_PTRACED)) return 0; + current->ptrace_message = message; ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 
0x80 : 0)); /* @@ -73,6 +75,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs) current->exit_code = 0; } + current->ptrace_message = 0; return fatal_signal_pending(current); } @@ -98,7 +101,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs) static inline __must_check int tracehook_report_syscall_entry( struct pt_regs *regs) { - return ptrace_report_syscall(regs); + return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY); } /** @@ -123,7 +126,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) if (step) user_single_step_report(regs); else - ptrace_report_syscall(regs); + ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT); } /** @@ -184,6 +187,13 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) if (unlikely(current->task_works)) task_work_run(); +#ifdef CONFIG_KEYS_REQUEST_CACHE + if (unlikely(current->cached_requested_key)) { + key_put(current->cached_requested_key); + current->cached_requested_key = NULL; + } +#endif + mem_cgroup_handle_over_high(); blkcg_maybe_throttle_current(); } diff --git a/include/linux/types.h b/include/linux/types.h index 231114ae38f4..05030f608be3 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -174,7 +174,7 @@ typedef struct { #ifdef CONFIG_64BIT typedef struct { - long counter; + s64 counter; } atomic64_t; #endif diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 2b70130af585..d4ee6e942562 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -55,7 +55,7 @@ * as usual) and both source and destination can trigger faults. */ -static __always_inline unsigned long +static __always_inline __must_check unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { kasan_check_write(to, n); @@ -63,7 +63,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) return raw_copy_from_user(to, from, n); } -static __always_inline unsigned long +static __always_inline __must_check unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_fault(); @@ -85,7 +85,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) * The caller should also make sure he pins the user space address * so that we don't result in page fault and sleep. 
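 *
 * Editorial note, not part of the original comment: the __must_check
 * annotations added here mean callers are expected to act on the
 * "bytes not copied" return value rather than ignore it, e.g.
 *
 *	if (copy_to_user(ubuf, &kdata, sizeof(kdata)))
 *		return -EFAULT;
 *
 * where ubuf and kdata are hypothetical caller variables.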
*/ -static __always_inline unsigned long +static __always_inline __must_check unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { kasan_check_read(from, n); @@ -93,7 +93,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) return raw_copy_to_user(to, from, n); } -static __always_inline unsigned long +static __always_inline __must_check unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); @@ -103,7 +103,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n) } #ifdef INLINE_COPY_FROM_USER -static inline unsigned long +static inline __must_check unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res = n; @@ -117,12 +117,12 @@ _copy_from_user(void *to, const void __user *from, unsigned long n) return res; } #else -extern unsigned long +extern __must_check unsigned long _copy_from_user(void *, const void __user *, unsigned long); #endif #ifdef INLINE_COPY_TO_USER -static inline unsigned long +static inline __must_check unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); @@ -133,7 +133,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n) return n; } #else -extern unsigned long +extern __must_check unsigned long _copy_to_user(void __user *, const void *, unsigned long); #endif @@ -203,7 +203,10 @@ static inline void pagefault_enable(void) /* * Is the pagefault handler disabled? If so, user access methods will not sleep. */ -#define pagefault_disabled() (current->pagefault_disabled != 0) +static inline bool pagefault_disabled(void) +{ + return current->pagefault_disabled != 0; +} /* * The pagefault handler is in general disabled by pagefault_disable() or @@ -219,14 +222,85 @@ static inline void pagefault_enable(void) #ifndef ARCH_HAS_NOCACHE_UACCESS -static inline unsigned long __copy_from_user_inatomic_nocache(void *to, - const void __user *from, unsigned long n) +static inline __must_check unsigned long +__copy_from_user_inatomic_nocache(void *to, const void __user *from, + unsigned long n) { return __copy_from_user_inatomic(to, from, n); } #endif /* ARCH_HAS_NOCACHE_UACCESS */ +extern __must_check int check_zeroed_user(const void __user *from, size_t size); + +/** + * copy_struct_from_user: copy a struct from userspace + * @dst: Destination address, in kernel space. This buffer must be @ksize + * bytes long. + * @ksize: Size of @dst struct. + * @src: Source address, in userspace. + * @usize: (Alleged) size of @src struct. + * + * Copies a struct from userspace to kernel space, in a way that guarantees + * backwards-compatibility for struct syscall arguments (as long as future + * struct extensions are made such that all new fields are *appended* to the + * old struct, and zeroed-out new fields have the same meaning as the old + * struct). + * + * @ksize is just sizeof(*dst), and @usize should've been passed by userspace. + * The recommended usage is something like the following: + * + * SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize) + * { + * int err; + * struct foo karg = {}; + * + * if (usize > PAGE_SIZE) + * return -E2BIG; + * if (usize < FOO_SIZE_VER0) + * return -EINVAL; + * + * err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize); + * if (err) + * return err; + * + * // ... + * } + * + * There are three cases to consider: + * * If @usize == @ksize, then it's copied verbatim. 
+ * * If @usize < @ksize, then the userspace has passed an old struct to a + * newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize) + * are to be zero-filled. + * * If @usize > @ksize, then the userspace has passed a new struct to an + * older kernel. The trailing bytes unknown to the kernel (@usize - @ksize) + * are checked to ensure they are zeroed, otherwise -E2BIG is returned. + * + * Returns (in all cases, some data may have been copied): + * * -E2BIG: (@usize > @ksize) and there are non-zero trailing bytes in @src. + * * -EFAULT: access to userspace failed. + */ +static __always_inline __must_check int +copy_struct_from_user(void *dst, size_t ksize, const void __user *src, + size_t usize) +{ + size_t size = min(ksize, usize); + size_t rest = max(ksize, usize) - size; + + /* Deal with trailing bytes. */ + if (usize < ksize) { + memset(dst + size, 0, rest); + } else if (usize > ksize) { + int ret = check_zeroed_user(src + size, rest); + if (ret <= 0) + return ret ?: -E2BIG; + } + /* Copy the interoperable parts of the struct. */ + if (copy_from_user(dst, src, size)) + return -EFAULT; + return 0; +} + /* * probe_kernel_read(): safely attempt to read from a location * @dst: pointer to the buffer that shall take the data @@ -240,6 +314,18 @@ extern long probe_kernel_read(void *dst, const void *src, size_t size); extern long __probe_kernel_read(void *dst, const void *src, size_t size); /* + * probe_user_read(): safely attempt to read from a location in user space + * @dst: pointer to the buffer that shall take the data + * @src: address to read from + * @size: size of the data chunk + * + * Safely read from address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. + */ +extern long probe_user_read(void *dst, const void __user *src, size_t size); +extern long __probe_user_read(void *dst, const void __user *src, size_t size); + +/* * probe_kernel_write(): safely attempt to write to a location * @dst: address to write to * @src: pointer to the data that shall be written @@ -252,6 +338,9 @@ extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); +extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, + long count); +extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); /** * probe_kernel_address(): safely attempt to read from a location @@ -266,8 +355,10 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); #ifndef user_access_begin #define user_access_begin(ptr,len) access_ok(ptr, len) #define user_access_end() do { } while (0) -#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) -#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) +#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) +#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e) +#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e) +#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e) static inline unsigned long user_access_save(void) { return 0UL; } static inline void user_access_restore(unsigned long flags) { } #endif diff --git a/include/linux/uio.h b/include/linux/uio.h index 2c90a0842ee8..ab5f523bc0df 100644 --- a/include/linux/uio.h +++ 
b/include/linux/uio.h @@ -19,9 +19,6 @@ struct kvec { }; enum iter_type { - /* set if ITER_BVEC doesn't hold a bv_page ref */ - ITER_BVEC_FLAG_NO_REF = 2, - /* iter types */ ITER_IOVEC = 4, ITER_KVEC = 8, @@ -56,7 +53,7 @@ struct iov_iter { static inline enum iter_type iov_iter_type(const struct iov_iter *i) { - return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF); + return i->type & ~(READ | WRITE); } static inline bool iter_is_iovec(const struct iov_iter *i) @@ -89,11 +86,6 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i) return i->type & (READ | WRITE); } -static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i) -{ - return (i->type & ITER_BVEC_FLAG_NO_REF) != 0; -} - /* * Total number of bytes covered by an iovec. * @@ -275,13 +267,13 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, struct iov_iter *i); -int import_iovec(int type, const struct iovec __user * uvector, +ssize_t import_iovec(int type, const struct iovec __user * uvector, unsigned nr_segs, unsigned fast_segs, struct iovec **iov, struct iov_iter *i); #ifdef CONFIG_COMPAT struct compat_iovec; -int compat_import_iovec(int type, const struct compat_iovec __user * uvector, +ssize_t compat_import_iovec(int type, const struct compat_iovec __user * uvector, unsigned nr_segs, unsigned fast_segs, struct iovec **iov, struct iov_iter *i); #endif diff --git a/include/linux/unicode.h b/include/linux/unicode.h index aec2c6d800aa..990aa97d8049 100644 --- a/include/linux/unicode.h +++ b/include/linux/unicode.h @@ -17,6 +17,9 @@ int utf8_strncmp(const struct unicode_map *um, int utf8_strncasecmp(const struct unicode_map *um, const struct qstr *s1, const struct qstr *s2); +int utf8_strncasecmp_folded(const struct unicode_map *um, + const struct qstr *cf, + const struct qstr *s1); int utf8_normalize(const struct unicode_map *um, const struct qstr *str, unsigned char *dest, size_t dlen); diff --git a/include/linux/usb.h b/include/linux/usb.h index ae82d9d1112b..e656e7b4b1e4 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -426,7 +426,6 @@ struct usb_bus { struct device *sysdev; /* as seen from firmware or bus */ int busnum; /* Bus number (in order of reg) */ const char *bus_name; /* stable id (PCI slot_name etc) */ - u8 uses_dma; /* Does the host controller use DMA? */ u8 uses_pio_for_control; /* * Does the host controller use PIO * for control transfers? @@ -578,6 +577,7 @@ struct usb3_lpm_parameters { * @bus_mA: Current available from the bus * @portnum: parent port number (origin 1) * @level: number of USB hub ancestors + * @devaddr: device address, XHCI: assigned by HW, others: same as devnum * @can_submit: URBs may be submitted * @persist_enabled: USB_PERSIST enabled for this device * @have_langid: whether string_langid is valid @@ -661,6 +661,7 @@ struct usb_device { unsigned short bus_mA; u8 portnum; u8 level; + u8 devaddr; unsigned can_submit:1; unsigned persist_enabled:1; @@ -1149,6 +1150,8 @@ struct usbdrv_wrap { * @id_table: USB drivers use ID table to support hotplugging. * Export this with MODULE_DEVICE_TABLE(usb,...). This must be set * or your driver's probe function will never get called. + * @dev_groups: Attributes attached to the device that will be created once it + * is bound to the driver. * @dynids: used internally to hold the list of dynamically added device * ids for this driver. * @drvwrap: Driver-model core structure wrapper. 
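 *
 * Editorial sketch, not part of this patch: given a hypothetical
 * static struct attribute *my_usb_attrs[] array, ATTRIBUTE_GROUPS(my_usb)
 * generates my_usb_groups, which can then be wired up so the driver
 * core creates the attributes when an interface is bound:
 *
 *	ATTRIBUTE_GROUPS(my_usb);
 *
 *	static struct usb_driver my_usb_driver = {
 *		.name		= "my_usb",
 *		.probe		= my_usb_probe,
 *		.disconnect	= my_usb_disconnect,
 *		.id_table	= my_usb_id_table,
 *		.dev_groups	= my_usb_groups,
 *	};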
@@ -1196,6 +1199,7 @@ struct usb_driver { int (*post_reset)(struct usb_interface *intf); const struct usb_device_id *id_table; + const struct attribute_group **dev_groups; struct usb_dynids dynids; struct usbdrv_wrap drvwrap; @@ -1219,6 +1223,8 @@ struct usb_driver { * module is being unloaded. * @suspend: Called when the device is going to be suspended by the system. * @resume: Called when the device is being resumed by the system. + * @dev_groups: Attributes attached to the device that will be created once it + * is bound to the driver. * @drvwrap: Driver-model core structure wrapper. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend * for devices bound to this driver. @@ -1233,6 +1239,7 @@ struct usb_device_driver { int (*suspend) (struct usb_device *udev, pm_message_t message); int (*resume) (struct usb_device *udev, pm_message_t message); + const struct attribute_group **dev_groups; struct usbdrv_wrap drvwrap; unsigned int supports_autosuspend:1; }; @@ -1455,7 +1462,7 @@ typedef void (*usb_complete_t)(struct urb *); * field rather than determining a dma address themselves. * * Note that transfer_buffer must still be set if the controller - * does not support DMA (as indicated by bus.uses_dma) and when talking + * does not support DMA (as indicated by hcd_uses_dma()) and when talking * to root hub. If you have to trasfer between highmem zone and the device * on such controller, create a bounce buffer or bail out with an error. * If transfer_buffer cannot be set (is in highmem) and the controller is DMA diff --git a/include/linux/usb/association.h b/include/linux/usb/association.h deleted file mode 100644 index d7f3cb9b9db5..000000000000 --- a/include/linux/usb/association.h +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB - Cable Based Association - * - * Copyright (C) 2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - */ -#ifndef __LINUX_USB_ASSOCIATION_H -#define __LINUX_USB_ASSOCIATION_H - - -/* - * Association attributes - * - * Association Models Supplement to WUSB 1.0 T[3-1] - * - * Each field in the structures has it's ID, it's length and then the - * value. This is the actual definition of the field's ID and its - * length. 
- */ -struct wusb_am_attr { - __u8 id; - __u8 len; -}; - -/* Different fields defined by the spec */ -#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) } -#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) } -#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) } -#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) } -#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) } -#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */ -#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */ -#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) } -#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) } -#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) } -#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) } - -/* CBAF Control Requests (AMS1.0[T4-1] */ -enum { - CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01, - CBAF_REQ_GET_ASSOCIATION_REQUEST, - CBAF_REQ_SET_ASSOCIATION_RESPONSE -}; - -/* - * CBAF USB-interface defitions - * - * No altsettings, one optional interrupt endpoint. - */ -enum { - CBAF_IFACECLASS = 0xef, - CBAF_IFACESUBCLASS = 0x03, - CBAF_IFACEPROTOCOL = 0x01, -}; - -/* Association Information (AMS1.0[T4-3]) */ -struct wusb_cbaf_assoc_info { - __le16 Length; - __u8 NumAssociationRequests; - __le16 Flags; - __u8 AssociationRequestsArray[]; -} __attribute__((packed)); - -/* Association Request (AMS1.0[T4-4]) */ -struct wusb_cbaf_assoc_request { - __u8 AssociationDataIndex; - __u8 Reserved; - __le16 AssociationTypeId; - __le16 AssociationSubTypeId; - __le32 AssociationTypeInfoSize; -} __attribute__((packed)); - -enum { - AR_TYPE_WUSB = 0x0001, - AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000, - AR_TYPE_WUSB_ASSOCIATE = 0x0001, -}; - -/* Association Attribute header (AMS1.0[3.8]) */ -struct wusb_cbaf_attr_hdr { - __le16 id; - __le16 len; -} __attribute__((packed)); - -/* Host Info (AMS1.0[T4-7]) (yeah, more headers and fields...) */ -struct wusb_cbaf_host_info { - struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; - __le16 AssociationTypeId; - struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; - __le16 AssociationSubTypeId; - struct wusb_cbaf_attr_hdr CHID_hdr; - struct wusb_ckhdid CHID; - struct wusb_cbaf_attr_hdr LangID_hdr; - __le16 LangID; - struct wusb_cbaf_attr_hdr HostFriendlyName_hdr; - __u8 HostFriendlyName[]; -} __attribute__((packed)); - -/* Device Info (AMS1.0[T4-8]) - * - * I still don't get this tag'n'header stuff for each goddamn - * field... 
- */ -struct wusb_cbaf_device_info { - struct wusb_cbaf_attr_hdr Length_hdr; - __le32 Length; - struct wusb_cbaf_attr_hdr CDID_hdr; - struct wusb_ckhdid CDID; - struct wusb_cbaf_attr_hdr BandGroups_hdr; - __le16 BandGroups; - struct wusb_cbaf_attr_hdr LangID_hdr; - __le16 LangID; - struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr; - __u8 DeviceFriendlyName[]; -} __attribute__((packed)); - -/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */ -struct wusb_cbaf_cc_data { - struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; - __le16 AssociationTypeId; - struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; - __le16 AssociationSubTypeId; - struct wusb_cbaf_attr_hdr Length_hdr; - __le32 Length; - struct wusb_cbaf_attr_hdr ConnectionContext_hdr; - struct wusb_ckhdid CHID; - struct wusb_ckhdid CDID; - struct wusb_ckhdid CK; - struct wusb_cbaf_attr_hdr BandGroups_hdr; - __le16 BandGroups; -} __attribute__((packed)); - -/* CC_DATA - Failure case (AMS1.0[T4-10]) */ -struct wusb_cbaf_cc_data_fail { - struct wusb_cbaf_attr_hdr AssociationTypeId_hdr; - __le16 AssociationTypeId; - struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr; - __le16 AssociationSubTypeId; - struct wusb_cbaf_attr_hdr Length_hdr; - __le16 Length; - struct wusb_cbaf_attr_hdr AssociationStatus_hdr; - __u32 AssociationStatus; -} __attribute__((packed)); - -#endif /* __LINUX_USB_ASSOCIATION_H */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index da82606be605..58b83066bea4 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -70,4 +70,31 @@ extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); */ extern const char *usb_state_string(enum usb_device_state state); +#ifdef CONFIG_TRACING +/** + * usb_decode_ctrl - Returns human readable representation of control request. + * @str: buffer to return a human-readable representation of control request. + * This buffer should have about 200 bytes. + * @size: size of str buffer. + * @bRequestType: matches the USB bmRequestType field + * @bRequest: matches the USB bRequest field + * @wValue: matches the USB wValue field (CPU byte order) + * @wIndex: matches the USB wIndex field (CPU byte order) + * @wLength: matches the USB wLength field (CPU byte order) + * + * Function returns decoded, formatted and human-readable description of + * control request packet. + * + * The usage scenario for this is for tracepoints, so function as a return + * use the same value as in parameters. This approach allows to use this + * function in TP_printk + * + * Important: wValue, wIndex, wLength parameters before invoking this function + * should be processed by le16_to_cpu macro. 
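+ *
+ * Editorial sketch, not part of this patch: outside of TP_printk()
+ * a caller with a struct usb_ctrlrequest *ctrl could use it roughly
+ * like this (buf is a hypothetical caller buffer):
+ *
+ *	char buf[200];
+ *
+ *	usb_decode_ctrl(buf, sizeof(buf), ctrl->bRequestType,
+ *			ctrl->bRequest, le16_to_cpu(ctrl->wValue),
+ *			le16_to_cpu(ctrl->wIndex),
+ *			le16_to_cpu(ctrl->wLength));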
+ */ +extern const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, + __u8 bRequest, __u16 wValue, __u16 wIndex, + __u16 wLength); +#endif + #endif /* __LINUX_USB_CH9_H */ diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index 911e05af671e..edd89b7c8f18 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -61,6 +61,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */ #define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13) #define CI_HDRC_IMX_IS_HSIC BIT(14) +#define CI_HDRC_PMQOS BIT(15) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 7595056b96c1..124462d65eac 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -291,6 +291,9 @@ struct usb_dcd_config_params { #define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */ __le16 bU2DevExitLat; /* U2 Device exit Latency */ #define USB_DEFAULT_U2_DEV_EXIT_LAT 0x1F4 /* Less then 500 microsec */ + __u8 besl_baseline; /* Recommended baseline BESL (0-15) */ + __u8 besl_deep; /* Recommended deep BESL (0-15) */ +#define USB_DEFAULT_BESL_UNSPECIFIED 0xFF /* No recommended value */ }; @@ -310,7 +313,8 @@ struct usb_gadget_ops { int (*pullup) (struct usb_gadget *, int is_on); int (*ioctl)(struct usb_gadget *, unsigned code, unsigned long param); - void (*get_config_params)(struct usb_dcd_config_params *); + void (*get_config_params)(struct usb_gadget *, + struct usb_dcd_config_params *); int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *); int (*udc_stop)(struct usb_gadget *); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index bb57b5af4700..712b2a603645 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -216,6 +216,9 @@ struct usb_hcd { #define HC_IS_RUNNING(state) ((state) & __ACTIVE) #define HC_IS_SUSPENDED(state) ((state) & __SUSPEND) + /* memory pool for HCs having local memory, or %NULL */ + struct gen_pool *localmem_pool; + /* more shared queuing code would be good; it should support * smarter scheduling, handle transaction translators, etc; * input size of periodic table to an interrupt scheduler. 
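 *
 * Editorial note, not part of this patch: an HCD whose controller only
 * reaches device-local memory is expected to fill @localmem_pool via
 * usb_hcd_setup_local_mem() (declared further down) and carve transfer
 * buffers out of it instead of using streaming DMA, e.g.
 *
 *	dma_addr_t dma;
 *	void *buf = gen_pool_dma_alloc(hcd->localmem_pool, len, &dma);
 *	// ... use buf ...
 *	gen_pool_free(hcd->localmem_pool, (unsigned long)buf, len);
 *
 * Such drivers leave the new HCD_DMA flag clear, so hcd_uses_dma()
 * returns false for them.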
@@ -253,7 +256,7 @@ struct hc_driver { int flags; #define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */ -#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */ +#define HCD_DMA 0x0002 /* HC uses DMA */ #define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */ #define HCD_USB11 0x0010 /* USB 1.1 */ #define HCD_USB2 0x0020 /* USB 2.0 */ @@ -420,6 +423,11 @@ static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd, return hcd->high_prio_bh.completing_ep == ep; } +static inline bool hcd_uses_dma(struct usb_hcd *hcd) +{ + return IS_ENABLED(CONFIG_HAS_DMA) && (hcd->driver->flags & HCD_DMA); +} + extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, int status); @@ -461,6 +469,8 @@ extern int usb_add_hcd(struct usb_hcd *hcd, unsigned int irqnum, unsigned long irqflags); extern void usb_remove_hcd(struct usb_hcd *hcd); extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1); +int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, + dma_addr_t dma, size_t size); struct platform_device; extern void usb_hcd_platform_shutdown(struct platform_device *dev); @@ -587,6 +597,10 @@ extern void usb_ep0_reinit(struct usb_device *); #define GetPortStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS) #define SetHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE) #define SetPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE) +#define ClearTTBuffer HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_CLEAR_TT_BUFFER) +#define ResetTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_RESET_TT) +#define GetTTState HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_TT_STATE) +#define StopTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_STOP_TT) /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 53924f8e840c..6914475bbc86 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -3,6 +3,7 @@ * Renesas USB * * Copyright (C) 2011 Renesas Solutions Corp. + * Copyright (C) 2019 Renesas Electronics Corporation * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> * * This program is distributed in the hope that it will be useful, @@ -33,17 +34,6 @@ enum { }; /* - * callback functions table for driver - * - * These functions are called from platform for driver. 
- * Callback function's pointer will be set before - * renesas_usbhs_platform_callback :: hardware_init was called - */ -struct renesas_usbhs_driver_callback { - int (*notify_hotplug)(struct platform_device *pdev); -}; - -/* * callback functions for platform * * These functions are called from driver for platform @@ -180,23 +170,20 @@ struct renesas_usbhs_driver_param { */ int pio_dma_border; /* default is 64byte */ - uintptr_t type; u32 enable_gpio; /* * option: */ - u32 has_otg:1; /* for controlling PWEN/EXTLP */ - u32 has_sudmac:1; /* for SUDMAC */ u32 has_usb_dmac:1; /* for USB-DMAC */ + u32 runtime_pwctrl:1; + u32 has_cnen:1; + u32 cfifo_byte_addr:1; /* CFIFO is byte addressable */ #define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */ + u32 multi_clks:1; + u32 has_new_pipe_configs:1; }; -#define USBHS_TYPE_RCAR_GEN2 1 -#define USBHS_TYPE_RCAR_GEN3 2 -#define USBHS_TYPE_RCAR_GEN3_WITH_PLL 3 -#define USBHS_TYPE_RZA1 4 - /* * option: * @@ -212,12 +199,6 @@ struct renesas_usbhs_platform_info { struct renesas_usbhs_platform_callback platform_callback; /* - * driver set these callback functions pointer. - * platform can use it on callback functions - */ - struct renesas_usbhs_driver_callback driver_callback; - - /* * option: * * driver use these param for some register @@ -230,12 +211,4 @@ struct renesas_usbhs_platform_info { */ #define renesas_usbhs_get_info(pdev)\ ((struct renesas_usbhs_platform_info *)(pdev)->dev.platform_data) - -#define renesas_usbhs_call_notify_hotplug(pdev) \ - ({ \ - struct renesas_usbhs_driver_callback *dc; \ - dc = &(renesas_usbhs_get_info(pdev)->driver_callback); \ - if (dc && dc->notify_hotplug) \ - dc->notify_hotplug(pdev); \ - }) #endif /* RENESAS_USB_H */ diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h index c05ffa6abda9..2d77f97df72d 100644 --- a/include/linux/usb/role.h +++ b/include/linux/usb/role.h @@ -42,14 +42,51 @@ struct usb_role_switch_desc { bool allow_userspace_control; }; + +#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH) int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role); enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw); struct usb_role_switch *usb_role_switch_get(struct device *dev); +struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *node); void usb_role_switch_put(struct usb_role_switch *sw); struct usb_role_switch * usb_role_switch_register(struct device *parent, const struct usb_role_switch_desc *desc); void usb_role_switch_unregister(struct usb_role_switch *sw); +#else +static inline int usb_role_switch_set_role(struct usb_role_switch *sw, + enum usb_role role) +{ + return 0; +} + +static inline enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw) +{ + return USB_ROLE_NONE; +} + +static inline struct usb_role_switch *usb_role_switch_get(struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct usb_role_switch * +fwnode_usb_role_switch_get(struct fwnode_handle *node) +{ + return ERR_PTR(-ENODEV); +} + +static inline void usb_role_switch_put(struct usb_role_switch *sw) { } + +static inline struct usb_role_switch * +usb_role_switch_register(struct device *parent, + const struct usb_role_switch_desc *desc) +{ + return ERR_PTR(-ENODEV); +} + +static inline void usb_role_switch_unregister(struct usb_role_switch *sw) { } +#endif #endif /* __LINUX_USB_ROLE_H */ diff --git a/include/linux/usb/samsung_usb_phy.h b/include/linux/usb/samsung_usb_phy.h deleted file mode 100644 index dc0071741695..000000000000 --- 
a/include/linux/usb/samsung_usb_phy.h +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2012 Samsung Electronics Co.Ltd - * http://www.samsung.com/ - * - * Defines phy types for samsung usb phy controllers - HOST or DEIVCE. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -enum samsung_usb_phy_type { - USB_PHY_TYPE_DEVICE, - USB_PHY_TYPE_HOST, -}; diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h index 43f40685e53c..873ace5b0cf8 100644 --- a/include/linux/usb/typec_mux.h +++ b/include/linux/usb/typec_mux.h @@ -3,54 +3,48 @@ #ifndef __USB_TYPEC_MUX #define __USB_TYPEC_MUX -#include <linux/list.h> #include <linux/usb/typec.h> struct device; +struct typec_mux; +struct typec_switch; +struct fwnode_handle; -/** - * struct typec_switch - USB Type-C cable orientation switch - * @dev: Switch device - * @entry: List entry - * @set: Callback to the driver for setting the orientation - * - * USB Type-C pin flipper switch routing the correct data pairs from the - * connector to the USB controller depending on the orientation of the cable - * plug. - */ -struct typec_switch { - struct device *dev; - struct list_head entry; - - int (*set)(struct typec_switch *sw, enum typec_orientation orientation); -}; +typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw, + enum typec_orientation orientation); -/** - * struct typec_switch - USB Type-C connector pin mux - * @dev: Mux device - * @entry: List entry - * @set: Callback to the driver for setting the state of the mux - * - * Pin Multiplexer/DeMultiplexer switch routing the USB Type-C connector pins to - * different components depending on the requested mode of operation. Used with - * Accessory/Alternate modes. 
- */ -struct typec_mux { - struct device *dev; - struct list_head entry; - - int (*set)(struct typec_mux *mux, int state); +struct typec_switch_desc { + struct fwnode_handle *fwnode; + typec_switch_set_fn_t set; + void *drvdata; }; struct typec_switch *typec_switch_get(struct device *dev); void typec_switch_put(struct typec_switch *sw); -int typec_switch_register(struct typec_switch *sw); +struct typec_switch * +typec_switch_register(struct device *parent, + const struct typec_switch_desc *desc); void typec_switch_unregister(struct typec_switch *sw); +void typec_switch_set_drvdata(struct typec_switch *sw, void *data); +void *typec_switch_get_drvdata(struct typec_switch *sw); + +typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux, int state); + +struct typec_mux_desc { + struct fwnode_handle *fwnode; + typec_mux_set_fn_t set; + void *drvdata; +}; + struct typec_mux * typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc); void typec_mux_put(struct typec_mux *mux); -int typec_mux_register(struct typec_mux *mux); +struct typec_mux * +typec_mux_register(struct device *parent, const struct typec_mux_desc *desc); void typec_mux_unregister(struct typec_mux *mux); +void typec_mux_set_drvdata(struct typec_mux *mux, void *data); +void *typec_mux_get_drvdata(struct typec_mux *mux); + #endif /* __USB_TYPEC_MUX */ diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h index 7189e3387bf9..20020c1336d5 100644 --- a/include/linux/usb/usb338x.h +++ b/include/linux/usb/usb338x.h @@ -113,7 +113,10 @@ struct usb338x_ll_regs { u32 ll_ltssm_ctrl1; u32 ll_ltssm_ctrl2; u32 ll_ltssm_ctrl3; - u32 unused[2]; + u32 unused1; + + /* 0x710 */ + u32 unused2; u32 ll_general_ctrl0; u32 ll_general_ctrl1; #define PM_U3_AUTO_EXIT 29 @@ -136,29 +139,41 @@ struct usb338x_ll_regs { u32 ll_general_ctrl2; #define SELECT_INVERT_LANE_POLARITY 7 #define FORCE_INVERT_LANE_POLARITY 6 + + /* 0x720 */ u32 ll_general_ctrl3; u32 ll_general_ctrl4; u32 ll_error_gen; -} __packed; + u32 unused3; + + /* 0x730 */ + u32 unused4[4]; -struct usb338x_ll_lfps_regs { - /* offset 0x748 */ + /* 0x740 */ + u32 unused5[2]; u32 ll_lfps_5; #define TIMER_LFPS_6US 16 u32 ll_lfps_6; #define TIMER_LFPS_80US 0 -} __packed; -struct usb338x_ll_tsn_regs { - /* offset 0x77C */ + /* 0x750 */ + u32 unused6[8]; + + /* 0x770 */ + u32 unused7[3]; u32 ll_tsn_counters_2; #define HOT_TX_NORESET_TS2 24 + + /* 0x780 */ u32 ll_tsn_counters_3; #define HOT_RX_RESET_TS2 0 -} __packed; + u32 unused8[3]; -struct usb338x_ll_chi_regs { - /* offset 0x79C */ + /* 0x790 */ + u32 unused9; + u32 ll_lfps_timers_2; +#define LFPS_TIMERS_2_WORKAROUND_VALUE 0x084d + u32 unused10; u32 ll_tsn_chicken_bit; #define RECOVERY_IDLE_TO_RECOVER_FMW 3 } __packed; diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h deleted file mode 100644 index 64a840b5106e..000000000000 --- a/include/linux/usb/wusb-wa.h +++ /dev/null @@ -1,304 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Wire Adapter constants and structures. - * - * Copyright (C) 2005-2006 Intel Corporation. - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * FIXME: docs - * FIXME: organize properly, group logically - * - * All the event structures are defined in uwb/spec.h, as they are - * common to the WHCI and WUSB radio control interfaces. - * - * References: - * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8 - */ -#ifndef __LINUX_USB_WUSB_WA_H -#define __LINUX_USB_WUSB_WA_H - -/** - * Radio Command Request for the Radio Control Interface - * - * Radio Control Interface command and event codes are the same as - * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_* - */ -enum { - WA_EXEC_RC_CMD = 40, /* Radio Control command Request */ -}; - -/* Wireless Adapter Requests ([WUSB] table 8-51) */ -enum { - WUSB_REQ_ADD_MMC_IE = 20, - WUSB_REQ_REMOVE_MMC_IE = 21, - WUSB_REQ_SET_NUM_DNTS = 22, - WUSB_REQ_SET_CLUSTER_ID = 23, - WUSB_REQ_SET_DEV_INFO = 24, - WUSB_REQ_GET_TIME = 25, - WUSB_REQ_SET_STREAM_IDX = 26, - WUSB_REQ_SET_WUSB_MAS = 27, - WUSB_REQ_CHAN_STOP = 28, -}; - - -/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */ -enum { - WUSB_TIME_ADJ = 0, - WUSB_TIME_BPST = 1, - WUSB_TIME_WUSB = 2, -}; - -enum { - WA_ENABLE = 0x01, - WA_RESET = 0x02, - RPIPE_PAUSE = 0x1, - RPIPE_STALL = 0x2, -}; - -/* Responses from Get Status request ([WUSB] section 8.3.1.6) */ -enum { - WA_STATUS_ENABLED = 0x01, - WA_STATUS_RESETTING = 0x02 -}; - -enum rpipe_crs { - RPIPE_CRS_CTL = 0x01, - RPIPE_CRS_ISO = 0x02, - RPIPE_CRS_BULK = 0x04, - RPIPE_CRS_INTR = 0x08 -}; - -/** - * RPipe descriptor ([WUSB] section 8.5.2.11) - * - * FIXME: explain rpipes - */ -struct usb_rpipe_descriptor { - u8 bLength; - u8 bDescriptorType; - __le16 wRPipeIndex; - __le16 wRequests; - __le16 wBlocks; /* rw if 0 */ - __le16 wMaxPacketSize; /* rw */ - union { - u8 dwa_bHSHubAddress; /* rw: DWA. */ - u8 hwa_bMaxBurst; /* rw: HWA. */ - }; - union { - u8 dwa_bHSHubPort; /* rw: DWA. */ - u8 hwa_bDeviceInfoIndex; /* rw: HWA. */ - }; - u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */ - union { - u8 dwa_bDeviceAddress; /* rw: DWA Target device address. */ - u8 hwa_reserved; /* rw: HWA. */ - }; - u8 bEndpointAddress; /* rw: Target EP address */ - u8 bDataSequence; /* ro: Current Data sequence */ - __le32 dwCurrentWindow; /* ro */ - u8 bMaxDataSequence; /* ro?: max supported seq */ - u8 bInterval; /* rw: */ - u8 bOverTheAirInterval; /* rw: */ - u8 bmAttribute; /* ro? */ - u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */ - u8 bmRetryOptions; /* rw? */ - __le16 wNumTransactionErrors; /* rw */ -} __attribute__ ((packed)); - -/** - * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4) - * - * These are the notifications coming on the notification endpoint of - * an HWA and a DWA. - */ -enum wa_notif_type { - DWA_NOTIF_RWAKE = 0x91, - DWA_NOTIF_PORTSTATUS = 0x92, - WA_NOTIF_TRANSFER = 0x93, - HWA_NOTIF_BPST_ADJ = 0x94, - HWA_NOTIF_DN = 0x95, -}; - -/** - * Wire Adapter notification header - * - * Notifications coming from a wire adapter use a common header - * defined in [WUSB] sections 8.4.5 & 8.5.4. - */ -struct wa_notif_hdr { - u8 bLength; - u8 bNotifyType; /* enum wa_notif_type */ -} __packed; - -/** - * HWA DN Received notification [(WUSB] section 8.5.4.2) - * - * The DNData is specified in WUSB1.0[7.6]. 
For each device - * notification we received, we just need to dispatch it. - * - * @dndata: this is really an array of notifications, but all start - * with the same header. - */ -struct hwa_notif_dn { - struct wa_notif_hdr hdr; - u8 bSourceDeviceAddr; /* from errata 2005/07 */ - u8 bmAttributes; - struct wusb_dn_hdr dndata[]; -} __packed; - -/* [WUSB] section 8.3.3 */ -enum wa_xfer_type { - WA_XFER_TYPE_CTL = 0x80, - WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */ - WA_XFER_TYPE_ISO = 0x82, - WA_XFER_RESULT = 0x83, - WA_XFER_ABORT = 0x84, - WA_XFER_ISO_PACKET_INFO = 0xA0, - WA_XFER_ISO_PACKET_STATUS = 0xA1, -}; - -/* [WUSB] section 8.3.3 */ -struct wa_xfer_hdr { - u8 bLength; /* 0x18 */ - u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */ - __le16 wRPipe; /* RPipe index */ - __le32 dwTransferID; /* Host-assigned ID */ - __le32 dwTransferLength; /* Length of data to xfer */ - u8 bTransferSegment; -} __packed; - -struct wa_xfer_ctl { - struct wa_xfer_hdr hdr; - u8 bmAttribute; - __le16 wReserved; - struct usb_ctrlrequest baSetupData; -} __packed; - -struct wa_xfer_bi { - struct wa_xfer_hdr hdr; - u8 bReserved; - __le16 wReserved; -} __packed; - -/* [WUSB] section 8.5.5 */ -struct wa_xfer_hwaiso { - struct wa_xfer_hdr hdr; - u8 bReserved; - __le16 wPresentationTime; - __le32 dwNumOfPackets; -} __packed; - -struct wa_xfer_packet_info_hwaiso { - __le16 wLength; - u8 bPacketType; - u8 bReserved; - __le16 PacketLength[0]; -} __packed; - -struct wa_xfer_packet_status_len_hwaiso { - __le16 PacketLength; - __le16 PacketStatus; -} __packed; - -struct wa_xfer_packet_status_hwaiso { - __le16 wLength; - u8 bPacketType; - u8 bReserved; - struct wa_xfer_packet_status_len_hwaiso PacketStatus[0]; -} __packed; - -/* [WUSB] section 8.3.3.5 */ -struct wa_xfer_abort { - u8 bLength; - u8 bRequestType; - __le16 wRPipe; /* RPipe index */ - __le32 dwTransferID; /* Host-assigned ID */ -} __packed; - -/** - * WA Transfer Complete notification ([WUSB] section 8.3.3.3) - * - */ -struct wa_notif_xfer { - struct wa_notif_hdr hdr; - u8 bEndpoint; - u8 Reserved; -} __packed; - -/** Transfer result basic codes [WUSB] table 8-15 */ -enum { - WA_XFER_STATUS_SUCCESS, - WA_XFER_STATUS_HALTED, - WA_XFER_STATUS_DATA_BUFFER_ERROR, - WA_XFER_STATUS_BABBLE, - WA_XFER_RESERVED, - WA_XFER_STATUS_NOT_FOUND, - WA_XFER_STATUS_INSUFFICIENT_RESOURCE, - WA_XFER_STATUS_TRANSACTION_ERROR, - WA_XFER_STATUS_ABORTED, - WA_XFER_STATUS_RPIPE_NOT_READY, - WA_XFER_INVALID_FORMAT, - WA_XFER_UNEXPECTED_SEGMENT_NUMBER, - WA_XFER_STATUS_RPIPE_TYPE_MISMATCH, -}; - -/** [WUSB] section 8.3.3.4 */ -struct wa_xfer_result { - struct wa_notif_hdr hdr; - __le32 dwTransferID; - __le32 dwTransferLength; - u8 bTransferSegment; - u8 bTransferStatus; - __le32 dwNumOfPackets; -} __packed; - -/** - * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7). - * - * NOTE: u16 fields are read Little Endian from the hardware. - * - * @bNumPorts is the original max number of devices that the host can - * connect; we might chop this so the stack can handle - * it. In case you need to access it, use wusbhc->ports_max - * if it is a Wireless USB WA. - */ -struct usb_wa_descriptor { - u8 bLength; - u8 bDescriptorType; - __le16 bcdWAVersion; - u8 bNumPorts; /* don't use!! 
*/ - u8 bmAttributes; /* Reserved == 0 */ - __le16 wNumRPipes; - __le16 wRPipeMaxBlock; - u8 bRPipeBlockSize; - u8 bPwrOn2PwrGood; - u8 bNumMMCIEs; - u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */ -} __packed; - -/** - * HWA Device Information Buffer (WUSB1.0[T8.54]) - */ -struct hwa_dev_info { - u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */ - u8 bDeviceAddress; - __le16 wPHYRates; - u8 bmDeviceAttribute; -} __packed; - -#endif /* #ifndef __LINUX_USB_WUSB_WA_H */ diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h deleted file mode 100644 index 65adee629106..000000000000 --- a/include/linux/usb/wusb.h +++ /dev/null @@ -1,362 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Wireless USB Standard Definitions - * Event Size Tables - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * FIXME: docs - * FIXME: organize properly, group logically - * - * All the event structures are defined in uwb/spec.h, as they are - * common to the WHCI and WUSB radio control interfaces. - */ - -#ifndef __WUSB_H__ -#define __WUSB_H__ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/uwb/spec.h> -#include <linux/usb/ch9.h> -#include <linux/param.h> - -/** - * WUSB Information Element header - * - * I don't know why, they decided to make it different to the MBOA MAC - * IE Header; beats me. - */ -struct wuie_hdr { - u8 bLength; - u8 bIEIdentifier; -} __attribute__((packed)); - -enum { - WUIE_ID_WCTA = 0x80, - WUIE_ID_CONNECTACK, - WUIE_ID_HOST_INFO, - WUIE_ID_CHANGE_ANNOUNCE, - WUIE_ID_DEVICE_DISCONNECT, - WUIE_ID_HOST_DISCONNECT, - WUIE_ID_KEEP_ALIVE = 0x89, - WUIE_ID_ISOCH_DISCARD, - WUIE_ID_RESET_DEVICE, -}; - -/** - * Maximum number of array elements in a WUSB IE. - * - * WUSB1.0[7.5 before table 7-38] says that in WUSB IEs that - * are "arrays" have to limited to 4 elements. So we define it - * like that to ease up and submit only the neeed size. - */ -#define WUIE_ELT_MAX 4 - -/** - * Wrapper for the data that defines a CHID, a CDID or a CK - * - * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of - * data. In order to avoid confusion and enforce types, we wrap it. - * - * Make it packed, as we use it in some hw definitions. - */ -struct wusb_ckhdid { - u8 data[16]; -} __attribute__((packed)); - -static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; - -#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) - -/** - * WUSB IE: Host Information (WUSB1.0[7.5.2]) - * - * Used to provide information about the host to the Wireless USB - * devices in range (CHID can be used as an ASCII string). 
- */ -struct wuie_host_info { - struct wuie_hdr hdr; - __le16 attributes; - struct wusb_ckhdid CHID; -} __attribute__((packed)); - -/** - * WUSB IE: Connect Ack (WUSB1.0[7.5.1]) - * - * Used to acknowledge device connect requests. See note for - * WUIE_ELT_MAX. - */ -struct wuie_connect_ack { - struct wuie_hdr hdr; - struct { - struct wusb_ckhdid CDID; - u8 bDeviceAddress; /* 0 means unused */ - u8 bReserved; - } blk[WUIE_ELT_MAX]; -} __attribute__((packed)); - -/** - * WUSB IE Host Information Element, Connect Availability - * - * WUSB1.0[7.5.2], bmAttributes description - */ -enum { - WUIE_HI_CAP_RECONNECT = 0, - WUIE_HI_CAP_LIMITED, - WUIE_HI_CAP_RESERVED, - WUIE_HI_CAP_ALL, -}; - -/** - * WUSB IE: Channel Stop (WUSB1.0[7.5.8]) - * - * Tells devices the host is going to stop sending MMCs and will disappear. - */ -struct wuie_channel_stop { - struct wuie_hdr hdr; - u8 attributes; - u8 timestamp[3]; -} __attribute__((packed)); - -/** - * WUSB IE: Keepalive (WUSB1.0[7.5.9]) - * - * Ask device(s) to send keepalives. - */ -struct wuie_keep_alive { - struct wuie_hdr hdr; - u8 bDeviceAddress[WUIE_ELT_MAX]; -} __attribute__((packed)); - -/** - * WUSB IE: Reset device (WUSB1.0[7.5.11]) - * - * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only - * use it for one at the time... - * - * In any case, this request is a wee bit silly: why don't they target - * by address?? - */ -struct wuie_reset { - struct wuie_hdr hdr; - struct wusb_ckhdid CDID; -} __attribute__((packed)); - -/** - * WUSB IE: Disconnect device (WUSB1.0[7.5.11]) - * - * Tell device to disconnect; we can fit 4 addresses, but we only use - * it for one at the time... - */ -struct wuie_disconnect { - struct wuie_hdr hdr; - u8 bDeviceAddress; - u8 padding; -} __attribute__((packed)); - -/** - * WUSB IE: Host disconnect ([WUSB] section 7.5.5) - * - * Tells all connected devices to disconnect. - */ -struct wuie_host_disconnect { - struct wuie_hdr hdr; -} __attribute__((packed)); - -/** - * WUSB Device Notification header (WUSB1.0[7.6]) - */ -struct wusb_dn_hdr { - u8 bType; - u8 notifdata[]; -} __attribute__((packed)); - -/** Device Notification codes (WUSB1.0[Table 7-54]) */ -enum WUSB_DN { - WUSB_DN_CONNECT = 0x01, - WUSB_DN_DISCONNECT = 0x02, - WUSB_DN_EPRDY = 0x03, - WUSB_DN_MASAVAILCHANGED = 0x04, - WUSB_DN_RWAKE = 0x05, - WUSB_DN_SLEEP = 0x06, - WUSB_DN_ALIVE = 0x07, -}; - -/** WUSB Device Notification Connect */ -struct wusb_dn_connect { - struct wusb_dn_hdr hdr; - __le16 attributes; - struct wusb_ckhdid CDID; -} __attribute__((packed)); - -static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn) -{ - return le16_to_cpu(dn->attributes) & 0xff; -} - -static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn) -{ - return (le16_to_cpu(dn->attributes) >> 8) & 0x1; -} - -static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn) -{ - return (le16_to_cpu(dn->attributes) >> 9) & 0x03; -} - -/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */ -struct wusb_dn_alive { - struct wusb_dn_hdr hdr; -} __attribute__((packed)); - -/** Device is disconnecting (WUSB1.0[7.6.2]) */ -struct wusb_dn_disconnect { - struct wusb_dn_hdr hdr; -} __attribute__((packed)); - -/* General constants */ -enum { - WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */ -}; - -/* - * WUSB Crypto stuff (WUSB1.0[6]) - */ - -extern const char *wusb_et_name(u8); - -/** - * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for - * the host or the device. 
- */ -static inline u8 wusb_key_index(int index, int type, int originator) -{ - return (originator << 6) | (type << 4) | index; -} - -#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */ -#define WUSB_KEY_INDEX_TYPE_ASSOC 1 -#define WUSB_KEY_INDEX_TYPE_GTK 2 -#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0 -#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1 -/* bits 0-3 used for the key index. */ -#define WUSB_KEY_INDEX_MAX 15 - -/* A CCM Nonce, defined in WUSB1.0[6.4.1] */ -struct aes_ccm_nonce { - u8 sfn[6]; /* Little Endian */ - u8 tkid[3]; /* LE */ - struct uwb_dev_addr dest_addr; - struct uwb_dev_addr src_addr; -} __attribute__((packed)); - -/* A CCM operation label, defined on WUSB1.0[6.5.x] */ -struct aes_ccm_label { - u8 data[14]; -} __attribute__((packed)); - -/* - * Input to the key derivation sequence defined in - * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the - * PRF function. - */ -struct wusb_keydvt_in { - u8 hnonce[16]; - u8 dnonce[16]; -} __attribute__((packed)); - -/* - * Output from the key derivation sequence defined in - * WUSB1.0[6.5.1]. - */ -struct wusb_keydvt_out { - u8 kck[16]; - u8 ptk[16]; -} __attribute__((packed)); - -/* Pseudo Random Function WUSB1.0[6.5] */ -extern int wusb_crypto_init(void); -extern void wusb_crypto_exit(void); -extern ssize_t wusb_prf(void *out, size_t out_size, - const u8 key[16], const struct aes_ccm_nonce *_n, - const struct aes_ccm_label *a, - const void *b, size_t blen, size_t len); - -static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16], - const struct aes_ccm_nonce *n, - const struct aes_ccm_label *a, - const void *b, size_t blen) -{ - return wusb_prf(out, out_size, key, n, a, b, blen, 64); -} - -static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16], - const struct aes_ccm_nonce *n, - const struct aes_ccm_label *a, - const void *b, size_t blen) -{ - return wusb_prf(out, out_size, key, n, a, b, blen, 128); -} - -static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16], - const struct aes_ccm_nonce *n, - const struct aes_ccm_label *a, - const void *b, size_t blen) -{ - return wusb_prf(out, out_size, key, n, a, b, blen, 256); -} - -/* Key derivation WUSB1.0[6.5.1] */ -static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out, - const u8 key[16], - const struct aes_ccm_nonce *n, - const struct wusb_keydvt_in *keydvt_in) -{ - const struct aes_ccm_label a = { .data = "Pair-wise keys" }; - return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a, - keydvt_in, sizeof(*keydvt_in)); -} - -/* - * Out-of-band MIC Generation WUSB1.0[6.5.2] - * - * Compute the MIC over @key, @n and @hs and place it in @mic_out. - * - * @mic_out: Where to place the 8 byte MIC tag - * @key: KCK from the derivation process - * @n: CCM nonce, n->sfn == 0, TKID as established in the - * process. - * @hs: Handshake struct for phase 2 of the 4-way. - * hs->bStatus and hs->bReserved are zero. - * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2] - * hs->dest_addr is the device's USB address padded with 0 - * hs->src_addr is the hosts's UWB device address - * hs->mic is ignored (as we compute that value). 
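A worked example of the key-index encoding above (the wrappers are purely illustrative and not part of the header): a host-originated PTK at index 1 encodes as (0 << 6) | (0 << 4) | 1 == 0x01, while a device-originated GTK at index 0 encodes as (1 << 6) | (2 << 4) | 0 == 0x60.

static inline u8 example_host_ptk_key_index(void)
{
        /* == 0x01 with the definitions above */
        return wusb_key_index(1, WUSB_KEY_INDEX_TYPE_PTK,
                              WUSB_KEY_INDEX_ORIGINATOR_HOST);
}

static inline u8 example_device_gtk_key_index(void)
{
        /* == 0x60 with the definitions above */
        return wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
                              WUSB_KEY_INDEX_ORIGINATOR_DEVICE);
}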
- */ -static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16], - const struct aes_ccm_nonce *n, - const struct usb_handshake *hs) -{ - const struct aes_ccm_label a = { .data = "out-of-bandMIC" }; - return wusb_prf_64(mic_out, 8, key, n, &a, - hs, sizeof(*hs) - sizeof(hs->MIC)); -} - -#endif /* #ifndef __WUSB_H__ */ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index d6b74b91096b..fb9f4f799554 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -64,10 +64,20 @@ struct user_namespace { struct ns_common ns; unsigned long flags; +#ifdef CONFIG_KEYS + /* List of joinable keyrings in this namespace. Modification access of + * these pointers is controlled by keyring_sem. Once + * user_keyring_register is set, it won't be changed, so it can be + * accessed directly with READ_ONCE(). + */ + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; +#endif + /* Register of per-UID persistent keyrings for this namespace */ #ifdef CONFIG_PERSISTENT_KEYRINGS struct key *persistent_keyring_register; - struct rw_semaphore persistent_keyring_register_sem; #endif struct work_struct work; #ifdef CONFIG_SYSCTL diff --git a/include/linux/uwb.h b/include/linux/uwb.h deleted file mode 100644 index 6918a61e1ac1..000000000000 --- a/include/linux/uwb.h +++ /dev/null @@ -1,817 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Ultra Wide Band - * UWB API - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * FIXME: doc: overview of the API, different parts and pointers - */ - -#ifndef __LINUX__UWB_H__ -#define __LINUX__UWB_H__ - -#include <linux/limits.h> -#include <linux/device.h> -#include <linux/mutex.h> -#include <linux/timer.h> -#include <linux/wait.h> -#include <linux/workqueue.h> -#include <linux/uwb/spec.h> -#include <asm/page.h> - -struct uwb_dev; -struct uwb_beca_e; -struct uwb_rc; -struct uwb_rsv; -struct uwb_dbg; - -/** - * struct uwb_dev - a UWB Device - * @rc: UWB Radio Controller that discovered the device (kind of its - * parent). - * @bce: a beacon cache entry for this device; or NULL if the device - * is a local radio controller. - * @mac_addr: the EUI-48 address of this device. - * @dev_addr: the current DevAddr used by this device. - * @beacon_slot: the slot number the beacon is using. - * @streams: bitmap of streams allocated to reservations targeted at - * this device. For an RC, this is the streams allocated for - * reservations targeted at DevAddrs. - * - * A UWB device may either by a neighbor or part of a local radio - * controller. - */ -struct uwb_dev { - struct mutex mutex; - struct list_head list_node; - struct device dev; - struct uwb_rc *rc; /* radio controller */ - struct uwb_beca_e *bce; /* Beacon Cache Entry */ - - struct uwb_mac_addr mac_addr; - struct uwb_dev_addr dev_addr; - int beacon_slot; - DECLARE_BITMAP(streams, UWB_NUM_STREAMS); - DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); -}; -#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) - -/** - * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs - * - * RC[CE]Bs have a 'context ID' field that matches the command with - * the event received to confirm it. 
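Going back to the include/linux/user_namespace.h hunk above: the new comment states that user_keyring_register never changes once set, so it may be read without taking keyring_sem. A hypothetical lock-free accessor, not part of the patch:

#include <linux/key.h>
#include <linux/user_namespace.h>

static struct key *peek_user_keyring_register(struct user_namespace *ns)
{
#ifdef CONFIG_KEYS
        return READ_ONCE(ns->user_keyring_register);    /* set-once pointer */
#else
        return NULL;
#endif
}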
- * - * Maximum number of context IDs - */ -enum { UWB_RC_CTX_MAX = 256 }; - - -/** Notification chain head for UWB generated events to listeners */ -struct uwb_notifs_chain { - struct list_head list; - struct mutex mutex; -}; - -/* Beacon cache list */ -struct uwb_beca { - struct list_head list; - size_t entries; - struct mutex mutex; -}; - -/* Event handling thread. */ -struct uwbd { - int pid; - struct task_struct *task; - wait_queue_head_t wq; - struct list_head event_list; - spinlock_t event_list_lock; -}; - -/** - * struct uwb_mas_bm - a bitmap of all MAS in a superframe - * @bm: a bitmap of length #UWB_NUM_MAS - */ -struct uwb_mas_bm { - DECLARE_BITMAP(bm, UWB_NUM_MAS); - DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); - int safe; - int unsafe; -}; - -/** - * uwb_rsv_state - UWB Reservation state. - * - * NONE - reservation is not active (no DRP IE being transmitted). - * - * Owner reservation states: - * - * INITIATED - owner has sent an initial DRP request. - * PENDING - target responded with pending Reason Code. - * MODIFIED - reservation manager is modifying an established - * reservation with a different MAS allocation. - * ESTABLISHED - the reservation has been successfully negotiated. - * - * Target reservation states: - * - * DENIED - request is denied. - * ACCEPTED - request is accepted. - * PENDING - PAL has yet to make a decision to whether to accept or - * deny. - * - * FIXME: further target states TBD. - */ -enum uwb_rsv_state { - UWB_RSV_STATE_NONE = 0, - UWB_RSV_STATE_O_INITIATED, - UWB_RSV_STATE_O_PENDING, - UWB_RSV_STATE_O_MODIFIED, - UWB_RSV_STATE_O_ESTABLISHED, - UWB_RSV_STATE_O_TO_BE_MOVED, - UWB_RSV_STATE_O_MOVE_EXPANDING, - UWB_RSV_STATE_O_MOVE_COMBINING, - UWB_RSV_STATE_O_MOVE_REDUCING, - UWB_RSV_STATE_T_ACCEPTED, - UWB_RSV_STATE_T_DENIED, - UWB_RSV_STATE_T_CONFLICT, - UWB_RSV_STATE_T_PENDING, - UWB_RSV_STATE_T_EXPANDING_ACCEPTED, - UWB_RSV_STATE_T_EXPANDING_CONFLICT, - UWB_RSV_STATE_T_EXPANDING_PENDING, - UWB_RSV_STATE_T_EXPANDING_DENIED, - UWB_RSV_STATE_T_RESIZED, - - UWB_RSV_STATE_LAST, -}; - -enum uwb_rsv_target_type { - UWB_RSV_TARGET_DEV, - UWB_RSV_TARGET_DEVADDR, -}; - -/** - * struct uwb_rsv_target - the target of a reservation. - * - * Reservations unicast and targeted at a single device - * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a - * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR). - */ -struct uwb_rsv_target { - enum uwb_rsv_target_type type; - union { - struct uwb_dev *dev; - struct uwb_dev_addr devaddr; - }; -}; - -struct uwb_rsv_move { - struct uwb_mas_bm final_mas; - struct uwb_ie_drp *companion_drp_ie; - struct uwb_mas_bm companion_mas; -}; - -/* - * Number of streams reserved for reservations targeted at DevAddrs. 
- */ -#define UWB_NUM_GLOBAL_STREAMS 1 - -typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv); - -/** - * struct uwb_rsv - a DRP reservation - * - * Data structure management: - * - * @rc: the radio controller this reservation is for - * (as target or owner) - * @rc_node: a list node for the RC - * @pal_node: a list node for the PAL - * - * Owner and target parameters: - * - * @owner: the UWB device owning this reservation - * @target: the target UWB device - * @type: reservation type - * - * Owner parameters: - * - * @max_mas: maxiumum number of MAS - * @min_mas: minimum number of MAS - * @sparsity: owner selected sparsity - * @is_multicast: true iff multicast - * - * @callback: callback function when the reservation completes - * @pal_priv: private data for the PAL making the reservation - * - * Reservation status: - * - * @status: negotiation status - * @stream: stream index allocated for this reservation - * @tiebreaker: conflict tiebreaker for this reservation - * @mas: reserved MAS - * @drp_ie: the DRP IE - * @ie_valid: true iff the DRP IE matches the reservation parameters - * - * DRP reservations are uniquely identified by the owner, target and - * stream index. However, when using a DevAddr as a target (e.g., for - * a WUSB cluster reservation) the responses may be received from - * devices with different DevAddrs. In this case, reservations are - * uniquely identified by just the stream index. A number of stream - * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this. - */ -struct uwb_rsv { - struct uwb_rc *rc; - struct list_head rc_node; - struct list_head pal_node; - struct kref kref; - - struct uwb_dev *owner; - struct uwb_rsv_target target; - enum uwb_drp_type type; - int max_mas; - int min_mas; - int max_interval; - bool is_multicast; - - uwb_rsv_cb_f callback; - void *pal_priv; - - enum uwb_rsv_state state; - bool needs_release_companion_mas; - u8 stream; - u8 tiebreaker; - struct uwb_mas_bm mas; - struct uwb_ie_drp *drp_ie; - struct uwb_rsv_move mv; - bool ie_valid; - struct timer_list timer; - struct work_struct handle_timeout_work; -}; - -static const -struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } }; - -static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas) -{ - bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS); -} - -/** - * struct uwb_drp_avail - a radio controller's view of MAS usage - * @global: MAS unused by neighbors (excluding reservations targeted - * or owned by the local radio controller) or the beaon period - * @local: MAS unused by local established reservations - * @pending: MAS unused by local pending reservations - * @ie: DRP Availability IE to be included in the beacon - * @ie_valid: true iff @ie is valid and does not need to regenerated from - * @global and @local - * - * Each radio controller maintains a view of MAS usage or - * availability. MAS available for a new reservation are determined - * from the intersection of @global, @local, and @pending. - * - * The radio controller must transmit a DRP Availability IE that's the - * intersection of @global and @local. - * - * A set bit indicates the MAS is unused and available. - * - * rc->rsvs_mutex should be held before accessing this data structure. - * - * [ECMA-368] section 17.4.3. 
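To make the owner-side flow concrete, here is a hypothetical sketch tying struct uwb_rsv together with the uwb_rsv_create()/uwb_rsv_establish() calls declared a little further below; the callback, the helper and the MAS numbers are invented for illustration:

static void my_rsv_cb(struct uwb_rsv *rsv)
{
        if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) {
                /* negotiation done: rsv->mas now holds the reserved MAS */
        }
}

static int my_reserve(struct uwb_rc *rc, struct uwb_dev *peer, void *pal_priv)
{
        struct uwb_rsv *rsv = uwb_rsv_create(rc, my_rsv_cb, pal_priv);

        if (!rsv)
                return -ENOMEM;
        rsv->target.type  = UWB_RSV_TARGET_DEV;
        rsv->target.dev   = peer;
        rsv->type         = UWB_DRP_TYPE_HARD;  /* from uwb/spec.h, below */
        rsv->max_mas      = 64;                 /* illustrative numbers */
        rsv->min_mas      = 16;
        rsv->is_multicast = false;
        return uwb_rsv_establish(rsv);          /* my_rsv_cb reports progress */
}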
- */ -struct uwb_drp_avail { - DECLARE_BITMAP(global, UWB_NUM_MAS); - DECLARE_BITMAP(local, UWB_NUM_MAS); - DECLARE_BITMAP(pending, UWB_NUM_MAS); - struct uwb_ie_drp_avail ie; - bool ie_valid; -}; - -struct uwb_drp_backoff_win { - u8 window; - u8 n; - int total_expired; - struct timer_list timer; - bool can_reserve_extra_mases; -}; - -const char *uwb_rsv_state_str(enum uwb_rsv_state state); -const char *uwb_rsv_type_str(enum uwb_drp_type type); - -struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, - void *pal_priv); -void uwb_rsv_destroy(struct uwb_rsv *rsv); - -int uwb_rsv_establish(struct uwb_rsv *rsv); -int uwb_rsv_modify(struct uwb_rsv *rsv, - int max_mas, int min_mas, int sparsity); -void uwb_rsv_terminate(struct uwb_rsv *rsv); - -void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); - -void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); - -/** - * Radio Control Interface instance - * - * - * Life cycle rules: those of the UWB Device. - * - * @index: an index number for this radio controller, as used in the - * device name. - * @version: version of protocol supported by this device - * @priv: Backend implementation; rw with uwb_dev.dev.sem taken. - * @cmd: Backend implementation to execute commands; rw and call - * only with uwb_dev.dev.sem taken. - * @reset: Hardware reset of radio controller and any PAL controllers. - * @filter: Backend implementation to manipulate data to and from device - * to be compliant to specification assumed by driver (WHCI - * 0.95). - * - * uwb_dev.dev.mutex is used to execute commands and update - * the corresponding structures; can't use a spinlock - * because rc->cmd() can sleep. - * @ies: This is a dynamically allocated array cacheing the - * IEs (settable by the host) that the beacon of this - * radio controller is currently sending. - * - * In reality, we store here the full command we set to - * the radio controller (which is basically a command - * prefix followed by all the IEs the beacon currently - * contains). This way we don't have to realloc and - * memcpy when setting it. - * - * We set this up in uwb_rc_ie_setup(), where we alloc - * this struct, call get_ie() [so we know which IEs are - * currently being sent, if any]. - * - * @ies_capacity:Amount of space (in bytes) allocated in @ies. The - * amount used is given by sizeof(*ies) plus ies->wIELength - * (which is a little endian quantity all the time). 
- * @ies_mutex: protect the IE cache - * @dbg: information for the debug interface - */ -struct uwb_rc { - struct uwb_dev uwb_dev; - int index; - u16 version; - - struct module *owner; - void *priv; - int (*start)(struct uwb_rc *rc); - void (*stop)(struct uwb_rc *rc); - int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t); - int (*reset)(struct uwb_rc *rc); - int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *); - int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t, - size_t *, size_t *); - - spinlock_t neh_lock; /* protects neh_* and ctx_* */ - struct list_head neh_list; /* Open NE handles */ - unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)]; - u8 ctx_roll; - - int beaconing; /* Beaconing state [channel number] */ - int beaconing_forced; - int scanning; - enum uwb_scan_type scan_type:3; - unsigned ready:1; - struct uwb_notifs_chain notifs_chain; - struct uwb_beca uwb_beca; - - struct uwbd uwbd; - - struct uwb_drp_backoff_win bow; - struct uwb_drp_avail drp_avail; - struct list_head reservations; - struct list_head cnflt_alien_list; - struct uwb_mas_bm cnflt_alien_bitmap; - struct mutex rsvs_mutex; - spinlock_t rsvs_lock; - struct workqueue_struct *rsv_workq; - - struct delayed_work rsv_update_work; - struct delayed_work rsv_alien_bp_work; - int set_drp_ie_pending; - struct mutex ies_mutex; - struct uwb_rc_cmd_set_ie *ies; - size_t ies_capacity; - - struct list_head pals; - int active_pals; - - struct uwb_dbg *dbg; -}; - - -/** - * struct uwb_pal - a UWB PAL - * @name: descriptive name for this PAL (wusbhc, wlp, etc.). - * @device: a device for the PAL. Used to link the PAL and the radio - * controller in sysfs. - * @rc: the radio controller the PAL uses. - * @channel_changed: called when the channel used by the radio changes. - * A channel of -1 means the channel has been stopped. - * @new_rsv: called when a peer requests a reservation (may be NULL if - * the PAL cannot accept reservation requests). - * @channel: channel being used by the PAL; 0 if the PAL isn't using - * the radio; -1 if the PAL wishes to use the radio but - * cannot. - * @debugfs_dir: a debugfs directory which the PAL can use for its own - * debugfs files. - * - * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB - * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). - * - * The PALs using a radio controller must register themselves to - * permit the UWB stack to coordinate usage of the radio between the - * various PALs or to allow PALs to response to certain requests from - * peers. - * - * A struct uwb_pal should be embedded in a containing structure - * belonging to the PAL and initialized with uwb_pal_init()). Fields - * should be set appropriately by the PAL before registering the PAL - * with uwb_pal_register(). 
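As a concrete, entirely hypothetical illustration of the registration sequence described above, a PAL embeds the structure defined just below in its own state, fills the fields, and then calls uwb_pal_register(); everything prefixed my_ is invented:

struct my_pal {
        struct uwb_pal pal;
        /* ... PAL-private state ... */
};

static void my_pal_channel_changed(struct uwb_pal *pal, int channel)
{
        struct my_pal *mp = container_of(pal, struct my_pal, pal);

        if (channel == -1) {
                /* the radio has stopped; quiesce mp accordingly */
        }
}

static int my_pal_start(struct my_pal *mp, struct uwb_rc *rc)
{
        uwb_pal_init(&mp->pal);
        mp->pal.name            = "my_pal";
        mp->pal.rc              = rc;
        mp->pal.channel_changed = my_pal_channel_changed;
        return uwb_pal_register(&mp->pal);
}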
- */ -struct uwb_pal { - struct list_head node; - const char *name; - struct device *device; - struct uwb_rc *rc; - - void (*channel_changed)(struct uwb_pal *pal, int channel); - void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv); - - int channel; - struct dentry *debugfs_dir; -}; - -void uwb_pal_init(struct uwb_pal *pal); -int uwb_pal_register(struct uwb_pal *pal); -void uwb_pal_unregister(struct uwb_pal *pal); - -int uwb_radio_start(struct uwb_pal *pal); -void uwb_radio_stop(struct uwb_pal *pal); - -/* - * General public API - * - * This API can be used by UWB device drivers or by those implementing - * UWB Radio Controllers - */ -struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, - const struct uwb_dev_addr *devaddr); -struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *); -static inline void uwb_dev_get(struct uwb_dev *uwb_dev) -{ - get_device(&uwb_dev->dev); -} -static inline void uwb_dev_put(struct uwb_dev *uwb_dev) -{ - put_device(&uwb_dev->dev); -} -struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev); - -/** - * Callback function for 'uwb_{dev,rc}_foreach()'. - * - * @dev: Linux device instance - * 'uwb_dev = container_of(dev, struct uwb_dev, dev)' - * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'. - * - * @returns: 0 to continue the iterations, any other val to stop - * iterating and return the value to the caller of - * _foreach(). - */ -typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv); -int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv); - -struct uwb_rc *uwb_rc_alloc(void); -struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *); -struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *); -void uwb_rc_put(struct uwb_rc *rc); - -typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg, - struct uwb_rceb *reply, ssize_t reply_size); - -int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - u8 expected_type, u16 expected_event, - uwb_rc_cmd_cb_f cb, void *arg); -ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - struct uwb_rceb *reply, size_t reply_size); -ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, - struct uwb_rccb *cmd, size_t cmd_size, - u8 expected_type, u16 expected_event, - struct uwb_rceb **preply); - -size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); - -int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *); -int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *); -int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *); -int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *); -int __uwb_mac_addr_assigned_check(struct device *, void *); -int __uwb_dev_addr_assigned_check(struct device *, void *); - -/* Print in @buf a pretty repr of @addr */ -static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size, - const struct uwb_dev_addr *addr) -{ - return __uwb_addr_print(buf, buf_size, addr->data, 0); -} - -/* Print in @buf a pretty repr of @addr */ -static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size, - const struct uwb_mac_addr *addr) -{ - return __uwb_addr_print(buf, buf_size, addr->data, 1); -} - -/* @returns 0 if device addresses @addr2 and @addr1 are equal */ -static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1, - const struct uwb_dev_addr *addr2) -{ - return memcmp(addr1, addr2, sizeof(*addr1)); -} - -/* @returns 0 if MAC 
addresses @addr2 and @addr1 are equal */ -static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1, - const struct uwb_mac_addr *addr2) -{ - return memcmp(addr1, addr2, sizeof(*addr1)); -} - -/* @returns !0 if a MAC @addr is a broadcast address */ -static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr) -{ - struct uwb_mac_addr bcast = { - .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } - }; - return !uwb_mac_addr_cmp(addr, &bcast); -} - -/* @returns !0 if a MAC @addr is all zeroes*/ -static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr) -{ - struct uwb_mac_addr unset = { - .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } - }; - return !uwb_mac_addr_cmp(addr, &unset); -} - -/* @returns !0 if the address is in use. */ -static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc, - struct uwb_dev_addr *addr) -{ - return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr); -} - -/* - * UWB Radio Controller API - * - * This API is used (in addition to the general API) to implement UWB - * Radio Controllers. - */ -void uwb_rc_init(struct uwb_rc *); -int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv); -void uwb_rc_rm(struct uwb_rc *); -void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); -void uwb_rc_neh_error(struct uwb_rc *, int); -void uwb_rc_reset_all(struct uwb_rc *rc); -void uwb_rc_pre_reset(struct uwb_rc *rc); -int uwb_rc_post_reset(struct uwb_rc *rc); - -/** - * uwb_rsv_is_owner - is the owner of this reservation the RC? - * @rsv: the reservation - */ -static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) -{ - return rsv->owner == &rsv->rc->uwb_dev; -} - -/** - * enum uwb_notifs - UWB events that can be passed to any listeners - * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group. - * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group. - * - * Higher layers can register callback functions with the radio - * controller using uwb_notifs_register(). The radio controller - * maintains a list of all registered handlers and will notify all - * nodes when an event occurs. - */ -enum uwb_notifs { - UWB_NOTIF_ONAIR, - UWB_NOTIF_OFFAIR, -}; - -/* Callback function registered with UWB */ -struct uwb_notifs_handler { - struct list_head list_node; - void (*cb)(void *, struct uwb_dev *, enum uwb_notifs); - void *data; -}; - -int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *); -int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *); - - -/** - * UWB radio controller Event Size Entry (for creating entry tables) - * - * WUSB and WHCI define events and notifications, and they might have - * fixed or variable size. - * - * Each event/notification has a size which is not necessarily known - * in advance based on the event code. As well, vendor specific - * events/notifications will have a size impossible to determine - * unless we know about the device's specific details. - * - * It was way too smart of the spec writers not to think that it would - * be impossible for a generic driver to skip over vendor specific - * events/notifications if there are no LENGTH fields in the HEADER of - * each message...the transaction size cannot be counted on as the - * spec does not forbid to pack more than one event in a single - * transaction. - * - * Thus, we guess sizes with tables (or for events, when you know the - * size ahead of time you can use uwb_rc_neh_extra_size*()). We - * register tables with the known events and their sizes, and then we - * traverse those tables. 
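A short sketch of a listener for the notification chain described above; the callback and its registration site are hypothetical:

static void my_uwb_notif_cb(void *data, struct uwb_dev *uwb_dev,
                            enum uwb_notifs event)
{
        if (event == UWB_NOTIF_ONAIR) {
                /* a new neighbour has joined the beacon group */
        }
}

static struct uwb_notifs_handler my_uwb_notif_handler = {
        .cb   = my_uwb_notif_cb,
        .data = NULL,
};

/* somewhere during setup: uwb_notifs_register(rc, &my_uwb_notif_handler); */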
For those with variable length, we provide a - * way to lookup the size inside the event/notification's - * payload. This allows device-specific event size tables to be - * registered. - * - * @size: Size of the payload - * - * @offset: if != 0, at offset @offset-1 starts a field with a length - * that has to be added to @size. The format of the field is - * given by @type. - * - * @type: Type and length of the offset field. Most common is LE 16 - * bits (that's why that is zero); others are there mostly to - * cover for bugs and weirdos. - */ -struct uwb_est_entry { - size_t size; - unsigned offset; - enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type; -}; - -int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product, - const struct uwb_est_entry *, size_t entries); -int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product, - const struct uwb_est_entry *, size_t entries); -ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, - size_t len); - -/* -- Misc */ - -enum { - EDC_MAX_ERRORS = 10, - EDC_ERROR_TIMEFRAME = HZ, -}; - -/* error density counter */ -struct edc { - unsigned long timestart; - u16 errorcount; -}; - -static inline -void edc_init(struct edc *edc) -{ - edc->timestart = jiffies; -} - -/* Called when an error occurred. - * This is way to determine if the number of acceptable errors per time - * period has been exceeded. It is not accurate as there are cases in which - * this scheme will not work, for example if there are periodic occurrences - * of errors that straddle updates to the start time. This scheme is - * sufficient for our usage. - * - * @returns 1 if maximum acceptable errors per timeframe has been exceeded. - */ -static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe) -{ - unsigned long now; - - now = jiffies; - if (now - err_hist->timestart > timeframe) { - err_hist->errorcount = 1; - err_hist->timestart = now; - } else if (++err_hist->errorcount > max_err) { - err_hist->errorcount = 0; - err_hist->timestart = now; - return 1; - } - return 0; -} - - -/* Information Element handling */ - -struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); -int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size); -int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id); - -/* - * Transmission statistics - * - * UWB uses LQI and RSSI (one byte values) for reporting radio signal - * strength and line quality indication. We do quick and dirty - * averages of those. They are signed values, btw. - * - * For 8 bit quantities, we keep the min, the max, an accumulator - * (@sigma) and a # of samples. When @samples gets to 255, we compute - * the average (@sigma / @samples), place it in @sigma and reset - * @samples to 1 (so we use it as the first sample). - * - * Now, statistically speaking, probably I am kicking the kidneys of - * some books I have in my shelves collecting dust, but I just want to - * get an approx, not the Nobel. - * - * LOCKING: there is no locking per se, but we try to keep a lockless - * schema. Only _add_samples() modifies the values--as long as you - * have other locking on top that makes sure that no two calls of - * _add_sample() happen at the same time, then we are fine. Now, for - * resetting the values we just set @samples to 0 and that makes the - * next _add_sample() to start with defaults. Reading the values in - * _show() currently can race, so you need to make sure the calls are - * under the same lock that protects calls to _add_sample(). 
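Usage of the error-density counter above is easiest to see with a small hypothetical caller: initialise once with edc_init(), call edc_inc() on every error, and give up when it reports that the acceptable error rate for the timeframe has been exceeded.

static struct edc my_err_hist;          /* edc_init(&my_err_hist) at setup */

static int my_report_error(void)
{
        if (edc_inc(&my_err_hist, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
                return -EIO;    /* too many errors within one timeframe */
        return 0;               /* still within the acceptable error rate */
}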
FIXME: - * currently unlocked (It is not ultraprecise but does the trick. Bite - * me). - */ -struct stats { - s8 min, max; - s16 sigma; - atomic_t samples; -}; - -static inline -void stats_init(struct stats *stats) -{ - atomic_set(&stats->samples, 0); - wmb(); -} - -static inline -void stats_add_sample(struct stats *stats, s8 sample) -{ - s8 min, max; - s16 sigma; - unsigned samples = atomic_read(&stats->samples); - if (samples == 0) { /* it was zero before, so we initialize */ - min = 127; - max = -128; - sigma = 0; - } else { - min = stats->min; - max = stats->max; - sigma = stats->sigma; - } - - if (sample < min) /* compute new values */ - min = sample; - else if (sample > max) - max = sample; - sigma += sample; - - stats->min = min; /* commit */ - stats->max = max; - stats->sigma = sigma; - if (atomic_add_return(1, &stats->samples) > 255) { - /* wrapped around! reset */ - stats->sigma = sigma / 256; - atomic_set(&stats->samples, 1); - } -} - -static inline ssize_t stats_show(struct stats *stats, char *buf) -{ - int min, max, avg; - int samples = atomic_read(&stats->samples); - if (samples == 0) - min = max = avg = 0; - else { - min = stats->min; - max = stats->max; - avg = stats->sigma / samples; - } - return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg); -} - -static inline ssize_t stats_store(struct stats *stats, const char *buf, - size_t size) -{ - stats_init(stats); - return size; -} - -#endif /* #ifndef __LINUX__UWB_H__ */ diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h deleted file mode 100644 index f97db6c3bcc0..000000000000 --- a/include/linux/uwb/debug-cmd.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Ultra Wide Band - * Debug interface commands - * - * Copyright (C) 2008 Cambridge Silicon Radio Ltd. - */ -#ifndef __LINUX__UWB__DEBUG_CMD_H__ -#define __LINUX__UWB__DEBUG_CMD_H__ - -#include <linux/types.h> - -/* - * Debug interface commands - * - * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation. - * - * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation. - */ - -enum uwb_dbg_cmd_type { - UWB_DBG_CMD_RSV_ESTABLISH = 1, - UWB_DBG_CMD_RSV_TERMINATE = 2, - UWB_DBG_CMD_IE_ADD = 3, - UWB_DBG_CMD_IE_RM = 4, - UWB_DBG_CMD_RADIO_START = 5, - UWB_DBG_CMD_RADIO_STOP = 6, -}; - -struct uwb_dbg_cmd_rsv_establish { - __u8 target[6]; - __u8 type; - __u16 max_mas; - __u16 min_mas; - __u8 max_interval; -}; - -struct uwb_dbg_cmd_rsv_terminate { - int index; -}; - -struct uwb_dbg_cmd_ie { - __u8 data[128]; - int len; -}; - -struct uwb_dbg_cmd { - __u32 type; - union { - struct uwb_dbg_cmd_rsv_establish rsv_establish; - struct uwb_dbg_cmd_rsv_terminate rsv_terminate; - struct uwb_dbg_cmd_ie ie_add; - struct uwb_dbg_cmd_ie ie_rm; - }; -}; - -#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */ diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h deleted file mode 100644 index 5f75caf7b4ba..000000000000 --- a/include/linux/uwb/spec.h +++ /dev/null @@ -1,767 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Ultra Wide Band - * UWB Standard definitions - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * All these definitions are based on the ECMA-368 standard. - * - * Note all definitions are Little Endian in the wire, and we will - * convert them to host order before operating on the bitfields (that - * yes, we use extensively). 
- */ - -#ifndef __LINUX__UWB_SPEC_H__ -#define __LINUX__UWB_SPEC_H__ - -#include <linux/types.h> -#include <linux/bitmap.h> -#include <linux/if_ether.h> - -#define i1480_FW 0x00000303 -/* #define i1480_FW 0x00000302 */ - -/** - * Number of Medium Access Slots in a superframe. - * - * UWB divides time in SuperFrames, each one divided in 256 pieces, or - * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the - * basic bandwidth allocation unit in UWB. - */ -enum { UWB_NUM_MAS = 256 }; - -/** - * Number of Zones in superframe. - * - * UWB divides the superframe into zones with numbering starting from BPST. - * See MBOA MAC[16.8.6] - */ -enum { UWB_NUM_ZONES = 16 }; - -/* - * Number of MAS in a zone. - */ -#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) - -/* - * Number of MAS required before a row can be considered available. - */ -#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1) - -/* - * Number of streams per DRP reservation between a pair of devices. - * - * [ECMA-368] section 16.8.6. - */ -enum { UWB_NUM_STREAMS = 8 }; - -/* - * mMasLength - * - * The length of a MAS in microseconds. - * - * [ECMA-368] section 17.16. - */ -enum { UWB_MAS_LENGTH_US = 256 }; - -/* - * mBeaconSlotLength - * - * The length of the beacon slot in microseconds. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_BEACON_SLOT_LENGTH_US = 85 }; - -/* - * mMaxLostBeacons - * - * The number beacons missing in consecutive superframes before a - * device can be considered as unreachable. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_MAX_LOST_BEACONS = 3 }; - -/* - * mDRPBackOffWinMin - * - * The minimum number of superframes to wait before trying to reserve - * extra MAS. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_DRP_BACKOFF_WIN_MIN = 2 }; - -/* - * mDRPBackOffWinMax - * - * The maximum number of superframes to wait before trying to reserve - * extra MAS. - * - * [ECMA-368] section 17.16 - */ -enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; - -/* - * Length of a superframe in microseconds. - */ -#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS) - -/** - * UWB MAC address - * - * It is *imperative* that this struct is exactly 6 packed bytes (as - * it is also used to define headers sent down and up the wire/radio). - */ -struct uwb_mac_addr { - u8 data[ETH_ALEN]; -} __attribute__((packed)); - - -/** - * UWB device address - * - * It is *imperative* that this struct is exactly 6 packed bytes (as - * it is also used to define headers sent down and up the wire/radio). - */ -struct uwb_dev_addr { - u8 data[2]; -} __attribute__((packed)); - - -/** - * Types of UWB addresses - * - * Order matters (by size). - */ -enum uwb_addr_type { - UWB_ADDR_DEV = 0, - UWB_ADDR_MAC = 1, -}; - - -/** Size of a char buffer for printing a MAC/device address */ -enum { UWB_ADDR_STRSIZE = 32 }; - - -/** UWB WiMedia protocol IDs. 
*/ -enum uwb_prid { - UWB_PRID_WLP_RESERVED = 0x0000, - UWB_PRID_WLP = 0x0001, - UWB_PRID_WUSB_BOT = 0x0010, - UWB_PRID_WUSB = 0x0010, - UWB_PRID_WUSB_TOP = 0x001F, -}; - - -/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */ -enum uwb_phy_rate { - UWB_PHY_RATE_53 = 0, - UWB_PHY_RATE_80, - UWB_PHY_RATE_106, - UWB_PHY_RATE_160, - UWB_PHY_RATE_200, - UWB_PHY_RATE_320, - UWB_PHY_RATE_400, - UWB_PHY_RATE_480, - UWB_PHY_RATE_INVALID -}; - - -/** - * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78]) - */ -enum uwb_scan_type { - UWB_SCAN_ONLY = 0, - UWB_SCAN_OUTSIDE_BP, - UWB_SCAN_WHILE_INACTIVE, - UWB_SCAN_DISABLED, - UWB_SCAN_ONLY_STARTTIME, - UWB_SCAN_TOP -}; - - -/** ACK Policy types (MBOA MAC[7.2.1.3]) */ -enum uwb_ack_pol { - UWB_ACK_NO = 0, - UWB_ACK_INM = 1, - UWB_ACK_B = 2, - UWB_ACK_B_REQ = 3, -}; - - -/** DRP reservation types ([ECMA-368 table 106) */ -enum uwb_drp_type { - UWB_DRP_TYPE_ALIEN_BP = 0, - UWB_DRP_TYPE_HARD, - UWB_DRP_TYPE_SOFT, - UWB_DRP_TYPE_PRIVATE, - UWB_DRP_TYPE_PCA, -}; - - -/** DRP Reason Codes ([ECMA-368] table 107) */ -enum uwb_drp_reason { - UWB_DRP_REASON_ACCEPTED = 0, - UWB_DRP_REASON_CONFLICT, - UWB_DRP_REASON_PENDING, - UWB_DRP_REASON_DENIED, - UWB_DRP_REASON_MODIFIED, -}; - -/** Relinquish Request Reason Codes ([ECMA-368] table 113) */ -enum uwb_relinquish_req_reason { - UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0, - UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION, -}; - -/** - * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) - */ -enum uwb_drp_notif_reason { - UWB_DRP_NOTIF_DRP_IE_RCVD = 0, - UWB_DRP_NOTIF_CONFLICT, - UWB_DRP_NOTIF_TERMINATE, -}; - - -/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */ -struct uwb_drp_alloc { - __le16 zone_bm; - __le16 mas_bm; -} __attribute__((packed)); - - -/** General MAC Header format (ECMA-368[16.2]) */ -struct uwb_mac_frame_hdr { - __le16 Frame_Control; - struct uwb_dev_addr DestAddr; - struct uwb_dev_addr SrcAddr; - __le16 Sequence_Control; - __le16 Access_Information; -} __attribute__((packed)); - - -/** - * uwb_beacon_frame - a beacon frame including MAC headers - * - * [ECMA] section 16.3. 
- */ -struct uwb_beacon_frame { - struct uwb_mac_frame_hdr hdr; - struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */ - u8 Beacon_Slot_Number; - u8 Device_Control; - u8 IEData[]; -} __attribute__((packed)); - - -/** Information Element codes (MBOA MAC[T54]) */ -enum uwb_ie { - UWB_PCA_AVAILABILITY = 2, - UWB_IE_DRP_AVAILABILITY = 8, - UWB_IE_DRP = 9, - UWB_BP_SWITCH_IE = 11, - UWB_MAC_CAPABILITIES_IE = 12, - UWB_PHY_CAPABILITIES_IE = 13, - UWB_APP_SPEC_PROBE_IE = 15, - UWB_IDENTIFICATION_IE = 19, - UWB_MASTER_KEY_ID_IE = 20, - UWB_RELINQUISH_REQUEST_IE = 21, - UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ - UWB_APP_SPEC_IE = 255, -}; - - -/** - * Header common to all Information Elements (IEs) - */ -struct uwb_ie_hdr { - u8 element_id; /* enum uwb_ie */ - u8 length; -} __attribute__((packed)); - - -/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */ -struct uwb_ie_drp { - struct uwb_ie_hdr hdr; - __le16 drp_control; - struct uwb_dev_addr dev_addr; - struct uwb_drp_alloc allocs[]; -} __attribute__((packed)); - -static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 0) & 0x7; -} - -static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 3) & 0x7; -} - -static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 6) & 0x7; -} - -static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 9) & 0x1; -} - -static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 10) & 0x1; -} - -static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 11) & 0x1; -} - -static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie) -{ - return (le16_to_cpu(ie->drp_control) >> 12) & 0x1; -} - -static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x7 << 0)) | (type << 0); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie, - enum uwb_drp_reason reason_code) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (ie->drp_control & ~(0x7 << 6)) | (reason_code << 6); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 9)) | (status << 9); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker) -{ - u16 drp_control = le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11); - ie->drp_control = cpu_to_le16(drp_control); -} - -static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe) -{ - u16 drp_control = 
le16_to_cpu(ie->drp_control); - drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12); - ie->drp_control = cpu_to_le16(drp_control); -} - -/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */ -struct uwb_ie_drp_avail { - struct uwb_ie_hdr hdr; - DECLARE_BITMAP(bmp, UWB_NUM_MAS); -} __attribute__((packed)); - -/* Relinqish Request IE ([ECMA-368] section 16.8.19). */ -struct uwb_relinquish_request_ie { - struct uwb_ie_hdr hdr; - __le16 relinquish_req_control; - struct uwb_dev_addr dev_addr; - struct uwb_drp_alloc allocs[]; -} __attribute__((packed)); - -static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie) -{ - return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf; -} - -static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie, - int reason_code) -{ - u16 ctrl = le16_to_cpu(ie->relinquish_req_control); - ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0); - ie->relinquish_req_control = cpu_to_le16(ctrl); -} - -/** - * The Vendor ID is set to an OUI that indicates the vendor of the device. - * ECMA-368 [16.8.10] - */ -struct uwb_vendor_id { - u8 data[3]; -} __attribute__((packed)); - -/** - * The device type ID - * FIXME: clarify what this means - * ECMA-368 [16.8.10] - */ -struct uwb_device_type_id { - u8 data[3]; -} __attribute__((packed)); - - -/** - * UWB device information types - * ECMA-368 [16.8.10] - */ -enum uwb_dev_info_type { - UWB_DEV_INFO_VENDOR_ID = 0, - UWB_DEV_INFO_VENDOR_TYPE, - UWB_DEV_INFO_NAME, -}; - -/** - * UWB device information found in Identification IE - * ECMA-368 [16.8.10] - */ -struct uwb_dev_info { - u8 type; /* enum uwb_dev_info_type */ - u8 length; - u8 data[]; -} __attribute__((packed)); - -/** - * UWB Identification IE - * ECMA-368 [16.8.10] - */ -struct uwb_identification_ie { - struct uwb_ie_hdr hdr; - struct uwb_dev_info info[]; -} __attribute__((packed)); - -/* - * UWB Radio Controller - * - * These definitions are common to the Radio Control layers as - * exported by the WUSB1.0 HWA and WHCI interfaces. 
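Before moving on to the radio-control blocks, a small illustration of the drp_control accessors above, composing the field for a hard reservation on stream 2 that has been accepted and is owned by the local device (the values are chosen arbitrarily and the helper is not part of the header):

static void example_fill_drp_control(struct uwb_ie_drp *ie)
{
        ie->drp_control = cpu_to_le16(0);
        uwb_ie_drp_set_type(ie, UWB_DRP_TYPE_HARD);              /* bits 0-2 */
        uwb_ie_drp_set_stream_index(ie, 2);                      /* bits 3-5 */
        uwb_ie_drp_set_reason_code(ie, UWB_DRP_REASON_ACCEPTED); /* bits 6-8 */
        uwb_ie_drp_set_status(ie, 1);                            /* bit 9    */
        uwb_ie_drp_set_owner(ie, 1);                             /* bit 10   */
}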
- */ - -/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */ -struct uwb_rccb { - u8 bCommandType; /* enum hwa_cet */ - __le16 wCommand; /* Command code */ - u8 bCommandContext; /* Context ID */ -} __attribute__((packed)); - - -/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */ -struct uwb_rceb { - u8 bEventType; /* enum hwa_cet */ - __le16 wEvent; /* Event code */ - u8 bEventContext; /* Context ID */ -} __attribute__((packed)); - - -enum { - UWB_RC_CET_GENERAL = 0, /* General Command/Event type */ - UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */ -}; - -/* Commands to the radio controller */ -enum uwb_rc_cmd { - UWB_RC_CMD_CHANNEL_CHANGE = 16, - UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */ - UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */ - UWB_RC_CMD_RESET = 19, - UWB_RC_CMD_SCAN = 20, /* Scan management */ - UWB_RC_CMD_SET_BEACON_FILTER = 21, - UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */ - UWB_RC_CMD_SET_IE = 23, /* Information Element management */ - UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24, - UWB_RC_CMD_SET_TX_POWER = 25, - UWB_RC_CMD_SLEEP = 26, - UWB_RC_CMD_START_BEACON = 27, - UWB_RC_CMD_STOP_BEACON = 28, - UWB_RC_CMD_BP_MERGE = 29, - UWB_RC_CMD_SEND_COMMAND_FRAME = 30, - UWB_RC_CMD_SET_ASIE_NOTIF = 31, -}; - -/* Notifications from the radio controller */ -enum uwb_rc_evt { - UWB_RC_EVT_IE_RCV = 0, - UWB_RC_EVT_BEACON = 1, - UWB_RC_EVT_BEACON_SIZE = 2, - UWB_RC_EVT_BPOIE_CHANGE = 3, - UWB_RC_EVT_BP_SLOT_CHANGE = 4, - UWB_RC_EVT_BP_SWITCH_IE_RCV = 5, - UWB_RC_EVT_DEV_ADDR_CONFLICT = 6, - UWB_RC_EVT_DRP_AVAIL = 7, - UWB_RC_EVT_DRP = 8, - UWB_RC_EVT_BP_SWITCH_STATUS = 9, - UWB_RC_EVT_CMD_FRAME_RCV = 10, - UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11, - /* Events (command responses) use the same code as the command */ - UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535, -}; - -enum uwb_rc_extended_type_1_cmd { - UWB_RC_SET_DAA_ENERGY_MASK = 32, - UWB_RC_SET_NOTIFICATION_FILTER_EX = 33, -}; - -enum uwb_rc_extended_type_1_evt { - UWB_RC_DAA_ENERGY_DETECTED = 0, -}; - -/* Radio Control Result Code. [WHCI] table 3-3. */ -enum { - UWB_RC_RES_SUCCESS = 0, - UWB_RC_RES_FAIL, - UWB_RC_RES_FAIL_HARDWARE, - UWB_RC_RES_FAIL_NO_SLOTS, - UWB_RC_RES_FAIL_BEACON_TOO_LARGE, - UWB_RC_RES_FAIL_INVALID_PARAMETER, - UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL, - UWB_RC_RES_FAIL_INVALID_IE_DATA, - UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED, - UWB_RC_RES_FAIL_CANCELLED, - UWB_RC_RES_FAIL_INVALID_STATE, - UWB_RC_RES_FAIL_INVALID_SIZE, - UWB_RC_RES_FAIL_ACK_NOT_RECEIVED, - UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF, - UWB_RC_RES_FAIL_TIME_OUT = 255, -}; - -/* Confirm event. [WHCI] section 3.1.3.1 etc. */ -struct uwb_rc_evt_confirm { - struct uwb_rceb rceb; - u8 bResultCode; -} __attribute__((packed)); - -/* Device Address Management event. [WHCI] section 3.1.3.2. */ -struct uwb_rc_evt_dev_addr_mgmt { - struct uwb_rceb rceb; - u8 baAddr[ETH_ALEN]; - u8 bResultCode; -} __attribute__((packed)); - - -/* Get IE Event. [WHCI] section 3.1.3.3. */ -struct uwb_rc_evt_get_ie { - struct uwb_rceb rceb; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* Set DRP IE Event. [WHCI] section 3.1.3.7. */ -struct uwb_rc_evt_set_drp_ie { - struct uwb_rceb rceb; - __le16 wRemainingSpace; - u8 bResultCode; -} __attribute__((packed)); - -/* Set IE Event. [WHCI] section 3.1.3.8. */ -struct uwb_rc_evt_set_ie { - struct uwb_rceb rceb; - __le16 RemainingSpace; - u8 bResultCode; -} __attribute__((packed)); - -/* Scan command. [WHCI] 3.1.3.5. 
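To show how these blocks fit together, here is a hypothetical sketch of issuing a command through the uwb_rc_cmd() helper declared earlier and checking the confirm event's result code; the wrapper and its abbreviated error handling are invented:

static int example_rc_reset(struct uwb_rc *rc)
{
        struct uwb_rccb cmd = {
                .bCommandType = UWB_RC_CET_GENERAL,
                .wCommand     = cpu_to_le16(UWB_RC_CMD_RESET),
        };
        struct uwb_rc_evt_confirm reply;
        ssize_t ret;

        ret = uwb_rc_cmd(rc, "RESET", &cmd, sizeof(cmd),
                         &reply.rceb, sizeof(reply));
        if (ret < 0)
                return ret;
        return reply.bResultCode == UWB_RC_RES_SUCCESS ? 0 : -EIO;
}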
*/ -struct uwb_rc_cmd_scan { - struct uwb_rccb rccb; - u8 bChannelNumber; - u8 bScanState; - __le16 wStartTime; -} __attribute__((packed)); - -/* Set DRP IE command. [WHCI] section 3.1.3.7. */ -struct uwb_rc_cmd_set_drp_ie { - struct uwb_rccb rccb; - __le16 wIELength; - struct uwb_ie_drp IEData[]; -} __attribute__((packed)); - -/* Set IE command. [WHCI] section 3.1.3.8. */ -struct uwb_rc_cmd_set_ie { - struct uwb_rccb rccb; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */ -struct uwb_rc_evt_set_daa_energy_mask { - struct uwb_rceb rceb; - __le16 wLength; - u8 result; -} __attribute__((packed)); - -/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */ -struct uwb_rc_evt_set_notification_filter_ex { - struct uwb_rceb rceb; - __le16 wLength; - u8 result; -} __attribute__((packed)); - -/* IE Received notification. [WHCI] section 3.1.4.1. */ -struct uwb_rc_evt_ie_rcv { - struct uwb_rceb rceb; - struct uwb_dev_addr SrcAddr; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* Type of the received beacon. [WHCI] section 3.1.4.2. */ -enum uwb_rc_beacon_type { - UWB_RC_BEACON_TYPE_SCAN = 0, - UWB_RC_BEACON_TYPE_NEIGHBOR, - UWB_RC_BEACON_TYPE_OL_ALIEN, - UWB_RC_BEACON_TYPE_NOL_ALIEN, -}; - -/* Beacon received notification. [WHCI] 3.1.4.2. */ -struct uwb_rc_evt_beacon { - struct uwb_rceb rceb; - u8 bChannelNumber; - u8 bBeaconType; - __le16 wBPSTOffset; - u8 bLQI; - u8 bRSSI; - __le16 wBeaconInfoLength; - u8 BeaconInfo[]; -} __attribute__((packed)); - - -/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */ -struct uwb_rc_evt_beacon_size { - struct uwb_rceb rceb; - __le16 wNewBeaconSize; -} __attribute__((packed)); - - -/* BPOIE Change notification. [WHCI] section 3.1.4.4. */ -struct uwb_rc_evt_bpoie_change { - struct uwb_rceb rceb; - __le16 wBPOIELength; - u8 BPOIE[]; -} __attribute__((packed)); - - -/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */ -struct uwb_rc_evt_bp_slot_change { - struct uwb_rceb rceb; - u8 slot_info; -} __attribute__((packed)); - -static inline int uwb_rc_evt_bp_slot_change_slot_num( - const struct uwb_rc_evt_bp_slot_change *evt) -{ - return evt->slot_info & 0x7f; -} - -static inline int uwb_rc_evt_bp_slot_change_no_slot( - const struct uwb_rc_evt_bp_slot_change *evt) -{ - return (evt->slot_info & 0x80) >> 7; -} - -/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */ -struct uwb_rc_evt_bp_switch_ie_rcv { - struct uwb_rceb rceb; - struct uwb_dev_addr wSrcAddr; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */ -struct uwb_rc_evt_dev_addr_conflict { - struct uwb_rceb rceb; -} __attribute__((packed)); - -/* DRP notification. [WHCI] section 3.1.4.9. */ -struct uwb_rc_evt_drp { - struct uwb_rceb rceb; - struct uwb_dev_addr src_addr; - u8 reason; - u8 beacon_slot_number; - __le16 ie_length; - u8 ie_data[]; -} __attribute__((packed)); - -static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt) -{ - return evt->reason & 0x0f; -} - - -/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */ -struct uwb_rc_evt_drp_avail { - struct uwb_rceb rceb; - DECLARE_BITMAP(bmp, UWB_NUM_MAS); -} __attribute__((packed)); - -/* BP switch status notification. [WHCI] section 3.1.4.10. 
*/ -struct uwb_rc_evt_bp_switch_status { - struct uwb_rceb rceb; - u8 status; - u8 slot_offset; - __le16 bpst_offset; - u8 move_countdown; -} __attribute__((packed)); - -/* Command Frame Received notification. [WHCI] section 3.1.4.11. */ -struct uwb_rc_evt_cmd_frame_rcv { - struct uwb_rceb rceb; - __le16 receive_time; - struct uwb_dev_addr wSrcAddr; - struct uwb_dev_addr wDstAddr; - __le16 control; - __le16 reserved; - __le16 dataLength; - u8 data[]; -} __attribute__((packed)); - -/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */ -struct uwb_rc_evt_channel_change_ie_rcv { - struct uwb_rceb rceb; - struct uwb_dev_addr wSrcAddr; - __le16 wIELength; - u8 IEData[]; -} __attribute__((packed)); - -/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */ -struct uwb_rc_evt_daa_energy_detected { - struct uwb_rceb rceb; - __le16 wLength; - u8 bandID; - u8 reserved; - u8 toneBmp[16]; -} __attribute__((packed)); - - -/** - * Radio Control Interface Class Descriptor - * - * WUSB 1.0 [8.6.1.2] - */ -struct uwb_rc_control_intf_class_desc { - u8 bLength; - u8 bDescriptorType; - __le16 bcdRCIVersion; -} __attribute__((packed)); - -#endif /* #ifndef __LINUX__UWB_SPEC_H__ */ diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h deleted file mode 100644 index ddbceb39ad15..000000000000 --- a/include/linux/uwb/umc.h +++ /dev/null @@ -1,192 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * UWB Multi-interface Controller support. - * - * Copyright (C) 2007 Cambridge Silicon Radio Ltd. - * - * UMC (UWB Multi-interface Controller) capabilities (e.g., radio - * controller, host controller) are presented as devices on the "umc" - * bus. - * - * The radio controller is not strictly a UMC capability but it's - * useful to present it as such. - * - * References: - * - * [WHCI] Wireless Host Controller Interface Specification for - * Certified Wireless Universal Serial Bus, revision 0.95. - * - * How this works is kind of convoluted but simple. The whci.ko driver - * loads when WHCI devices are detected. These WHCI devices expose - * many devices in the same PCI function (they couldn't have reused - * functions, no), so for each PCI function that exposes these many - * devices, whci ceates a umc_dev [whci_probe() -> whci_add_cap()] - * with umc_device_create() and adds it to the bus with - * umc_device_register(). - * - * umc_device_register() calls device_register() which will push the - * bus management code to load your UMC driver's somehting_probe() - * that you have registered for that capability code. - * - * Now when the WHCI device is removed, whci_remove() will go over - * each umc_dev assigned to each of the PCI function's capabilities - * and through whci_del_cap() call umc_device_unregister() each - * created umc_dev. Of course, if you are bound to the device, your - * driver's something_remove() will be called. - */ - -#ifndef _LINUX_UWB_UMC_H_ -#define _LINUX_UWB_UMC_H_ - -#include <linux/device.h> -#include <linux/pci.h> - -/* - * UMC capability IDs. - * - * 0x00 is reserved so use it for the radio controller device. - * - * [WHCI] table 2-8 - */ -#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */ -#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */ - -/** - * struct umc_dev - UMC capability device - * - * @version: version of the specification this capability conforms to. - * @cap_id: capability ID. - * @bar: PCI Bar (64 bit) where the resource lies - * @resource: register space resource. - * @irq: interrupt line. 
- */ -struct umc_dev { - u16 version; - u8 cap_id; - u8 bar; - struct resource resource; - unsigned irq; - struct device dev; -}; - -#define to_umc_dev(d) container_of(d, struct umc_dev, dev) - -/** - * struct umc_driver - UMC capability driver - * @cap_id: supported capability ID. - * @match: driver specific capability matching function. - * @match_data: driver specific data for match() (e.g., a - * table of pci_device_id's if umc_match_pci_id() is used). - */ -struct umc_driver { - char *name; - u8 cap_id; - int (*match)(struct umc_driver *, struct umc_dev *); - const void *match_data; - - int (*probe)(struct umc_dev *); - void (*remove)(struct umc_dev *); - int (*pre_reset)(struct umc_dev *); - int (*post_reset)(struct umc_dev *); - - struct device_driver driver; -}; - -#define to_umc_driver(d) container_of(d, struct umc_driver, driver) - -extern struct bus_type umc_bus_type; - -struct umc_dev *umc_device_create(struct device *parent, int n); -int __must_check umc_device_register(struct umc_dev *umc); -void umc_device_unregister(struct umc_dev *umc); - -int __must_check __umc_driver_register(struct umc_driver *umc_drv, - struct module *mod, - const char *mod_name); - -/** - * umc_driver_register - register a UMC capabiltity driver. - * @umc_drv: pointer to the driver. - */ -#define umc_driver_register(umc_drv) \ - __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME) - -void umc_driver_unregister(struct umc_driver *umc_drv); - -/* - * Utility function you can use to match (umc_driver->match) against a - * null-terminated array of 'struct pci_device_id' in - * umc_driver->match_data. - */ -int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc); - -/** - * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none - * @umc_dev: UMC device whose parent PCI device we are looking for - * - * DIRTY!!! DON'T RELY ON THIS - * - * FIXME: This is as dirty as it gets, but we need some way to check - * the correct type of umc_dev->parent (so that for example, we can - * cast to pci_dev). Casting to pci_dev is necessary because at some - * point we need to request resources from the device. Mapping is - * easily over come (ioremap and stuff are bus agnostic), but hooking - * up to some error handlers (such as pci error handlers) might need - * this. - * - * THIS might (probably will) be removed in the future, so don't count - * on it. - */ -static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev) -{ - struct pci_dev *pci_dev = NULL; - if (dev_is_pci(umc_dev->dev.parent)) - pci_dev = to_pci_dev(umc_dev->dev.parent); - return pci_dev; -} - -/** - * umc_dev_get() - reference a UMC device. - * @umc_dev: Pointer to UMC device. - * - * NOTE: we are assuming in this whole scheme that the parent device - * is referenced at _probe() time and unreferenced at _remove() - * time by the parent's subsystem. - */ -static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev) -{ - get_device(&umc_dev->dev); - return umc_dev; -} - -/** - * umc_dev_put() - unreference a UMC device. - * @umc_dev: Pointer to UMC device. - */ -static inline void umc_dev_put(struct umc_dev *umc_dev) -{ - put_device(&umc_dev->dev); -} - -/** - * umc_set_drvdata - set UMC device's driver data. - * @umc_dev: Pointer to UMC device. - * @data: Data to set. - */ -static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data) -{ - dev_set_drvdata(&umc_dev->dev, data); -} - -/** - * umc_get_drvdata - recover UMC device's driver data. - * @umc_dev: Pointer to UMC device. 
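A hypothetical minimal capability driver, just to illustrate the probe/registration flow described above; everything prefixed my_ is invented:

static int my_umc_probe(struct umc_dev *umc)
{
        /* ioremap umc->resource, request umc->irq, set drvdata, ... */
        return 0;
}

static void my_umc_remove(struct umc_dev *umc)
{
        /* undo whatever my_umc_probe() set up */
}

static struct umc_driver my_umc_driver = {
        .name   = "my_umc",
        .cap_id = UMC_CAP_ID_WHCI_RC,
        .probe  = my_umc_probe,
        .remove = my_umc_remove,
};

/* module init: umc_driver_register(&my_umc_driver); */
/* module exit: umc_driver_unregister(&my_umc_driver); */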
- */ -static inline void *umc_get_drvdata(struct umc_dev *umc_dev) -{ - return dev_get_drvdata(&umc_dev->dev); -} - -int umc_controller_reset(struct umc_dev *umc); - -#endif /* #ifndef _LINUX_UWB_UMC_H_ */ diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h deleted file mode 100644 index 1a5c2cc2b008..000000000000 --- a/include/linux/uwb/whci.h +++ /dev/null @@ -1,102 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * References: - * [WHCI] Wireless Host Controller Interface Specification for - * Certified Wireless Universal Serial Bus, revision 0.95. - */ -#ifndef _LINUX_UWB_WHCI_H_ -#define _LINUX_UWB_WHCI_H_ - -#include <linux/pci.h> - -/* - * UWB interface capability registers (offsets from UWBBASE) - * - * [WHCI] section 2.2 - */ -#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */ -# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull) -#define UWBCAPDATA(n) (8*(n)) -# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull) -# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull) -# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull) -# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32)) -# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull) - -/* Size of the WHCI capability data (including the RC capability) for - a device with n capabilities. */ -#define UWBCAPDATA_SIZE(n) (8 + 8*(n)) - - -/* - * URC registers (offsets from URCBASE) - * - * [WHCI] section 2.3 - */ -#define URCCMD 0x00 -# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */ -# define URCCMD_RS (1 << 30) /* Run/Stop */ -# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */ -# define URCCMD_ACTIVE (1 << 15) /* Command is active */ -# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */ -# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */ -#define URCSTS 0x04 -# define URCSTS_EPS (1 << 17) /* Event Processing Status */ -# define URCSTS_HALTED (1 << 16) /* RC halted */ -# define URCSTS_HSE (1 << 10) /* Host System Error...fried */ -# define URCSTS_ER (1 << 9) /* Event Ready */ -# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */ -# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */ -# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */ -#define URCINTR 0x08 -# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */ -#define URCCMDADDR 0x10 -#define URCEVTADDR 0x18 -# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */ - - -/** Write 32 bit @value to little endian register at @addr */ -static inline -void le_writel(u32 value, void __iomem *addr) -{ - iowrite32(value, addr); -} - - -/** Read from 32 bit little endian register at @addr */ -static inline -u32 le_readl(void __iomem *addr) -{ - return ioread32(addr); -} - - -/** Write 64 bit @value to little endian register at @addr */ -static inline -void le_writeq(u64 value, void __iomem *addr) -{ - iowrite32(value, addr); - iowrite32(value >> 32, addr + 4); -} - - -/** Read from 64 bit little endian register at @addr */ -static inline -u64 le_readq(void __iomem *addr) -{ - u64 value; - value = ioread32(addr); - value |= (u64)ioread32(addr + 4) << 32; - return value; -} - -extern int whci_wait_for(struct device *dev, u32 __iomem *reg, - u32 mask, u32 result, - unsigned long max_ms, const char *tag); - -#endif /* #ifndef _LINUX_UWB_WHCI_H_ */ diff --git 
a/include/linux/verification.h b/include/linux/verification.h index 32d990d163c4..911ab7c2b1ab 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h @@ -32,6 +32,7 @@ extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR]; #ifdef CONFIG_SYSTEM_DATA_VERIFICATION struct key; +struct pkcs7_message; extern int verify_pkcs7_signature(const void *data, size_t len, const void *raw_pkcs7, size_t pkcs7_len, @@ -41,6 +42,15 @@ extern int verify_pkcs7_signature(const void *data, size_t len, const void *data, size_t len, size_t asn1hdrlen), void *ctx); +extern int verify_pkcs7_message_sig(const void *data, size_t len, + struct pkcs7_message *pkcs7, + struct key *trusted_keys, + enum key_being_used_for usage, + int (*view_content)(void *ctx, + const void *data, + size_t len, + size_t asn1hdrlen), + void *ctx); #ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION extern int verify_pefile_signature(const void *pebuf, unsigned pelen, diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index bae807eb2933..9aced11e9000 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -9,6 +9,8 @@ #endif #ifdef CONFIG_PREEMPT #define MODULE_VERMAGIC_PREEMPT "preempt " +#elif defined(CONFIG_PREEMPT_RT) +#define MODULE_VERMAGIC_PREEMPT "preempt_rt " #else #define MODULE_VERMAGIC_PREEMPT "" #endif diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index e223e2632edd..07875ccc7bb5 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -35,23 +35,24 @@ struct virtio_vsock_sock { /* Protected by tx_lock */ u32 tx_cnt; - u32 buf_alloc; u32 peer_fwd_cnt; u32 peer_buf_alloc; /* Protected by rx_lock */ u32 fwd_cnt; + u32 last_fwd_cnt; u32 rx_bytes; + u32 buf_alloc; struct list_head rx_queue; }; struct virtio_vsock_pkt { struct virtio_vsock_hdr hdr; - struct work_struct work; struct list_head list; /* socket refcnt not held, only use for cancellation */ struct vsock_sock *vsk; void *buf; + u32 buf_len; u32 len; u32 off; bool reply; diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 51e131245379..4e7809408073 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -18,6 +18,7 @@ struct notifier_block; /* in notifier.h */ #define VM_ALLOC 0x00000002 /* vmalloc() */ #define VM_MAP 0x00000004 /* vmap()ed pages */ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ +#define VM_DMA_COHERENT 0x00000010 /* dma_alloc_coherent */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ @@ -26,6 +27,7 @@ struct notifier_block; /* in notifier.h */ * vfree_atomic(). */ #define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */ + /* bits [20..32] reserved for arch specific ioremap internals */ /* @@ -51,15 +53,21 @@ struct vmap_area { unsigned long va_start; unsigned long va_end; - /* - * Largest available free size in subtree. 
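Tying the verification.h hunk above together: verify_pkcs7_message_sig() verifies against a PKCS#7 message that has already been parsed, instead of re-parsing a raw blob. A minimal sketch of a caller, assuming the pkcs7_parse_message()/pkcs7_free_message() helpers from crypto/pkcs7.h and that a NULL trusted_keys still selects the built-in keyring, as it does for verify_pkcs7_signature():

#include <crypto/pkcs7.h>
#include <linux/err.h>
#include <linux/verification.h>

static int check_detached_sig(const void *data, size_t data_len,
                              const void *sig, size_t sig_len)
{
        struct pkcs7_message *pkcs7;
        int ret;

        /* Parse the detached signature once... */
        pkcs7 = pkcs7_parse_message(sig, sig_len);
        if (IS_ERR(pkcs7))
                return PTR_ERR(pkcs7);

        /* ...then verify it against the supplied data buffer. */
        ret = verify_pkcs7_message_sig(data, data_len, pkcs7, NULL,
                                       VERIFYING_UNSPECIFIED_SIGNATURE,
                                       NULL, NULL);
        pkcs7_free_message(pkcs7);
        return ret;
}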
- */ - unsigned long subtree_max_size; - unsigned long flags; struct rb_node rb_node; /* address sorted rbtree */ struct list_head list; /* address sorted list */ - struct llist_node purge_list; /* "lazy purge" list */ - struct vm_struct *vm; + + /* + * The following three variables can be packed, because + * a vmap_area object is always one of the three states: + * 1) in "free" tree (root is vmap_area_root) + * 2) in "busy" tree (root is free_vmap_area_root) + * 3) in purge list (head is vmap_purge_list) + */ + union { + unsigned long subtree_max_size; /* in "free" tree */ + struct vm_struct *vm; /* in "busy" tree */ + struct llist_node purge_list; /* in purge list */ + }; }; /* @@ -72,10 +80,12 @@ extern void vm_unmap_aliases(void); #ifdef CONFIG_MMU extern void __init vmalloc_init(void); +extern unsigned long vmalloc_nr_pages(void); #else static inline void vmalloc_init(void) { } +static inline unsigned long vmalloc_nr_pages(void) { return 0; } #endif extern void *vmalloc(unsigned long size); diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 61e6fddfb26f..6d28bc433c1c 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -17,7 +17,7 @@ struct vmpressure { unsigned long tree_scanned; unsigned long tree_reclaimed; /* The lock is used to keep the scanned/reclaimed above in sync. */ - struct spinlock sr_lock; + spinlock_t sr_lock; /* The list of vmpressure_event structs. */ struct list_head events; diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index 77ac9c7b9483..fefb5292403b 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -62,9 +62,18 @@ enum { /* * A single VMCI device has an upper limit of 128MB on the amount of - * memory that can be used for queue pairs. + * memory that can be used for queue pairs. Since each queue pair + * consists of at least two pages, the memory limit also dictates the + * number of queue pairs a guest can create. */ #define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024) +#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2) + +/* + * There can be at most PAGE_SIZE doorbells since there is one doorbell + * per byte in the doorbell bitmap page. + */ +#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE /* * Queues with pre-mapped data pages must be small, so that we don't pin @@ -430,8 +439,8 @@ enum { struct vmci_queue_header { /* All fields are 64bit and aligned. */ struct vmci_handle handle; /* Identifier. */ - atomic64_t producer_tail; /* Offset in this queue. */ - atomic64_t consumer_head; /* Offset in peer queue. */ + u64 producer_tail; /* Offset in this queue. */ + u64 consumer_head; /* Offset in peer queue. */ }; /* @@ -732,13 +741,9 @@ static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data) * prefix will be used, so correctness isn't an issue, but using a * 64bit operation still adds unnecessary overhead. */ -static inline u64 vmci_q_read_pointer(atomic64_t *var) +static inline u64 vmci_q_read_pointer(u64 *var) { -#if defined(CONFIG_X86_32) - return atomic_read((atomic_t *)var); -#else - return atomic64_read(var); -#endif + return READ_ONCE(*(unsigned long *)var); } /* @@ -747,23 +752,17 @@ static inline u64 vmci_q_read_pointer(atomic64_t *var) * never exceeds a 32bit value in this case. On 32bit SMP, using a * locked cmpxchg8b adds unnecessary overhead. 
*/ -static inline void vmci_q_set_pointer(atomic64_t *var, - u64 new_val) +static inline void vmci_q_set_pointer(u64 *var, u64 new_val) { -#if defined(CONFIG_X86_32) - return atomic_set((atomic_t *)var, (u32)new_val); -#else - return atomic64_set(var, new_val); -#endif + /* XXX buggered on big-endian */ + WRITE_ONCE(*(unsigned long *)var, (unsigned long)new_val); } /* * Helper to add a given offset to a head or tail pointer. Wraps the * value of the pointer around the max size of the queue. */ -static inline void vmci_qp_add_pointer(atomic64_t *var, - size_t add, - u64 size) +static inline void vmci_qp_add_pointer(u64 *var, size_t add, u64 size) { u64 new_val = vmci_q_read_pointer(var); @@ -840,8 +839,8 @@ static inline void vmci_q_header_init(struct vmci_queue_header *q_header, const struct vmci_handle handle) { q_header->handle = handle; - atomic64_set(&q_header->producer_tail, 0); - atomic64_set(&q_header->consumer_head, 0); + q_header->producer_tail = 0; + q_header->consumer_head = 0; } /* diff --git a/include/linux/w1.h b/include/linux/w1.h index e0b5156f78fd..7da0c7588e04 100644 --- a/include/linux/w1.h +++ b/include/linux/w1.h @@ -118,6 +118,9 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64); * w1_master* is passed to the slave found callback. * u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH * + * @dev_id: Optional device id string, which w1 slaves could use for + * creating names, which then give a connection to the w1 master + * * Note: read_bit and write_bit are very low level functions and should only * be used with hardware that doesn't really support 1-wire operations, * like a parallel/serial port. @@ -150,6 +153,8 @@ struct w1_bus_master { void (*search)(void *, struct w1_master *, u8, w1_slave_found_callback); + + char *dev_id; }; /** diff --git a/include/linux/wait.h b/include/linux/wait.h index b6f77cf60dd7..3eb7cae8206c 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -127,6 +127,19 @@ static inline int waitqueue_active(struct wait_queue_head *wq_head) } /** + * wq_has_single_sleeper - check if there is only one sleeper + * @wq_head: wait queue head + * + * Returns true of wq_head has only one sleeper on the list. + * + * Please refer to the comment for waitqueue_active. 
+ */ +static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head) +{ + return list_is_singular(&wq_head->head); +} + +/** * wq_has_sleeper - check if there are any waiting processes * @wq_head: wait queue head * @@ -488,8 +501,8 @@ do { \ int __ret = 0; \ struct hrtimer_sleeper __t; \ \ - hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \ - hrtimer_init_sleeper(&__t, current); \ + hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ + HRTIMER_MODE_REL); \ if ((timeout) != KTIME_MAX) \ hrtimer_start_range_ns(&__t.timer, timeout, \ current->timer_slack_ns, \ diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h deleted file mode 100644 index f6358558f9f5..000000000000 --- a/include/linux/wanrouter.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * wanrouter.h Legacy declarations kept around until X25 is removed - */ - -#ifndef _ROUTER_H -#define _ROUTER_H - -#include <uapi/linux/wanrouter.h> - -#endif /* _ROUTER_H */ diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h index 7cb63e4ec0ae..4dd2c1cea6a9 100644 --- a/include/linux/wimax/debug.h +++ b/include/linux/wimax/debug.h @@ -98,9 +98,7 @@ * To manipulate from user space the levels, create a debugfs dentry * and then register each submodule with: * - * result = d_level_register_debugfs("PREFIX_", submodule_X, parent); - * if (result < 0) - * goto error; + * d_level_register_debugfs("PREFIX_", submodule_X, parent); * * Where PREFIX_ is a name of your chosing. This will create debugfs * file with a single numeric value that can be use to tweak it. To @@ -408,25 +406,13 @@ do { \ * @submodule: name of submodule (not a string, just the name) * @dentry: debugfs parent dentry * - * Returns: 0 if ok, < 0 errno on error. - * * For removing, just use debugfs_remove_recursive() on the parent. 
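Looking back at the wait.h hunk above, wq_has_single_sleeper() gives wake-up paths a cheap way to special-case a lone waiter. A hedged sketch of such a path; my_io_done() is invented for illustration, and the same memory-ordering caveats as waitqueue_active() apply:

#include <linux/wait.h>

static void my_io_done(struct wait_queue_head *wq)
{
        /* With exactly one task parked on the queue, a plain wake_up()
         * is guaranteed to reach it and avoids walking the whole list. */
        if (wq_has_single_sleeper(wq))
                wake_up(wq);
        else
                wake_up_all(wq);
}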
*/ #define d_level_register_debugfs(prefix, name, parent) \ ({ \ - int rc; \ - struct dentry *fd; \ - struct dentry *verify_parent_type = parent; \ - fd = debugfs_create_u8( \ - prefix #name, 0600, verify_parent_type, \ + debugfs_create_u8( \ + prefix #name, 0600, parent, \ &(D_LEVEL[__D_SUBMODULE_ ## name].level)); \ - rc = PTR_ERR(fd); \ - if (IS_ERR(fd) && rc != -ENODEV) \ - printk(KERN_ERR "%s: Can't create debugfs entry %s: " \ - "%d\n", __func__, prefix #name, rc); \ - else \ - rc = 0; \ - rc; \ }) diff --git a/include/linux/wmi.h b/include/linux/wmi.h index fcc9d029f67a..8ef7e7faea1e 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h @@ -36,7 +36,7 @@ struct wmi_driver { struct device_driver driver; const struct wmi_device_id *id_table; - int (*probe)(struct wmi_device *wdev); + int (*probe)(struct wmi_device *wdev, const void *context); int (*remove)(struct wmi_device *wdev); void (*notify)(struct wmi_device *device, union acpi_object *data); long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd, diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index d59525fca4d3..4261d1c6e87b 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -435,7 +435,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt, extern void destroy_workqueue(struct workqueue_struct *wq); -struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask); +struct workqueue_attrs *alloc_workqueue_attrs(void); void free_workqueue_attrs(struct workqueue_attrs *attrs); int apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 738a0c24874f..a19d845dd7eb 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -11,6 +11,7 @@ #include <linux/flex_proportions.h> #include <linux/backing-dev-defs.h> #include <linux/blk_types.h> +#include <linux/blk-cgroup.h> struct bio; @@ -68,6 +69,17 @@ struct writeback_control { unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned range_cyclic:1; /* range_start is cyclic */ unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ + + /* + * When writeback IOs are bounced through async layers, only the + * initial synchronous phase should be accounted towards inode + * cgroup ownership arbitration to avoid confusion. Later stages + * can set the following flag to disable the accounting. 
+ */ + unsigned no_cgroup_owner:1; + + unsigned punt_to_cgroup:1; /* cgrp punting, see __REQ_CGROUP_PUNT */ + #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *wb; /* wb this writeback is issued under */ struct inode *inode; /* inode being written out */ @@ -84,12 +96,27 @@ struct writeback_control { static inline int wbc_to_write_flags(struct writeback_control *wbc) { + int flags = 0; + + if (wbc->punt_to_cgroup) + flags = REQ_CGROUP_PUNT; + if (wbc->sync_mode == WB_SYNC_ALL) - return REQ_SYNC; + flags |= REQ_SYNC; else if (wbc->for_kupdate || wbc->for_background) - return REQ_BACKGROUND; + flags |= REQ_BACKGROUND; - return 0; + return flags; +} + +static inline struct cgroup_subsys_state * +wbc_blkcg_css(struct writeback_control *wbc) +{ +#ifdef CONFIG_CGROUP_WRITEBACK + if (wbc->wb) + return wbc->wb->blkcg_css; +#endif + return blkcg_root_css; } /* @@ -188,8 +215,10 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc, struct inode *inode) __releases(&inode->i_lock); void wbc_detach_inode(struct writeback_control *wbc); -void wbc_account_io(struct writeback_control *wbc, struct page *page, - size_t bytes); +void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page, + size_t bytes); +int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr_pages, + enum wb_reason reason, struct wb_completion *done); void cgroup_writeback_umount(void); /** @@ -291,8 +320,8 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) { } -static inline void wbc_account_io(struct writeback_control *wbc, - struct page *page, size_t bytes) +static inline void wbc_account_cgroup_owner(struct writeback_control *wbc, + struct page *page, size_t bytes) { } diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 5921599b6dc4..86eecbd98e84 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -230,8 +230,8 @@ static inline int xa_err(void *entry) * This structure is used either directly or via the XA_LIMIT() macro * to communicate the range of IDs that are valid for allocation. 
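Returning to the writeback.h hunk above: wbc_to_write_flags() now ORs REQ_CGROUP_PUNT into the result when punt_to_cgroup is set, alongside REQ_SYNC or REQ_BACKGROUND, instead of returning a single flag. A rough sketch of a submission path consuming it; my_submit_page_bio() and its bio setup are illustrative, only wbc_to_write_flags() and wbc_init_bio() come from the header:

#include <linux/bio.h>
#include <linux/writeback.h>

static void my_submit_page_bio(struct writeback_control *wbc,
                               struct bio *bio)
{
        /* May now combine REQ_CGROUP_PUNT with REQ_SYNC/REQ_BACKGROUND. */
        bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

        /* Associate the bio with the blkcg this writeback is issued under. */
        wbc_init_bio(wbc, bio);
        submit_bio(bio);
}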
* Two common ranges are predefined for you: - * * xa_limit_32b - [0 - UINT_MAX] - * * xa_limit_31b - [0 - INT_MAX] + * * xa_limit_32b - [0 - UINT_MAX] + * * xa_limit_31b - [0 - INT_MAX] */ struct xa_limit { u32 max; diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 7238865e75b0..51bf43076165 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -46,6 +46,8 @@ const char *zpool_get_type(struct zpool *pool); void zpool_destroy_pool(struct zpool *pool); +bool zpool_malloc_support_movable(struct zpool *pool); + int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp, unsigned long *handle); @@ -90,6 +92,7 @@ struct zpool_driver { struct zpool *zpool); void (*destroy)(void *pool); + bool malloc_support_movable; int (*malloc)(void *pool, size_t size, gfp_t gfp, unsigned long *handle); void (*free)(void *pool, unsigned long handle); diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h index f37d12877754..adcc6a97db61 100644 --- a/include/math-emu/op-common.h +++ b/include/math-emu/op-common.h @@ -308,6 +308,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ R##_e = X##_e; \ + /* Fall through */ \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ @@ -318,6 +319,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \ R##_e = Y##_e; \ + /* Fall through */ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ @@ -415,6 +417,7 @@ do { \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ R##_s = X##_s; \ + /* Fall through */ \ \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ @@ -428,6 +431,7 @@ do { \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ R##_s = Y##_s; \ + /* Fall through */ \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ @@ -493,6 +497,7 @@ do { \ \ case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ FP_SET_EXCEPTION(FP_EX_DIVZERO); \ + /* Fall through */ \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \ case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ R##_c = FP_CLS_INF; \ diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h index 57b3a9f6ea1d..f161f8a493ac 100644 --- a/include/media/cec-notifier.h +++ b/include/media/cec-notifier.h @@ -43,6 +43,60 @@ struct cec_notifier *cec_notifier_get_conn(struct device *dev, void cec_notifier_put(struct cec_notifier *n); /** + * cec_notifier_conn_register - find or create a new cec_notifier for the given + * HDMI device and connector tuple. + * @hdmi_dev: HDMI device that sends the events. + * @conn_name: the connector name from which the event occurs. May be NULL + * if there is always only one HDMI connector created by the HDMI device. + * @conn_info: the connector info from which the event occurs (may be NULL) + * + * If a notifier for device @dev and connector @conn_name already exists, then + * increase the refcount and return that notifier. + * + * If it doesn't exist, then allocate a new notifier struct and return a + * pointer to that new struct. + * + * Return NULL if the memory could not be allocated. 
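Stepping back to the zpool.h hunk earlier in this span: zpool_malloc_support_movable() lets a caller such as zswap request movable highmem pages only when the backend can relocate its allocations. A hedged sketch of that pattern; the gfp choices mirror common practice rather than a fixed rule:

#include <linux/gfp.h>
#include <linux/zpool.h>

static int my_store(struct zpool *pool, size_t len, unsigned long *handle)
{
        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

        /* Only ask for movable pages if the allocator supports them. */
        if (zpool_malloc_support_movable(pool))
                gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;

        return zpool_malloc(pool, len, gfp, handle);
}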
+ */ +struct cec_notifier * +cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name, + const struct cec_connector_info *conn_info); + +/** + * cec_notifier_conn_unregister - decrease refcount and delete when the + * refcount reaches 0. + * @n: notifier. If NULL, then this function does nothing. + */ +void cec_notifier_conn_unregister(struct cec_notifier *n); + +/** + * cec_notifier_cec_adap_register - find or create a new cec_notifier for the + * given device. + * @hdmi_dev: HDMI device that sends the events. + * @conn_name: the connector name from which the event occurs. May be NULL + * if there is always only one HDMI connector created by the HDMI device. + * @adap: the cec adapter that registered this notifier. + * + * If a notifier for device @dev and connector @conn_name already exists, then + * increase the refcount and return that notifier. + * + * If it doesn't exist, then allocate a new notifier struct and return a + * pointer to that new struct. + * + * Return NULL if the memory could not be allocated. + */ +struct cec_notifier * +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name, + struct cec_adapter *adap); + +/** + * cec_notifier_cec_adap_unregister - decrease refcount and delete when the + * refcount reaches 0. + * @n: notifier. If NULL, then this function does nothing. + */ +void cec_notifier_cec_adap_unregister(struct cec_notifier *n); + +/** * cec_notifier_set_phys_addr - set a new physical address. * @n: the CEC notifier * @pa: the CEC physical address @@ -64,30 +118,6 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, const struct edid *edid); /** - * cec_notifier_register - register a callback with the notifier - * @n: the CEC notifier - * @adap: the CEC adapter, passed as argument to the callback function - * @callback: the callback function - */ -void cec_notifier_register(struct cec_notifier *n, - struct cec_adapter *adap, - void (*callback)(struct cec_adapter *adap, u16 pa)); - -/** - * cec_notifier_unregister - unregister the callback from the notifier. - * @n: the CEC notifier - */ -void cec_notifier_unregister(struct cec_notifier *n); - -/** - * cec_register_cec_notifier - register the notifier with the cec adapter. 
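To make the registration flow above concrete, a sketch of an HDMI transmitter driver publishing its physical address through a notifier. Only the cec_notifier_* calls are taken from this header; the surrounding bind/hotplug/unbind hooks are assumptions:

#include <linux/errno.h>
#include <media/cec-notifier.h>

static struct cec_notifier *notifier;

static int my_hdmi_bind(struct device *hdmi_dev)
{
        /* NULL conn_name: this device exposes a single HDMI connector. */
        notifier = cec_notifier_conn_register(hdmi_dev, NULL, NULL);
        if (!notifier)
                return -ENOMEM;
        return 0;
}

static void my_hdmi_hotplug(const struct edid *edid)
{
        /* Push the physical address parsed from the EDID to any CEC
         * adapter listening on this notifier. */
        cec_notifier_set_phys_addr_from_edid(notifier, edid);
}

static void my_hdmi_unbind(void)
{
        cec_notifier_conn_unregister(notifier);
}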
- * @adap: the CEC adapter - * @notifier: the CEC notifier - */ -void cec_register_cec_notifier(struct cec_adapter *adap, - struct cec_notifier *notifier); - -/** * cec_notifier_parse_hdmi_phandle - find the hdmi device from "hdmi-phandle" * @dev: the device with the "hdmi-phandle" device tree property * @@ -110,27 +140,36 @@ static inline void cec_notifier_put(struct cec_notifier *n) { } -static inline void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) +static inline struct cec_notifier * +cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name, + const struct cec_connector_info *conn_info) { + /* A non-NULL pointer is expected on success */ + return (struct cec_notifier *)0xdeadfeed; } -static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, - const struct edid *edid) +static inline void cec_notifier_conn_unregister(struct cec_notifier *n) +{ +} + +static inline struct cec_notifier * +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name, + struct cec_adapter *adap) { + /* A non-NULL pointer is expected on success */ + return (struct cec_notifier *)0xdeadfeed; } -static inline void cec_notifier_register(struct cec_notifier *n, - struct cec_adapter *adap, - void (*callback)(struct cec_adapter *adap, u16 pa)) +static inline void cec_notifier_cec_adap_unregister(struct cec_notifier *n) { } -static inline void cec_notifier_unregister(struct cec_notifier *n) +static inline void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) { } -static inline void cec_register_cec_notifier(struct cec_adapter *adap, - struct cec_notifier *notifier) +static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, + const struct edid *edid) { } diff --git a/include/media/cec.h b/include/media/cec.h index 707411ef8ba2..4d59387bc61b 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -17,7 +17,9 @@ #include <linux/timer.h> #include <linux/cec-funcs.h> #include <media/rc-core.h> -#include <media/cec-notifier.h> + +/* CEC_ADAP_G_CONNECTOR_INFO is available */ +#define CEC_CAP_CONNECTOR_INFO (1 << 8) #define CEC_CAP_DEFAULTS (CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | \ CEC_CAP_PASSTHROUGH | CEC_CAP_RC) @@ -53,6 +55,7 @@ struct cec_devnode { struct cec_adapter; struct cec_data; struct cec_pin; +struct cec_notifier; struct cec_data { struct list_head list; @@ -144,6 +147,34 @@ struct cec_adap_ops { */ #define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1) +/** + * struct cec_drm_connector_info - tells which drm connector is + * associated with the CEC adapter. + * @card_no: drm card number + * @connector_id: drm connector ID + */ +struct cec_drm_connector_info { + __u32 card_no; + __u32 connector_id; +}; + +#define CEC_CONNECTOR_TYPE_NO_CONNECTOR 0 +#define CEC_CONNECTOR_TYPE_DRM 1 + +/** + * struct cec_connector_info - tells if and which connector is + * associated with the CEC adapter. 
+ * @type: connector type (if any) + * @drm: drm connector info + */ +struct cec_connector_info { + __u32 type; + union { + struct cec_drm_connector_info drm; + __u32 raw[16]; + }; +}; + struct cec_adapter { struct module *owner; char name[32]; @@ -182,6 +213,7 @@ struct cec_adapter { struct cec_fh *cec_initiator; bool passthrough; struct cec_log_addrs log_addrs; + struct cec_connector_info conn_info; u32 tx_timeouts; @@ -233,6 +265,7 @@ static inline bool cec_is_registered(const struct cec_adapter *adap) ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf struct edid; +struct drm_connector; #if IS_REACHABLE(CONFIG_CEC_CORE) struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, @@ -247,6 +280,8 @@ void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block); void cec_s_phys_addr_from_edid(struct cec_adapter *adap, const struct edid *edid); +void cec_s_conn_info(struct cec_adapter *adap, + const struct cec_connector_info *conn_info); int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg, bool block); @@ -331,6 +366,9 @@ void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts); u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, unsigned int *offset); +void cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, + const struct drm_connector *connector); + #else static inline int cec_register_adapter(struct cec_adapter *adap, @@ -365,6 +403,64 @@ static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, return CEC_PHYS_ADDR_INVALID; } +static inline void cec_s_conn_info(struct cec_adapter *adap, + const struct cec_connector_info *conn_info) +{ +} + +static inline void +cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, + const struct drm_connector *connector) +{ + memset(conn_info, 0, sizeof(*conn_info)); +} + +#endif + +#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER) + +/** + * cec_notifier_register - register a callback with the notifier + * @n: the CEC notifier + * @adap: the CEC adapter, passed as argument to the callback function + * @callback: the callback function + */ +void cec_notifier_register(struct cec_notifier *n, + struct cec_adapter *adap, + void (*callback)(struct cec_adapter *adap, u16 pa)); + +/** + * cec_notifier_unregister - unregister the callback from the notifier. + * @n: the CEC notifier + */ +void cec_notifier_unregister(struct cec_notifier *n); + +/** + * cec_register_cec_notifier - register the notifier with the cec adapter. 
+ * @adap: the CEC adapter + * @notifier: the CEC notifier + */ +void cec_register_cec_notifier(struct cec_adapter *adap, + struct cec_notifier *notifier); + +#else + +static inline void +cec_notifier_register(struct cec_notifier *n, + struct cec_adapter *adap, + void (*callback)(struct cec_adapter *adap, u16 pa)) +{ +} + +static inline void cec_notifier_unregister(struct cec_notifier *n) +{ +} + +static inline void cec_register_cec_notifier(struct cec_adapter *adap, + struct cec_notifier *notifier) +{ +} + #endif /** diff --git a/include/media/davinci/vpfe_capture.h b/include/media/davinci/vpfe_capture.h index 2c5b3eacf527..4ad53031e2f7 100644 --- a/include/media/davinci/vpfe_capture.h +++ b/include/media/davinci/vpfe_capture.h @@ -32,7 +32,7 @@ #define CAPTURE_DRV_NAME "vpfe-capture" struct vpfe_pixel_format { - struct v4l2_fmtdesc fmtdesc; + u32 pixelformat; /* bytes per pixel */ int bpp; }; diff --git a/include/media/drv-intf/cx25840.h b/include/media/drv-intf/cx25840.h index 328ddb359fdf..ba69bc525382 100644 --- a/include/media/drv-intf/cx25840.h +++ b/include/media/drv-intf/cx25840.h @@ -1,25 +1,31 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - cx25840.h - definition for cx25840/1/2/3 inputs - - Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl) -*/ +/* + * cx25840.h - definition for cx25840/1/2/3 inputs + * + * Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl) + */ #ifndef _CX25840_H_ #define _CX25840_H_ -/* Note that the cx25840 driver requires that the bridge driver calls the - v4l2_subdev's init operation in order to load the driver's firmware. - Without this the audio standard detection will fail and you will - only get mono. - - Since loading the firmware is often problematic when the driver is - compiled into the kernel I recommend postponing calling this function - until the first open of the video device. Another reason for - postponing it is that loading this firmware takes a long time (seconds) - due to the slow i2c bus speed. So it will speed up the boot process if - you can avoid loading the fw as long as the video device isn't used. */ +/* + * Note that the cx25840 driver requires that the bridge driver calls the + * v4l2_subdev's load_fw operation in order to load the driver's firmware. + * This will load the firmware on the first invocation (further ones are NOP). + * Without this the audio standard detection will fail and you will + * only get mono. + * Alternatively, you can call the reset operation (this can be done + * multiple times if needed, each invocation will fully reinitialize + * the device). + * + * Since loading the firmware is often problematic when the driver is + * compiled into the kernel I recommend postponing calling this function + * until the first open of the video device. Another reason for + * postponing it is that loading this firmware takes a long time (seconds) + * due to the slow i2c bus speed. So it will speed up the boot process if + * you can avoid loading the fw as long as the video device isn't used. 
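A minimal sketch of the deferred firmware load the cx25840 comment above recommends. The mybridge structure and its fields are invented for illustration; only the load_fw core op and the v4l2 helpers are real:

#include <linux/fs.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-subdev.h>

struct mybridge {                       /* hypothetical bridge-driver state */
        struct v4l2_subdev *cx25840_sd;
        bool fw_loaded;
};

static int mybridge_open(struct file *file)
{
        struct mybridge *dev = video_drvdata(file);

        /* Postpone the slow I2C firmware download until first use;
         * repeated calls are a no-op per the comment above. */
        if (!dev->fw_loaded) {
                v4l2_subdev_call(dev->cx25840_sd, core, load_fw);
                dev->fw_loaded = true;
        }
        return v4l2_fh_open(file);
}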
+ */ enum cx25840_video_input { /* Composite video inputs In1-In8 */ @@ -32,8 +38,10 @@ enum cx25840_video_input { CX25840_COMPOSITE7, CX25840_COMPOSITE8, - /* S-Video inputs consist of one luma input (In1-In8) ORed with one - chroma input (In5-In8) */ + /* + * S-Video inputs consist of one luma input (In1-In8) ORed with one + * chroma input (In5-In8) + */ CX25840_SVIDEO_LUMA1 = 0x10, CX25840_SVIDEO_LUMA2 = 0x20, CX25840_SVIDEO_LUMA3 = 0x30, @@ -76,6 +84,81 @@ enum cx25840_video_input { CX25840_DIF_ON = 0x80000400, }; +/* + * The defines below are used to set the chip video output settings + * in the generic mode that can be enabled by calling the subdevice + * init core op. + * + * The requested settings can be passed to the init core op as + * @val parameter and to the s_routing video op as @config parameter. + * + * For details please refer to the section 3.7 Video Output Formatting and + * to Video Out Control 1 to 4 registers in the section 5.6 Video Decoder Core + * of the chip datasheet. + */ +#define CX25840_VCONFIG_FMT_SHIFT 0 +#define CX25840_VCONFIG_FMT_MASK GENMASK(2, 0) +#define CX25840_VCONFIG_FMT_BT601 BIT(0) +#define CX25840_VCONFIG_FMT_BT656 BIT(1) +#define CX25840_VCONFIG_FMT_VIP11 GENMASK(1, 0) +#define CX25840_VCONFIG_FMT_VIP2 BIT(2) + +#define CX25840_VCONFIG_RES_SHIFT 3 +#define CX25840_VCONFIG_RES_MASK GENMASK(4, 3) +#define CX25840_VCONFIG_RES_8BIT BIT(3) +#define CX25840_VCONFIG_RES_10BIT BIT(4) + +#define CX25840_VCONFIG_VBIRAW_SHIFT 5 +#define CX25840_VCONFIG_VBIRAW_MASK GENMASK(6, 5) +#define CX25840_VCONFIG_VBIRAW_DISABLED BIT(5) +#define CX25840_VCONFIG_VBIRAW_ENABLED BIT(6) + +#define CX25840_VCONFIG_ANCDATA_SHIFT 7 +#define CX25840_VCONFIG_ANCDATA_MASK GENMASK(8, 7) +#define CX25840_VCONFIG_ANCDATA_DISABLED BIT(7) +#define CX25840_VCONFIG_ANCDATA_ENABLED BIT(8) + +#define CX25840_VCONFIG_TASKBIT_SHIFT 9 +#define CX25840_VCONFIG_TASKBIT_MASK GENMASK(10, 9) +#define CX25840_VCONFIG_TASKBIT_ZERO BIT(9) +#define CX25840_VCONFIG_TASKBIT_ONE BIT(10) + +#define CX25840_VCONFIG_ACTIVE_SHIFT 11 +#define CX25840_VCONFIG_ACTIVE_MASK GENMASK(12, 11) +#define CX25840_VCONFIG_ACTIVE_COMPOSITE BIT(11) +#define CX25840_VCONFIG_ACTIVE_HORIZONTAL BIT(12) + +#define CX25840_VCONFIG_VALID_SHIFT 13 +#define CX25840_VCONFIG_VALID_MASK GENMASK(14, 13) +#define CX25840_VCONFIG_VALID_NORMAL BIT(13) +#define CX25840_VCONFIG_VALID_ANDACTIVE BIT(14) + +#define CX25840_VCONFIG_HRESETW_SHIFT 15 +#define CX25840_VCONFIG_HRESETW_MASK GENMASK(16, 15) +#define CX25840_VCONFIG_HRESETW_NORMAL BIT(15) +#define CX25840_VCONFIG_HRESETW_PIXCLK BIT(16) + +#define CX25840_VCONFIG_CLKGATE_SHIFT 17 +#define CX25840_VCONFIG_CLKGATE_MASK GENMASK(18, 17) +#define CX25840_VCONFIG_CLKGATE_NONE BIT(17) +#define CX25840_VCONFIG_CLKGATE_VALID BIT(18) +#define CX25840_VCONFIG_CLKGATE_VALIDACTIVE GENMASK(18, 17) + +#define CX25840_VCONFIG_DCMODE_SHIFT 19 +#define CX25840_VCONFIG_DCMODE_MASK GENMASK(20, 19) +#define CX25840_VCONFIG_DCMODE_DWORDS BIT(19) +#define CX25840_VCONFIG_DCMODE_BYTES BIT(20) + +#define CX25840_VCONFIG_IDID0S_SHIFT 21 +#define CX25840_VCONFIG_IDID0S_MASK GENMASK(22, 21) +#define CX25840_VCONFIG_IDID0S_NORMAL BIT(21) +#define CX25840_VCONFIG_IDID0S_LINECNT BIT(22) + +#define CX25840_VCONFIG_VIPCLAMP_SHIFT 23 +#define CX25840_VCONFIG_VIPCLAMP_MASK GENMASK(24, 23) +#define CX25840_VCONFIG_VIPCLAMP_ENABLED BIT(23) +#define CX25840_VCONFIG_VIPCLAMP_DISABLED BIT(24) + enum cx25840_audio_input { /* Audio inputs: serial or In4-In8 */ CX25840_AUDIO_SERIAL, @@ -103,7 +186,7 @@ enum cx25840_io_pin { 
}; enum cx25840_io_pad { - /* Output pads */ + /* Output pads, these must match the actual chip register values */ CX25840_PAD_DEFAULT = 0, CX25840_PAD_ACTIVE, CX25840_PAD_VACTIVE, @@ -162,13 +245,16 @@ enum cx23885_io_pad { CX23885_PAD_GPIO16, }; -/* pvr150_workaround activates a workaround for a hardware bug that is - present in Hauppauge PVR-150 (and possibly PVR-500) cards that have - certain NTSC tuners (tveeprom tuner model numbers 85, 99 and 112). The - audio autodetect fails on some channels for these models and the workaround - is to select the audio standard explicitly. Many thanks to Hauppauge for - providing this information. - This platform data only needs to be supplied by the ivtv driver. */ +/* + * pvr150_workaround activates a workaround for a hardware bug that is + * present in Hauppauge PVR-150 (and possibly PVR-500) cards that have + * certain NTSC tuners (tveeprom tuner model numbers 85, 99 and 112). The + * audio autodetect fails on some channels for these models and the workaround + * is to select the audio standard explicitly. Many thanks to Hauppauge for + * providing this information. + * + * This platform data only needs to be supplied by the ivtv driver. + */ struct cx25840_platform_data { int pvr150_workaround; }; diff --git a/include/media/drv-intf/exynos-fimc.h b/include/media/drv-intf/exynos-fimc.h index 59703439bb37..6b9ef631d6bb 100644 --- a/include/media/drv-intf/exynos-fimc.h +++ b/include/media/drv-intf/exynos-fimc.h @@ -87,7 +87,6 @@ struct fimc_source_info { /** * struct fimc_fmt - color format data structure * @mbus_code: media bus pixel code, -1 if not applicable - * @name: format description * @fourcc: fourcc code for this format, 0 if not applicable * @color: the driver's private color format id * @memplanes: number of physically non-contiguous data planes @@ -99,7 +98,6 @@ struct fimc_source_info { */ struct fimc_fmt { u32 mbus_code; - char *name; u32 fourcc; u32 color; u16 memplanes; diff --git a/include/media/drv-intf/saa7146_vv.h b/include/media/drv-intf/saa7146_vv.h index b34d86bb0664..635805fb35e8 100644 --- a/include/media/drv-intf/saa7146_vv.h +++ b/include/media/drv-intf/saa7146_vv.h @@ -32,7 +32,6 @@ struct saa7146_video_dma { #define FORMAT_IS_PLANAR 0x2 struct saa7146_format { - char *name; u32 pixelformat; u32 trans; u8 depth; diff --git a/include/media/drv-intf/soc_mediabus.h b/include/media/drv-intf/soc_mediabus.h index 73de3bd0c605..361f8852c9fc 100644 --- a/include/media/drv-intf/soc_mediabus.h +++ b/include/media/drv-intf/soc_mediabus.h @@ -66,7 +66,6 @@ enum soc_mbus_layout { /** * struct soc_mbus_pixelfmt - Data format on the media bus - * @name: Name of the format * @fourcc: Fourcc code, that will be obtained if the data is * stored in memory in the following way: * @packing: Type of sample-packing, that has to be used @@ -74,7 +73,6 @@ enum soc_mbus_layout { * @bits_per_sample: How many bits the bridge has to sample */ struct soc_mbus_pixelfmt { - const char *name; u32 fourcc; enum soc_mbus_packing packing; enum soc_mbus_order order; diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h index 52875e3eee71..7ce4e8332421 100644 --- a/include/media/dvb-usb-ids.h +++ b/include/media/dvb-usb-ids.h @@ -388,6 +388,7 @@ #define USB_PID_MYGICA_D689 0xd811 #define USB_PID_MYGICA_T230 0xc688 #define USB_PID_MYGICA_T230C 0xc689 +#define USB_PID_MYGICA_T230C2 0xc68a #define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011 #define USB_PID_ELGATO_EYETV_DTT 0x0021 #define USB_PID_ELGATO_EYETV_DTT_2 0x003f diff --git 
a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h index f05cd7b94a2c..0d76fa4551b3 100644 --- a/include/media/dvb_frontend.h +++ b/include/media/dvb_frontend.h @@ -41,6 +41,7 @@ #include <linux/delay.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/bitops.h> #include <linux/dvb/frontend.h> @@ -141,10 +142,10 @@ struct analog_parameters { * These devices have AUTO recovery capabilities from LOCK failure */ enum dvbfe_algo { - DVBFE_ALGO_HW = (1 << 0), - DVBFE_ALGO_SW = (1 << 1), - DVBFE_ALGO_CUSTOM = (1 << 2), - DVBFE_ALGO_RECOVERY = (1 << 31) + DVBFE_ALGO_HW = BIT(0), + DVBFE_ALGO_SW = BIT(1), + DVBFE_ALGO_CUSTOM = BIT(2), + DVBFE_ALGO_RECOVERY = BIT(31), }; /** @@ -170,12 +171,12 @@ enum dvbfe_algo { * The frontend search algorithm was requested to search again */ enum dvbfe_search { - DVBFE_ALGO_SEARCH_SUCCESS = (1 << 0), - DVBFE_ALGO_SEARCH_ASLEEP = (1 << 1), - DVBFE_ALGO_SEARCH_FAILED = (1 << 2), - DVBFE_ALGO_SEARCH_INVALID = (1 << 3), - DVBFE_ALGO_SEARCH_AGAIN = (1 << 4), - DVBFE_ALGO_SEARCH_ERROR = (1 << 31), + DVBFE_ALGO_SEARCH_SUCCESS = BIT(0), + DVBFE_ALGO_SEARCH_ASLEEP = BIT(1), + DVBFE_ALGO_SEARCH_FAILED = BIT(2), + DVBFE_ALGO_SEARCH_INVALID = BIT(3), + DVBFE_ALGO_SEARCH_AGAIN = BIT(4), + DVBFE_ALGO_SEARCH_ERROR = BIT(31), }; /** diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h index 881ca461b7bb..551325858de3 100644 --- a/include/media/dvbdev.h +++ b/include/media/dvbdev.h @@ -86,8 +86,8 @@ struct dvb_frontend; * @priv: private data * @device: pointer to struct device * @module: pointer to struct module - * @mfe_shared: mfe shared: indicates mutually exclusive frontends - * Thie usage of this flag is currently deprecated + * @mfe_shared: indicates mutually exclusive frontends. + * Use of this flag is currently deprecated. * @mfe_dvbdev: Frontend device in use, in the case of MFE * @mfe_lock: Lock to prevent using the other frontends when MFE is * used. diff --git a/include/media/h264-ctrls.h b/include/media/h264-ctrls.h new file mode 100644 index 000000000000..e877bf1d537c --- /dev/null +++ b/include/media/h264-ctrls.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * These are the H.264 state controls for use with stateless H.264 + * codec drivers. + * + * It turns out that these structs are not stable yet and will undergo + * more changes. So keep them private until they are stable and ready to + * become part of the official public API. + */ + +#ifndef _H264_CTRLS_H_ +#define _H264_CTRLS_H_ + +#include <linux/videodev2.h> + +/* Our pixel format isn't stable at the moment */ +#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */ + +/* + * This is put insanely high to avoid conflicting with controls that + * would be added during the phase where those controls are not + * stable. It should be fixed eventually. 
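As a hedged userspace-side illustration of how these private controls are meant to be driven through the Request API (the control IDs are defined just below; video_fd and request_fd are assumed to exist, the SPS contents come from the application's own bitstream parser, and the header is not a public uAPI file yet, as noted above):

#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/h264-ctrls.h>           /* still a private header, see above */

int set_sps(int video_fd, int request_fd, struct v4l2_ctrl_h264_sps *sps)
{
        struct v4l2_ext_control ctrl = {
                .id = V4L2_CID_MPEG_VIDEO_H264_SPS,
                .size = sizeof(*sps),
                .ptr = sps,
        };
        struct v4l2_ext_controls ctrls = {
                .which = V4L2_CTRL_WHICH_REQUEST_VAL,
                .request_fd = request_fd,
                .count = 1,
                .controls = &ctrl,
        };

        return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}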
+ */ +#define V4L2_CID_MPEG_VIDEO_H264_SPS (V4L2_CID_MPEG_BASE+1000) +#define V4L2_CID_MPEG_VIDEO_H264_PPS (V4L2_CID_MPEG_BASE+1001) +#define V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX (V4L2_CID_MPEG_BASE+1002) +#define V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS (V4L2_CID_MPEG_BASE+1003) +#define V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS (V4L2_CID_MPEG_BASE+1004) +#define V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE (V4L2_CID_MPEG_BASE+1005) +#define V4L2_CID_MPEG_VIDEO_H264_START_CODE (V4L2_CID_MPEG_BASE+1006) + +/* enum v4l2_ctrl_type type values */ +#define V4L2_CTRL_TYPE_H264_SPS 0x0110 +#define V4L2_CTRL_TYPE_H264_PPS 0x0111 +#define V4L2_CTRL_TYPE_H264_SCALING_MATRIX 0x0112 +#define V4L2_CTRL_TYPE_H264_SLICE_PARAMS 0x0113 +#define V4L2_CTRL_TYPE_H264_DECODE_PARAMS 0x0114 + +enum v4l2_mpeg_video_h264_decode_mode { + V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED, + V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED, +}; + +enum v4l2_mpeg_video_h264_start_code { + V4L2_MPEG_VIDEO_H264_START_CODE_NONE, + V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B, +}; + +#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01 +#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02 +#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04 +#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08 +#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10 +#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20 + +#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01 +#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02 +#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04 +#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08 +#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10 +#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20 +#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40 + +struct v4l2_ctrl_h264_sps { + __u8 profile_idc; + __u8 constraint_set_flags; + __u8 level_idc; + __u8 seq_parameter_set_id; + __u8 chroma_format_idc; + __u8 bit_depth_luma_minus8; + __u8 bit_depth_chroma_minus8; + __u8 log2_max_frame_num_minus4; + __u8 pic_order_cnt_type; + __u8 log2_max_pic_order_cnt_lsb_minus4; + __u8 max_num_ref_frames; + __u8 num_ref_frames_in_pic_order_cnt_cycle; + __s32 offset_for_ref_frame[255]; + __s32 offset_for_non_ref_pic; + __s32 offset_for_top_to_bottom_field; + __u16 pic_width_in_mbs_minus1; + __u16 pic_height_in_map_units_minus1; + __u32 flags; +}; + +#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001 +#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002 +#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004 +#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008 +#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010 +#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020 +#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040 +#define V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT 0x0080 + +struct v4l2_ctrl_h264_pps { + __u8 pic_parameter_set_id; + __u8 seq_parameter_set_id; + __u8 num_slice_groups_minus1; + __u8 num_ref_idx_l0_default_active_minus1; + __u8 num_ref_idx_l1_default_active_minus1; + __u8 weighted_bipred_idc; + __s8 pic_init_qp_minus26; + __s8 pic_init_qs_minus26; + __s8 chroma_qp_index_offset; + __s8 second_chroma_qp_index_offset; + __u16 flags; +}; + +struct v4l2_ctrl_h264_scaling_matrix { + __u8 scaling_list_4x4[6][16]; + __u8 scaling_list_8x8[6][64]; +}; + +struct v4l2_h264_weight_factors { + __s16 luma_weight[32]; + __s16 luma_offset[32]; + __s16 chroma_weight[32][2]; + __s16 chroma_offset[32][2]; +}; + +struct v4l2_h264_pred_weight_table { + __u16 luma_log2_weight_denom; + __u16 
chroma_log2_weight_denom; + struct v4l2_h264_weight_factors weight_factors[2]; +}; + +#define V4L2_H264_SLICE_TYPE_P 0 +#define V4L2_H264_SLICE_TYPE_B 1 +#define V4L2_H264_SLICE_TYPE_I 2 +#define V4L2_H264_SLICE_TYPE_SP 3 +#define V4L2_H264_SLICE_TYPE_SI 4 + +#define V4L2_H264_SLICE_FLAG_FIELD_PIC 0x01 +#define V4L2_H264_SLICE_FLAG_BOTTOM_FIELD 0x02 +#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x04 +#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x08 + +struct v4l2_ctrl_h264_slice_params { + /* Size in bytes, including header */ + __u32 size; + + /* Offset in bytes to the start of slice in the OUTPUT buffer. */ + __u32 start_byte_offset; + + /* Offset in bits to slice_data() from the beginning of this slice. */ + __u32 header_bit_size; + + __u16 first_mb_in_slice; + __u8 slice_type; + __u8 pic_parameter_set_id; + __u8 colour_plane_id; + __u8 redundant_pic_cnt; + __u16 frame_num; + __u16 idr_pic_id; + __u16 pic_order_cnt_lsb; + __s32 delta_pic_order_cnt_bottom; + __s32 delta_pic_order_cnt0; + __s32 delta_pic_order_cnt1; + + struct v4l2_h264_pred_weight_table pred_weight_table; + /* Size in bits of dec_ref_pic_marking() syntax element. */ + __u32 dec_ref_pic_marking_bit_size; + /* Size in bits of pic order count syntax. */ + __u32 pic_order_cnt_bit_size; + + __u8 cabac_init_idc; + __s8 slice_qp_delta; + __s8 slice_qs_delta; + __u8 disable_deblocking_filter_idc; + __s8 slice_alpha_c0_offset_div2; + __s8 slice_beta_offset_div2; + __u8 num_ref_idx_l0_active_minus1; + __u8 num_ref_idx_l1_active_minus1; + __u32 slice_group_change_cycle; + + /* + * Entries on each list are indices into + * v4l2_ctrl_h264_decode_params.dpb[]. + */ + __u8 ref_pic_list0[32]; + __u8 ref_pic_list1[32]; + + __u32 flags; +}; + +#define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01 +#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02 +#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04 + +struct v4l2_h264_dpb_entry { + __u64 reference_ts; + __u16 frame_num; + __u16 pic_num; + /* Note that field is indicated by v4l2_buffer.field */ + __s32 top_field_order_cnt; + __s32 bottom_field_order_cnt; + __u32 flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */ +}; + +#define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01 + +struct v4l2_ctrl_h264_decode_params { + struct v4l2_h264_dpb_entry dpb[16]; + __u16 num_slices; + __u16 nal_ref_idc; + __s32 top_field_order_cnt; + __s32 bottom_field_order_cnt; + __u32 flags; /* V4L2_H264_DECODE_PARAM_FLAG_* */ +}; + +#endif diff --git a/include/media/rc-map.h b/include/media/rc-map.h index bebd3c4c6338..afd2ab31bdf2 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -5,6 +5,9 @@ * Copyright (c) 2010 by Mauro Carvalho Chehab */ +#ifndef _MEDIA_RC_MAP_H +#define _MEDIA_RC_MAP_H + #include <linux/input.h> #include <uapi/linux/lirc.h> @@ -38,22 +41,6 @@ #define RC_PROTO_BIT_RCMM32 BIT_ULL(RC_PROTO_RCMM32) #define RC_PROTO_BIT_XBOX_DVD BIT_ULL(RC_PROTO_XBOX_DVD) -#define RC_PROTO_BIT_ALL \ - (RC_PROTO_BIT_UNKNOWN | RC_PROTO_BIT_OTHER | \ - RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \ - RC_PROTO_BIT_RC5_SZ | RC_PROTO_BIT_JVC | \ - RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | \ - RC_PROTO_BIT_SONY20 | RC_PROTO_BIT_NEC | \ - RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32 | \ - RC_PROTO_BIT_SANYO | \ - RC_PROTO_BIT_MCIR2_KBD | RC_PROTO_BIT_MCIR2_MSE | \ - RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \ - RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \ - RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_SHARP | \ - RC_PROTO_BIT_XMP | RC_PROTO_BIT_CEC | \ - RC_PROTO_BIT_IMON | RC_PROTO_BIT_RCMM12 | \ - RC_PROTO_BIT_RCMM24 | 
RC_PROTO_BIT_RCMM32 | \ - RC_PROTO_BIT_XBOX_DVD) /* All rc protocols for which we have decoders */ #define RC_PROTO_BIT_ALL_IR_DECODER \ (RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \ @@ -224,6 +211,7 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_IT913X_V1 "rc-it913x-v1" #define RC_MAP_IT913X_V2 "rc-it913x-v2" #define RC_MAP_KAIOMY "rc-kaiomy" +#define RC_MAP_KHADAS "rc-khadas" #define RC_MAP_KWORLD_315U "rc-kworld-315u" #define RC_MAP_KWORLD_PC150U "rc-kworld-pc150u" #define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog" @@ -241,6 +229,7 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_NEC_TERRATEC_CINERGY_XS "rc-nec-terratec-cinergy-xs" #define RC_MAP_NORWOOD "rc-norwood" #define RC_MAP_NPGTECH "rc-npgtech" +#define RC_MAP_ODROID "rc-odroid" #define RC_MAP_PCTV_SEDNA "rc-pctv-sedna" #define RC_MAP_PINNACLE_COLOR "rc-pinnacle-color" #define RC_MAP_PINNACLE_GREY "rc-pinnacle-grey" @@ -261,6 +250,8 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly" #define RC_MAP_STREAMZAP "rc-streamzap" #define RC_MAP_TANGO "rc-tango" +#define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini" +#define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max" #define RC_MAP_TBS_NEC "rc-tbs-nec" #define RC_MAP_TECHNISAT_TS35 "rc-technisat-ts35" #define RC_MAP_TECHNISAT_USB2 "rc-technisat-usb2" @@ -280,13 +271,18 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100" #define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350" #define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr" +#define RC_MAP_WETEK_HUB "rc-wetek-hub" +#define RC_MAP_WETEK_PLAY2 "rc-wetek-play2" #define RC_MAP_WINFAST "rc-winfast" #define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe" #define RC_MAP_SU3000 "rc-su3000" #define RC_MAP_XBOX_DVD "rc-xbox-dvd" +#define RC_MAP_X96MAX "rc-x96max" #define RC_MAP_ZX_IRDEC "rc-zx-irdec" /* * Please, do not just append newer Remote Controller names at the end. * The names should be ordered in alphabetical order */ + +#endif /* _MEDIA_RC_MAP_H */ diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h index 2e3d93f742a3..8319284c93cb 100644 --- a/include/media/v4l2-async.h +++ b/include/media/v4l2-async.h @@ -172,8 +172,9 @@ int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier, * the driver's async sub-device struct, i.e. both * begin at the same memory address. * - * Allocate a fwnode-matched asd of size asd_struct_size, and add it - * to the notifiers @asd_list. + * Allocate a fwnode-matched asd of size asd_struct_size, and add it to the + * notifiers @asd_list. The function also gets a reference of the fwnode which + * is released later at notifier cleanup time. */ struct v4l2_async_subdev * v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier, @@ -181,6 +182,31 @@ v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier, unsigned int asd_struct_size); /** + * v4l2_async_notifier_add_fwnode_remote_subdev - Allocate and add a fwnode + * remote async subdev to the + * notifier's master asd_list. + * + * @notif: pointer to &struct v4l2_async_notifier + * @endpoint: local endpoint pointing to the remote sub-device to be matched + * @asd: Async sub-device struct allocated by the caller. The &struct + * v4l2_async_subdev shall be the first member of the driver's async + * sub-device struct, i.e. both begin at the same memory address. 
+ * + * Gets the remote endpoint of a given local endpoint, set it up for fwnode + * matching and adds the async sub-device to the notifier's @asd_list. The + * function also gets a reference of the fwnode which is released later at + * notifier cleanup time. + * + * This is just like @v4l2_async_notifier_add_fwnode_subdev, but with the + * exception that the fwnode refers to a local endpoint, not the remote one, and + * the function relies on the caller to allocate the async sub-device struct. + */ +int +v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif, + struct fwnode_handle *endpoint, + struct v4l2_async_subdev *asd); + +/** * v4l2_async_notifier_add_i2c_subdev - Allocate and add an i2c async * subdev to the notifier's master asd_list. * diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 3a1ef141ec07..c070d8ae11e5 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -96,16 +96,45 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, /* ------------------------------------------------------------------------- */ -/* I2C Helper functions */ - -struct i2c_driver; -struct i2c_adapter; -struct i2c_client; -struct i2c_device_id; struct v4l2_device; struct v4l2_subdev; struct v4l2_subdev_ops; +/* I2C Helper functions */ +#include <linux/i2c.h> + +/** + * enum v4l2_i2c_tuner_type - specifies the range of tuner address that + * should be used when seeking for I2C devices. + * + * @ADDRS_RADIO: Radio tuner addresses. + * Represent the following I2C addresses: + * 0x10 (if compiled with tea5761 support) + * and 0x60. + * @ADDRS_DEMOD: Demod tuner addresses. + * Represent the following I2C addresses: + * 0x42, 0x43, 0x4a and 0x4b. + * @ADDRS_TV: TV tuner addresses. + * Represent the following I2C addresses: + * 0x42, 0x43, 0x4a, 0x4b, 0x60, 0x61, 0x62, + * 0x63 and 0x64. + * @ADDRS_TV_WITH_DEMOD: TV tuner addresses if demod is present, this + * excludes addresses used by the demodulator + * from the list of candidates. + * Represent the following I2C addresses: + * 0x60, 0x61, 0x62, 0x63 and 0x64. + * + * NOTE: All I2C addresses above use the 7-bit notation. + */ +enum v4l2_i2c_tuner_type { + ADDRS_RADIO, + ADDRS_DEMOD, + ADDRS_TV, + ADDRS_TV_WITH_DEMOD, +}; + +#if defined(CONFIG_VIDEO_V4L2_I2C) + /** * v4l2_i2c_new_subdev - Load an i2c module and return an initialized * &struct v4l2_subdev. @@ -123,8 +152,6 @@ struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, struct i2c_adapter *adapter, const char *client_type, u8 addr, const unsigned short *probe_addrs); -struct i2c_board_info; - /** * v4l2_i2c_new_subdev_board - Load an i2c module and return an initialized * &struct v4l2_subdev. @@ -175,35 +202,6 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd); /** - * enum v4l2_i2c_tuner_type - specifies the range of tuner address that - * should be used when seeking for I2C devices. - * - * @ADDRS_RADIO: Radio tuner addresses. - * Represent the following I2C addresses: - * 0x10 (if compiled with tea5761 support) - * and 0x60. - * @ADDRS_DEMOD: Demod tuner addresses. - * Represent the following I2C addresses: - * 0x42, 0x43, 0x4a and 0x4b. - * @ADDRS_TV: TV tuner addresses. - * Represent the following I2C addresses: - * 0x42, 0x43, 0x4a, 0x4b, 0x60, 0x61, 0x62, - * 0x63 and 0x64. 
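Back to the v4l2-common.h hunk above: with the I2C helpers stubbed out when CONFIG_VIDEO_V4L2_I2C is disabled, bridge drivers can keep one code path and simply check for NULL. A sketch; the "cx25840" module name and 0x44 address are only an example:

#include <linux/errno.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>

static int my_register_decoder(struct v4l2_device *v4l2_dev,
                               struct i2c_adapter *adapter)
{
        struct v4l2_subdev *sd;

        sd = v4l2_i2c_new_subdev(v4l2_dev, adapter, "cx25840", 0x44, NULL);
        if (!sd)        /* missing module, failed probe, or I2C support off */
                return -ENODEV;

        return 0;
}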
- * @ADDRS_TV_WITH_DEMOD: TV tuner addresses if demod is present, this - * excludes addresses used by the demodulator - * from the list of candidates. - * Represent the following I2C addresses: - * 0x60, 0x61, 0x62, 0x63 and 0x64. - * - * NOTE: All I2C addresses above use the 7-bit notation. - */ -enum v4l2_i2c_tuner_type { - ADDRS_RADIO, - ADDRS_DEMOD, - ADDRS_TV, - ADDRS_TV_WITH_DEMOD, -}; -/** * v4l2_i2c_tuner_addrs - Return a list of I2C tuner addresses to probe. * * @type: type of the tuner to seek, as defined by @@ -213,14 +211,64 @@ enum v4l2_i2c_tuner_type { */ const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type); +/** + * v4l2_i2c_subdev_unregister - Unregister a v4l2_subdev + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd); + +#else + +static inline struct v4l2_subdev * +v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, const char *client_type, + u8 addr, const unsigned short *probe_addrs) +{ + return NULL; +} + +static inline struct v4l2_subdev * +v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, + struct i2c_adapter *adapter, struct i2c_board_info *info, + const unsigned short *probe_addrs) +{ + return NULL; +} + +static inline void +v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, struct i2c_client *client, + const char *devname, const char *postfix) +{} + +static inline void +v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, + const struct v4l2_subdev_ops *ops) +{} + +static inline unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd) +{ + return I2C_CLIENT_END; +} + +static inline const unsigned short * +v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type) +{ + return NULL; +} + +static inline void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd) +{} + +#endif + /* ------------------------------------------------------------------------- */ /* SPI Helper functions */ -#if defined(CONFIG_SPI) #include <linux/spi/spi.h> -struct spi_device; +#if defined(CONFIG_SPI) /** * v4l2_spi_new_subdev - Load an spi module and return an initialized @@ -246,6 +294,30 @@ struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, */ void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, const struct v4l2_subdev_ops *ops); + +/** + * v4l2_spi_subdev_unregister - Unregister a v4l2_subdev + * + * @sd: pointer to &struct v4l2_subdev + */ +void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd); + +#else + +static inline struct v4l2_subdev * +v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, + struct spi_master *master, struct spi_board_info *info) +{ + return NULL; +} + +static inline void +v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, + const struct v4l2_subdev_ops *ops) +{} + +static inline void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd) +{} #endif /* ------------------------------------------------------------------------- */ @@ -408,9 +480,11 @@ struct v4l2_format_info { const struct v4l2_format_info *v4l2_format_info(u32 format); -int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, int pixelformat, - int width, int height); -int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, int pixelformat, - int width, int height); +void v4l2_apply_frmsize_constraints(u32 *width, u32 *height, + const struct v4l2_frmsize_stepwise *frmsize); +int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat, + u32 width, u32 height); +int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane 
*pixfmt, u32 pixelformat, + u32 width, u32 height); #endif /* V4L2_COMMON_H_ */ diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index bfa2a4527040..570ff4b0205a 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -14,11 +14,13 @@ #include <media/media-request.h> /* - * Include the mpeg2 and fwht stateless codec compound control definitions. + * Include the stateless codec compound control definitions. * This will move to the public headers once this API is fully stable. */ #include <media/mpeg2-ctrls.h> #include <media/fwht-ctrls.h> +#include <media/h264-ctrls.h> +#include <media/vp8-ctrls.h> /* forward references */ struct file; @@ -42,6 +44,12 @@ struct poll_table_struct; * @p_mpeg2_slice_params: Pointer to a MPEG2 slice parameters structure. * @p_mpeg2_quantization: Pointer to a MPEG2 quantization data structure. * @p_fwht_params: Pointer to a FWHT stateless parameters structure. + * @p_h264_sps: Pointer to a struct v4l2_ctrl_h264_sps. + * @p_h264_pps: Pointer to a struct v4l2_ctrl_h264_pps. + * @p_h264_scaling_matrix: Pointer to a struct v4l2_ctrl_h264_scaling_matrix. + * @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params. + * @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params. + * @p_vp8_frame_header: Pointer to a VP8 frame header structure. * @p: Pointer to a compound value. */ union v4l2_ctrl_ptr { @@ -54,6 +62,12 @@ union v4l2_ctrl_ptr { struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params; struct v4l2_ctrl_mpeg2_quantization *p_mpeg2_quantization; struct v4l2_ctrl_fwht_params *p_fwht_params; + struct v4l2_ctrl_h264_sps *p_h264_sps; + struct v4l2_ctrl_h264_pps *p_h264_pps; + struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; + struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; + struct v4l2_ctrl_h264_decode_params *p_h264_decode_params; + struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header; void *p; }; @@ -1254,25 +1268,28 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, * :ref:`VIDIOC_G_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl * * @hdl: pointer to &struct v4l2_ctrl_handler + * @vdev: pointer to &struct video_device * @mdev: pointer to &struct media_device * @c: pointer to &struct v4l2_ext_controls * * If hdl == NULL then they will all return -EINVAL. */ -int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev, - struct v4l2_ext_controls *c); +int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, + struct media_device *mdev, struct v4l2_ext_controls *c); /** * v4l2_try_ext_ctrls - Helper function to implement * :ref:`VIDIOC_TRY_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl * * @hdl: pointer to &struct v4l2_ctrl_handler + * @vdev: pointer to &struct video_device * @mdev: pointer to &struct media_device * @c: pointer to &struct v4l2_ext_controls * * If hdl == NULL then they will all return -EINVAL. */ int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *c); @@ -1282,12 +1299,14 @@ int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, * * @fh: pointer to &struct v4l2_fh * @hdl: pointer to &struct v4l2_ctrl_handler + * @vdev: pointer to &struct video_device * @mdev: pointer to &struct media_device * @c: pointer to &struct v4l2_ext_controls * * If hdl == NULL then they will all return -EINVAL. 
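A sketch of how a driver-level handler would pass the extra video_device pointer introduced above; my_dev and its fields are assumptions, only the v4l2_g_ext_ctrls() prototype comes from the hunk:

#include <linux/fs.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct my_dev {                         /* hypothetical driver state */
        struct v4l2_device v4l2_dev;
        struct video_device *vdev;
        struct v4l2_ctrl_handler hdl;
};

static int my_g_ext_ctrls(struct file *file, void *fh,
                          struct v4l2_ext_controls *c)
{
        struct my_dev *dev = video_drvdata(file);

        /* The video node is now passed through; mdev may be NULL when
         * the media controller API is not used. */
        return v4l2_g_ext_ctrls(&dev->hdl, dev->vdev,
                                dev->v4l2_dev.mdev, c);
}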
*/ int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, + struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *c); diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index 8533ece5026e..4bba65a59d46 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h @@ -26,19 +26,13 @@ struct v4l2_fh; * :ref:`VIDIOC_QUERYCAP <vidioc_querycap>` ioctl * @vidioc_enum_fmt_vid_cap: pointer to the function that implements * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic - * for video capture in single plane mode + * for video capture in single and multi plane mode * @vidioc_enum_fmt_vid_overlay: pointer to the function that implements * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic * for video overlay * @vidioc_enum_fmt_vid_out: pointer to the function that implements * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic - * for video output in single plane mode - * @vidioc_enum_fmt_vid_cap_mplane: pointer to the function that implements - * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic - * for video capture in multiplane mode - * @vidioc_enum_fmt_vid_out_mplane: pointer to the function that implements - * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic - * for video output in multiplane mode + * for video output in single and multi plane mode * @vidioc_enum_fmt_sdr_cap: pointer to the function that implements * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic * for Software Defined Radio capture @@ -313,10 +307,6 @@ struct v4l2_ioctl_ops { struct v4l2_fmtdesc *f); int (*vidioc_enum_fmt_vid_out)(struct file *file, void *fh, struct v4l2_fmtdesc *f); - int (*vidioc_enum_fmt_vid_cap_mplane)(struct file *file, void *fh, - struct v4l2_fmtdesc *f); - int (*vidioc_enum_fmt_vid_out_mplane)(struct file *file, void *fh, - struct v4l2_fmtdesc *f); int (*vidioc_enum_fmt_sdr_cap)(struct file *file, void *fh, struct v4l2_fmtdesc *f); int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *fh, @@ -612,6 +602,8 @@ struct v4l2_ioctl_ops { #define V4L2_DEV_DEBUG_STREAMING 0x08 /* Log poll() */ #define V4L2_DEV_DEBUG_POLL 0x10 +/* Log controls */ +#define V4L2_DEV_DEBUG_CTRL 0x20 /* Video standard functions */ diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 864c26dfb854..0b9c3a287061 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -668,6 +668,10 @@ int v4l2_m2m_ioctl_streamon(struct file *file, void *fh, enum v4l2_buf_type type); int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh, enum v4l2_buf_type type); +int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh, + struct v4l2_encoder_cmd *ec); +int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc); int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma); __poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait); diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 7168311e8ecc..71f1f2f0da53 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -1082,6 +1082,8 @@ void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg); void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops); +extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers; + /** * v4l2_subdev_call - call an operation of a v4l2_subdev. 
* @@ -1103,6 +1105,10 @@ void v4l2_subdev_init(struct v4l2_subdev *sd, __result = -ENODEV; \ else if (!(__sd->ops->o && __sd->ops->o->f)) \ __result = -ENOIOCTLCMD; \ + else if (v4l2_subdev_call_wrappers.o && \ + v4l2_subdev_call_wrappers.o->f) \ + __result = v4l2_subdev_call_wrappers.o->f( \ + __sd, ##args); \ else \ __result = __sd->ops->o->f(__sd, ##args); \ __result; \ diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 22f3ff76a8b5..640aabe69450 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -54,7 +54,8 @@ struct vb2_threadio_data; * will then be passed as @buf_priv argument to other ops in this * structure. Additional gfp_flags to use when allocating the * are also passed to this operation. These flags are from the - * gfp_flags field of vb2_queue. + * gfp_flags field of vb2_queue. The size argument to this function + * shall be *page aligned*. * @put: inform the allocator that the buffer will no longer be used; * usually will result in the allocator freeing the buffer (if * no other users of this buffer are present); the @buf_priv @@ -1162,6 +1163,24 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) q->last_buffer_dequeued = false; } +/** + * vb2_get_buffer() - get a buffer from a queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @index: buffer index + * + * This function obtains a buffer from a queue, by its index. + * Keep in mind that there is no refcounting involved in this + * operation, so the buffer lifetime should be taken into + * consideration. + */ +static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q, + unsigned int index) +{ + if (index < q->num_buffers) + return q->bufs[index]; + return NULL; +} + /* * The following functions are not part of the vb2 core API, but are useful * functions for videobuf2-*. diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h index 4b5b84f93538..cd4a46331531 100644 --- a/include/media/videobuf2-memops.h +++ b/include/media/videobuf2-memops.h @@ -34,8 +34,7 @@ struct vb2_vmarea_handler { extern const struct vm_operations_struct vb2_common_vm_ops; struct frame_vector *vb2_create_framevec(unsigned long start, - unsigned long length, - bool write); + unsigned long length); void vb2_destroy_framevec(struct frame_vector *vec); #endif diff --git a/include/media/vp8-ctrls.h b/include/media/vp8-ctrls.h new file mode 100644 index 000000000000..53cba826e482 --- /dev/null +++ b/include/media/vp8-ctrls.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * These are the VP8 state controls for use with stateless VP8 + * codec drivers. + * + * It turns out that these structs are not stable yet and will undergo + * more changes. So keep them private until they are stable and ready to + * become part of the official public API. 
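A minimal usage sketch for the vb2_get_buffer() helper added above (not part of the diff); get_capture_buffer is an invented name, and the caller remains responsible for the buffer's lifetime since no reference is taken.

#include <linux/printk.h>
#include <media/videobuf2-core.h>

static struct vb2_buffer *get_capture_buffer(struct vb2_queue *q,
                                             unsigned int index)
{
        struct vb2_buffer *vb = vb2_get_buffer(q, index);

        if (!vb)
                pr_debug("no vb2 buffer at index %u\n", index);

        /* vb2_get_buffer() takes no reference, so the buffer must not be
         * freed or re-allocated while this pointer is in use.
         */
        return vb;
}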
+ */ + +#ifndef _VP8_CTRLS_H_ +#define _VP8_CTRLS_H_ + +#include <linux/types.h> + +#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') + +#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER (V4L2_CID_MPEG_BASE + 2000) +#define V4L2_CTRL_TYPE_VP8_FRAME_HEADER 0x301 + +#define V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED 0x01 +#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP 0x02 +#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_FEATURE_DATA 0x04 +#define V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE 0x08 + +struct v4l2_vp8_segment_header { + __s8 quant_update[4]; + __s8 lf_update[4]; + __u8 segment_probs[3]; + __u8 padding; + __u32 flags; +}; + +#define V4L2_VP8_LF_HEADER_ADJ_ENABLE 0x01 +#define V4L2_VP8_LF_HEADER_DELTA_UPDATE 0x02 +#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04 +struct v4l2_vp8_loopfilter_header { + __s8 ref_frm_delta[4]; + __s8 mb_mode_delta[4]; + __u8 sharpness_level; + __u8 level; + __u16 padding; + __u32 flags; +}; + +struct v4l2_vp8_quantization_header { + __u8 y_ac_qi; + __s8 y_dc_delta; + __s8 y2_dc_delta; + __s8 y2_ac_delta; + __s8 uv_dc_delta; + __s8 uv_ac_delta; + __u16 padding; +}; + +struct v4l2_vp8_entropy_header { + __u8 coeff_probs[4][8][3][11]; + __u8 y_mode_probs[4]; + __u8 uv_mode_probs[3]; + __u8 mv_probs[2][19]; + __u8 padding[3]; +}; + +struct v4l2_vp8_entropy_coder_state { + __u8 range; + __u8 value; + __u8 bit_count; + __u8 padding; +}; + +#define V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME 0x01 +#define V4L2_VP8_FRAME_HEADER_FLAG_EXPERIMENTAL 0x02 +#define V4L2_VP8_FRAME_HEADER_FLAG_SHOW_FRAME 0x04 +#define V4L2_VP8_FRAME_HEADER_FLAG_MB_NO_SKIP_COEFF 0x08 +#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN 0x10 +#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT 0x20 + +#define VP8_FRAME_IS_KEY_FRAME(hdr) \ + (!!((hdr)->flags & V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME)) + +struct v4l2_ctrl_vp8_frame_header { + struct v4l2_vp8_segment_header segment_header; + struct v4l2_vp8_loopfilter_header lf_header; + struct v4l2_vp8_quantization_header quant_header; + struct v4l2_vp8_entropy_header entropy_header; + struct v4l2_vp8_entropy_coder_state coder_state; + + __u16 width; + __u16 height; + + __u8 horizontal_scale; + __u8 vertical_scale; + + __u8 version; + __u8 prob_skip_false; + __u8 prob_intra; + __u8 prob_last; + __u8 prob_gf; + __u8 num_dct_parts; + + __u32 first_part_size; + __u32 first_part_header_bits; + __u32 dct_part_sizes[8]; + + __u64 last_frame_ts; + __u64 golden_frame_ts; + __u64 alt_frame_ts; + + __u64 flags; +}; + +#endif diff --git a/include/memory/jedec_ddr.h b/include/memory/jedec_ddr.h deleted file mode 100644 index 90a9dabbe606..000000000000 --- a/include/memory/jedec_ddr.h +++ /dev/null @@ -1,172 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Definitions for DDR memories based on JEDEC specs - * - * Copyright (C) 2012 Texas Instruments, Inc. 
- * - * Aneesh V <aneesh@ti.com> - */ -#ifndef __LINUX_JEDEC_DDR_H -#define __LINUX_JEDEC_DDR_H - -#include <linux/types.h> - -/* DDR Densities */ -#define DDR_DENSITY_64Mb 1 -#define DDR_DENSITY_128Mb 2 -#define DDR_DENSITY_256Mb 3 -#define DDR_DENSITY_512Mb 4 -#define DDR_DENSITY_1Gb 5 -#define DDR_DENSITY_2Gb 6 -#define DDR_DENSITY_4Gb 7 -#define DDR_DENSITY_8Gb 8 -#define DDR_DENSITY_16Gb 9 -#define DDR_DENSITY_32Gb 10 - -/* DDR type */ -#define DDR_TYPE_DDR2 1 -#define DDR_TYPE_DDR3 2 -#define DDR_TYPE_LPDDR2_S4 3 -#define DDR_TYPE_LPDDR2_S2 4 -#define DDR_TYPE_LPDDR2_NVM 5 - -/* DDR IO width */ -#define DDR_IO_WIDTH_4 1 -#define DDR_IO_WIDTH_8 2 -#define DDR_IO_WIDTH_16 3 -#define DDR_IO_WIDTH_32 4 - -/* Number of Row bits */ -#define R9 9 -#define R10 10 -#define R11 11 -#define R12 12 -#define R13 13 -#define R14 14 -#define R15 15 -#define R16 16 - -/* Number of Column bits */ -#define C7 7 -#define C8 8 -#define C9 9 -#define C10 10 -#define C11 11 -#define C12 12 - -/* Number of Banks */ -#define B1 0 -#define B2 1 -#define B4 2 -#define B8 3 - -/* Refresh rate in nano-seconds */ -#define T_REFI_15_6 15600 -#define T_REFI_7_8 7800 -#define T_REFI_3_9 3900 - -/* tRFC values */ -#define T_RFC_90 90000 -#define T_RFC_110 110000 -#define T_RFC_130 130000 -#define T_RFC_160 160000 -#define T_RFC_210 210000 -#define T_RFC_300 300000 -#define T_RFC_350 350000 - -/* Mode register numbers */ -#define DDR_MR0 0 -#define DDR_MR1 1 -#define DDR_MR2 2 -#define DDR_MR3 3 -#define DDR_MR4 4 -#define DDR_MR5 5 -#define DDR_MR6 6 -#define DDR_MR7 7 -#define DDR_MR8 8 -#define DDR_MR9 9 -#define DDR_MR10 10 -#define DDR_MR11 11 -#define DDR_MR16 16 -#define DDR_MR17 17 -#define DDR_MR18 18 - -/* - * LPDDR2 related defines - */ - -/* MR4 register fields */ -#define MR4_SDRAM_REF_RATE_SHIFT 0 -#define MR4_SDRAM_REF_RATE_MASK 7 -#define MR4_TUF_SHIFT 7 -#define MR4_TUF_MASK (1 << 7) - -/* MR4 SDRAM Refresh Rate field values */ -#define SDRAM_TEMP_NOMINAL 0x3 -#define SDRAM_TEMP_RESERVED_4 0x4 -#define SDRAM_TEMP_HIGH_DERATE_REFRESH 0x5 -#define SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS 0x6 -#define SDRAM_TEMP_VERY_HIGH_SHUTDOWN 0x7 - -#define NUM_DDR_ADDR_TABLE_ENTRIES 11 -#define NUM_DDR_TIMING_TABLE_ENTRIES 4 - -/* Structure for DDR addressing info from the JEDEC spec */ -struct lpddr2_addressing { - u32 num_banks; - u32 tREFI_ns; - u32 tRFCab_ps; -}; - -/* - * Structure for timings from the LPDDR2 datasheet - * All parameters are in pico seconds(ps) unless explicitly indicated - * with a suffix like tRAS_max_ns below - */ -struct lpddr2_timings { - u32 max_freq; - u32 min_freq; - u32 tRPab; - u32 tRCD; - u32 tWR; - u32 tRAS_min; - u32 tRRD; - u32 tWTR; - u32 tXP; - u32 tRTP; - u32 tCKESR; - u32 tDQSCK_max; - u32 tDQSCK_max_derated; - u32 tFAW; - u32 tZQCS; - u32 tZQCL; - u32 tZQinit; - u32 tRAS_max_ns; -}; - -/* - * Min value for some parameters in terms of number of tCK cycles(nCK) - * Please set to zero parameters that are not valid for a given memory - * type - */ -struct lpddr2_min_tck { - u32 tRPab; - u32 tRCD; - u32 tWR; - u32 tRASmin; - u32 tRRD; - u32 tWTR; - u32 tXP; - u32 tRTP; - u32 tCKE; - u32 tCKESR; - u32 tFAW; -}; - -extern const struct lpddr2_addressing - lpddr2_jedec_addressing_table[NUM_DDR_ADDR_TABLE_ENTRIES]; -extern const struct lpddr2_timings - lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES]; -extern const struct lpddr2_min_tck lpddr2_jedec_min_tck; - -#endif /* __LINUX_JEDEC_DDR_H */ diff --git a/include/misc/charlcd.h b/include/misc/charlcd.h deleted file mode 100644 
index 8cf6c18b0adb..000000000000 --- a/include/misc/charlcd.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Character LCD driver for Linux - * - * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu> - * Copyright (C) 2016-2017 Glider bvba - */ - -struct charlcd { - const struct charlcd_ops *ops; - const unsigned char *char_conv; /* Optional */ - - int ifwidth; /* 4-bit or 8-bit (default) */ - int height; - int width; - int bwidth; /* Default set by charlcd_alloc() */ - int hwidth; /* Default set by charlcd_alloc() */ - - void *drvdata; /* Set by charlcd_alloc() */ -}; - -struct charlcd_ops { - /* Required */ - void (*write_cmd)(struct charlcd *lcd, int cmd); - void (*write_data)(struct charlcd *lcd, int data); - - /* Optional */ - void (*write_cmd_raw4)(struct charlcd *lcd, int cmd); /* 4-bit only */ - void (*clear_fast)(struct charlcd *lcd); - void (*backlight)(struct charlcd *lcd, int on); -}; - -struct charlcd *charlcd_alloc(unsigned int drvdata_size); -void charlcd_free(struct charlcd *lcd); - -int charlcd_register(struct charlcd *lcd); -int charlcd_unregister(struct charlcd *lcd); - -void charlcd_poke(struct charlcd *lcd); diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h index 5c4b4916e6be..06dd5839e438 100644 --- a/include/misc/ocxl.h +++ b/include/misc/ocxl.h @@ -32,7 +32,10 @@ struct ocxl_afu_config { u8 pp_mmio_bar; /* per-process MMIO area */ u64 pp_mmio_offset; u32 pp_mmio_stride; - u8 log_mem_size; + u64 lpc_mem_offset; + u64 lpc_mem_size; + u64 special_purpose_mem_offset; + u64 special_purpose_mem_size; u8 pasid_supported_log; u16 actag_supported; }; diff --git a/include/net/act_api.h b/include/net/act_api.h index c61a1bf4e3de..b18c699681ca 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -15,6 +15,7 @@ struct tcf_idrinfo { struct mutex lock; struct idr action_idr; + struct net *net; }; struct tc_action_ops; @@ -77,6 +78,8 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm) #define ACT_P_CREATED 1 #define ACT_P_DELETED 1 +typedef void (*tc_action_priv_destructor)(void *priv); + struct tc_action_ops { struct list_head head; char kind[IFNAMSIZ]; @@ -98,8 +101,11 @@ struct tc_action_ops { struct netlink_ext_ack *); void (*stats_update)(struct tc_action *, u64, u32, u64, bool); size_t (*get_fill_size)(const struct tc_action *act); - struct net_device *(*get_dev)(const struct tc_action *a); - void (*put_dev)(struct net_device *dev); + struct net_device *(*get_dev)(const struct tc_action *a, + tc_action_priv_destructor *destructor); + struct psample_group * + (*get_psample_group)(const struct tc_action *a, + tc_action_priv_destructor *destructor); }; struct tc_action_net { @@ -108,7 +114,7 @@ struct tc_action_net { }; static inline -int tc_action_net_init(struct tc_action_net *tn, +int tc_action_net_init(struct net *net, struct tc_action_net *tn, const struct tc_action_ops *ops) { int err = 0; @@ -117,6 +123,7 @@ int tc_action_net_init(struct tc_action_net *tn, if (!tn->idrinfo) return -ENOMEM; tn->ops = ops; + tn->idrinfo->net = net; mutex_init(&tn->idrinfo->lock); idr_init(&tn->idrinfo->action_idr); return err; diff --git a/include/net/addrconf.h b/include/net/addrconf.h index becdad576859..3f62b347b04a 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -206,7 +206,7 @@ static inline int ipv6_mc_may_pull(struct sk_buff *skb, unsigned int len) { if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len) - return -EINVAL; + return 0; return pskb_may_pull(skb, len); } diff 
--git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 9a5330eed794..5bc1e30dedde 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1143,6 +1143,26 @@ struct hci_cp_write_sc_support { __u8 support; } __packed; +#define HCI_OP_READ_AUTH_PAYLOAD_TO 0x0c7b +struct hci_cp_read_auth_payload_to { + __le16 handle; +} __packed; +struct hci_rp_read_auth_payload_to { + __u8 status; + __le16 handle; + __le16 timeout; +} __packed; + +#define HCI_OP_WRITE_AUTH_PAYLOAD_TO 0x0c7c +struct hci_cp_write_auth_payload_to { + __le16 handle; + __le16 timeout; +} __packed; +struct hci_rp_write_auth_payload_to { + __u8 status; + __le16 handle; +} __packed; + #define HCI_OP_READ_LOCAL_OOB_EXT_DATA 0x0c7d struct hci_rp_read_local_oob_ext_data { __u8 status; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 05b1b96f4d9e..b689aceb636b 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -199,6 +199,8 @@ struct adv_info { /* Default min/max age of connection information (1s/3s) */ #define DEFAULT_CONN_INFO_MIN_AGE 1000 #define DEFAULT_CONN_INFO_MAX_AGE 3000 +/* Default authenticated payload timeout 30s */ +#define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8 struct amp_assoc { __u16 len; @@ -275,6 +277,8 @@ struct hci_dev { __u16 discov_interleaved_timeout; __u16 conn_info_min_age; __u16 conn_info_max_age; + __u16 auth_payload_timeout; + __u8 min_enc_key_size; __u8 ssp_debug_mode; __u8 hw_error_code; __u32 clock; @@ -481,6 +485,7 @@ struct hci_conn { __u16 disc_timeout; __u16 conn_timeout; __u16 setting; + __u16 auth_payload_timeout; __u16 le_conn_min_interval; __u16 le_conn_max_interval; __u16 le_conn_interval; @@ -1512,6 +1517,8 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 #define DISCOV_BREDR_INQUIRY_LEN 0x08 #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ +#define DISCOV_LE_FAST_ADV_INT_MIN 100 /* msec */ +#define DISCOV_LE_FAST_ADV_INT_MAX 150 /* msec */ void mgmt_fill_version_info(void *ver); int mgmt_new_settings(struct hci_dev *hdev); diff --git a/include/net/bond_options.h b/include/net/bond_options.h index 2a05cc349018..9d382f2f0bc5 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -63,6 +63,7 @@ enum { BOND_OPT_AD_ACTOR_SYSTEM, BOND_OPT_AD_USER_PORT_KEY, BOND_OPT_NUM_PEER_NOTIF_ALIAS, + BOND_OPT_PEER_NOTIF_DELAY, BOND_OPT_LAST }; diff --git a/include/net/bonding.h b/include/net/bonding.h index b46d68acf701..1afc125014da 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -38,6 +38,15 @@ #define __long_aligned __attribute__((aligned((sizeof(long))))) #endif +#define slave_info(bond_dev, slave_dev, fmt, ...) \ + netdev_info(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) +#define slave_warn(bond_dev, slave_dev, fmt, ...) \ + netdev_warn(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) +#define slave_dbg(bond_dev, slave_dev, fmt, ...) \ + netdev_dbg(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) +#define slave_err(bond_dev, slave_dev, fmt, ...) 
\ + netdev_err(bond_dev, "(slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) + #define BOND_MODE(bond) ((bond)->params.mode) /* slave list primitives */ @@ -114,6 +123,7 @@ struct bond_params { int fail_over_mac; int updelay; int downdelay; + int peer_notif_delay; int lacp_fast; unsigned int min_links; int ad_select; @@ -193,7 +203,6 @@ struct bonding { struct slave __rcu *primary_slave; struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */ bool force_primary; - u32 nest_level; s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ int (*recv_probe)(const struct sk_buff *, struct bonding *, struct slave *); @@ -229,6 +238,7 @@ struct bonding { struct dentry *debug_dir; #endif /* CONFIG_DEBUG_FS */ struct rtnl_link_stats64 bond_stats; + struct lock_class_key stats_lock_key; }; #define bond_slave_get_rcu(dev) \ diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h index b9dcb02e756b..8e4f831d2e52 100644 --- a/include/net/bpf_sk_storage.h +++ b/include/net/bpf_sk_storage.h @@ -10,4 +10,14 @@ void bpf_sk_storage_free(struct sock *sk); extern const struct bpf_func_proto bpf_sk_storage_get_proto; extern const struct bpf_func_proto bpf_sk_storage_delete_proto; +#ifdef CONFIG_BPF_SYSCALL +int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk); +#else +static inline int bpf_sk_storage_clone(const struct sock *sk, + struct sock *newsk) +{ + return 0; +} +#endif + #endif /* _BPF_SK_STORAGE_H */ diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 127a5c4e3699..86e028388bad 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -122,7 +122,7 @@ static inline void skb_mark_napi_id(struct sk_buff *skb, static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb) { #ifdef CONFIG_NET_RX_BUSY_POLL - sk->sk_napi_id = skb->napi_id; + WRITE_ONCE(sk->sk_napi_id, skb->napi_id); #endif sk_rx_queue_set(sk, skb); } @@ -132,8 +132,8 @@ static inline void sk_mark_napi_id_once(struct sock *sk, const struct sk_buff *skb) { #ifdef CONFIG_NET_RX_BUSY_POLL - if (!sk->sk_napi_id) - sk->sk_napi_id = skb->napi_id; + if (!READ_ONCE(sk->sk_napi_id)) + WRITE_ONCE(sk->sk_napi_id, skb->napi_id); #endif } diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 8fb5be3ca0ca..4ab2c49423dc 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -247,6 +247,19 @@ struct ieee80211_rate { }; /** + * struct ieee80211_he_obss_pd - AP settings for spatial reuse + * + * @enable: is the feature enabled. 
+ * @min_offset: minimal tx power offset an associated station shall use + * @max_offset: maximum tx power offset an associated station shall use + */ +struct ieee80211_he_obss_pd { + bool enable; + u8 min_offset; + u8 max_offset; +}; + +/** * struct ieee80211_sta_ht_cap - STA's HT capabilities * * This structure describes most essential parameters needed @@ -318,6 +331,60 @@ struct ieee80211_sband_iftype_data { }; /** + * enum ieee80211_edmg_bw_config - allowed channel bandwidth configurations + * + * @IEEE80211_EDMG_BW_CONFIG_4: 2.16GHz + * @IEEE80211_EDMG_BW_CONFIG_5: 2.16GHz and 4.32GHz + * @IEEE80211_EDMG_BW_CONFIG_6: 2.16GHz, 4.32GHz and 6.48GHz + * @IEEE80211_EDMG_BW_CONFIG_7: 2.16GHz, 4.32GHz, 6.48GHz and 8.64GHz + * @IEEE80211_EDMG_BW_CONFIG_8: 2.16GHz and 2.16GHz + 2.16GHz + * @IEEE80211_EDMG_BW_CONFIG_9: 2.16GHz, 4.32GHz and 2.16GHz + 2.16GHz + * @IEEE80211_EDMG_BW_CONFIG_10: 2.16GHz, 4.32GHz, 6.48GHz and 2.16GHz+2.16GHz + * @IEEE80211_EDMG_BW_CONFIG_11: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz and + * 2.16GHz+2.16GHz + * @IEEE80211_EDMG_BW_CONFIG_12: 2.16GHz, 2.16GHz + 2.16GHz and + * 4.32GHz + 4.32GHz + * @IEEE80211_EDMG_BW_CONFIG_13: 2.16GHz, 4.32GHz, 2.16GHz + 2.16GHz and + * 4.32GHz + 4.32GHz + * @IEEE80211_EDMG_BW_CONFIG_14: 2.16GHz, 4.32GHz, 6.48GHz, 2.16GHz + 2.16GHz + * and 4.32GHz + 4.32GHz + * @IEEE80211_EDMG_BW_CONFIG_15: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz, + * 2.16GHz + 2.16GHz and 4.32GHz + 4.32GHz + */ +enum ieee80211_edmg_bw_config { + IEEE80211_EDMG_BW_CONFIG_4 = 4, + IEEE80211_EDMG_BW_CONFIG_5 = 5, + IEEE80211_EDMG_BW_CONFIG_6 = 6, + IEEE80211_EDMG_BW_CONFIG_7 = 7, + IEEE80211_EDMG_BW_CONFIG_8 = 8, + IEEE80211_EDMG_BW_CONFIG_9 = 9, + IEEE80211_EDMG_BW_CONFIG_10 = 10, + IEEE80211_EDMG_BW_CONFIG_11 = 11, + IEEE80211_EDMG_BW_CONFIG_12 = 12, + IEEE80211_EDMG_BW_CONFIG_13 = 13, + IEEE80211_EDMG_BW_CONFIG_14 = 14, + IEEE80211_EDMG_BW_CONFIG_15 = 15, +}; + +/** + * struct ieee80211_edmg - EDMG configuration + * + * This structure describes most essential parameters needed + * to describe 802.11ay EDMG configuration + * + * @channels: bitmap that indicates the 2.16 GHz channel(s) + * that are allowed to be used for transmissions. + * Bit 0 indicates channel 1, bit 1 indicates channel 2, etc. + * Set to 0 indicate EDMG not supported. + * @bw_config: Channel BW Configuration subfield encodes + * the allowed channel bandwidth configurations + */ +struct ieee80211_edmg { + u8 channels; + enum ieee80211_edmg_bw_config bw_config; +}; + +/** * struct ieee80211_supported_band - frequency band definition * * This structure describes a frequency band a wiphy @@ -333,6 +400,7 @@ struct ieee80211_sband_iftype_data { * @n_bitrates: Number of bitrates in @bitrates * @ht_cap: HT capabilities in this band * @vht_cap: VHT capabilities in this band + * @edmg_cap: EDMG capabilities in this band * @n_iftype_data: number of iftype data entries * @iftype_data: interface type data entries. Note that the bits in * @types_mask inside this structure cannot overlap (i.e. 
only @@ -347,6 +415,7 @@ struct ieee80211_supported_band { int n_bitrates; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_edmg edmg_cap; u16 n_iftype_data; const struct ieee80211_sband_iftype_data *iftype_data; }; @@ -379,16 +448,18 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, } /** - * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA - * @sband: the sband to search for the STA on + * ieee80211_get_he_iftype_cap - return HE capabilities for an sband's iftype + * @sband: the sband to search for the iftype on + * @iftype: enum nl80211_iftype * * Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found */ static inline const struct ieee80211_sta_he_cap * -ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband) +ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *sband, + u8 iftype) { const struct ieee80211_sband_iftype_data *data = - ieee80211_get_sband_iftype_data(sband, NL80211_IFTYPE_STATION); + ieee80211_get_sband_iftype_data(sband, iftype); if (data && data->he_cap.has_he) return &data->he_cap; @@ -397,6 +468,18 @@ ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband) } /** + * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA + * @sband: the sband to search for the STA on + * + * Return: pointer to the struct ieee80211_sta_he_cap, or NULL is none found + */ +static inline const struct ieee80211_sta_he_cap * +ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband) +{ + return ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_STATION); +} + +/** * wiphy_read_of_freq_limits - read frequency limits from device tree * * @wiphy: the wireless device to get extra limits for @@ -500,12 +583,17 @@ struct key_params { * @center_freq1: center frequency of first segment * @center_freq2: center frequency of second segment * (only with 80+80 MHz) + * @edmg: define the EDMG channels configuration. + * If edmg is requested (i.e. the .channels member is non-zero), + * chan will define the primary channel and all other + * parameters are ignored. */ struct cfg80211_chan_def { struct ieee80211_channel *chan; enum nl80211_chan_width width; u32 center_freq1; u32 center_freq2; + struct ieee80211_edmg edmg; }; /** @@ -564,6 +652,19 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1, } /** + * cfg80211_chandef_is_edmg - check if chandef represents an EDMG channel + * + * @chandef: the channel definition + * + * Return: %true if EDMG defined, %false otherwise. + */ +static inline bool +cfg80211_chandef_is_edmg(const struct cfg80211_chan_def *chandef) +{ + return chandef->edmg.channels || chandef->edmg.bw_config; +} + +/** * cfg80211_chandef_compatible - check if two channel definitions are compatible * @chandef1: first channel definition * @chandef2: second channel definition @@ -667,6 +768,7 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef) * @SURVEY_INFO_TIME_RX: receive time was filled in * @SURVEY_INFO_TIME_TX: transmit time was filled in * @SURVEY_INFO_TIME_SCAN: scan time was filled in + * @SURVEY_INFO_TIME_BSS_RX: local BSS receive time was filled in * * Used by the driver to indicate which info in &struct survey_info * it has filled in during the get_survey(). 
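A small sketch of how a driver might use the new ieee80211_get_he_iftype_cap() for an interface type other than station (not taken from the diff; sband_has_he_for_ap is an invented helper name).

#include <net/cfg80211.h>

static bool sband_has_he_for_ap(const struct ieee80211_supported_band *sband)
{
        const struct ieee80211_sta_he_cap *he_cap =
                ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_AP);

        /* NULL means no iftype data with HE support for this interface type */
        return he_cap != NULL;
}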
@@ -680,6 +782,7 @@ enum survey_info_flags { SURVEY_INFO_TIME_RX = BIT(5), SURVEY_INFO_TIME_TX = BIT(6), SURVEY_INFO_TIME_SCAN = BIT(7), + SURVEY_INFO_TIME_BSS_RX = BIT(8), }; /** @@ -696,6 +799,7 @@ enum survey_info_flags { * @time_rx: amount of time the radio spent receiving data * @time_tx: amount of time the radio spent transmitting data * @time_scan: amount of time the radio spent for scanning + * @time_bss_rx: amount of time the radio spent receiving data on a local BSS * * Used by dump_survey() to report back per-channel survey information. * @@ -710,6 +814,7 @@ struct survey_info { u64 time_rx; u64 time_tx; u64 time_scan; + u64 time_bss_rx; u32 filled; s8 noise; }; @@ -739,6 +844,9 @@ struct survey_info { * CFG80211_MAX_WEP_KEYS WEP keys * @wep_tx_key: key index (0..3) of the default TX static WEP key * @psk: PSK (for devices supporting 4-way-handshake offload) + * @sae_pwd: password for SAE authentication (for devices supporting SAE + * offload) + * @sae_pwd_len: length of SAE password (for devices supporting SAE offload) */ struct cfg80211_crypto_settings { u32 wpa_versions; @@ -754,6 +862,8 @@ struct cfg80211_crypto_settings { struct key_params *wep_keys; int wep_tx_key; const u8 *psk; + const u8 *sae_pwd; + u8 sae_pwd_len; }; /** @@ -875,7 +985,9 @@ enum cfg80211_ap_settings_flags { * @he_cap: HE capabilities (or %NULL if HE isn't enabled) * @ht_required: stations must support HT * @vht_required: stations must support VHT + * @twt_responder: Enable Target Wait Time * @flags: flags, as defined in enum cfg80211_ap_settings_flags + * @he_obss_pd: OBSS Packet Detection settings */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@ -901,7 +1013,9 @@ struct cfg80211_ap_settings { const struct ieee80211_vht_cap *vht_cap; const struct ieee80211_he_cap_elem *he_cap; bool ht_required, vht_required; + bool twt_responder; u32 flags; + struct ieee80211_he_obss_pd he_obss_pd; }; /** @@ -1141,15 +1255,17 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval - * @RATE_INFO_FLAGS_60G: 60GHz MCS + * @RATE_INFO_FLAGS_DMG: 60GHz MCS * @RATE_INFO_FLAGS_HE_MCS: HE MCS information + * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode */ enum rate_info_flags { RATE_INFO_FLAGS_MCS = BIT(0), RATE_INFO_FLAGS_VHT_MCS = BIT(1), RATE_INFO_FLAGS_SHORT_GI = BIT(2), - RATE_INFO_FLAGS_60G = BIT(3), + RATE_INFO_FLAGS_DMG = BIT(3), RATE_INFO_FLAGS_HE_MCS = BIT(4), + RATE_INFO_FLAGS_EDMG = BIT(5), }; /** @@ -1189,6 +1305,7 @@ enum rate_info_bw { * @he_dcm: HE DCM value * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc, * only valid if bw is %RATE_INFO_BW_HE_RU) + * @n_bonded_ch: In case of EDMG the number of bonded channels (1-4) */ struct rate_info { u8 flags; @@ -1199,6 +1316,7 @@ struct rate_info { u8 he_gi; u8 he_dcm; u8 he_ru_alloc; + u8 n_bonded_ch; }; /** @@ -1294,6 +1412,7 @@ struct cfg80211_tid_stats { * indicate the relevant values in this struct for them * @connected_time: time(in secs) since a station is last connected * @inactive_time: time since last station activity (tx/rx) in milliseconds + * @assoc_at: bootime (ns) of the last association * @rx_bytes: bytes (size of MPDUs) received from this station * @tx_bytes: bytes (size of MPDUs) transmitted to this station * @llid: mesh local link id @@ -1354,6 +1473,7 @@ struct station_info { u64 filled; u32 connected_time; u32 inactive_time; + u64 assoc_at; u64 
rx_bytes; u64 tx_bytes; u16 llid; @@ -2007,7 +2127,7 @@ enum cfg80211_signal_type { * received by the device (not just by the host, in case it was * buffered on the device) and be accurate to about 10ms. * If the frame isn't buffered, just passing the return value of - * ktime_get_boot_ns() is likely appropriate. + * ktime_get_boottime_ns() is likely appropriate. * @parent_tsf: the time at the start of reception of the first octet of the * timestamp field of the frame. The time is the TSF of the BSS specified * by %parent_bssid. @@ -2400,6 +2520,9 @@ struct cfg80211_bss_selection { * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets. * @want_1x: indicates user-space supports and wants to use 802.1X driver * offload of 4-way handshake. + * @edmg: define the EDMG channels. + * This may specify multiple channels and bonding options for the driver + * to choose from, based on BSS configuration. */ struct cfg80211_connect_params { struct ieee80211_channel *channel; @@ -2433,6 +2556,7 @@ struct cfg80211_connect_params { const u8 *fils_erp_rrk; size_t fils_erp_rrk_len; bool want_1x; + struct ieee80211_edmg edmg; }; /** @@ -4149,6 +4273,8 @@ struct sta_opmode_info { u8 rx_nss; }; +#define VENDOR_CMD_RAW_DATA ((const struct nla_policy *)(long)(-ENODATA)) + /** * struct wiphy_vendor_command - vendor command definition * @info: vendor command identifying information, as used in nl80211 @@ -4159,6 +4285,10 @@ struct sta_opmode_info { * @dumpit: dump callback, for transferring bigger/multiple items. The * @storage points to cb->args[5], ie. is preserved over the multiple * dumpit calls. + * @policy: policy pointer for attributes within %NL80211_ATTR_VENDOR_DATA. + * Set this to %VENDOR_CMD_RAW_DATA if no policy can be given and the + * attribute is just raw data (e.g. a firmware command). + * @maxattr: highest attribute number in policy * It's recommended to not have the same sub command with both @doit and * @dumpit, so that userspace can assume certain ones are get and others * are used with dump requests. @@ -4171,6 +4301,8 @@ struct wiphy_vendor_command { int (*dumpit)(struct wiphy *wiphy, struct wireless_dev *wdev, struct sk_buff *skb, const void *data, int data_len, unsigned long *storage); + const struct nla_policy *policy; + unsigned int maxattr; }; /** @@ -5418,6 +5550,14 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy, const char *reg_initiator_name(enum nl80211_reg_initiator initiator); /** + * regulatory_pre_cac_allowed - check if pre-CAC allowed in the current regdom + * @wiphy: wiphy for which pre-CAC capability is checked. + * + * Pre-CAC is allowed only in some regdomains (notable ETSI). + */ +bool regulatory_pre_cac_allowed(struct wiphy *wiphy); + +/** * DOC: Internal regulatory db functions * */ @@ -5719,6 +5859,26 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); */ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss); +/** + * cfg80211_bss_iter - iterate all BSS entries + * + * This function iterates over the BSS entries associated with the given wiphy + * and calls the callback for the iterated BSS. The iterator function is not + * allowed to call functions that might modify the internal state of the BSS DB. + * + * @wiphy: the wiphy + * @chandef: if given, the iterator function will be called only if the channel + * of the currently iterated BSS is a subset of the given channel. 
+ * @iter: the iterator function to call + * @iter_data: an argument to the iterator function + */ +void cfg80211_bss_iter(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + void (*iter)(struct wiphy *wiphy, + struct cfg80211_bss *bss, + void *data), + void *iter_data); + static inline enum nl80211_bss_scan_width cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef) { @@ -6229,8 +6389,11 @@ struct cfg80211_fils_resp_params { * case. * @bssid: The BSSID of the AP (may be %NULL) * @bss: Entry of bss to which STA got connected to, can be obtained through - * cfg80211_get_bss() (may be %NULL). Only one parameter among @bssid and - * @bss needs to be specified. + * cfg80211_get_bss() (may be %NULL). But it is recommended to store the + * bss from the connect_request and hold a reference to it and return + * through this param to avoid a warning if the bss is expired during the + * connection, esp. for those drivers implementing connect op. + * Only one parameter among @bssid and @bss needs to be specified. * @req_ie: Association request IEs (may be %NULL) * @req_ie_len: Association request IEs length * @resp_ie: Association response IEs (may be %NULL) @@ -6278,8 +6441,12 @@ void cfg80211_connect_done(struct net_device *dev, * * @dev: network device * @bssid: the BSSID of the AP - * @bss: entry of bss to which STA got connected to, can be obtained - * through cfg80211_get_bss (may be %NULL) + * @bss: Entry of bss to which STA got connected to, can be obtained through + * cfg80211_get_bss() (may be %NULL). But it is recommended to store the + * bss from the connect_request and hold a reference to it and return + * through this param to avoid a warning if the bss is expired during the + * connection, esp. for those drivers implementing connect op. + * Only one parameter among @bssid and @bss needs to be specified. * @req_ie: association request IEs (maybe be %NULL) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) @@ -6490,6 +6657,16 @@ void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie, gfp_t gfp); /** + * cfg80211_tx_mgmt_expired - tx_mgmt duration expired + * @wdev: wireless device + * @cookie: the requested cookie + * @chan: The current channel (from tx_mgmt request) + * @gfp: allocation flags + */ +void cfg80211_tx_mgmt_expired(struct wireless_dev *wdev, u64 cookie, + struct ieee80211_channel *chan, gfp_t gfp); + +/** * cfg80211_sinfo_alloc_tid_stats - allocate per-tid statistics. * * @sinfo: the station information @@ -7254,6 +7431,21 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev, struct cfg80211_pmsr_request *req, gfp_t gfp); +/** + * cfg80211_iftype_allowed - check whether the interface can be allowed + * @wiphy: the wiphy + * @iftype: interface type + * @is_4addr: use_4addr flag, must be '0' when check_swif is '1' + * @check_swif: check iftype against software interfaces + * + * Check whether the interface is allowed to operate; additionally, this API + * can be used to check iftype against the software interfaces when + * check_swif is '1'. + */ +bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype, + bool is_4addr, u8 check_swif); + + /* Logging, debugging and troubleshooting/diagnostic helpers. 
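Purely illustrative (not in the diff): cfg80211_bss_iter() is driven by a callback, for instance to count the BSS entries matching a channel definition; count_bss and struct bss_count_data are invented names, and the callback must not modify the BSS database.

#include <net/cfg80211.h>

struct bss_count_data {
        unsigned int n;
};

static void count_bss(struct wiphy *wiphy, struct cfg80211_bss *bss,
                      void *data)
{
        struct bss_count_data *count = data;

        count->n++;     /* read-only use of @bss; no BSS DB changes here */
}

static unsigned int bss_count_on_chandef(struct wiphy *wiphy,
                                         struct cfg80211_chan_def *chandef)
{
        struct bss_count_data data = { .n = 0 };

        cfg80211_bss_iter(wiphy, chandef, count_bss, &data);
        return data.n;
}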
*/ /* wiphy_printk helpers, similar to dev_printk */ diff --git a/include/net/devlink.h b/include/net/devlink.h index c9fbeb5b701f..23e4b65ec9df 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -13,6 +13,8 @@ #include <linux/list.h> #include <linux/netdevice.h> #include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/refcount.h> #include <net/net_namespace.h> #include <uapi/linux/devlink.h> @@ -30,28 +32,51 @@ struct devlink { struct list_head reporter_list; struct mutex reporters_lock; /* protects reporter_list */ struct devlink_dpipe_headers *dpipe_headers; + struct list_head trap_list; + struct list_head trap_group_list; const struct devlink_ops *ops; struct device *dev; possible_net_t _net; struct mutex lock; + bool reload_failed; char priv[0] __aligned(NETDEV_ALIGN); }; +struct devlink_port_phys_attrs { + u32 port_number; /* Same value as "split group". + * A physical port which is visible to the user + * for a given port flavour. + */ + u32 split_subport_number; +}; + +struct devlink_port_pci_pf_attrs { + u16 pf; /* Associated PCI PF for this port. */ +}; + +struct devlink_port_pci_vf_attrs { + u16 pf; /* Associated PCI PF for this port. */ + u16 vf; /* Associated PCI VF for of the PCI PF for this port. */ +}; + struct devlink_port_attrs { u8 set:1, split:1, switch_port:1; enum devlink_port_flavour flavour; - u32 port_number; /* same value as "split group" */ - u32 split_subport_number; struct netdev_phys_item_id switch_id; + union { + struct devlink_port_phys_attrs phys; + struct devlink_port_pci_pf_attrs pci_pf; + struct devlink_port_pci_vf_attrs pci_vf; + }; }; struct devlink_port { struct list_head list; struct list_head param_list; struct devlink *devlink; - unsigned index; + unsigned int index; bool registered; spinlock_t type_lock; /* Protects type and type_dev * pointer consistency. 
@@ -60,6 +85,7 @@ struct devlink_port { enum devlink_port_type desired_type; void *type_dev; struct devlink_port_attrs attrs; + struct delayed_work type_warn_dw; }; struct devlink_sb_pool_info { @@ -373,6 +399,7 @@ enum devlink_param_generic_id { DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, + DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE, /* add new param generic ids above here*/ __DEVLINK_PARAM_GENERIC_ID_MAX, @@ -403,6 +430,10 @@ enum devlink_param_generic_id { #define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME "fw_load_policy" #define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE DEVLINK_PARAM_TYPE_U8 +#define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME \ + "reset_dev_on_drv_probe" +#define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE DEVLINK_PARAM_TYPE_U8 + #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ { \ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \ @@ -433,6 +464,13 @@ enum devlink_param_generic_id { /* Maker of the board */ #define DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE "board.manufacture" +/* Part number, identifier of asic design */ +#define DEVLINK_INFO_VERSION_GENERIC_ASIC_ID "asic.id" +/* Revision of asic design */ +#define DEVLINK_INFO_VERSION_GENERIC_ASIC_REV "asic.rev" + +/* Overall FW version */ +#define DEVLINK_INFO_VERSION_GENERIC_FW "fw" /* Control processor FW version */ #define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT "fw.mgmt" /* Data path microcode controlling high-speed packet processing */ @@ -475,8 +513,140 @@ struct devlink_health_reporter_ops { struct devlink_fmsg *fmsg); }; +/** + * struct devlink_trap_group - Immutable packet trap group attributes. + * @name: Trap group name. + * @id: Trap group identifier. + * @generic: Whether the trap group is generic or not. + * + * Describes immutable attributes of packet trap groups that drivers register + * with devlink. + */ +struct devlink_trap_group { + const char *name; + u16 id; + bool generic; +}; + +#define DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT BIT(0) + +/** + * struct devlink_trap - Immutable packet trap attributes. + * @type: Trap type. + * @init_action: Initial trap action. + * @generic: Whether the trap is generic or not. + * @id: Trap identifier. + * @name: Trap name. + * @group: Immutable packet trap group attributes. + * @metadata_cap: Metadata types that can be provided by the trap. + * + * Describes immutable attributes of packet traps that drivers register with + * devlink. 
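As a rough sketch only (not from the patch): a driver could describe one generic drop trap with the structures documented above and later hand it to devlink_traps_register(); my_traps[] and the choice of the TTL_ERROR trap are arbitrary, and the DEVLINK_TRAP_GENERIC()/DEVLINK_TRAP_GROUP_GENERIC() helpers defined a little further down generate the same initializers.

#include <net/devlink.h>

static const struct devlink_trap my_traps[] = {
        {
                .type           = DEVLINK_TRAP_TYPE_DROP,
                .init_action    = DEVLINK_TRAP_ACTION_DROP,
                .generic        = true,
                .id             = DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
                .name           = "ttl_value_is_too_small",
                .group          = {
                        .name    = "l3_drops",
                        .id      = DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS,
                        .generic = true,
                },
                .metadata_cap   = DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT,
        },
};

/* Registered once from the driver's init path, e.g.:
 *      devlink_traps_register(devlink, my_traps, ARRAY_SIZE(my_traps), priv);
 */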
+ */ +struct devlink_trap { + enum devlink_trap_type type; + enum devlink_trap_action init_action; + bool generic; + u16 id; + const char *name; + struct devlink_trap_group group; + u32 metadata_cap; +}; + +/* All traps must be documented in + * Documentation/networking/devlink-trap.rst + */ +enum devlink_trap_generic_id { + DEVLINK_TRAP_GENERIC_ID_SMAC_MC, + DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH, + DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER, + DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER, + DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST, + DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER, + DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE, + DEVLINK_TRAP_GENERIC_ID_TTL_ERROR, + DEVLINK_TRAP_GENERIC_ID_TAIL_DROP, + + /* Add new generic trap IDs above */ + __DEVLINK_TRAP_GENERIC_ID_MAX, + DEVLINK_TRAP_GENERIC_ID_MAX = __DEVLINK_TRAP_GENERIC_ID_MAX - 1, +}; + +/* All trap groups must be documented in + * Documentation/networking/devlink-trap.rst + */ +enum devlink_trap_group_generic_id { + DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS, + DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS, + DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS, + + /* Add new generic trap group IDs above */ + __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX, + DEVLINK_TRAP_GROUP_GENERIC_ID_MAX = + __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX - 1, +}; + +#define DEVLINK_TRAP_GENERIC_NAME_SMAC_MC \ + "source_mac_is_multicast" +#define DEVLINK_TRAP_GENERIC_NAME_VLAN_TAG_MISMATCH \ + "vlan_tag_mismatch" +#define DEVLINK_TRAP_GENERIC_NAME_INGRESS_VLAN_FILTER \ + "ingress_vlan_filter" +#define DEVLINK_TRAP_GENERIC_NAME_INGRESS_STP_FILTER \ + "ingress_spanning_tree_filter" +#define DEVLINK_TRAP_GENERIC_NAME_EMPTY_TX_LIST \ + "port_list_is_empty" +#define DEVLINK_TRAP_GENERIC_NAME_PORT_LOOPBACK_FILTER \ + "port_loopback_filter" +#define DEVLINK_TRAP_GENERIC_NAME_BLACKHOLE_ROUTE \ + "blackhole_route" +#define DEVLINK_TRAP_GENERIC_NAME_TTL_ERROR \ + "ttl_value_is_too_small" +#define DEVLINK_TRAP_GENERIC_NAME_TAIL_DROP \ + "tail_drop" + +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \ + "l2_drops" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L3_DROPS \ + "l3_drops" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BUFFER_DROPS \ + "buffer_drops" + +#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group, _metadata_cap) \ + { \ + .type = DEVLINK_TRAP_TYPE_##_type, \ + .init_action = DEVLINK_TRAP_ACTION_##_init_action, \ + .generic = true, \ + .id = DEVLINK_TRAP_GENERIC_ID_##_id, \ + .name = DEVLINK_TRAP_GENERIC_NAME_##_id, \ + .group = _group, \ + .metadata_cap = _metadata_cap, \ + } + +#define DEVLINK_TRAP_DRIVER(_type, _init_action, _id, _name, _group, \ + _metadata_cap) \ + { \ + .type = DEVLINK_TRAP_TYPE_##_type, \ + .init_action = DEVLINK_TRAP_ACTION_##_init_action, \ + .generic = false, \ + .id = _id, \ + .name = _name, \ + .group = _group, \ + .metadata_cap = _metadata_cap, \ + } + +#define DEVLINK_TRAP_GROUP_GENERIC(_id) \ + { \ + .name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id, \ + .id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id, \ + .generic = true, \ + } + struct devlink_ops { - int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack); + int (*reload_down)(struct devlink *devlink, + struct netlink_ext_ack *extack); + int (*reload_up)(struct devlink *devlink, + struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, enum devlink_port_type port_type); int (*port_split)(struct devlink *devlink, unsigned int port_index, @@ -526,14 +696,48 @@ struct devlink_ops { int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode); int 
(*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode, struct netlink_ext_ack *extack); - int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode); - int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode, + int (*eswitch_encap_mode_get)(struct devlink *devlink, + enum devlink_eswitch_encap_mode *p_encap_mode); + int (*eswitch_encap_mode_set)(struct devlink *devlink, + enum devlink_eswitch_encap_mode encap_mode, struct netlink_ext_ack *extack); int (*info_get)(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack); int (*flash_update)(struct devlink *devlink, const char *file_name, const char *component, struct netlink_ext_ack *extack); + /** + * @trap_init: Trap initialization function. + * + * Should be used by device drivers to initialize the trap in the + * underlying device. Drivers should also store the provided trap + * context, so that they could efficiently pass it to + * devlink_trap_report() when the trap is triggered. + */ + int (*trap_init)(struct devlink *devlink, + const struct devlink_trap *trap, void *trap_ctx); + /** + * @trap_fini: Trap de-initialization function. + * + * Should be used by device drivers to de-initialize the trap in the + * underlying device. + */ + void (*trap_fini)(struct devlink *devlink, + const struct devlink_trap *trap, void *trap_ctx); + /** + * @trap_action_set: Trap action set function. + */ + int (*trap_action_set)(struct devlink *devlink, + const struct devlink_trap *trap, + enum devlink_trap_action action); + /** + * @trap_group_init: Trap group initialization function. + * + * Should be used by device drivers to initialize the trap group in the + * underlying device. + */ + int (*trap_group_init)(struct devlink *devlink, + const struct devlink_trap_group *group); }; static inline void *devlink_priv(struct devlink *devlink) @@ -586,6 +790,13 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port, u32 split_subport_number, const unsigned char *switch_id, unsigned char switch_id_len); +void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, + const unsigned char *switch_id, + unsigned char switch_id_len, u16 pf); +void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, + const unsigned char *switch_id, + unsigned char switch_id_len, + u16 pf, u16 vf); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, @@ -671,7 +882,7 @@ struct devlink_region *devlink_region_create(struct devlink *devlink, u64 region_size); void devlink_region_destroy(struct devlink_region *region); u32 devlink_region_shapshot_id_get(struct devlink *devlink); -int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len, +int devlink_region_snapshot_create(struct devlink_region *region, u8 *data, u32 snapshot_id, devlink_snapshot_data_dest_t *data_destructor); int devlink_info_serial_number_put(struct devlink_info_req *req, @@ -735,6 +946,27 @@ void devlink_health_reporter_state_update(struct devlink_health_reporter *reporter, enum devlink_health_reporter_state state); +bool devlink_is_reload_failed(const struct devlink *devlink); + +void devlink_flash_update_begin_notify(struct devlink *devlink); +void devlink_flash_update_end_notify(struct devlink *devlink); +void devlink_flash_update_status_notify(struct devlink *devlink, + const char *status_msg, + const char *component, + unsigned long done, + unsigned long total); + +int 
devlink_traps_register(struct devlink *devlink, + const struct devlink_trap *traps, + size_t traps_count, void *priv); +void devlink_traps_unregister(struct devlink *devlink, + const struct devlink_trap *traps, + size_t traps_count); +void devlink_trap_report(struct devlink *devlink, + struct sk_buff *skb, void *trap_ctx, + struct devlink_port *in_devlink_port); +void *devlink_trap_ctx_priv(void *trap_ctx); + #if IS_ENABLED(CONFIG_NET_DEVLINK) void devlink_compat_running_version(struct net_device *dev, diff --git a/include/net/drop_monitor.h b/include/net/drop_monitor.h new file mode 100644 index 000000000000..2ab668461463 --- /dev/null +++ b/include/net/drop_monitor.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _NET_DROP_MONITOR_H_ +#define _NET_DROP_MONITOR_H_ + +#include <linux/ktime.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> + +/** + * struct net_dm_hw_metadata - Hardware-supplied packet metadata. + * @trap_group_name: Hardware trap group name. + * @trap_name: Hardware trap name. + * @input_dev: Input netdevice. + */ +struct net_dm_hw_metadata { + const char *trap_group_name; + const char *trap_name; + struct net_device *input_dev; +}; + +#if IS_ENABLED(CONFIG_NET_DROP_MONITOR) +void net_dm_hw_report(struct sk_buff *skb, + const struct net_dm_hw_metadata *hw_metadata); +#else +static inline void +net_dm_hw_report(struct sk_buff *skb, + const struct net_dm_hw_metadata *hw_metadata) +{ +} +#endif + +#endif /* _NET_DROP_MONITOR_H_ */ diff --git a/include/net/dsa.h b/include/net/dsa.h index ba6dfff98196..541fb514e31d 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -18,6 +18,7 @@ #include <linux/net_tstamp.h> #include <linux/phy.h> #include <linux/platform_data/dsa.h> +#include <linux/phylink.h> #include <net/devlink.h> #include <net/switchdev.h> @@ -40,6 +41,7 @@ struct phylink_link_state; #define DSA_TAG_PROTO_TRAILER_VALUE 11 #define DSA_TAG_PROTO_8021Q_VALUE 12 #define DSA_TAG_PROTO_SJA1105_VALUE 13 +#define DSA_TAG_PROTO_KSZ8795_VALUE 14 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -56,6 +58,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_TRAILER = DSA_TAG_PROTO_TRAILER_VALUE, DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE, DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE, + DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE, }; struct packet_type; @@ -180,7 +183,7 @@ struct dsa_port { struct dsa_switch *ds; unsigned int index; const char *name; - const struct dsa_port *cpu_dp; + struct dsa_port *cpu_dp; const char *mac; struct device_node *dn; unsigned int ageing_time; @@ -189,6 +192,7 @@ struct dsa_port { struct net_device *bridge_dev; struct devlink_port devlink_port; struct phylink *pl; + struct phylink_config pl_config; struct work_struct xmit_work; struct sk_buff_head xmit_queue; @@ -271,9 +275,6 @@ struct dsa_switch { */ bool vlan_filtering; - unsigned long *bitmap; - unsigned long _bitmap; - /* Dynamically allocated ports, keep last */ size_t num_ports; struct dsa_port ports[]; @@ -355,6 +356,7 @@ struct dsa_switch_ops { int port); int (*setup)(struct dsa_switch *ds); + void (*teardown)(struct dsa_switch *ds); u32 (*get_phy_flags)(struct dsa_switch *ds, int port); /* @@ -513,6 +515,8 @@ struct dsa_switch_ops { bool ingress); void (*port_mirror_del)(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); + int (*port_setup_tc)(struct dsa_switch *ds, int port, + enum tc_setup_type type, void *type_data); /* * Cross-chip operations diff --git a/include/net/dst.h 
b/include/net/dst.h index 12b31c602cb0..fe62fe2eb781 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -183,7 +183,7 @@ static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val) } /* Kernel-internal feature bits that are unallocated in user space. */ -#define DST_FEATURE_ECN_CA (1 << 31) +#define DST_FEATURE_ECN_CA (1U << 31) #define DST_FEATURE_MASK (DST_FEATURE_ECN_CA) #define DST_FEATURE_ECN_MASK (DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN) @@ -302,8 +302,9 @@ static inline bool dst_hold_safe(struct dst_entry *dst) * @skb: buffer * * If dst is not yet refcounted and not destroyed, grab a ref on it. + * Returns true if dst is refcounted. */ -static inline void skb_dst_force(struct sk_buff *skb) +static inline bool skb_dst_force(struct sk_buff *skb) { if (skb_dst_is_noref(skb)) { struct dst_entry *dst = skb_dst(skb); @@ -314,6 +315,8 @@ static inline void skb_dst_force(struct sk_buff *skb) skb->_skb_refdst = (unsigned long)dst; } + + return skb->_skb_refdst != 0UL; } diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index b473df5b9512..20dcadd8eed9 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -103,6 +103,7 @@ struct fib_rule_notifier_info { }; #define FRA_GENERIC_POLICY \ + [FRA_UNSPEC] = { .strict_start_type = FRA_DPORT_RANGE + 1 }, \ [FRA_IIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \ [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \ [FRA_PRIORITY] = { .type = NLA_U32 }, \ @@ -179,9 +180,9 @@ static inline bool fib_rule_port_range_compare(struct fib_rule_port_range *a, static inline bool fib_rule_requires_fldissect(struct fib_rule *rule) { - return rule->ip_proto || + return rule->iifindex != LOOPBACK_IFINDEX && (rule->ip_proto || fib_rule_port_range_set(&rule->sport_range) || - fib_rule_port_range_set(&rule->dport_range); + fib_rule_port_range_set(&rule->dport_range)); } struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index dfabc0503446..5cd12276ae21 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -4,6 +4,7 @@ #include <linux/types.h> #include <linux/in6.h> +#include <linux/siphash.h> #include <uapi/linux/if_ether.h> /** @@ -200,6 +201,28 @@ struct flow_dissector_key_ip { __u8 ttl; }; +/** + * struct flow_dissector_key_meta: + * @ingress_ifindex: ingress ifindex + */ +struct flow_dissector_key_meta { + int ingress_ifindex; +}; + +/** + * struct flow_dissector_key_ct: + * @ct_state: conntrack state after converting with map + * @ct_mark: conttrack mark + * @ct_zone: conntrack zone + * @ct_labels: conntrack labels + */ +struct flow_dissector_key_ct { + u16 ct_state; + u16 ct_zone; + u32 ct_mark; + u32 ct_labels[4]; +}; + enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ @@ -225,14 +248,15 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */ FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */ + FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */ + FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */ FLOW_DISSECTOR_KEY_MAX, }; #define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0) -#define FLOW_DISSECTOR_F_STOP_AT_L3 BIT(1) -#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(2) -#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(3) +#define 
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(1) +#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(2) struct flow_dissector_key { enum flow_dissector_key_id key_id; @@ -253,7 +277,7 @@ struct flow_keys_basic { struct flow_keys { struct flow_dissector_key_control control; #define FLOW_KEYS_HASH_START_FIELD basic - struct flow_dissector_key_basic basic; + struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT); struct flow_dissector_key_tags tags; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_vlan cvlan; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index a2df99f9b196..86c567f531f3 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -1,7 +1,10 @@ #ifndef _NET_FLOW_OFFLOAD_H #define _NET_FLOW_OFFLOAD_H +#include <linux/kernel.h> +#include <linux/list.h> #include <net/flow_dissector.h> +#include <linux/rhashtable.h> struct flow_match { struct flow_dissector *dissector; @@ -9,6 +12,10 @@ struct flow_match { void *key; }; +struct flow_match_meta { + struct flow_dissector_key_meta *key, *mask; +}; + struct flow_match_basic { struct flow_dissector_key_basic *key, *mask; }; @@ -63,6 +70,8 @@ struct flow_match_enc_opts { struct flow_rule; +void flow_rule_match_meta(const struct flow_rule *rule, + struct flow_match_meta *out); void flow_rule_match_basic(const struct flow_rule *rule, struct flow_match_basic *out); void flow_rule_match_control(const struct flow_rule *rule, @@ -109,6 +118,8 @@ enum flow_action_id { FLOW_ACTION_GOTO, FLOW_ACTION_REDIRECT, FLOW_ACTION_MIRRED, + FLOW_ACTION_REDIRECT_INGRESS, + FLOW_ACTION_MIRRED_INGRESS, FLOW_ACTION_VLAN_PUSH, FLOW_ACTION_VLAN_POP, FLOW_ACTION_VLAN_MANGLE, @@ -118,10 +129,16 @@ enum flow_action_id { FLOW_ACTION_ADD, FLOW_ACTION_CSUM, FLOW_ACTION_MARK, + FLOW_ACTION_PTYPE, FLOW_ACTION_WAKE, FLOW_ACTION_QUEUE, FLOW_ACTION_SAMPLE, FLOW_ACTION_POLICE, + FLOW_ACTION_CT, + FLOW_ACTION_MPLS_PUSH, + FLOW_ACTION_MPLS_POP, + FLOW_ACTION_MPLS_MANGLE, + NUM_FLOW_ACTIONS, }; /* This is mirroring enum pedit_header_type definition for easy mapping between @@ -137,8 +154,12 @@ enum flow_action_mangle_base { FLOW_ACT_MANGLE_HDR_TYPE_UDP, }; +typedef void (*action_destr)(void *priv); + struct flow_action_entry { enum flow_action_id id; + action_destr destructor; + void *destructor_priv; union { u32 chain_index; /* FLOW_ACTION_GOTO */ struct net_device *dev; /* FLOW_ACTION_REDIRECT */ @@ -153,9 +174,10 @@ struct flow_action_entry { u32 mask; u32 val; } mangle; - const struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */ + struct ip_tunnel_info *tunnel; /* FLOW_ACTION_TUNNEL_ENCAP */ u32 csum_flags; /* FLOW_ACTION_CSUM */ u32 mark; /* FLOW_ACTION_MARK */ + u16 ptype; /* FLOW_ACTION_PTYPE */ struct { /* FLOW_ACTION_QUEUE */ u32 ctx; u32 index; @@ -171,6 +193,26 @@ struct flow_action_entry { s64 burst; u64 rate_bytes_ps; } police; + struct { /* FLOW_ACTION_CT */ + int action; + u16 zone; + } ct; + struct { /* FLOW_ACTION_MPLS_PUSH */ + u32 label; + __be16 proto; + u8 tc; + u8 bos; + u8 ttl; + } mpls_push; + struct { /* FLOW_ACTION_MPLS_POP */ + __be16 proto; + } mpls_pop; + struct { /* FLOW_ACTION_MPLS_MANGLE */ + u32 label; + u8 tc; + u8 bos; + u8 ttl; + } mpls_mangle; }; }; @@ -225,4 +267,150 @@ static inline void flow_stats_update(struct flow_stats *flow_stats, flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused); } +enum flow_block_command { + FLOW_BLOCK_BIND, + FLOW_BLOCK_UNBIND, +}; + +enum flow_block_binder_type { + FLOW_BLOCK_BINDER_TYPE_UNSPEC, + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS, 
+ FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS, +}; + +struct flow_block { + struct list_head cb_list; +}; + +struct netlink_ext_ack; + +struct flow_block_offload { + enum flow_block_command command; + enum flow_block_binder_type binder_type; + bool block_shared; + bool unlocked_driver_cb; + struct net *net; + struct flow_block *block; + struct list_head cb_list; + struct list_head *driver_block_list; + struct netlink_ext_ack *extack; +}; + +enum tc_setup_type; +typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data, + void *cb_priv); + +struct flow_block_cb { + struct list_head driver_list; + struct list_head list; + flow_setup_cb_t *cb; + void *cb_ident; + void *cb_priv; + void (*release)(void *cb_priv); + unsigned int refcnt; +}; + +struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + void (*release)(void *cb_priv)); +void flow_block_cb_free(struct flow_block_cb *block_cb); + +struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block, + flow_setup_cb_t *cb, void *cb_ident); + +void *flow_block_cb_priv(struct flow_block_cb *block_cb); +void flow_block_cb_incref(struct flow_block_cb *block_cb); +unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb); + +static inline void flow_block_cb_add(struct flow_block_cb *block_cb, + struct flow_block_offload *offload) +{ + list_add_tail(&block_cb->list, &offload->cb_list); +} + +static inline void flow_block_cb_remove(struct flow_block_cb *block_cb, + struct flow_block_offload *offload) +{ + list_move(&block_cb->list, &offload->cb_list); +} + +bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident, + struct list_head *driver_block_list); + +int flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_list, + flow_setup_cb_t *cb, + void *cb_ident, void *cb_priv, bool ingress_only); + +enum flow_cls_command { + FLOW_CLS_REPLACE, + FLOW_CLS_DESTROY, + FLOW_CLS_STATS, + FLOW_CLS_TMPLT_CREATE, + FLOW_CLS_TMPLT_DESTROY, +}; + +struct flow_cls_common_offload { + u32 chain_index; + __be16 protocol; + u32 prio; + struct netlink_ext_ack *extack; +}; + +struct flow_cls_offload { + struct flow_cls_common_offload common; + enum flow_cls_command command; + unsigned long cookie; + struct flow_rule *rule; + struct flow_stats stats; + u32 classid; +}; + +static inline struct flow_rule * +flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd) +{ + return flow_cmd->rule; +} + +static inline void flow_block_init(struct flow_block *flow_block) +{ + INIT_LIST_HEAD(&flow_block->cb_list); +} + +typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, + enum tc_setup_type type, void *type_data); + +typedef void flow_indr_block_ing_cmd_t(struct net_device *dev, + flow_indr_block_bind_cb_t *cb, + void *cb_priv, + enum flow_block_command command); + +struct flow_indr_block_ing_entry { + flow_indr_block_ing_cmd_t *cb; + struct list_head list; +}; + +void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry); + +void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry); + +int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, + flow_indr_block_bind_cb_t *cb, + void *cb_ident); + +void __flow_indr_block_cb_unregister(struct net_device *dev, + flow_indr_block_bind_cb_t *cb, + void *cb_ident); + +int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, + flow_indr_block_bind_cb_t *cb, void *cb_ident); + +void flow_indr_block_cb_unregister(struct net_device *dev, + 
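/*
 * Editor's sketch, not part of the patch: a driver that only needs one
 * ingress block callback can delegate the whole FLOW_BLOCK_BIND/UNBIND
 * handling above to flow_block_cb_setup_simple() from its ndo_setup_tc().
 * The "foo_*" names are hypothetical; foo_block_cb() is the driver's
 * flow_setup_cb_t.
 */
static LIST_HEAD(foo_block_cb_list);

static int foo_block_cb(enum tc_setup_type type, void *type_data,
                        void *cb_priv)
{
        switch (type) {
        case TC_SETUP_CLSFLOWER:
                /* dispatch to the driver's flower offload code here */
                return -EOPNOTSUPP;
        default:
                return -EOPNOTSUPP;
        }
}

static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
                        void *type_data)
{
        void *priv = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_BLOCK:
                /* final argument: accept clsact ingress blocks only */
                return flow_block_cb_setup_simple(type_data,
                                                  &foo_block_cb_list,
                                                  foo_block_cb, priv, priv,
                                                  true);
        default:
                return -EOPNOTSUPP;
        }
}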
flow_indr_block_bind_cb_t *cb, + void *cb_ident); + +void flow_indr_block_call(struct net_device *dev, + struct flow_block_offload *bo, + enum flow_block_command command); + #endif /* _NET_FLOW_OFFLOAD_H */ diff --git a/include/net/fq.h b/include/net/fq.h index d126b5d20261..2ad85e683041 100644 --- a/include/net/fq.h +++ b/include/net/fq.h @@ -69,7 +69,7 @@ struct fq { struct list_head backlogs; spinlock_t lock; u32 flows_cnt; - u32 perturbation; + siphash_key_t perturbation; u32 limit; u32 memory_limit; u32 memory_usage; diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h index be40a4b327e3..107c0d700ed6 100644 --- a/include/net/fq_impl.h +++ b/include/net/fq_impl.h @@ -108,7 +108,7 @@ begin: static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb) { - u32 hash = skb_get_hash_perturb(skb, fq->perturbation); + u32 hash = skb_get_hash_perturb(skb, &fq->perturbation); return reciprocal_scale(hash, fq->flows_cnt); } @@ -308,7 +308,7 @@ static int fq_init(struct fq *fq, int flows_cnt) INIT_LIST_HEAD(&fq->backlogs); spin_lock_init(&fq->lock); fq->flows_cnt = max_t(u32, flows_cnt, 1); - fq->perturbation = prandom_u32(); + get_random_bytes(&fq->perturbation, sizeof(fq->perturbation)); fq->quantum = 300; fq->limit = 8192; fq->memory_limit = 16 << 20; /* 16 MBytes */ diff --git a/include/net/gue.h b/include/net/gue.h index fdad41469b65..3a6595bfa641 100644 --- a/include/net/gue.h +++ b/include/net/gue.h @@ -60,7 +60,7 @@ struct guehdr { /* Private flags in the private option extension */ -#define GUE_PFLAG_REMCSUM htonl(1 << 31) +#define GUE_PFLAG_REMCSUM htonl(1U << 31) #define GUE_PLEN_REMCSUM 4 #define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM) diff --git a/include/net/hwbm.h b/include/net/hwbm.h index 89085e2e2da5..c81444611a22 100644 --- a/include/net/hwbm.h +++ b/include/net/hwbm.h @@ -12,18 +12,22 @@ struct hwbm_pool { /* constructor called during alocation */ int (*construct)(struct hwbm_pool *bm_pool, void *buf); /* protect acces to the buffer counter*/ - spinlock_t lock; + struct mutex buf_lock; /* private data */ void *priv; }; #ifdef CONFIG_HWBM void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf); int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp); -int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp); +int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num); #else -void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {} -int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; } -int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) +static inline void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {} + +static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) +{ return 0; } + +static inline int hwbm_pool_add(struct hwbm_pool *bm_pool, + unsigned int buf_num) { return 0; } #endif /* CONFIG_HWBM */ #endif /* _HWBM_H */ diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 50037913c9b1..a01981d7108f 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -89,9 +89,9 @@ struct ip6_sf_socklist { struct ipv6_mc_socklist { struct in6_addr addr; int ifindex; + unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ struct ipv6_mc_socklist __rcu *next; rwlock_t sflock; - unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ struct ip6_sf_socklist *sflist; struct rcu_head rcu; }; diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 975901a95c0f..ae2ba897675c 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -25,6 +25,7 @@ 
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); int inet_accept(struct socket *sock, struct socket *newsock, int flags, bool kern); +int inet_send_prepare(struct sock *sk); int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index c57d53e7e02c..895546058a20 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -97,7 +97,7 @@ struct inet_connection_sock { const struct tcp_congestion_ops *icsk_ca_ops; const struct inet_connection_sock_af_ops *icsk_af_ops; const struct tcp_ulp_ops *icsk_ulp_ops; - void *icsk_ulp_data; + void __rcu *icsk_ulp_data; void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq); struct hlist_node icsk_listen_portaddr_node; unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 378904ee9129..bac79e817776 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -3,19 +3,24 @@ #define __NET_FRAG_H__ #include <linux/rhashtable-types.h> +#include <linux/completion.h> -struct netns_frags { +/* Per netns frag queues directory */ +struct fqdir { /* sysctls */ long high_thresh; long low_thresh; int timeout; int max_dist; struct inet_frags *f; + struct net *net; + bool dead; struct rhashtable rhashtable ____cacheline_aligned_in_smp; /* Keep atomic mem on separate cachelines in structs that include it */ atomic_long_t mem ____cacheline_aligned_in_smp; + struct work_struct destroy_work; }; /** @@ -24,11 +29,13 @@ struct netns_frags { * @INET_FRAG_FIRST_IN: first fragment has arrived * @INET_FRAG_LAST_IN: final fragment has arrived * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction + * @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable */ enum { INET_FRAG_FIRST_IN = BIT(0), INET_FRAG_LAST_IN = BIT(1), INET_FRAG_COMPLETE = BIT(2), + INET_FRAG_HASH_DEAD = BIT(3), }; struct frag_v4_compare_key { @@ -64,7 +71,7 @@ struct frag_v6_compare_key { * @meat: length of received fragments so far * @flags: fragment queue flags * @max_size: maximum received fragment size - * @net: namespace that this frag belongs to + * @fqdir: pointer to struct fqdir * @rcu: rcu head for freeing deferall */ struct inet_frag_queue { @@ -84,7 +91,7 @@ struct inet_frag_queue { int meat; __u8 flags; u16 max_size; - struct netns_frags *net; + struct fqdir *fqdir; struct rcu_head rcu; }; @@ -98,21 +105,25 @@ struct inet_frags { struct kmem_cache *frags_cachep; const char *frags_cache_name; struct rhashtable_params rhash_params; + refcount_t refcnt; + struct completion completion; }; int inet_frags_init(struct inet_frags *); void inet_frags_fini(struct inet_frags *); -static inline int inet_frags_init_net(struct netns_frags *nf) +int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net); + +static inline void fqdir_pre_exit(struct fqdir *fqdir) { - atomic_long_set(&nf->mem, 0); - return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params); + fqdir->high_thresh = 0; /* prevent creation of new frags */ + fqdir->dead = true; } -void inet_frags_exit_net(struct netns_frags *nf); +void fqdir_exit(struct fqdir *fqdir); void inet_frag_kill(struct inet_frag_queue *q); void inet_frag_destroy(struct inet_frag_queue *q); -struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void 
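/*
 * Editor's sketch, not part of the patch: with the per-netns reassembly
 * state turned into a dynamically allocated struct fqdir, a user wires it
 * up from pernet ops roughly as below.  Only fqdir_init(), fqdir_pre_exit()
 * and fqdir_exit() come from this header; the "example_frags*" names, the
 * net_generic() id and the threshold values are illustrative, and the
 * struct inet_frags would be registered with inet_frags_init() elsewhere.
 */
struct example_frags_net {
        struct fqdir *fqdir;
};

static unsigned int example_frags_net_id __read_mostly;
static struct inet_frags example_frags;

static int example_frags_init_net(struct net *net)
{
        struct example_frags_net *xn = net_generic(net, example_frags_net_id);
        int err;

        err = fqdir_init(&xn->fqdir, &example_frags, net);
        if (err)
                return err;

        xn->fqdir->high_thresh = 4 * 1024 * 1024;
        xn->fqdir->low_thresh  = 3 * 1024 * 1024;
        xn->fqdir->timeout     = 30 * HZ;
        return 0;
}

static void example_frags_pre_exit_net(struct net *net)
{
        struct example_frags_net *xn = net_generic(net, example_frags_net_id);

        fqdir_pre_exit(xn->fqdir);      /* no new frag queues from here on */
}

static void example_frags_exit_net(struct net *net)
{
        struct example_frags_net *xn = net_generic(net, example_frags_net_id);

        fqdir_exit(xn->fqdir);          /* freed after the RCU grace period */
}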
*key); +struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key); /* Free all skbs in the queue; return the sum of their truesizes. */ unsigned int inet_frag_rbtree_purge(struct rb_root *root); @@ -125,19 +136,19 @@ static inline void inet_frag_put(struct inet_frag_queue *q) /* Memory Tracking Functions. */ -static inline long frag_mem_limit(const struct netns_frags *nf) +static inline long frag_mem_limit(const struct fqdir *fqdir) { - return atomic_long_read(&nf->mem); + return atomic_long_read(&fqdir->mem); } -static inline void sub_frag_mem_limit(struct netns_frags *nf, long val) +static inline void sub_frag_mem_limit(struct fqdir *fqdir, long val) { - atomic_long_sub(val, &nf->mem); + atomic_long_sub(val, &fqdir->mem); } -static inline void add_frag_mem_limit(struct netns_frags *nf, long val) +static inline void add_frag_mem_limit(struct fqdir *fqdir, long val) { - atomic_long_add(val, &nf->mem); + atomic_long_add(val, &fqdir->mem); } /* RFC 3168 support : @@ -160,7 +171,7 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb, void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, struct sk_buff *parent); void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, - void *reasm_data); + void *reasm_data, bool try_coalesce); struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q); #endif diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 7769c9b36d75..34c4436fd18f 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -160,6 +160,7 @@ struct inet_cork { char priority; __u16 gso_size; u64 transmit_time; + u32 mark; }; struct inet_cork_full { diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index c2f756aedc54..dfd919b3119e 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -70,6 +70,8 @@ struct inet_timewait_sock { tw_flowlabel : 20, tw_pad : 2, /* 2 bits hole */ tw_tos : 8; + u32 tw_txhash; + u32 tw_priority; struct timer_list tw_timer; struct inet_bind_bucket *tw_tb; }; diff --git a/include/net/ip.h b/include/net/ip.h index 49c672c8cdae..a2c61c36dc4a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -88,6 +88,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm, { ipcm_init(ipcm); + ipcm->sockc.mark = inet->sk.sk_mark; ipcm->sockc.tsflags = inet->sk.sk_tsflags; ipcm->oif = inet->sk.sk_bound_dev_if; ipcm->addr = inet->inet_saddr; @@ -161,6 +162,44 @@ int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb); int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb); int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); + +struct ip_fraglist_iter { + struct sk_buff *frag; + struct iphdr *iph; + int offset; + unsigned int hlen; +}; + +void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph, + unsigned int hlen, struct ip_fraglist_iter *iter); +void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter); + +static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter) +{ + struct sk_buff *skb = iter->frag; + + iter->frag = skb->next; + skb_mark_not_on_list(skb); + + return skb; +} + +struct ip_frag_state { + bool DF; + unsigned int hlen; + unsigned int ll_rs; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + __be16 not_last_frag; +}; + +void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs, + unsigned int mtu, 
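/*
 * Editor's sketch, not part of the patch: the ip_frag_state iterator is
 * meant to let callers outside ip_do_fragment() drive the IPv4 slow-path
 * fragmenter themselves.  A stripped-down user could look like the function
 * below; ownership of the original skb and error unwinding are simplified,
 * and ip_frag_next() is assumed to return an ERR_PTR() on allocation
 * failure.
 */
static int example_ip_fragment(struct net *net, struct sock *sk,
                               struct sk_buff *skb, unsigned int mtu,
                               int (*output)(struct net *, struct sock *,
                                             struct sk_buff *))
{
        unsigned int hlen = ip_hdrlen(skb);
        struct ip_frag_state state;
        int err = 0;

        ip_frag_init(skb, hlen, 0 /* ll_rs */, mtu, false /* DF */, &state);

        while (state.left > 0) {
                struct sk_buff *frag = ip_frag_next(skb, &state);

                if (IS_ERR(frag)) {
                        err = PTR_ERR(frag);
                        break;
                }
                err = output(net, sk, frag);
                if (err)
                        break;
        }

        consume_skb(skb);       /* the original skb has been carved up */
        return err;
}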
bool DF, struct ip_frag_state *state); +struct sk_buff *ip_frag_next(struct sk_buff *skb, + struct ip_frag_state *state); + void ip_send_check(struct iphdr *ip); int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); @@ -241,7 +280,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, - unsigned int len); + unsigned int len, u64 transmit_time); #define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) #define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 855b352b660f..4b5656c71abc 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -49,6 +49,7 @@ struct fib6_config { u16 fc_delete_all_nh : 1, fc_ignore_dev_down:1, __unused : 14; + u32 fc_nh_id; struct in6_addr fc_dst; struct in6_addr fc_src; @@ -127,6 +128,9 @@ struct fib6_nh { #ifdef CONFIG_IPV6_ROUTER_PREF unsigned long last_probe; #endif + + struct rt6_info * __percpu *rt6i_pcpu; + struct rt6_exception_bucket __rcu *rt6i_exception_bucket; }; struct fib6_info { @@ -139,7 +143,10 @@ struct fib6_info { * destination, but not the same gateway. nsiblings is just a cache * to speed up lookup. */ - struct list_head fib6_siblings; + union { + struct list_head fib6_siblings; + struct list_head nh_list; + }; unsigned int fib6_nsiblings; refcount_t fib6_ref; @@ -152,22 +159,19 @@ struct fib6_info { struct rt6key fib6_src; struct rt6key fib6_prefsrc; - struct rt6_info * __percpu *rt6i_pcpu; - struct rt6_exception_bucket __rcu *rt6i_exception_bucket; - u32 fib6_metric; u8 fib6_protocol; u8 fib6_type; - u8 exception_bucket_flushed:1, - should_flush:1, + u8 should_flush:1, dst_nocount:1, dst_nopolicy:1, dst_host:1, fib6_destroying:1, - unused:2; + unused:3; - struct fib6_nh fib6_nh; struct rcu_head rcu; + struct nexthop *nh; + struct fib6_nh fib6_nh[0]; }; struct rt6_info { @@ -276,7 +280,7 @@ static inline void ip6_rt_put(struct rt6_info *rt) dst_release(&rt->dst); } -struct fib6_info *fib6_info_alloc(gfp_t gfp_flags); +struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh); void fib6_info_destroy_rcu(struct rcu_head *head); static inline void fib6_info_hold(struct fib6_info *f6i) @@ -312,6 +316,7 @@ struct fib6_walker { enum fib6_walk_state state; unsigned int skip; unsigned int count; + unsigned int skip_in_node; int (*func)(struct fib6_walker *); void *args; }; @@ -373,6 +378,7 @@ typedef struct rt6_info *(*pol_lookup_t)(struct net *, struct fib6_entry_notifier_info { struct fib_notifier_info info; /* must be first */ struct fib6_info *rt; + unsigned int nsiblings; }; /* @@ -437,16 +443,22 @@ void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr) rcu_read_unlock(); } -static inline struct net_device *fib6_info_nh_dev(const struct fib6_info *f6i) -{ - return f6i->fib6_nh.fib_nh_dev; -} - int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void fib6_nh_release(struct fib6_nh *fib6_nh); +int call_fib6_entry_notifiers(struct net *net, + enum fib_event_type event_type, + struct fib6_info *rt, + struct netlink_ext_ack *extack); +int call_fib6_multipath_entry_notifiers(struct net *net, + enum fib_event_type event_type, + struct fib6_info *rt, + unsigned int nsiblings, + struct netlink_ext_ack *extack); +void 
fib6_rt_update(struct net *net, struct fib6_info *rt, + struct nl_info *info); void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, unsigned int flags); @@ -480,6 +492,7 @@ int fib6_tables_dump(struct net *net, struct notifier_block *nb); void fib6_update_sernum(struct net *net, struct fib6_info *rt); void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt); +void fib6_update_sernum_stub(struct net *net, struct fib6_info *f6i); void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val); static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric) diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index ee7405e759ba..b69c16cbbf71 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -27,6 +27,7 @@ struct route_info { #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/route.h> +#include <net/nexthop.h> #define RT6_LOOKUP_F_IFACE 0x00000001 #define RT6_LOOKUP_F_REACHABLE 0x00000002 @@ -35,6 +36,7 @@ struct route_info { #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010 #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 #define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040 +#define RT6_LOOKUP_F_DST_NOREF 0x00000080 /* We do not (yet ?) support IPv6 jumbograms (RFC 2675) * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header @@ -66,11 +68,14 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr) (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } +/* fib entries using a nexthop object can not be coalesced into + * a multipath route + */ static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i) { /* the RTF_ADDRCONF flag filters out RA's */ - return !(f6i->fib6_flags & RTF_ADDRCONF) && - f6i->fib6_nh.fib_nh_gw_family; + return !(f6i->fib6_flags & RTF_ADDRCONF) && !f6i->nh && + f6i->fib6_nh->fib_nh_gw_family; } void ip6_route_input(struct sk_buff *skb); @@ -79,6 +84,10 @@ struct dst_entry *ip6_route_input_lookup(struct net *net, struct flowi6 *fl6, const struct sk_buff *skb, int flags); +struct dst_entry *ip6_route_output_flags_noref(struct net *net, + const struct sock *sk, + struct flowi6 *fl6, int flags); + struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, struct flowi6 *fl6, int flags); @@ -89,6 +98,16 @@ static inline struct dst_entry *ip6_route_output(struct net *net, return ip6_route_output_flags(net, sk, fl6, 0); } +/* Only conditionally release dst if flags indicates + * !RT6_LOOKUP_F_DST_NOREF or dst is in uncached_list. 
+ */ +static inline void ip6_rt_put_flags(struct rt6_info *rt, int flags) +{ + if (!(flags & RT6_LOOKUP_F_DST_NOREF) || + !list_empty(&rt->rt6i_uncached)) + ip6_rt_put(rt); +} + struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, const struct sk_buff *skb, int flags); struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, @@ -178,7 +197,7 @@ struct rt6_rtnl_dump_arg { struct fib_dump_filter filter; }; -int rt6_dump_route(struct fib6_info *f6i, void *p_arg); +int rt6_dump_route(struct fib6_info *f6i, void *p_arg, unsigned int skip); void rt6_mtu_change(struct net_device *dev, unsigned int mtu); void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); @@ -275,8 +294,13 @@ static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt, static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b) { - struct fib6_nh *nha = &a->fib6_nh, *nhb = &b->fib6_nh; + struct fib6_nh *nha, *nhb; + + if (a->nh || b->nh) + return nexthop_cmp(a->nh, b->nh); + nha = a->fib6_nh; + nhb = b->fib6_nh; return nha->fib_nh_dev == nhb->fib_nh_dev && ipv6_addr_equal(&nha->fib_nh_gw6, &nhb->fib_nh_gw6) && !lwtunnel_cmp_encap(nha->fib_nh_lws, nhb->fib_nh_lws); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index bbeff32fb6cb..ab1ca9e238d2 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -40,6 +40,7 @@ struct fib_config { u32 fc_flags; u32 fc_priority; __be32 fc_prefsrc; + u32 fc_nh_id; struct nlattr *fc_mx; struct rtnexthop *fc_mp; int fc_mx_len; @@ -125,9 +126,12 @@ struct fib_nh { * This structure contains data shared by many of routes. */ +struct nexthop; + struct fib_info { struct hlist_node fib_hash; struct hlist_node fib_lhash; + struct list_head nh_list; struct net *fib_net; int fib_treeref; refcount_t fib_clntref; @@ -146,9 +150,10 @@ struct fib_info { #define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1] int fib_nhs; bool fib_nh_is_v6; + bool nh_updated; + struct nexthop *nh; struct rcu_head rcu; struct fib_nh fib_nh[0]; -#define fib_dev fib_nh[0].fib_nh_dev }; @@ -185,18 +190,14 @@ struct fib_result_nl { int err; }; -static inline struct fib_nh_common *fib_info_nhc(struct fib_info *fi, int nhsel) -{ - return &fi->fib_nh[nhsel].nh_common; -} - #ifdef CONFIG_IP_MULTIPLE_TABLES #define FIB_TABLE_HASHSZ 256 #else #define FIB_TABLE_HASHSZ 2 #endif -__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); +__be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc, + unsigned char scope); __be32 fib_result_prefsrc(struct net *net, struct fib_result *res); #define FIB_RES_NHC(res) ((res).nhc) @@ -227,6 +228,7 @@ int call_fib4_notifiers(struct net *net, enum fib_event_type event_type, int __net_init fib4_notifier_init(struct net *net); void __net_exit fib4_notifier_exit(struct net *net); +void fib_info_notify_update(struct net *net, struct nl_info *info); void fib_notify(struct net *net, struct notifier_block *nb); struct fib_table { @@ -243,6 +245,8 @@ struct fib_dump_filter { /* filter_set is an optimization that an entry is set */ bool filter_set; bool dump_all_families; + bool dump_routes; + bool dump_exceptions; unsigned char protocol; unsigned char rt_type; unsigned int flags; @@ -425,11 +429,14 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); int fib_sync_down_addr(struct net_device *dev, __be32 local); int fib_sync_up(struct net_device *dev, unsigned char nh_flags); void fib_sync_mtu(struct 
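/*
 * Editor's sketch, not part of the patch: a lookup done with the new noref
 * variant stays valid only under RCU and must be released with
 * ip6_rt_put_flags() using the same flags, so that only genuinely
 * referenced dsts (and entries on the uncached list) are put.  Whether the
 * caller or ip6_route_output_flags_noref() sets RT6_LOOKUP_F_DST_NOREF is
 * not visible in this header; the sketch assumes the flag ends up set.
 */
static void example_noref_output_lookup(struct net *net, struct flowi6 *fl6)
{
        int flags = RT6_LOOKUP_F_DST_NOREF;
        struct dst_entry *dst;
        struct rt6_info *rt;

        rcu_read_lock();
        dst = ip6_route_output_flags_noref(net, NULL, fl6, flags);
        rt = container_of(dst, struct rt6_info, dst);

        /* ... inspect rt while still inside the RCU read-side section ... */

        ip6_rt_put_flags(rt, flags);
        rcu_read_unlock();
}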
net_device *dev, u32 orig_mtu); +void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig); #ifdef CONFIG_IP_ROUTE_MULTIPATH int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, const struct sk_buff *skb, struct flow_keys *flkeys); #endif +int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope, + struct netlink_ext_ack *extack); void fib_select_multipath(struct fib_result *res, int hash); void fib_select_path(struct net *net, struct fib_result *res, struct flowi4 *fl4, const struct sk_buff *skb); @@ -451,11 +458,18 @@ static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) { #ifdef CONFIG_IP_ROUTE_CLASSID struct fib_nh_common *nhc = res->nhc; - struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common); #ifdef CONFIG_IP_MULTIPLE_TABLES u32 rtag; #endif - *itag = nh->nh_tclassid << 16; + if (nhc->nhc_family == AF_INET) { + struct fib_nh *nh; + + nh = container_of(nhc, struct fib_nh, nh_common); + *itag = nh->nh_tclassid << 16; + } else { + *itag = 0; + } + #ifdef CONFIG_IP_MULTIPLE_TABLES rtag = res->tclassid; if (*itag == 0) @@ -465,6 +479,7 @@ static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) #endif } +void fib_flush(struct net *net); void free_fib_info(struct fib_info *fi); static inline void fib_info_hold(struct fib_info *fi) @@ -498,7 +513,7 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh, struct netlink_callback *cb); int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh, - unsigned char *flags, bool skip_oif); + u8 rt_family, unsigned char *flags, bool skip_oif); int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh, - int nh_weight); + int nh_weight, u8 rt_family); #endif /* _NET_FIB_H */ diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 2ac40135b576..078887c8c586 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -603,6 +603,7 @@ struct ip_vs_dest_user_kern { u16 tun_type; /* tunnel type */ __be16 tun_port; /* tunnel port */ + u16 tun_flags; /* tunnel flags */ }; @@ -665,6 +666,7 @@ struct ip_vs_dest { atomic_t last_weight; /* server latest weight */ __u16 tun_type; /* tunnel type */ __be16 tun_port; /* tunnel port */ + __u16 tun_flags; /* tunnel flags */ refcount_t refcnt; /* reference counter */ struct ip_vs_stats stats; /* statistics */ @@ -808,11 +810,12 @@ struct ipvs_master_sync_state { struct ip_vs_sync_buff *sync_buff; unsigned long sync_queue_len; unsigned int sync_queue_delay; - struct task_struct *master_thread; struct delayed_work master_wakeup_work; struct netns_ipvs *ipvs; }; +struct ip_vs_sync_thread_data; + /* How much time to keep dests in trash */ #define IP_VS_DEST_TRASH_PERIOD (120 * HZ) @@ -886,6 +889,7 @@ struct netns_ipvs { struct delayed_work defense_work; /* Work handler */ int drop_rate; int drop_counter; + int old_secure_tcp; atomic_t dropentry; /* locks in ctl.c */ spinlock_t dropentry_lock; /* drop entry handling */ @@ -943,7 +947,8 @@ struct netns_ipvs { spinlock_t sync_lock; struct ipvs_master_sync_state *ms; spinlock_t sync_buff_lock; - struct task_struct **backup_threads; + struct ip_vs_sync_thread_data *master_tinfo; + struct ip_vs_sync_thread_data *backup_tinfo; int threads_mask; volatile int sync_state; struct mutex sync_mutex; @@ -1404,6 +1409,9 @@ bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, struct ip_vs_dest * ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport); 
+struct ip_vs_dest *ip_vs_find_tunnel(struct netns_ipvs *ipvs, int af, + const union nf_inet_addr *daddr, + __be16 tun_port); int ip_vs_use_count_inc(void); void ip_vs_use_count_dec(void); @@ -1497,6 +1505,9 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs) static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } #endif +#define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \ + IP_VS_CONN_F_FWD_MASK) + /* ip_vs_fwd_tag returns the forwarding tag of the connection */ #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 60d9480bc4d1..009605c56f20 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -13,6 +13,7 @@ #include <linux/hardirq.h> #include <linux/jhash.h> #include <linux/refcount.h> +#include <linux/jump_label_ratelimit.h> #include <net/if_inet6.h> #include <net/ndisc.h> #include <net/flow.h> @@ -150,6 +151,49 @@ struct frag_hdr { #define IP6_MF 0x0001 #define IP6_OFFSET 0xFFF8 +struct ip6_fraglist_iter { + struct ipv6hdr *tmp_hdr; + struct sk_buff *frag; + int offset; + unsigned int hlen; + __be32 frag_id; + u8 nexthdr; +}; + +int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr, + u8 nexthdr, __be32 frag_id, + struct ip6_fraglist_iter *iter); +void ip6_fraglist_prepare(struct sk_buff *skb, struct ip6_fraglist_iter *iter); + +static inline struct sk_buff *ip6_fraglist_next(struct ip6_fraglist_iter *iter) +{ + struct sk_buff *skb = iter->frag; + + iter->frag = skb->next; + skb_mark_not_on_list(skb); + + return skb; +} + +struct ip6_frag_state { + u8 *prevhdr; + unsigned int hlen; + unsigned int mtu; + unsigned int left; + int offset; + int ptr; + int hroom; + int troom; + __be32 frag_id; + u8 nexthdr; +}; + +void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu, + unsigned short needed_tailroom, int hdr_room, u8 *prevhdr, + u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state); +struct sk_buff *ip6_frag_next(struct sk_buff *skb, + struct ip6_frag_state *state); + #define IP6_REPLY_MARK(net, mark) \ ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0) @@ -258,6 +302,13 @@ struct ipv6_txoptions { /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */ }; +/* flowlabel_reflect sysctl values */ +enum flowlabel_reflect { + FLOWLABEL_REFLECT_ESTABLISHED = 1, + FLOWLABEL_REFLECT_TCP_RESET = 2, + FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES = 4, +}; + struct ip6_flowlabel { struct ip6_flowlabel __rcu *next; __be32 label; @@ -339,7 +390,18 @@ static inline void txopt_put(struct ipv6_txoptions *opt) kfree_rcu(opt, rcu); } -struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); +struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label); + +extern struct static_key_false_deferred ipv6_flowlabel_exclusive; +static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, + __be32 label) +{ + if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key)) + return __fl6_sock_lookup(sk, label) ? 
: ERR_PTR(-ENOENT); + + return NULL; +} + struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, struct ipv6_txoptions *fopt); @@ -919,7 +981,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); * upper-layer output functions */ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, - __u32 mark, struct ipv6_txoptions *opt, int tclass); + __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority); int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h index 1f77fb4dc79d..a21e8b1381a1 100644 --- a/include/net/ipv6_frag.h +++ b/include/net/ipv6_frag.h @@ -67,6 +67,8 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) struct sk_buff *head; rcu_read_lock(); + if (fq->q.fqdir->dead) + goto out_rcu_unlock; spin_lock(&fq->q.lock); if (fq->q.flags & INET_FRAG_COMPLETE) diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index 6c0c4fde16f8..5c93e942c50b 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -45,6 +45,11 @@ struct ipv6_stub { struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void (*fib6_nh_release)(struct fib6_nh *fib6_nh); + void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt); + int (*ip6_del_rt)(struct net *net, struct fib6_info *rt); + void (*fib6_rt_update)(struct net *net, struct fib6_info *rt, + struct nl_info *info); + void (*udpv6_encap_enable)(void); void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr, const struct in6_addr *solicited_addr, diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index df528a623548..ea985aa7a6c5 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk); /* Access to a connection */ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); -int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); +void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit); void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 456f2edf78dc..523c6a09e1c8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -314,6 +314,8 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface * @BSS_CHANGED_FTM_RESPONDER: fime timing reasurement request responder * functionality changed for this BSS (AP mode). + * @BSS_CHANGED_TWT: TWT status changed + * @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed. 
* */ enum ieee80211_bss_change { @@ -344,6 +346,8 @@ enum ieee80211_bss_change { BSS_CHANGED_KEEP_ALIVE = 1<<24, BSS_CHANGED_MCAST_RATE = 1<<25, BSS_CHANGED_FTM_RESPONDER = 1<<26, + BSS_CHANGED_TWT = 1<<27, + BSS_CHANGED_HE_OBSS_PD = 1<<28, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -501,6 +505,8 @@ struct ieee80211_ftm_responder_params { * @he_support: does this BSS support HE * @twt_requester: does this BSS support TWT requester (relevant for managed * mode only, set if the AP advertises TWT responder role) + * @twt_responder: does this BSS support TWT requester (relevant for managed + * mode only, set if the AP advertises TWT responder role) * @assoc: association status * @ibss_joined: indicates whether this station is part of an IBSS * or not @@ -596,6 +602,8 @@ struct ieee80211_ftm_responder_params { * nontransmitted BSSIDs * @profile_periodicity: the least number of beacon frames need to be received * in order to discover all the nontransmitted BSSIDs in the set. + * @he_operation: HE operation information of the AP we are connected to + * @he_obss_pd: OBSS Packet Detection parameters. */ struct ieee80211_bss_conf { const u8 *bssid; @@ -608,6 +616,7 @@ struct ieee80211_bss_conf { u16 frame_time_rts_th; bool he_support; bool twt_requester; + bool twt_responder; /* association related data */ bool assoc, ibss_joined; bool ibss_creator; @@ -656,6 +665,8 @@ struct ieee80211_bss_conf { u8 bssid_indicator; bool ema_ap; u8 profile_periodicity; + struct ieee80211_he_operation he_operation; + struct ieee80211_he_obss_pd he_obss_pd; }; /** @@ -1053,11 +1064,13 @@ struct ieee80211_tx_info { * @sta: Station that the packet was transmitted for * @info: Basic tx status information * @skb: Packet skb (can be NULL if not provided by the driver) + * @rate: The TX rate that was used when sending the packet */ struct ieee80211_tx_status { struct ieee80211_sta *sta; struct ieee80211_tx_info *info; struct sk_buff *skb; + struct rate_info *rate; }; /** @@ -1697,6 +1710,9 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif); * a TKIP key if it only requires MIC space. Do not set together with * @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key. * @IEEE80211_KEY_FLAG_NO_AUTO_TX: Key needs explicit Tx activation. + * @IEEE80211_KEY_FLAG_GENERATE_MMIE: This flag should be set by the driver + * for a AES_CMAC key to indicate that it requires sequence number + * generation only */ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0), @@ -1709,6 +1725,7 @@ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7), IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8), IEEE80211_KEY_FLAG_NO_AUTO_TX = BIT(9), + IEEE80211_KEY_FLAG_GENERATE_MMIE = BIT(10), }; /** @@ -2263,8 +2280,9 @@ struct ieee80211_txq { * @IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID: Hardware supports multi BSSID * only for HE APs. Applies if @IEEE80211_HW_SUPPORTS_MULTI_BSSID is set. * - * @IEEE80211_HW_EXT_KEY_ID_NATIVE: Driver and hardware are supporting Extended - * Key ID and can handle two unicast keys per station for Rx and Tx. + * @IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT: The card and driver is only + * aggregating MPDUs with the same keyid, allowing mac80211 to keep Tx + * A-MPDU sessions active while rekeying with Extended Key ID. 
* * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ @@ -2317,7 +2335,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_TX_STATUS_NO_AMPDU_LEN, IEEE80211_HW_SUPPORTS_MULTI_BSSID, IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID, - IEEE80211_HW_EXT_KEY_ID_NATIVE, + IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -2445,6 +2463,8 @@ enum ieee80211_hw_flags { * * @weight_multiplier: Driver specific airtime weight multiplier used while * refilling deficit of each TXQ. + * + * @max_mtu: the max mtu could be set. */ struct ieee80211_hw { struct ieee80211_conf conf; @@ -2482,6 +2502,7 @@ struct ieee80211_hw { u8 max_nan_de_entries; u8 tx_sk_pacing_shift; u8 weight_multiplier; + u32 max_mtu; }; static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw, @@ -3905,7 +3926,8 @@ struct ieee80211_ops { struct ieee80211_channel *chan, int duration, enum ieee80211_roc_type type); - int (*cancel_remain_on_channel)(struct ieee80211_hw *hw); + int (*cancel_remain_on_channel)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx); void (*get_ringparam)(struct ieee80211_hw *hw, u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max); @@ -5936,7 +5958,6 @@ struct rate_control_ops { void (*add_sta_debugfs)(void *priv, void *priv_sta, struct dentry *dir); - void (*remove_sta_debugfs)(void *priv, void *priv_sta); u32 (*get_expected_throughput)(void *priv_sta); }; @@ -5948,29 +5969,6 @@ static inline int rate_supported(struct ieee80211_sta *sta, return (sta == NULL || sta->supp_rates[band] & BIT(index)); } -/** - * rate_control_send_low - helper for drivers for management/no-ack frames - * - * Rate control algorithms that agree to use the lowest rate to - * send management frames and NO_ACK data with the respective hw - * retries should use this in the beginning of their mac80211 get_rate - * callback. If true is returned the rate control can simply return. - * If false is returned we guarantee that sta and sta and priv_sta is - * not null. - * - * Rate control algorithms wishing to do more intelligent selection of - * rate for multicast/broadcast frames may choose to not use this. - * - * @sta: &struct ieee80211_sta pointer to the target destination. Note - * that this may be null. - * @priv_sta: private rate control structure. This may be null. - * @txrc: rate control information we sholud populate for mac80211. - */ -bool rate_control_send_low(struct ieee80211_sta *sta, - void *priv_sta, - struct ieee80211_tx_rate_control *txrc); - - static inline s8 rate_lowest_index(struct ieee80211_supported_band *sband, struct ieee80211_sta *sta) @@ -6248,11 +6246,37 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); * but for the duration of the frame handling. * However, also note that while in the wake_tx_queue() method, * rcu_read_lock() is already held. + * + * softirqs must also be disabled when this function is called. + * In process context, use ieee80211_tx_dequeue_ni() instead. */ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq); /** + * ieee80211_tx_dequeue_ni - dequeue a packet from a software tx queue + * (in process context) + * + * Like ieee80211_tx_dequeue() but can be called in process context + * (internally disables bottom halves). 
+ * + * @hw: pointer as obtained from ieee80211_alloc_hw() + * @txq: pointer obtained from station or virtual interface, or from + * ieee80211_next_txq() + */ +static inline struct sk_buff *ieee80211_tx_dequeue_ni(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct sk_buff *skb; + + local_bh_disable(); + skb = ieee80211_tx_dequeue(hw, txq); + local_bh_enable(); + + return skb; +} + +/** * ieee80211_next_txq - get next tx queue to pull packets from * * @hw: pointer as obtained from ieee80211_alloc_hw() diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 366150053043..b2f715ca0567 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -40,6 +40,7 @@ enum { ND_OPT_RDNSS = 25, /* RFC5006 */ ND_OPT_DNSSL = 31, /* RFC6106 */ ND_OPT_6CO = 34, /* RFC6775 */ + ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */ __ND_OPT_MAX }; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 12689ddfc24c..c7e15a213ef2 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -19,6 +19,7 @@ #include <net/netns/packet.h> #include <net/netns/ipv4.h> #include <net/netns/ipv6.h> +#include <net/netns/nexthop.h> #include <net/netns/ieee802154_6lowpan.h> #include <net/netns/sctp.h> #include <net/netns/dccp.h> @@ -51,7 +52,10 @@ struct bpf_prog; #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) struct net { - refcount_t passive; /* To decided when the network + /* First cache line can be often dirtied. + * Do not place here read-mostly fields. + */ + refcount_t passive; /* To decide when the network * namespace should be freed. */ refcount_t count; /* To decided when the network @@ -59,8 +63,13 @@ struct net { */ spinlock_t rules_mod_lock; - u32 hash_mix; - atomic64_t cookie_gen; + unsigned int dev_unreg_count; + + unsigned int dev_base_seq; /* protected by rtnl_mutex */ + int ifindex; + + spinlock_t nsid_lock; + atomic_t fnhe_genid; struct list_head list; /* list of network namespaces */ struct list_head exit_list; /* To linked to call pernet exit @@ -71,13 +80,16 @@ struct net { */ struct llist_node cleanup_list; /* namespaces on death row */ +#ifdef CONFIG_KEYS + struct key_tag *key_domain; /* Key domain of operation tag */ +#endif struct user_namespace *user_ns; /* Owning user namespace */ struct ucounts *ucounts; - spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; + struct list_head dev_base_head; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; @@ -90,24 +102,23 @@ struct net { struct uevent_sock *uevent_sock; /* uevent socket */ - struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; - unsigned int dev_base_seq; /* protected by rtnl_mutex */ - int ifindex; - unsigned int dev_unreg_count; + /* Note that @hash_mix can be read millions times per second, + * it is critical that it is on a read_mostly cache line. 
+ */ + u32 hash_mix; + + struct net_device *loopback_dev; /* The loopback */ /* core fib_rules */ struct list_head rules_ops; - struct list_head fib_notifier_ops; /* Populated by - * register_pernet_subsys() - */ - struct net_device *loopback_dev; /* The loopback */ struct netns_core core; struct netns_mib mib; struct netns_packet packet; struct netns_unix unx; + struct netns_nexthop nexthop; struct netns_ipv4 ipv4; #if IS_ENABLED(CONFIG_IPV6) struct netns_ipv6 ipv6; @@ -166,8 +177,10 @@ struct net { #ifdef CONFIG_XDP_SOCKETS struct netns_xdp xdp; #endif +#if IS_ENABLED(CONFIG_CRYPTO_USER) + struct sock *crypto_nlsk; +#endif struct sock *diag_nlsk; - atomic_t fnhe_genid; } __randomize_layout; #include <linux/seq_file_net.h> @@ -329,7 +342,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet) #define __net_initconst __initconst #endif -int peernet2id_alloc(struct net *net, struct net *peer); +int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp); int peernet2id(struct net *net, struct net *peer); bool peernet_has_id(struct net *net, struct net *peer); struct net *get_net_ns_by_id(struct net *net, int id); @@ -353,8 +366,13 @@ struct pernet_operations { * synchronize_rcu() related to these pernet_operations, * instead of separate synchronize_rcu() for every net. * Please, avoid synchronize_rcu() at all, where it's possible. + * + * Note that a combination of pre_exit() and exit() can + * be used, since a synchronize_rcu() is guaranteed between + * the calls. */ int (*init)(struct net *net); + void (*pre_exit)(struct net *net); void (*exit)(struct net *net); void (*exit_batch)(struct list_head *net_exit_list); unsigned int *id; diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h index 89808ce293c4..371696ec11b2 100644 --- a/include/net/netfilter/br_netfilter.h +++ b/include/net/netfilter/br_netfilter.h @@ -2,16 +2,22 @@ #ifndef _BR_NETFILTER_H_ #define _BR_NETFILTER_H_ +#include <linux/netfilter.h> + #include "../../../net/bridge/br_private.h" static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) { +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info *b = skb_ext_add(skb, SKB_EXT_BRIDGE_NF); if (b) memset(b, 0, sizeof(*b)); return b; +#else + return NULL; +#endif } void nf_bridge_update_protocol(struct sk_buff *skb); @@ -36,13 +42,18 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_ static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) { +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct net_bridge_port *port; port = br_port_get_rcu(dev); return port ? 
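/*
 * Editor's sketch, not part of the patch: the new pre_exit() hook documented
 * above pairs naturally with the fqdir conversion sketched earlier, that is,
 * unpublish in pre_exit() and free in exit(), with the core guaranteeing a
 * synchronize_rcu() between the two.  Names are carried over from that
 * hypothetical example.
 */
static struct pernet_operations example_frags_ops = {
        .init     = example_frags_init_net,
        .pre_exit = example_frags_pre_exit_net,
        .exit     = example_frags_exit_net,
        .id       = &example_frags_net_id,
        .size     = sizeof(struct example_frags_net),
};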
&port->br->fake_rtable : NULL; +#else + return NULL; +#endif } -struct net_device *setup_pre_routing(struct sk_buff *skb); +struct net_device *setup_pre_routing(struct sk_buff *skb, + const struct net *net); #if IS_ENABLED(CONFIG_IPV6) int br_validate_ipv6(struct net *net, struct sk_buff *skb); @@ -56,7 +67,7 @@ static inline int br_validate_ipv6(struct net *net, struct sk_buff *skb) } static inline unsigned int -br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb, +br_nf_pre_routing_ipv6(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { return NF_ACCEPT; diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h index c962e0be3549..a2bc16cdbcd3 100644 --- a/include/net/netfilter/ipv4/nf_dup_ipv4.h +++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h @@ -2,6 +2,9 @@ #ifndef _NF_DUP_IPV4_H_ #define _NF_DUP_IPV4_H_ +#include <linux/skbuff.h> +#include <uapi/linux/in.h> + void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum, const struct in_addr *gw, int oif); diff --git a/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h b/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h deleted file mode 100644 index c86895bc5eb6..000000000000 --- a/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * ICMPv6 tracking. - * - * 21 Apl 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp> - * - separated from nf_conntrack_icmp.h - * - * Derived from include/linux/netfiter_ipv4/ip_conntrack_icmp.h - */ - -#ifndef _NF_CONNTRACK_ICMPV6_H -#define _NF_CONNTRACK_ICMPV6_H - -#ifndef ICMPV6_NI_QUERY -#define ICMPV6_NI_QUERY 139 -#endif -#ifndef ICMPV6_NI_REPLY -#define ICMPV6_NI_REPLY 140 -#endif - -#endif /* _NF_CONNTRACK_ICMPV6_H */ diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h index 9d7e28736da9..6d31cd041143 100644 --- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h +++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h @@ -2,7 +2,9 @@ #ifndef _NF_DEFRAG_IPV6_H #define _NF_DEFRAG_IPV6_H -struct net; +#include <linux/skbuff.h> +#include <linux/types.h> + int nf_defrag_ipv6_enable(struct net *); int nf_ct_frag6_init(void); diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h index caf0c2dd8ee7..f6312bb04a13 100644 --- a/include/net/netfilter/ipv6/nf_dup_ipv6.h +++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h @@ -2,6 +2,8 @@ #ifndef _NF_DUP_IPV6_H_ #define _NF_DUP_IPV6_H_ +#include <linux/skbuff.h> + void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum, const struct in6_addr *gw, int oif); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index d2bc733a2ef1..9f551f3b69c6 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -13,17 +13,14 @@ #ifndef _NF_CONNTRACK_H #define _NF_CONNTRACK_H -#include <linux/netfilter/nf_conntrack_common.h> - #include <linux/bitops.h> #include <linux/compiler.h> -#include <linux/atomic.h> +#include <linux/netfilter/nf_conntrack_common.h> #include <linux/netfilter/nf_conntrack_tcp.h> #include <linux/netfilter/nf_conntrack_dccp.h> #include <linux/netfilter/nf_conntrack_sctp.h> #include <linux/netfilter/nf_conntrack_proto_gre.h> -#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> #include <net/netfilter/nf_conntrack_tuple.h> @@ -49,6 +46,7 @@ union nf_conntrack_expect_proto { struct nf_conntrack_net { unsigned int 
users4; unsigned int users6; + unsigned int users_bridge; }; #include <linux/types.h> @@ -69,7 +67,8 @@ struct nf_conn { struct nf_conntrack ct_general; spinlock_t lock; - u16 cpu; + /* jiffies32 when this ct is considered dead */ + u32 timeout; #ifdef CONFIG_NF_CONNTRACK_ZONES struct nf_conntrack_zone zone; @@ -81,9 +80,7 @@ struct nf_conn { /* Have we seen traffic both ways yet? (bitset) */ unsigned long status; - /* jiffies32 when this ct is considered dead */ - u32 timeout; - + u16 cpu; possible_net_t ct_net; #if IS_ENABLED(CONFIG_NF_NAT) @@ -148,16 +145,14 @@ void nf_conntrack_alter_reply(struct nf_conn *ct, int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, const struct nf_conn *ignored_conntrack); -#define NFCT_INFOMASK 7UL -#define NFCT_PTRMASK ~(NFCT_INFOMASK) - /* Return conntrack_info and tuple hash for given skb. */ static inline struct nf_conn * nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo) { - *ctinfo = skb->_nfct & NFCT_INFOMASK; + unsigned long nfct = skb_get_nfct(skb); - return (struct nf_conn *)(skb->_nfct & NFCT_PTRMASK); + *ctinfo = nfct & NFCT_INFOMASK; + return (struct nf_conn *)(nfct & NFCT_PTRMASK); } /* decrement reference count on a conntrack */ @@ -321,7 +316,7 @@ u32 nf_ct_get_id(const struct nf_conn *ct); static inline void nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) { - skb->_nfct = (unsigned long)ct | info; + skb_set_nfct(skb, (unsigned long)ct | info); } #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h index 1fee733c18a7..f7a060c6eb28 100644 --- a/include/net/netfilter/nf_conntrack_acct.h +++ b/include/net/netfilter/nf_conntrack_acct.h @@ -29,6 +29,7 @@ struct nf_conn_acct *nf_conn_acct_find(const struct nf_conn *ct) static inline struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp) { +#if IS_ENABLED(CONFIG_NF_CONNTRACK) struct net *net = nf_ct_net(ct); struct nf_conn_acct *acct; @@ -41,22 +42,32 @@ struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp) return acct; -}; +#else + return NULL; +#endif +} /* Check if connection tracking accounting is enabled */ static inline bool nf_ct_acct_enabled(struct net *net) { +#if IS_ENABLED(CONFIG_NF_CONNTRACK) return net->ct.sysctl_acct != 0; +#else + return false; +#endif } /* Enable/disable connection tracking accounting */ static inline void nf_ct_set_acct(struct net *net, bool enable) { +#if IS_ENABLED(CONFIG_NF_CONNTRACK) net->ct.sysctl_acct = enable; +#endif } void nf_conntrack_acct_pernet_init(struct net *net); int nf_conntrack_acct_init(void); void nf_conntrack_acct_fini(void); + #endif /* _NF_CONNTRACK_ACCT_H */ diff --git a/include/net/netfilter/nf_conntrack_bridge.h b/include/net/netfilter/nf_conntrack_bridge.h new file mode 100644 index 000000000000..c564281ede5e --- /dev/null +++ b/include/net/netfilter/nf_conntrack_bridge.h @@ -0,0 +1,19 @@ +#ifndef NF_CONNTRACK_BRIDGE_ +#define NF_CONNTRACK_BRIDGE_ + +#include <linux/module.h> +#include <linux/types.h> +#include <uapi/linux/if_ether.h> + +struct nf_hook_ops; + +struct nf_ct_bridge_info { + struct nf_hook_ops *ops; + unsigned int ops_size; + struct module *me; +}; + +void nf_ct_bridge_register(struct nf_ct_bridge_info *info); +void nf_ct_bridge_unregister(struct nf_ct_bridge_info *info); + +#endif diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 
ae41e92251dd..09f2efea0b97 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -14,13 +14,16 @@ #define _NF_CONNTRACK_CORE_H #include <linux/netfilter.h> -#include <net/netfilter/nf_conntrack_l4proto.h> +#include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_ecache.h> +#include <net/netfilter/nf_conntrack_l4proto.h> /* This header is used to share core functionality between the standalone connection tracking module, and the compatibility layer's use of connection tracking. */ -unsigned int nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state); + +unsigned int nf_conntrack_in(struct sk_buff *skb, + const struct nf_hook_state *state); int nf_conntrack_init_net(struct net *net); void nf_conntrack_cleanup_net(struct net *net); @@ -64,6 +67,9 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb) return ret; } +unsigned int nf_confirm(struct sk_buff *skb, unsigned int protoff, + struct nf_conn *ct, enum ip_conntrack_info ctinfo); + void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l4proto *proto); diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h index f32fc8289473..9645b47fa7e4 100644 --- a/include/net/netfilter/nf_conntrack_count.h +++ b/include/net/netfilter/nf_conntrack_count.h @@ -2,6 +2,9 @@ #define _NF_CONNTRACK_COUNT_H #include <linux/list.h> +#include <linux/spinlock.h> +#include <net/netfilter/nf_conntrack_tuple.h> +#include <net/netfilter/nf_conntrack_zones.h> struct nf_conncount_data; diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 52b44192b43f..eb81f9195e28 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -61,9 +61,10 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp) #else return NULL; #endif -}; +} #ifdef CONFIG_NF_CONNTRACK_EVENTS + /* This structure is passed to event handler */ struct nf_ct_event { struct nf_conn *ct; @@ -84,9 +85,26 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct); int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct, u32 portid, int report); +#else + +static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) +{ +} + +static inline int nf_conntrack_eventmask_report(unsigned int eventmask, + struct nf_conn *ct, + u32 portid, + int report) +{ + return 0; +} + +#endif + static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) { +#ifdef CONFIG_NF_CONNTRACK_EVENTS struct net *net = nf_ct_net(ct); struct nf_conntrack_ecache *e; @@ -98,31 +116,42 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) return; set_bit(event, &e->cache); +#endif } static inline int nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct, u32 portid, int report) { +#ifdef CONFIG_NF_CONNTRACK_EVENTS const struct net *net = nf_ct_net(ct); if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) return 0; return nf_conntrack_eventmask_report(1 << event, ct, portid, report); +#else + return 0; +#endif } static inline int nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct) { +#ifdef CONFIG_NF_CONNTRACK_EVENTS const struct net *net = nf_ct_net(ct); if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb)) return 0; return nf_conntrack_eventmask_report(1 << event, ct, 0, 0); +#else + return 0; 
+#endif } +#ifdef CONFIG_NF_CONNTRACK_EVENTS + struct nf_exp_event { struct nf_conntrack_expect *exp; u32 portid; @@ -148,41 +177,18 @@ void nf_conntrack_ecache_pernet_fini(struct net *net); int nf_conntrack_ecache_init(void); void nf_conntrack_ecache_fini(void); -static inline void nf_conntrack_ecache_delayed_work(struct net *net) +#else /* CONFIG_NF_CONNTRACK_EVENTS */ + +static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e, + struct nf_conntrack_expect *exp, + u32 portid, + int report) { - if (!delayed_work_pending(&net->ct.ecache_dwork)) { - schedule_delayed_work(&net->ct.ecache_dwork, HZ); - net->ct.ecache_dwork_pending = true; - } } -static inline void nf_conntrack_ecache_work(struct net *net) +static inline void nf_conntrack_ecache_pernet_init(struct net *net) { - if (net->ct.ecache_dwork_pending) { - net->ct.ecache_dwork_pending = false; - mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0); - } } -#else /* CONFIG_NF_CONNTRACK_EVENTS */ -static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, - struct nf_conn *ct) {} -static inline int nf_conntrack_eventmask_report(unsigned int eventmask, - struct nf_conn *ct, - u32 portid, - int report) { return 0; } -static inline int nf_conntrack_event(enum ip_conntrack_events event, - struct nf_conn *ct) { return 0; } -static inline int nf_conntrack_event_report(enum ip_conntrack_events event, - struct nf_conn *ct, - u32 portid, - int report) { return 0; } -static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {} -static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e, - struct nf_conntrack_expect *exp, - u32 portid, - int report) {} - -static inline void nf_conntrack_ecache_pernet_init(struct net *net) {} static inline void nf_conntrack_ecache_pernet_fini(struct net *net) { @@ -197,14 +203,26 @@ static inline void nf_conntrack_ecache_fini(void) { } +#endif /* CONFIG_NF_CONNTRACK_EVENTS */ + static inline void nf_conntrack_ecache_delayed_work(struct net *net) { +#ifdef CONFIG_NF_CONNTRACK_EVENTS + if (!delayed_work_pending(&net->ct.ecache_dwork)) { + schedule_delayed_work(&net->ct.ecache_dwork, HZ); + net->ct.ecache_dwork_pending = true; + } +#endif } static inline void nf_conntrack_ecache_work(struct net *net) { +#ifdef CONFIG_NF_CONNTRACK_EVENTS + if (net->ct.ecache_dwork_pending) { + net->ct.ecache_dwork_pending = false; + mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0); + } +#endif } -#endif /* CONFIG_NF_CONNTRACK_EVENTS */ #endif /*_NF_CONNTRACK_ECACHE_H*/ - diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index 93ce6b0daaba..0855b60fba17 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -76,6 +76,11 @@ struct nf_conntrack_expect_policy { #define NF_CT_EXPECT_CLASS_DEFAULT 0 #define NF_CT_EXPECT_MAX_CNT 255 +/* Allow to reuse expectations with the same tuples from different master + * conntracks. 
+ */ +#define NF_CT_EXP_F_SKIP_MASTER 0x1 + int nf_conntrack_expect_pernet_init(struct net *net); void nf_conntrack_expect_pernet_fini(struct net *net); @@ -121,11 +126,12 @@ void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t, const union nf_inet_addr *, u_int8_t, const __be16 *, const __be16 *); void nf_ct_expect_put(struct nf_conntrack_expect *exp); -int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, - u32 portid, int report); -static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect) +int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, + u32 portid, int report, unsigned int flags); +static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect, + unsigned int flags) { - return nf_ct_expect_related_report(expect, 0, 0); + return nf_ct_expect_related_report(expect, 0, 0, flags); } #endif /*_NF_CONNTRACK_EXPECT_H*/ diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index 21f887c5058c..112a6f40dfaf 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -8,7 +8,7 @@ enum nf_ct_ext_id { NF_CT_EXT_HELPER, -#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE) +#if IS_ENABLED(CONFIG_NF_NAT) NF_CT_EXT_NAT, #endif NF_CT_EXT_SEQADJ, diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index a49edfdf47e8..4cad1f0a327a 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -176,42 +176,44 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb, const char *fmt, ...) { } #endif /* CONFIG_SYSCTL */ +#if IS_ENABLED(CONFIG_NF_CONNTRACK) static inline struct nf_generic_net *nf_generic_pernet(struct net *net) { - return &net->ct.nf_ct_proto.generic; + return &net->ct.nf_ct_proto.generic; } static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net) { - return &net->ct.nf_ct_proto.tcp; + return &net->ct.nf_ct_proto.tcp; } static inline struct nf_udp_net *nf_udp_pernet(struct net *net) { - return &net->ct.nf_ct_proto.udp; + return &net->ct.nf_ct_proto.udp; } static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net) { - return &net->ct.nf_ct_proto.icmp; + return &net->ct.nf_ct_proto.icmp; } static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net) { - return &net->ct.nf_ct_proto.icmpv6; + return &net->ct.nf_ct_proto.icmpv6; } +#endif #ifdef CONFIG_NF_CT_PROTO_DCCP static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net) { - return &net->ct.nf_ct_proto.dccp; + return &net->ct.nf_ct_proto.dccp; } #endif #ifdef CONFIG_NF_CT_PROTO_SCTP static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net) { - return &net->ct.nf_ct_proto.sctp; + return &net->ct.nf_ct_proto.sctp; } #endif diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index 4eacce6f3bcc..ba916411c4e1 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h @@ -1,11 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#include <linux/types.h> -#include <net/net_namespace.h> + +#ifndef _NF_CONNTRACK_LABELS_H +#define _NF_CONNTRACK_LABELS_H + #include <linux/netfilter/nf_conntrack_common.h> #include <linux/netfilter/nf_conntrack_tuple_common.h> +#include <linux/types.h> +#include <net/net_namespace.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_extend.h> - #include 
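Editorial sketch, not part of the patch: after this change every caller of nf_ct_expect_related() passes a flags word; 0 keeps the old behaviour, while NF_CT_EXP_F_SKIP_MASTER tolerates an identical tuple owned by a different master conntrack. The wrapper below is purely illustrative.

	#include <net/netfilter/nf_conntrack_expect.h>

	static int example_add_expectation(struct nf_conntrack_expect *exp,
					   bool shared_across_masters)
	{
		/* pick the new flag only when an identical expectation from
		 * another master connection must not be treated as a clash */
		unsigned int flags = shared_across_masters ?
				     NF_CT_EXP_F_SKIP_MASTER : 0;

		return nf_ct_expect_related(exp, flags);
	}
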
<uapi/linux/netfilter/xt_connlabel.h> #define NF_CT_LABELS_MAX_SIZE ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE) @@ -51,3 +54,5 @@ static inline void nf_conntrack_labels_fini(void) {} static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; } static inline void nf_connlabels_put(struct net *net) {} #endif + +#endif /* _NF_CONNTRACK_LABELS_H */ diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h index 2c7559a54092..6a3ab081e4bf 100644 --- a/include/net/netfilter/nf_conntrack_synproxy.h +++ b/include/net/netfilter/nf_conntrack_synproxy.h @@ -2,6 +2,7 @@ #ifndef _NF_CONNTRACK_SYNPROXY_H #define _NF_CONNTRACK_SYNPROXY_H +#include <net/netfilter/nf_conntrack_seqadj.h> #include <net/netns/generic.h> struct nf_conn_synproxy { @@ -31,6 +32,7 @@ static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct) static inline bool nf_ct_add_synproxy(struct nf_conn *ct, const struct nf_conn *tmpl) { +#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY) if (tmpl && nfct_synproxy(tmpl)) { if (!nfct_seqadj_ext_add(ct)) return false; @@ -38,55 +40,9 @@ static inline bool nf_ct_add_synproxy(struct nf_conn *ct, if (!nfct_synproxy_ext_add(ct)) return false; } +#endif return true; } -struct synproxy_stats { - unsigned int syn_received; - unsigned int cookie_invalid; - unsigned int cookie_valid; - unsigned int cookie_retrans; - unsigned int conn_reopened; -}; - -struct synproxy_net { - struct nf_conn *tmpl; - struct synproxy_stats __percpu *stats; - unsigned int hook_ref4; - unsigned int hook_ref6; -}; - -extern unsigned int synproxy_net_id; -static inline struct synproxy_net *synproxy_pernet(struct net *net) -{ - return net_generic(net, synproxy_net_id); -} - -struct synproxy_options { - u8 options; - u8 wscale; - u16 mss; - u32 tsval; - u32 tsecr; -}; - -struct tcphdr; -struct xt_synproxy_info; -bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, - const struct tcphdr *th, - struct synproxy_options *opts); -unsigned int synproxy_options_size(const struct synproxy_options *opts); -void synproxy_build_options(struct tcphdr *th, - const struct synproxy_options *opts); - -void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info, - struct synproxy_options *opts); -void synproxy_check_timestamp_cookie(struct synproxy_options *opts); - -unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, unsigned int protoff, - struct tcphdr *th, struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - const struct nf_conn_synproxy *synproxy); - #endif /* _NF_CONNTRACK_SYNPROXY_H */ diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index 00a8fbb2d735..6dd72396f534 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -32,6 +32,7 @@ struct nf_conn_timeout { static inline unsigned int * nf_ct_timeout_data(const struct nf_conn_timeout *t) { +#ifdef CONFIG_NF_CONNTRACK_TIMEOUT struct nf_ct_timeout *timeout; timeout = rcu_dereference(t->timeout); @@ -39,6 +40,9 @@ nf_ct_timeout_data(const struct nf_conn_timeout *t) return NULL; return (unsigned int *)timeout->data; +#else + return NULL; +#endif } static inline diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h index 0ed617bf0a3d..820ea34b6029 100644 --- a/include/net/netfilter/nf_conntrack_timestamp.h +++ b/include/net/netfilter/nf_conntrack_timestamp.h @@ -38,16 +38,6 @@ struct nf_conn_tstamp 
*nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp) #endif }; -static inline bool nf_ct_tstamp_enabled(struct net *net) -{ - return net->ct.sysctl_tstamp != 0; -} - -static inline void nf_ct_set_tstamp(struct net *net, bool enable) -{ - net->ct.sysctl_tstamp = enable; -} - #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP void nf_conntrack_tstamp_pernet_init(struct net *net); diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h index bf0444e111a6..9334371c94e2 100644 --- a/include/net/netfilter/nf_conntrack_tuple.h +++ b/include/net/netfilter/nf_conntrack_tuple.h @@ -123,7 +123,7 @@ struct nf_conntrack_tuple_hash { static inline bool __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1, const struct nf_conntrack_tuple *t2) -{ +{ return (nf_inet_addr_cmp(&t1->src.u3, &t2->src.u3) && t1->src.u.all == t2->src.u.all && t1->src.l3num == t2->src.l3num); diff --git a/include/net/netfilter/nf_conntrack_zones.h b/include/net/netfilter/nf_conntrack_zones.h index 52950baa3ab5..48dbadb96fb3 100644 --- a/include/net/netfilter/nf_conntrack_zones.h +++ b/include/net/netfilter/nf_conntrack_zones.h @@ -3,9 +3,7 @@ #define _NF_CONNTRACK_ZONES_H #include <linux/netfilter/nf_conntrack_zones_common.h> - -#if IS_ENABLED(CONFIG_NF_CONNTRACK) -#include <net/netfilter/nf_conntrack_extend.h> +#include <net/netfilter/nf_conntrack.h> static inline const struct nf_conntrack_zone * nf_ct_zone(const struct nf_conn *ct) @@ -87,5 +85,5 @@ static inline bool nf_ct_zone_equal_any(const struct nf_conn *a, return true; #endif } -#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */ + #endif /* _NF_CONNTRACK_ZONES_H */ diff --git a/include/net/netfilter/nf_dup_netdev.h b/include/net/netfilter/nf_dup_netdev.h index 2a6f6dcad3d9..b175d271aec9 100644 --- a/include/net/netfilter/nf_dup_netdev.h +++ b/include/net/netfilter/nf_dup_netdev.h @@ -2,7 +2,15 @@ #ifndef _NF_DUP_NETDEV_H_ #define _NF_DUP_NETDEV_H_ +#include <net/netfilter/nf_tables.h> + void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif); void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif); +struct nft_offload_ctx; +struct nft_flow_rule; + +int nft_fwd_dup_netdev_offload(struct nft_offload_ctx *ctx, + struct nft_flow_rule *flow, + enum flow_action_id id, int oif); #endif diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 3e370cb36263..b37a7d608134 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -6,6 +6,7 @@ #include <linux/netdevice.h> #include <linux/rhashtable-types.h> #include <linux/rcupdate.h> +#include <linux/netfilter.h> #include <linux/netfilter/nf_conntrack_tuple_common.h> #include <net/dst.h> @@ -53,8 +54,6 @@ struct flow_offload_tuple { u8 l4proto; u8 dir; - int oifidx; - u16 mtu; struct dst_entry *dst_cache; @@ -124,4 +123,4 @@ unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, #define MODULE_ALIAS_NF_FLOWTABLE(family) \ MODULE_ALIAS("nf-flowtable-" __stringify(family)) -#endif /* _FLOW_OFFLOAD_H */ +#endif /* _NF_FLOW_TABLE_H */ diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index 423cda2c6542..0d412dd63707 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -1,9 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_NAT_H #define _NF_NAT_H + +#include <linux/list.h> #include <linux/netfilter_ipv4.h> -#include <linux/netfilter/nf_nat.h> +#include <linux/netfilter/nf_conntrack_pptp.h> +#include 
<net/netfilter/nf_conntrack.h> +#include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_tuple.h> +#include <uapi/linux/netfilter/nf_nat.h> enum nf_nat_manip_type { NF_NAT_MANIP_SRC, @@ -14,20 +19,14 @@ enum nf_nat_manip_type { #define HOOK2MANIP(hooknum) ((hooknum) != NF_INET_POST_ROUTING && \ (hooknum) != NF_INET_LOCAL_IN) -#include <linux/list.h> -#include <linux/netfilter/nf_conntrack_pptp.h> -#include <net/netfilter/nf_conntrack_extend.h> - /* per conntrack: nat application helper private data */ union nf_conntrack_nat_help { /* insert nat helper private data here */ -#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE) +#if IS_ENABLED(CONFIG_NF_NAT_PPTP) struct nf_nat_pptp nat_pptp_info; #endif }; -struct nf_conn; - /* The structure embedded in the conntrack structure. */ struct nf_conn_nat { union nf_conntrack_nat_help help; @@ -48,7 +47,7 @@ struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct); static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct) { -#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE) +#if IS_ENABLED(CONFIG_NF_NAT) return nf_ct_ext_find(ct, NF_CT_EXT_NAT); #else return NULL; diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h index 97d7033e93a4..efae84646353 100644 --- a/include/net/netfilter/nf_nat_helper.h +++ b/include/net/netfilter/nf_nat_helper.h @@ -3,9 +3,9 @@ #define _NF_NAT_HELPER_H /* NAT protocol helper routines. */ +#include <linux/skbuff.h> #include <net/netfilter/nf_conntrack.h> - -struct sk_buff; +#include <net/netfilter/nf_conntrack_expect.h> /* These return true or false. */ bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct, diff --git a/include/net/netfilter/nf_nat_masquerade.h b/include/net/netfilter/nf_nat_masquerade.h index 54a14d643c34..be7abc9d5f22 100644 --- a/include/net/netfilter/nf_nat_masquerade.h +++ b/include/net/netfilter/nf_nat_masquerade.h @@ -2,6 +2,7 @@ #ifndef _NF_NAT_MASQUERADE_H_ #define _NF_NAT_MASQUERADE_H_ +#include <linux/skbuff.h> #include <net/netfilter/nf_nat.h> unsigned int diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h index c129aacc8ae8..2418653a66db 100644 --- a/include/net/netfilter/nf_nat_redirect.h +++ b/include/net/netfilter/nf_nat_redirect.h @@ -2,6 +2,9 @@ #ifndef _NF_NAT_REDIRECT_H_ #define _NF_NAT_REDIRECT_H_ +#include <linux/skbuff.h> +#include <uapi/linux/netfilter/nf_nat.h> + unsigned int nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_ipv4_multi_range_compat *mr, diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 7239105d9d2e..47088083667b 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -5,6 +5,8 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/jhash.h> +#include <linux/netfilter.h> +#include <linux/skbuff.h> /* Each queued (to userspace) skbuff has one of these. 
*/ struct nf_queue_entry { @@ -120,6 +122,6 @@ nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family, } int nf_queue(struct sk_buff *skb, struct nf_hook_state *state, - const struct nf_hook_entries *entries, unsigned int index, - unsigned int verdict); + unsigned int index, unsigned int verdict); + #endif /* _NF_QUEUE_H */ diff --git a/include/net/netfilter/nf_reject.h b/include/net/netfilter/nf_reject.h index 221f877f29d1..9051c3a0c8e7 100644 --- a/include/net/netfilter/nf_reject.h +++ b/include/net/netfilter/nf_reject.h @@ -2,6 +2,9 @@ #ifndef _NF_REJECT_H #define _NF_REJECT_H +#include <linux/types.h> +#include <uapi/linux/in.h> + static inline bool nf_reject_verify_csum(__u8 proto) { /* Skip protocols that don't use 16-bit one's complement checksum diff --git a/include/net/netfilter/nf_synproxy.h b/include/net/netfilter/nf_synproxy.h new file mode 100644 index 000000000000..a336f9434e73 --- /dev/null +++ b/include/net/netfilter/nf_synproxy.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NF_SYNPROXY_SHARED_H +#define _NF_SYNPROXY_SHARED_H + +#include <linux/module.h> +#include <linux/skbuff.h> +#include <net/ip6_checksum.h> +#include <net/ip6_route.h> +#include <net/tcp.h> + +#include <net/netfilter/nf_conntrack_seqadj.h> +#include <net/netfilter/nf_conntrack_synproxy.h> + +struct synproxy_stats { + unsigned int syn_received; + unsigned int cookie_invalid; + unsigned int cookie_valid; + unsigned int cookie_retrans; + unsigned int conn_reopened; +}; + +struct synproxy_net { + struct nf_conn *tmpl; + struct synproxy_stats __percpu *stats; + unsigned int hook_ref4; + unsigned int hook_ref6; +}; + +extern unsigned int synproxy_net_id; +static inline struct synproxy_net *synproxy_pernet(struct net *net) +{ + return net_generic(net, synproxy_net_id); +} + +struct synproxy_options { + u8 options; + u8 wscale; + u16 mss_option; + u16 mss_encode; + u32 tsval; + u32 tsecr; +}; + +struct nf_synproxy_info; +bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, + const struct tcphdr *th, + struct synproxy_options *opts); + +void synproxy_init_timestamp_cookie(const struct nf_synproxy_info *info, + struct synproxy_options *opts); + +void synproxy_send_client_synack(struct net *net, const struct sk_buff *skb, + const struct tcphdr *th, + const struct synproxy_options *opts); + +bool synproxy_recv_client_ack(struct net *net, + const struct sk_buff *skb, + const struct tcphdr *th, + struct synproxy_options *opts, u32 recv_seq); + +struct nf_hook_state; + +unsigned int ipv4_synproxy_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *nhs); +int nf_synproxy_ipv4_init(struct synproxy_net *snet, struct net *net); +void nf_synproxy_ipv4_fini(struct synproxy_net *snet, struct net *net); + +#if IS_ENABLED(CONFIG_IPV6) +void synproxy_send_client_synack_ipv6(struct net *net, + const struct sk_buff *skb, + const struct tcphdr *th, + const struct synproxy_options *opts); + +bool synproxy_recv_client_ack_ipv6(struct net *net, const struct sk_buff *skb, + const struct tcphdr *th, + struct synproxy_options *opts, u32 recv_seq); + +unsigned int ipv6_synproxy_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *nhs); +int nf_synproxy_ipv6_init(struct synproxy_net *snet, struct net *net); +void nf_synproxy_ipv6_fini(struct synproxy_net *snet, struct net *net); +#else +static inline int +nf_synproxy_ipv6_init(struct synproxy_net *snet, struct net *net) { return 0; } +static inline void +nf_synproxy_ipv6_fini(struct 
synproxy_net *snet, struct net *net) {}; +#endif /* CONFIG_IPV6 */ + +#endif /* _NF_SYNPROXY_SHARED_H */ diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 5b8624ae4a27..001d294edf57 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -2,6 +2,7 @@ #ifndef _NET_NF_TABLES_H #define _NET_NF_TABLES_H +#include <asm/unaligned.h> #include <linux/list.h> #include <linux/netfilter.h> #include <linux/netfilter/nfnetlink.h> @@ -11,6 +12,7 @@ #include <linux/rhashtable.h> #include <net/netfilter/nf_flow_table.h> #include <net/netlink.h> +#include <net/flow_offload.h> struct module; @@ -99,23 +101,28 @@ struct nft_regs { }; }; -/* Store/load an u16 or u8 integer to/from the u32 data register. +/* Store/load an u8, u16 or u64 integer to/from the u32 data register. * * Note, when using concatenations, register allocation happens at 32-bit * level. So for store instruction, pad the rest part with zero to avoid * garbage values. */ -static inline void nft_reg_store16(u32 *dreg, u16 val) +static inline void nft_reg_store8(u32 *dreg, u8 val) { *dreg = 0; - *(u16 *)dreg = val; + *(u8 *)dreg = val; } -static inline void nft_reg_store8(u32 *dreg, u8 val) +static inline u8 nft_reg_load8(u32 *sreg) +{ + return *(u8 *)sreg; +} + +static inline void nft_reg_store16(u32 *dreg, u16 val) { *dreg = 0; - *(u8 *)dreg = val; + *(u16 *)dreg = val; } static inline u16 nft_reg_load16(u32 *sreg) @@ -123,9 +130,14 @@ static inline u16 nft_reg_load16(u32 *sreg) return *(u16 *)sreg; } -static inline u8 nft_reg_load8(u32 *sreg) +static inline void nft_reg_store64(u32 *dreg, u64 val) { - return *(u8 *)sreg; + put_unaligned(val, (u64 *)dreg); +} + +static inline u64 nft_reg_load64(u32 *sreg) +{ + return get_unaligned((u64 *)sreg); } static inline void nft_data_copy(u32 *dst, const struct nft_data *src, @@ -161,6 +173,7 @@ struct nft_ctx { const struct nlattr * const *nla; u32 portid; u32 seq; + u16 flags; u8 family; u8 level; bool report; @@ -287,17 +300,23 @@ struct nft_expr; * struct nft_set_ops - nf_tables set operations * * @lookup: look up an element within the set + * @update: update an element if exists, add it if doesn't exist + * @delete: delete an element * @insert: insert new element into set * @activate: activate new element in the next generation * @deactivate: lookup for element and deactivate it in the next generation * @flush: deactivate element in the next generation * @remove: remove element from set - * @walk: iterate over all set elemeennts + * @walk: iterate over all set elements * @get: get set elements * @privsize: function to return size of set private data * @init: initialize private data of new set instance * @destroy: destroy private data of set instance * @elemsize: element private size + * + * Operations lookup, update and delete have simpler interfaces, are faster + * and currently only used in the packet path. All the rest are slower, + * control plane functions. 
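Editorial sketch, not part of the patch: using the nft_reg_store64()/nft_reg_load64() helpers added in this hunk from an expression's eval path. The register index and the timestamp value are illustrative; the sketch assumes struct nft_regs exposes its 32-bit data[] register array as in mainline.

	#include <net/netfilter/nf_tables.h>

	static void example_store_ns_timestamp(struct nft_regs *regs, u8 dreg,
					       u64 now_ns)
	{
		u32 *dest = &regs->data[dreg];

		/* a 64-bit value occupies two 32-bit registers; put_unaligned()
		 * inside nft_reg_store64() copes with the destination alignment */
		nft_reg_store64(dest, now_ns);
	}
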
*/ struct nft_set_ops { bool (*lookup)(const struct net *net, @@ -312,6 +331,8 @@ struct nft_set_ops { const struct nft_expr *expr, struct nft_regs *regs, const struct nft_set_ext **ext); + bool (*delete)(const struct nft_set *set, + const u32 *key); int (*insert)(const struct net *net, const struct nft_set *set, @@ -419,8 +440,7 @@ struct nft_set { unsigned char *udata; /* runtime data below here */ const struct nft_set_ops *ops ____cacheline_aligned; - u16 flags:13, - bound:1, + u16 flags:14, genmask:2; u8 klen; u8 dlen; @@ -636,7 +656,7 @@ static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext) void *nft_set_elem_init(const struct nft_set *set, const struct nft_set_ext_tmpl *tmpl, const u32 *key, const u32 *data, - u64 timeout, gfp_t gfp); + u64 timeout, u64 expiration, gfp_t gfp); void nft_set_elem_destroy(const struct nft_set *set, void *elem, bool destroy_expr); @@ -735,6 +755,9 @@ enum nft_trans_phase { NFT_TRANS_RELEASE }; +struct nft_flow_rule; +struct nft_offload_ctx; + /** * struct nft_expr_ops - nf_tables expression operations * @@ -777,6 +800,10 @@ struct nft_expr_ops { const struct nft_data **data); bool (*gc)(struct net *net, const struct nft_expr *expr); + int (*offload)(struct nft_offload_ctx *ctx, + struct nft_flow_rule *flow, + const struct nft_expr *expr); + u32 offload_flags; const struct nft_expr_type *type; void *data; }; @@ -859,8 +886,11 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) enum nft_chain_flags { NFT_BASE_CHAIN = 0x1, + NFT_CHAIN_HW_OFFLOAD = 0x2, }; +#define NFT_CHAIN_POLICY_UNSET U8_MAX + /** * struct nft_chain - nf_tables chain * @@ -942,6 +972,7 @@ struct nft_stats { * @stats: per-cpu chain stats * @chain: the chain * @dev_name: device name that this base chain is attached to (if any) + * @flow_block: flow block (for hardware offload) */ struct nft_base_chain { struct nf_hook_ops ops; @@ -951,6 +982,7 @@ struct nft_base_chain { struct nft_stats __percpu *stats; struct nft_chain chain; char dev_name[IFNAMSIZ]; + struct flow_block flow_block; }; static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain) @@ -1091,6 +1123,7 @@ struct nft_object_type { * @init: initialize object from netlink attributes * @destroy: release existing stateful object * @dump: netlink dump stateful object + * @update: update stateful object */ struct nft_object_ops { void (*eval)(struct nft_object *obj, @@ -1105,6 +1138,8 @@ struct nft_object_ops { int (*dump)(struct sk_buff *skb, struct nft_object *obj, bool reset); + void (*update)(struct nft_object *obj, + struct nft_object *newobj); const struct nft_object_type *type; }; @@ -1148,6 +1183,10 @@ struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table, const struct nlattr *nla, u8 genmask); +void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx, + struct nft_flowtable *flowtable, + enum nft_trans_phase phase); + void nft_register_flowtable_type(struct nf_flowtable_type *type); void nft_unregister_flowtable_type(struct nf_flowtable_type *type); @@ -1195,6 +1234,8 @@ void nft_trace_notify(struct nft_traceinfo *info); #define MODULE_ALIAS_NFT_OBJ(type) \ MODULE_ALIAS("nft-obj-" __stringify(type)) +#if IS_ENABLED(CONFIG_NF_TABLES) + /* * The gencursor defines two generations, the currently active and the * next one. 
Objects contain a bitmask of 2 bits specifying the generations @@ -1268,6 +1309,8 @@ static inline void nft_set_elem_change_active(const struct net *net, ext->genmask ^= nft_genmask_next(net); } +#endif /* IS_ENABLED(CONFIG_NF_TABLES) */ + /* * We use a free bit in the genmask field to indicate the element * is busy, meaning it is currently being processed either by @@ -1322,23 +1365,29 @@ struct nft_trans { struct nft_trans_rule { struct nft_rule *rule; + struct nft_flow_rule *flow; u32 rule_id; }; #define nft_trans_rule(trans) \ (((struct nft_trans_rule *)trans->data)->rule) +#define nft_trans_flow_rule(trans) \ + (((struct nft_trans_rule *)trans->data)->flow) #define nft_trans_rule_id(trans) \ (((struct nft_trans_rule *)trans->data)->rule_id) struct nft_trans_set { struct nft_set *set; u32 set_id; + bool bound; }; #define nft_trans_set(trans) \ (((struct nft_trans_set *)trans->data)->set) #define nft_trans_set_id(trans) \ (((struct nft_trans_set *)trans->data)->set_id) +#define nft_trans_set_bound(trans) \ + (((struct nft_trans_set *)trans->data)->bound) struct nft_trans_chain { bool update; @@ -1369,19 +1418,28 @@ struct nft_trans_table { struct nft_trans_elem { struct nft_set *set; struct nft_set_elem elem; + bool bound; }; #define nft_trans_elem_set(trans) \ (((struct nft_trans_elem *)trans->data)->set) #define nft_trans_elem(trans) \ (((struct nft_trans_elem *)trans->data)->elem) +#define nft_trans_elem_set_bound(trans) \ + (((struct nft_trans_elem *)trans->data)->bound) struct nft_trans_obj { struct nft_object *obj; + struct nft_object *newobj; + bool update; }; #define nft_trans_obj(trans) \ (((struct nft_trans_obj *)trans->data)->obj) +#define nft_trans_obj_newobj(trans) \ + (((struct nft_trans_obj *)trans->data)->newobj) +#define nft_trans_obj_update(trans) \ + (((struct nft_trans_obj *)trans->data)->update) struct nft_trans_flowtable { struct nft_flowtable *flowtable; diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h index dabe6fdb553a..d0f1c537b017 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -4,6 +4,7 @@ #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/ipv6.h> +#include <net/netfilter/nf_tables.h> static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, struct sk_buff *skb) diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h new file mode 100644 index 000000000000..03cf5856d76f --- /dev/null +++ b/include/net/netfilter/nf_tables_offload.h @@ -0,0 +1,83 @@ +#ifndef _NET_NF_TABLES_OFFLOAD_H +#define _NET_NF_TABLES_OFFLOAD_H + +#include <net/flow_offload.h> +#include <net/netfilter/nf_tables.h> + +struct nft_offload_reg { + u32 key; + u32 len; + u32 base_offset; + u32 offset; + struct nft_data data; + struct nft_data mask; +}; + +enum nft_offload_dep_type { + NFT_OFFLOAD_DEP_UNSPEC = 0, + NFT_OFFLOAD_DEP_NETWORK, + NFT_OFFLOAD_DEP_TRANSPORT, +}; + +struct nft_offload_ctx { + struct { + enum nft_offload_dep_type type; + __be16 l3num; + u8 protonum; + } dep; + unsigned int num_actions; + struct net *net; + struct nft_offload_reg regs[NFT_REG32_15 + 1]; +}; + +void nft_offload_set_dependency(struct nft_offload_ctx *ctx, + enum nft_offload_dep_type type); +void nft_offload_update_dependency(struct nft_offload_ctx *ctx, + const void *data, u32 len); + +struct nft_flow_key { + struct flow_dissector_key_basic basic; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; + }; + 
struct flow_dissector_key_ports tp; + struct flow_dissector_key_ip ip; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_eth_addrs eth_addrs; +} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ + +struct nft_flow_match { + struct flow_dissector dissector; + struct nft_flow_key key; + struct nft_flow_key mask; +}; + +struct nft_flow_rule { + __be16 proto; + struct nft_flow_match match; + struct flow_rule *rule; +}; + +#define NFT_OFFLOAD_F_ACTION (1 << 0) + +struct nft_rule; +struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule); +void nft_flow_rule_destroy(struct nft_flow_rule *flow); +int nft_flow_rule_offload_commit(struct net *net); + +#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \ + (__reg)->base_offset = \ + offsetof(struct nft_flow_key, __base); \ + (__reg)->offset = \ + offsetof(struct nft_flow_key, __base.__field); \ + (__reg)->len = __len; \ + (__reg)->key = __key; \ + memset(&(__reg)->mask, 0xff, (__reg)->len); + +int nft_chain_offload_priority(struct nft_base_chain *basechain); + +int nft_offload_init(void); +void nft_offload_exit(void); + +#endif diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h index e4c4d8eaca8c..628b6fa579cd 100644 --- a/include/net/netfilter/nft_fib.h +++ b/include/net/netfilter/nft_fib.h @@ -2,6 +2,8 @@ #ifndef _NFT_FIB_H_ #define _NFT_FIB_H_ +#include <net/netfilter/nf_tables.h> + struct nft_fib { enum nft_registers dreg:8; u8 result; diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h new file mode 100644 index 000000000000..07e2fd507963 --- /dev/null +++ b/include/net/netfilter/nft_meta.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NFT_META_H_ +#define _NFT_META_H_ + +#include <net/netfilter/nf_tables.h> + +struct nft_meta { + enum nft_meta_keys key:8; + union { + enum nft_registers dreg:8; + enum nft_registers sreg:8; + }; +}; + +extern const struct nla_policy nft_meta_policy[]; + +int nft_meta_get_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_meta_set_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_meta_get_dump(struct sk_buff *skb, + const struct nft_expr *expr); + +int nft_meta_set_dump(struct sk_buff *skb, + const struct nft_expr *expr); + +void nft_meta_get_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt); + +void nft_meta_set_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt); + +void nft_meta_set_destroy(const struct nft_ctx *ctx, + const struct nft_expr *expr); + +int nft_meta_set_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data); + +#endif diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h index de80c50761f0..56b123a42220 100644 --- a/include/net/netfilter/nft_reject.h +++ b/include/net/netfilter/nft_reject.h @@ -2,6 +2,11 @@ #ifndef _NFT_REJECT_H_ #define _NFT_REJECT_H_ +#include <linux/types.h> +#include <net/netlink.h> +#include <net/netfilter/nf_tables.h> +#include <uapi/linux/netfilter/nf_tables.h> + struct nft_reject { enum nft_reject_types type:8; u8 icmp_code; diff --git a/include/net/netlink.h b/include/net/netlink.h index 395b4406f4b0..b140c8f1be22 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -378,13 +378,17 @@ struct nla_policy { /** * struct 
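Editorial sketch, not part of the patch: how an expression's new ->offload() callback could use NFT_OFFLOAD_MATCH() to describe an IPv4 source-address match on a flow rule. The fixed register choice is illustrative; real expressions index ctx->regs with their own destination register.

	#include <net/netfilter/nf_tables_offload.h>

	static int example_offload_ipv4_saddr(struct nft_offload_ctx *ctx,
					      struct nft_flow_rule *flow,
					      const struct nft_expr *expr)
	{
		struct nft_offload_reg *reg = &ctx->regs[NFT_REG32_00];

		/* fill base_offset/offset/len/key and set an all-ones mask */
		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(__be32), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		return 0;
	}
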
nl_info - netlink source information * @nlh: Netlink message header of original request + * @nl_net: Network namespace * @portid: Netlink PORTID of requesting application + * @skip_notify: Skip netlink notifications to user space + * @skip_notify_kernel: Skip selected in-kernel notifications */ struct nl_info { struct nlmsghdr *nlh; struct net *nl_net; u32 portid; - bool skip_notify; + u8 skip_notify:1, + skip_notify_kernel:1; }; /** @@ -680,9 +684,8 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, const struct nla_policy *policy, struct netlink_ext_ack *extack) { - return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), - nlmsg_attrlen(nlh, hdrlen), policy, - NL_VALIDATE_STRICT, extack); + return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy, + NL_VALIDATE_STRICT, extack); } /** @@ -1755,6 +1758,15 @@ static inline int __nla_validate_nested(const struct nlattr *start, int maxtype, } static inline int +nl80211_validate_nested(const struct nlattr *start, int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_validate_nested(start, maxtype, policy, + NL_VALIDATE_STRICT, extack); +} + +static inline int nla_validate_nested_deprecated(const struct nlattr *start, int maxtype, const struct nla_policy *policy, struct netlink_ext_ack *extack) diff --git a/include/net/netns/can.h b/include/net/netns/can.h index ca9bd9fba5b5..b6ab7d1530d7 100644 --- a/include/net/netns/can.h +++ b/include/net/netns/can.h @@ -9,8 +9,8 @@ #include <linux/spinlock.h> struct can_dev_rcv_lists; -struct s_stats; -struct s_pstats; +struct can_pkg_stats; +struct can_rcv_lists_stats; struct netns_can { #if IS_ENABLED(CONFIG_PROC_FS) @@ -28,11 +28,11 @@ struct netns_can { #endif /* receive filters subscribed for 'all' CAN devices */ - struct can_dev_rcv_lists *can_rx_alldev_list; - spinlock_t can_rcvlists_lock; - struct timer_list can_stattimer;/* timer for statistics update */ - struct s_stats *can_stats; /* packet statistics */ - struct s_pstats *can_pstats; /* receive list statistics */ + struct can_dev_rcv_lists *rx_alldev_list; + spinlock_t rcvlists_lock; + struct timer_list stattimer; /* timer for statistics update */ + struct can_pkg_stats *pkg_stats; + struct can_rcv_lists_stats *rcv_lists_stats; /* CAN GW per-net gateway jobs */ struct hlist_head cgw_list; diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h index 736aeac52f56..95406e1342cb 100644 --- a/include/net/netns/ieee802154_6lowpan.h +++ b/include/net/netns/ieee802154_6lowpan.h @@ -16,7 +16,7 @@ struct netns_sysctl_lowpan { struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; - struct netns_frags frags; + struct fqdir *fqdir; }; #endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 623cfbb7b8dc..c0c0791b1912 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -72,7 +72,7 @@ struct netns_ipv4 { struct inet_peer_base *peers; struct sock * __percpu *tcp_sk; - struct netns_frags frags; + struct fqdir *fqdir; #ifdef CONFIG_NETFILTER struct xt_table *iptable_filter; struct xt_table *iptable_mangle; @@ -116,6 +116,7 @@ struct netns_ipv4 { int sysctl_tcp_l3mdev_accept; #endif int sysctl_tcp_mtu_probing; + int sysctl_tcp_mtu_probe_floor; int sysctl_tcp_base_mss; int sysctl_tcp_min_snd_mss; int sysctl_tcp_probe_threshold; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 5e61b5a8635d..022a0fd1a5a4 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -58,7 
+58,7 @@ struct netns_ipv6 { struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; - struct netns_frags frags; + struct fqdir *fqdir; #ifdef CONFIG_NETFILTER struct xt_table *ip6table_filter; struct xt_table *ip6table_mangle; @@ -116,7 +116,7 @@ struct netns_ipv6 { #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) struct netns_nf_frag { - struct netns_frags frags; + struct fqdir *fqdir; }; #endif diff --git a/include/net/netns/nexthop.h b/include/net/netns/nexthop.h new file mode 100644 index 000000000000..c712ee5eebd9 --- /dev/null +++ b/include/net/netns/nexthop.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * nexthops in net namespaces + */ + +#ifndef __NETNS_NEXTHOP_H__ +#define __NETNS_NEXTHOP_H__ + +#include <linux/rbtree.h> + +struct netns_nexthop { + struct rb_root rb_root; /* tree of nexthops by id */ + struct hlist_head *devhash; /* nexthops by device */ + + unsigned int seq; /* protected by rtnl_mutex */ + u32 last_id_allocated; +}; +#endif diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index 0db7fb3e4e15..bdc0f27b8514 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -128,6 +128,9 @@ struct netns_sctp { /* Flag to indicate if stream interleave is enabled */ int intl_enable; + /* Flag to indicate if ecn is enabled */ + int ecn_enable; + /* * Policy to control SCTP IPv4 address scoping * 0 - Disable IPv4 address scoping diff --git a/include/net/nexthop.h b/include/net/nexthop.h new file mode 100644 index 000000000000..331ebbc94fe7 --- /dev/null +++ b/include/net/nexthop.h @@ -0,0 +1,307 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Generic nexthop implementation + * + * Copyright (c) 2017-19 Cumulus Networks + * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com> + */ + +#ifndef __LINUX_NEXTHOP_H +#define __LINUX_NEXTHOP_H + +#include <linux/netdevice.h> +#include <linux/route.h> +#include <linux/types.h> +#include <net/ip_fib.h> +#include <net/ip6_fib.h> +#include <net/netlink.h> + +#define NEXTHOP_VALID_USER_FLAGS RTNH_F_ONLINK + +struct nexthop; + +struct nh_config { + u32 nh_id; + + u8 nh_family; + u8 nh_protocol; + u8 nh_blackhole; + u32 nh_flags; + + int nh_ifindex; + struct net_device *dev; + + union { + __be32 ipv4; + struct in6_addr ipv6; + } gw; + + struct nlattr *nh_grp; + u16 nh_grp_type; + + struct nlattr *nh_encap; + u16 nh_encap_type; + + u32 nlflags; + struct nl_info nlinfo; +}; + +struct nh_info { + struct hlist_node dev_hash; /* entry on netns devhash */ + struct nexthop *nh_parent; + + u8 family; + bool reject_nh; + + union { + struct fib_nh_common fib_nhc; + struct fib_nh fib_nh; + struct fib6_nh fib6_nh; + }; +}; + +struct nh_grp_entry { + struct nexthop *nh; + u8 weight; + atomic_t upper_bound; + + struct list_head nh_list; + struct nexthop *nh_parent; /* nexthop of group with this entry */ +}; + +struct nh_group { + u16 num_nh; + bool mpath; + bool has_v4; + struct nh_grp_entry nh_entries[0]; +}; + +struct nexthop { + struct rb_node rb_node; /* entry on netns rbtree */ + struct list_head fi_list; /* v4 entries using nh */ + struct list_head f6i_list; /* v6 entries using nh */ + struct list_head grp_list; /* nh group entries using this nh */ + struct net *net; + + u32 id; + + u8 protocol; /* app managing this nh */ + u8 nh_flags; + bool is_group; + + refcount_t refcnt; + struct rcu_head rcu; + + union { + struct nh_info __rcu *nh_info; + struct nh_group __rcu *nh_grp; + }; +}; + +/* caller is holding rcu or rtnl; no reference taken to nexthop */ +struct 
nexthop *nexthop_find_by_id(struct net *net, u32 id); +void nexthop_free_rcu(struct rcu_head *head); + +static inline bool nexthop_get(struct nexthop *nh) +{ + return refcount_inc_not_zero(&nh->refcnt); +} + +static inline void nexthop_put(struct nexthop *nh) +{ + if (refcount_dec_and_test(&nh->refcnt)) + call_rcu(&nh->rcu, nexthop_free_rcu); +} + +static inline bool nexthop_cmp(const struct nexthop *nh1, + const struct nexthop *nh2) +{ + return nh1 == nh2; +} + +static inline bool nexthop_is_multipath(const struct nexthop *nh) +{ + if (nh->is_group) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); + return nh_grp->mpath; + } + return false; +} + +struct nexthop *nexthop_select_path(struct nexthop *nh, int hash); + +static inline unsigned int nexthop_num_path(const struct nexthop *nh) +{ + unsigned int rc = 1; + + if (nexthop_is_multipath(nh)) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); + rc = nh_grp->num_nh; + } + + return rc; +} + +static inline +struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel) +{ + const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); + + /* for_nexthops macros in fib_semantics.c grabs a pointer to + * the nexthop before checking nhsel + */ + if (nhsel >= nhg->num_nh) + return NULL; + + return nhg->nh_entries[nhsel].nh; +} + +static inline +int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh, + u8 rt_family) +{ + struct nh_group *nhg = rtnl_dereference(nh->nh_grp); + int i; + + for (i = 0; i < nhg->num_nh; i++) { + struct nexthop *nhe = nhg->nh_entries[i].nh; + struct nh_info *nhi = rcu_dereference_rtnl(nhe->nh_info); + struct fib_nh_common *nhc = &nhi->fib_nhc; + int weight = nhg->nh_entries[i].weight; + + if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0) + return -EMSGSIZE; + } + + return 0; +} + +/* called with rcu lock */ +static inline bool nexthop_is_blackhole(const struct nexthop *nh) +{ + const struct nh_info *nhi; + + if (nexthop_is_multipath(nh)) { + if (nexthop_num_path(nh) > 1) + return false; + nh = nexthop_mpath_select(nh, 0); + if (!nh) + return false; + } + + nhi = rcu_dereference_rtnl(nh->nh_info); + return nhi->reject_nh; +} + +static inline void nexthop_path_fib_result(struct fib_result *res, int hash) +{ + struct nh_info *nhi; + struct nexthop *nh; + + nh = nexthop_select_path(res->fi->nh, hash); + nhi = rcu_dereference(nh->nh_info); + res->nhc = &nhi->fib_nhc; +} + +/* called with rcu read lock or rtnl held */ +static inline +struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel) +{ + struct nh_info *nhi; + + BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0); + BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0); + + if (nexthop_is_multipath(nh)) { + nh = nexthop_mpath_select(nh, nhsel); + if (!nh) + return NULL; + } + + nhi = rcu_dereference_rtnl(nh->nh_info); + return &nhi->fib_nhc; +} + +static inline unsigned int fib_info_num_path(const struct fib_info *fi) +{ + if (unlikely(fi->nh)) + return nexthop_num_path(fi->nh); + + return fi->fib_nhs; +} + +int fib_check_nexthop(struct nexthop *nh, u8 scope, + struct netlink_ext_ack *extack); + +static inline struct fib_nh_common *fib_info_nhc(struct fib_info *fi, int nhsel) +{ + if (unlikely(fi->nh)) + return nexthop_fib_nhc(fi->nh, nhsel); + + return &fi->fib_nh[nhsel].nh_common; +} + +/* only used when fib_nh is built into fib_info */ +static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel) +{ + WARN_ON(fi->nh); + + return &fi->fib_nh[nhsel]; +} + +/* + 
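Editorial sketch, not part of the patch: walking every path of a fib_info through the nexthop-aware accessors above; the caller must hold rcu_read_lock or RTNL, and the debug print is illustrative.

	#include <net/nexthop.h>

	static void example_dump_fib_paths(struct fib_info *fi)
	{
		unsigned int nhsel, npaths = fib_info_num_path(fi);

		for (nhsel = 0; nhsel < npaths; nhsel++) {
			/* resolves either the embedded fib_nh array or the
			 * external nexthop object, depending on fi->nh */
			struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);

			if (nhc && nhc->nhc_dev)
				pr_debug("path %u via %s\n", nhsel,
					 nhc->nhc_dev->name);
		}
	}
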
* IPv6 variants + */ +int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg, + struct netlink_ext_ack *extack); + +static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh) +{ + struct nh_info *nhi; + + if (nexthop_is_multipath(nh)) { + nh = nexthop_mpath_select(nh, 0); + if (!nh) + return NULL; + } + + nhi = rcu_dereference_rtnl(nh->nh_info); + if (nhi->family == AF_INET6) + return &nhi->fib6_nh; + + return NULL; +} + +static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i) +{ + struct fib6_nh *fib6_nh; + + fib6_nh = f6i->nh ? nexthop_fib6_nh(f6i->nh) : f6i->fib6_nh; + return fib6_nh->fib_nh_dev; +} + +static inline void nexthop_path_fib6_result(struct fib6_result *res, int hash) +{ + struct nexthop *nh = res->f6i->nh; + struct nh_info *nhi; + + nh = nexthop_select_path(nh, hash); + + nhi = rcu_dereference_rtnl(nh->nh_info); + if (nhi->reject_nh) { + res->fib6_type = RTN_BLACKHOLE; + res->fib6_flags |= RTF_REJECT; + res->nh = nexthop_fib6_nh(nh); + } else { + res->nh = &nhi->fib6_nh; + } +} + +int nexthop_for_each_fib6_nh(struct nexthop *nh, + int (*cb)(struct fib6_nh *nh, void *arg), + void *arg); +#endif diff --git a/include/net/page_pool.h b/include/net/page_pool.h index 694d055e01ef..2cbcdbdec254 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -16,14 +16,16 @@ * page_pool_alloc_pages() call. Drivers should likely use * page_pool_dev_alloc_pages() replacing dev_alloc_pages(). * - * If page_pool handles DMA mapping (use page->private), then API user - * is responsible for invoking page_pool_put_page() once. In-case of - * elevated refcnt, the DMA state is released, assuming other users of - * the page will eventually call put_page(). + * API keeps track of in-flight pages, in-order to let API user know + * when it is safe to dealloactor page_pool object. Thus, API users + * must make sure to call page_pool_release_page() when a page is + * "leaving" the page_pool. Or call page_pool_put_page() where + * appropiate. For maintaining correct accounting. * - * If no DMA mapping is done, then it can act as shim-layer that - * fall-through to alloc_page. As no state is kept on the page, the - * regular put_page() call is sufficient. + * API user must only call page_pool_put_page() once on a page, as it + * will either recycle the page, or in case of elevated refcnt, it + * will release the DMA mapping and in-flight state accounting. We + * hope to lift this requirement in the future. */ #ifndef _NET_PAGE_POOL_H #define _NET_PAGE_POOL_H @@ -66,9 +68,10 @@ struct page_pool_params { }; struct page_pool { - struct rcu_head rcu; struct page_pool_params p; + u32 pages_state_hold_cnt; + /* * Data structure for allocation side * @@ -96,6 +99,14 @@ struct page_pool { * TODO: Implement bulk return pages into this structure. */ struct ptr_ring ring; + + atomic_t pages_state_release_cnt; + + /* A page_pool is strictly tied to a single RX-queue being + * protected by NAPI, due to above pp_alloc_cache. This + * refcnt serves purpose is to simplify drivers error handling. + */ + refcount_t user_cnt; }; struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); @@ -107,9 +118,36 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) return page_pool_alloc_pages(pool, gfp); } +/* get the stored dma direction. 
A driver might decide to treat this locally and + * avoid the extra cache line from page_pool to determine the direction + */ +static +inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool) +{ + return pool->p.dma_dir; +} + struct page_pool *page_pool_create(const struct page_pool_params *params); -void page_pool_destroy(struct page_pool *pool); +void __page_pool_free(struct page_pool *pool); +static inline void page_pool_free(struct page_pool *pool) +{ + /* When page_pool isn't compiled-in, net/core/xdp.c doesn't + * allow registering MEM_TYPE_PAGE_POOL, but shield linker. + */ +#ifdef CONFIG_PAGE_POOL + __page_pool_free(pool); +#endif +} + +/* Drivers use this instead of page_pool_free */ +static inline void page_pool_destroy(struct page_pool *pool) +{ + if (!pool) + return; + + page_pool_free(pool); +} /* Never call this directly, use helpers below */ void __page_pool_put_page(struct page_pool *pool, @@ -132,6 +170,43 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, __page_pool_put_page(pool, page, true); } +/* API user MUST have disconnected alloc-side (not allowed to call + * page_pool_alloc_pages()) before calling this. The free-side can + * still run concurrently, to handle in-flight packet-pages. + * + * A request to shutdown can fail (with false) if there are still + * in-flight packet-pages. + */ +bool __page_pool_request_shutdown(struct page_pool *pool); +static inline bool page_pool_request_shutdown(struct page_pool *pool) +{ + bool safe_to_remove = false; + +#ifdef CONFIG_PAGE_POOL + safe_to_remove = __page_pool_request_shutdown(pool); +#endif + return safe_to_remove; +} + +/* Disconnects a page (from a page_pool). API users can have a need + * to disconnect a page (from a page_pool), to allow it to be used as + * a regular page (that will eventually be returned to the normal + * page-allocator via put_page). + */ +void page_pool_unmap_page(struct page_pool *pool, struct page *page); +static inline void page_pool_release_page(struct page_pool *pool, + struct page *page) +{ +#ifdef CONFIG_PAGE_POOL + page_pool_unmap_page(pool, page); +#endif +} + +static inline dma_addr_t page_pool_get_dma_addr(struct page *page) +{ + return page->dma_addr; +} + static inline bool is_page_pool_compiled_in(void) { #ifdef CONFIG_PAGE_POOL @@ -141,4 +216,14 @@ static inline bool is_page_pool_compiled_in(void) #endif } +static inline void page_pool_get(struct page_pool *pool) +{ + refcount_inc(&pool->user_cnt); +} + +static inline bool page_pool_put(struct page_pool *pool) +{ + return refcount_dec_and_test(&pool->user_cnt); +} + #endif /* _NET_PAGE_POOL_H */ diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 514e3c80ecc1..e553fc80eb23 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -6,10 +6,10 @@ #include <linux/workqueue.h> #include <net/sch_generic.h> #include <net/act_api.h> -#include <net/flow_offload.h> +#include <net/net_namespace.h> /* TC action not accessible from user space */ -#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1) +#define TC_ACT_CONSUMED (TC_ACT_VALUE_MAX + 1) /* Basic packet classifier frontend definitions. 
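Editorial sketch, not part of the patch: an RX refill helper against the page_pool API as it looks after this change. The returned DMA address is only meaningful when the pool was created with PP_FLAG_DMA_MAP; the helper name is illustrative.

	#include <net/page_pool.h>

	static struct page *example_rx_refill(struct page_pool *pool,
					      dma_addr_t *dma)
	{
		struct page *page = page_pool_dev_alloc_pages(pool);

		if (!page)
			return NULL;
		/* page_pool keeps the mapping in struct page for us */
		*dma = page_pool_get_dma_addr(page);
		return page;
	}
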
*/ @@ -25,14 +25,8 @@ struct tcf_walker { int register_tcf_proto_ops(struct tcf_proto_ops *ops); int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); -enum tcf_block_binder_type { - TCF_BLOCK_BINDER_TYPE_UNSPEC, - TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS, - TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS, -}; - struct tcf_block_ext_info { - enum tcf_block_binder_type binder_type; + enum flow_block_binder_type binder_type; tcf_chain_head_change_t *chain_head_change; void *chain_head_change_priv; u32 block_index; @@ -65,37 +59,17 @@ static inline bool tcf_block_shared(struct tcf_block *block) return block->index; } +static inline bool tcf_block_non_null_shared(struct tcf_block *block) +{ + return block && block->index; +} + static inline struct Qdisc *tcf_block_q(struct tcf_block *block) { WARN_ON(tcf_block_shared(block)); return block->q; } -void *tcf_block_cb_priv(struct tcf_block_cb *block_cb); -struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block, - tc_setup_cb_t *cb, void *cb_ident); -void tcf_block_cb_incref(struct tcf_block_cb *block_cb); -unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb); -struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, - tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv, - struct netlink_ext_ack *extack); -int tcf_block_cb_register(struct tcf_block *block, - tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv, struct netlink_ext_ack *extack); -void __tcf_block_cb_unregister(struct tcf_block *block, - struct tcf_block_cb *block_cb); -void tcf_block_cb_unregister(struct tcf_block *block, - tc_setup_cb_t *cb, void *cb_ident); -int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, - tc_indr_block_bind_cb_t *cb, void *cb_ident); -int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, - tc_indr_block_bind_cb_t *cb, void *cb_ident); -void __tc_indr_block_cb_unregister(struct net_device *dev, - tc_indr_block_bind_cb_t *cb, void *cb_ident); -void tc_indr_block_cb_unregister(struct net_device *dev, - tc_indr_block_bind_cb_t *cb, void *cb_ident); - int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode); @@ -105,6 +79,11 @@ static inline bool tcf_block_shared(struct tcf_block *block) return false; } +static inline bool tcf_block_non_null_shared(struct tcf_block *block) +{ + return false; +} + static inline int tcf_block_get(struct tcf_block **p_block, struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, @@ -137,97 +116,18 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) } static inline -int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb, +int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb, void *cb_priv) { return 0; } static inline -void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb, +void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb, void *cb_priv) { } -static inline -void *tcf_block_cb_priv(struct tcf_block_cb *block_cb) -{ - return NULL; -} - -static inline -struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block, - tc_setup_cb_t *cb, void *cb_ident) -{ - return NULL; -} - -static inline -void tcf_block_cb_incref(struct tcf_block_cb *block_cb) -{ -} - -static inline -unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb) -{ - return 0; -} - -static inline -struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block, - tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv, - struct netlink_ext_ack *extack) -{ 
- return NULL; -} - -static inline -int tcf_block_cb_register(struct tcf_block *block, - tc_setup_cb_t *cb, void *cb_ident, - void *cb_priv, struct netlink_ext_ack *extack) -{ - return 0; -} - -static inline -void __tcf_block_cb_unregister(struct tcf_block *block, - struct tcf_block_cb *block_cb) -{ -} - -static inline -void tcf_block_cb_unregister(struct tcf_block *block, - tc_setup_cb_t *cb, void *cb_ident) -{ -} - -static inline -int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, - tc_indr_block_bind_cb_t *cb, void *cb_ident) -{ - return 0; -} - -static inline -int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, - tc_indr_block_bind_cb_t *cb, void *cb_ident) -{ - return 0; -} - -static inline -void __tc_indr_block_cb_unregister(struct net_device *dev, - tc_indr_block_bind_cb_t *cb, void *cb_ident) -{ -} - -static inline -void tc_indr_block_cb_unregister(struct net_device *dev, - tc_indr_block_bind_cb_t *cb, void *cb_ident) -{ -} - static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode) { @@ -576,9 +476,6 @@ static inline int tcf_valid_offset(const struct sk_buff *skb, (ptr <= (ptr + len))); } -#ifdef CONFIG_NET_CLS_IND -#include <net/net_namespace.h> - static inline int tcf_change_indev(struct net *net, struct nlattr *indev_tlv, struct netlink_ext_ack *extack) @@ -605,33 +502,30 @@ tcf_match_indev(struct sk_buff *skb, int ifindex) return false; return ifindex == skb->skb_iif; } -#endif /* CONFIG_NET_CLS_IND */ int tc_setup_flow_action(struct flow_action *flow_action, - const struct tcf_exts *exts); + const struct tcf_exts *exts, bool rtnl_held); +void tc_cleanup_flow_action(struct flow_action *flow_action); + int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, - void *type_data, bool err_stop); + void *type_data, bool err_stop, bool rtnl_held); +int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, + enum tc_setup_type type, void *type_data, bool err_stop, + u32 *flags, unsigned int *in_hw_count, bool rtnl_held); +int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, + enum tc_setup_type type, void *type_data, bool err_stop, + u32 *old_flags, unsigned int *old_in_hw_count, + u32 *new_flags, unsigned int *new_in_hw_count, + bool rtnl_held); +int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, + enum tc_setup_type type, void *type_data, bool err_stop, + u32 *flags, unsigned int *in_hw_count, bool rtnl_held); +int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, + bool add, flow_setup_cb_t *cb, + enum tc_setup_type type, void *type_data, + void *cb_priv, u32 *flags, unsigned int *in_hw_count); unsigned int tcf_exts_num_actions(struct tcf_exts *exts); -enum tc_block_command { - TC_BLOCK_BIND, - TC_BLOCK_UNBIND, -}; - -struct tc_block_offload { - enum tc_block_command command; - enum tcf_block_binder_type binder_type; - struct tcf_block *block; - struct netlink_ext_ack *extack; -}; - -struct tc_cls_common_offload { - u32 chain_index; - __be16 protocol; - u32 prio; - struct netlink_ext_ack *extack; -}; - struct tc_cls_u32_knode { struct tcf_exts *exts; struct tcf_result *res; @@ -659,7 +553,7 @@ enum tc_clsu32_command { }; struct tc_cls_u32_offload { - struct tc_cls_common_offload common; + struct flow_cls_common_offload common; /* knode values */ enum tc_clsu32_command command; union { @@ -686,7 +580,7 @@ static inline bool tc_can_offload_extack(const struct net_device *dev, static inline bool 
tc_cls_can_offload_and_chain0(const struct net_device *dev, - struct tc_cls_common_offload *common) + struct flow_cls_common_offload *common) { if (!tc_can_offload_extack(dev, common->extack)) return false; @@ -728,40 +622,17 @@ static inline bool tc_in_hw(u32 flags) } static inline void -tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common, +tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common, const struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { cls_common->chain_index = tp->chain->index; cls_common->protocol = tp->protocol; - cls_common->prio = tp->prio; + cls_common->prio = tp->prio >> 16; if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE) cls_common->extack = extack; } -enum tc_fl_command { - TC_CLSFLOWER_REPLACE, - TC_CLSFLOWER_DESTROY, - TC_CLSFLOWER_STATS, - TC_CLSFLOWER_TMPLT_CREATE, - TC_CLSFLOWER_TMPLT_DESTROY, -}; - -struct tc_cls_flower_offload { - struct tc_cls_common_offload common; - enum tc_fl_command command; - unsigned long cookie; - struct flow_rule *rule; - struct flow_stats stats; - u32 classid; -}; - -static inline struct flow_rule * -tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) -{ - return tc_flow_cmd->rule; -} - enum tc_matchall_command { TC_CLSMATCHALL_REPLACE, TC_CLSMATCHALL_DESTROY, @@ -769,7 +640,7 @@ enum tc_matchall_command { }; struct tc_cls_matchall_offload { - struct tc_cls_common_offload common; + struct flow_cls_common_offload common; enum tc_matchall_command command; struct flow_rule *rule; struct flow_stats stats; @@ -782,7 +653,7 @@ enum tc_clsbpf_command { }; struct tc_cls_bpf_offload { - struct tc_cls_common_offload common; + struct flow_cls_common_offload common; enum tc_clsbpf_command command; struct tcf_exts *exts; struct bpf_prog *prog; diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index a16fbe9a2a67..6a70845bd9ab 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -118,7 +118,12 @@ void __qdisc_run(struct Qdisc *q); static inline void qdisc_run(struct Qdisc *q) { if (qdisc_run_begin(q)) { - __qdisc_run(q); + /* NOLOCK qdisc must check 'state' under the qdisc seqlock + * to avoid racing with dev_qdisc_reset() + */ + if (!(q->flags & TCQ_F_NOLOCK) || + likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) + __qdisc_run(q); qdisc_run_end(q); } } @@ -161,4 +166,27 @@ struct tc_etf_qopt_offload { s32 queue; }; +struct tc_taprio_sched_entry { + u8 command; /* TC_TAPRIO_CMD_* */ + + /* The gate_mask in the offloading side refers to traffic classes */ + u32 gate_mask; + u32 interval; +}; + +struct tc_taprio_qopt_offload { + u8 enable; + ktime_t base_time; + u64 cycle_time; + u64 cycle_time_extension; + + size_t num_entries; + struct tc_taprio_sched_entry entries[0]; +}; + +/* Reference counting */ +struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload + *offload); +void taprio_offload_free(struct tc_taprio_qopt_offload *offload); + #endif diff --git a/include/net/psample.h b/include/net/psample.h index 37a4df2325b2..68ae16bb0a4a 100644 --- a/include/net/psample.h +++ b/include/net/psample.h @@ -11,9 +11,11 @@ struct psample_group { u32 group_num; u32 refcount; u32 seq; + struct rcu_head rcu; }; struct psample_group *psample_group_get(struct net *net, u32 group_num); +void psample_group_take(struct psample_group *group); void psample_group_put(struct psample_group *group); #if IS_ENABLED(CONFIG_PSAMPLE) diff --git a/include/net/request_sock.h b/include/net/request_sock.h index fd178d58fa84..cf8b33213bbc 
100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -185,7 +185,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, static inline bool reqsk_queue_empty(const struct request_sock_queue *queue) { - return queue->rskq_accept_head == NULL; + return READ_ONCE(queue->rskq_accept_head) == NULL; } static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue, @@ -197,7 +197,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue req = queue->rskq_accept_head; if (req) { sk_acceptq_removed(parent); - queue->rskq_accept_head = req->dl_next; + WRITE_ONCE(queue->rskq_accept_head, req->dl_next); if (queue->rskq_accept_head == NULL) queue->rskq_accept_tail = NULL; } diff --git a/include/net/route.h b/include/net/route.h index 55ff71ffb796..6c516840380d 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -53,10 +53,11 @@ struct rtable { unsigned int rt_flags; __u16 rt_type; __u8 rt_is_input; - u8 rt_gw_family; + __u8 rt_uses_gateway; int rt_iif; + u8 rt_gw_family; /* Info on neighbour */ union { __be32 rt_gw4; @@ -231,6 +232,10 @@ void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric); void rt_add_uncached_list(struct rtable *rt); void rt_del_uncached_list(struct rtable *rt); +int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, + u32 table_id, struct fib_info *fi, + int *fa_index, int fa_start, unsigned int flags); + static inline void ip_rt_put(struct rtable *rt) { /* dst_release() accepts a NULL parameter. diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 21f434f3ac9e..637548d54b3e 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -13,8 +13,11 @@ #include <linux/refcount.h> #include <linux/workqueue.h> #include <linux/mutex.h> +#include <linux/rwsem.h> +#include <linux/atomic.h> #include <net/gen_stats.h> #include <net/rtnetlink.h> +#include <net/flow_offload.h> struct Qdisc_ops; struct qdisc_walker; @@ -22,12 +25,6 @@ struct tcf_walker; struct module; struct bpf_flow_keys; -typedef int tc_setup_cb_t(enum tc_setup_type type, - void *type_data, void *cb_priv); - -typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, - enum tc_setup_type type, void *type_data); - struct qdisc_rate_table { struct tc_ratespec rate; u32 data[256]; @@ -279,7 +276,7 @@ struct tcf_result { }; const struct tcf_proto *goto_tp; - /* used by the TC_ACT_REINSERT action */ + /* used in the skb_tc_reinsert function */ struct { bool ingress; struct gnet_stats_queue *qstats; @@ -313,8 +310,12 @@ struct tcf_proto_ops { void (*walk)(struct tcf_proto *tp, struct tcf_walker *arg, bool rtnl_held); int (*reoffload)(struct tcf_proto *tp, bool add, - tc_setup_cb_t *cb, void *cb_priv, + flow_setup_cb_t *cb, void *cb_priv, struct netlink_ext_ack *extack); + void (*hw_add)(struct tcf_proto *tp, + void *type_data); + void (*hw_del)(struct tcf_proto *tp, + void *type_data); void (*bind_class)(void *, u32, unsigned long); void * (*tmplt_create)(struct net *net, struct tcf_chain *chain, @@ -401,11 +402,13 @@ struct tcf_block { refcount_t refcnt; struct net *net; struct Qdisc *q; - struct list_head cb_list; + struct rw_semaphore cb_lock; /* protects cb_list and offload counters */ + struct flow_block flow_block; struct list_head owner_list; bool keep_dst; - unsigned int offloadcnt; /* Number of oddloaded filters */ + atomic_t offloadcnt; /* Number of oddloaded filters */ unsigned int nooffloaddevcnt; /* Number of devs unable to do offload 
*/ + unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */ struct { struct tcf_chain *chain; struct list_head filter_chain_list; @@ -441,37 +444,6 @@ static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp) #define tcf_proto_dereference(p, tp) \ rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp)) -static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) -{ - if (*flags & TCA_CLS_FLAGS_IN_HW) - return; - *flags |= TCA_CLS_FLAGS_IN_HW; - block->offloadcnt++; -} - -static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) -{ - if (!(*flags & TCA_CLS_FLAGS_IN_HW)) - return; - *flags &= ~TCA_CLS_FLAGS_IN_HW; - block->offloadcnt--; -} - -static inline void -tc_cls_offload_cnt_update(struct tcf_block *block, u32 *cnt, - u32 *flags, bool add) -{ - if (add) { - if (!*cnt) - tcf_block_offload_inc(block, flags); - (*cnt)++; - } else { - (*cnt)--; - if (!*cnt) - tcf_block_offload_dec(block, flags); - } -} - static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { struct qdisc_skb_cb *qcb; @@ -522,6 +494,11 @@ static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc) return q; } +static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc) +{ + return rcu_dereference_bh(qdisc->dev_queue->qdisc); +} + static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) { return qdisc->dev_queue->qdisc_sleeping; diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h index caaae2de9099..d4b3b2dcd15b 100644 --- a/include/net/sctp/auth.h +++ b/include/net/sctp/auth.h @@ -107,5 +107,7 @@ int sctp_auth_del_key_id(struct sctp_endpoint *ep, struct sctp_association *asoc, __u16 key_id); int sctp_auth_deact_key_id(struct sctp_endpoint *ep, struct sctp_association *asoc, __u16 key_id); +int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp); +void sctp_auth_free(struct sctp_endpoint *ep); #endif diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h index 314699333bec..5a9bb09f32b6 100644 --- a/include/net/sctp/checksum.h +++ b/include/net/sctp/checksum.h @@ -43,19 +43,21 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2, (__force __u32)csum2, len); } +static const struct skb_checksum_ops sctp_csum_ops = { + .update = sctp_csum_update, + .combine = sctp_csum_combine, +}; + static inline __le32 sctp_compute_cksum(const struct sk_buff *skb, unsigned int offset) { struct sctphdr *sh = (struct sctphdr *)(skb->data + offset); - const struct skb_checksum_ops ops = { - .update = sctp_csum_update, - .combine = sctp_csum_combine, - }; __le32 old = sh->checksum; __wsum new; sh->checksum = 0; - new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0, &ops); + new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0, + &sctp_csum_ops); sh->checksum = old; return cpu_to_le32((__force __u32)new); diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 5d60f13d2347..3ab5c6bbb90b 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -610,4 +610,9 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize) return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize); } +static inline bool sctp_newsk_ready(const struct sock *sk) +{ + return sock_flag(sk, SOCK_DEAD) || sk->sk_socket; +} + #endif /* __net_sctp_h__ */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 0767701ef362..503fbc3cd819 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -219,7 
+219,6 @@ struct sctp_sock { disable_fragments:1, v4mapped:1, frag_interleave:1, - strm_interleave:1, recvrcvinfo:1, recvnxtinfo:1, data_ready_signalled:1; @@ -1323,8 +1322,11 @@ struct sctp_endpoint { /* SCTP-AUTH: endpoint shared keys */ struct list_head endpoint_shared_keys; __u16 active_key_id; - __u8 auth_enable:1, + __u8 ecn_enable:1, + auth_enable:1, + intl_enable:1, prsctp_enable:1, + asconf_enable:1, reconf_enable:1; __u8 strreset_enable; @@ -1679,28 +1681,30 @@ struct sctp_association { __be16 addip_disabled_mask; /* These are capabilities which our peer advertised. */ - __u8 ecn_capable:1, /* Can peer do ECN? */ + __u16 ecn_capable:1, /* Can peer do ECN? */ ipv4_address:1, /* Peer understands IPv4 addresses? */ ipv6_address:1, /* Peer understands IPv6 addresses? */ hostname_address:1, /* Peer understands DNS addresses? */ asconf_capable:1, /* Does peer support ADDIP? */ prsctp_capable:1, /* Can peer do PR-SCTP? */ reconf_capable:1, /* Can peer do RE-CONFIG? */ - auth_capable:1; /* Is peer doing SCTP-AUTH? */ - - /* sack_needed : This flag indicates if the next received - * : packet is to be responded to with a - * : SACK. This is initialized to 0. When a packet - * : is received sack_cnt is incremented. If this value - * : reaches 2 or more, a SACK is sent and the - * : value is reset to 0. Note: This is used only - * : when no DATA chunks are received out of - * : order. When DATA chunks are out of order, - * : SACK's are not delayed (see Section 6). - */ - __u8 sack_needed:1, /* Do we need to sack the peer? */ + intl_capable:1, /* Can peer do INTERLEAVE */ + auth_capable:1, /* Is peer doing SCTP-AUTH? */ + /* sack_needed: + * This flag indicates if the next received + * packet is to be responded to with a + * SACK. This is initialized to 0. When a packet + * is received sack_cnt is incremented. If this value + * reaches 2 or more, a SACK is sent and the + * value is reset to 0. Note: This is used only + * when no DATA chunks are received out of + * order. When DATA chunks are out of order, + * SACK's are not delayed (see Section 6). + */ + sack_needed:1, /* Do we need to sack the peer? */ sack_generation:1, zero_window_announced:1; + __u32 sack_cnt; __u32 adaptation_ind; /* Adaptation Code point. */ @@ -2049,10 +2053,7 @@ struct sctp_association { __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ temp:1, /* Is it a temporary association? 
*/ - force_delay:1, - intl_enable:1, - prsctp_enable:1, - reconf_enable:1; + force_delay:1; __u8 strreset_enable; __u8 strreset_outstanding; /* request param count on the fly */ diff --git a/include/net/sock.h b/include/net/sock.h index 6cbc16136357..8f9adcfac41b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -878,12 +878,17 @@ static inline bool sk_acceptq_is_full(const struct sock *sk) */ static inline int sk_stream_min_wspace(const struct sock *sk) { - return sk->sk_wmem_queued >> 1; + return READ_ONCE(sk->sk_wmem_queued) >> 1; } static inline int sk_stream_wspace(const struct sock *sk) { - return sk->sk_sndbuf - sk->sk_wmem_queued; + return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued); +} + +static inline void sk_wmem_queued_add(struct sock *sk, int val) +{ + WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val); } void sk_stream_write_space(struct sock *sk); @@ -949,8 +954,8 @@ static inline void sk_incoming_cpu_update(struct sock *sk) { int cpu = raw_smp_processor_id(); - if (unlikely(sk->sk_incoming_cpu != cpu)) - sk->sk_incoming_cpu = cpu; + if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu)) + WRITE_ONCE(sk->sk_incoming_cpu, cpu); } static inline void sock_rps_record_flow_hash(__u32 hash) @@ -1207,7 +1212,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk) static inline bool __sk_stream_memory_free(const struct sock *sk, int wake) { - if (sk->sk_wmem_queued >= sk->sk_sndbuf) + if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf)) return false; return sk->sk_prot->stream_memory_free ? @@ -1467,7 +1472,7 @@ DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) { sock_set_flag(sk, SOCK_QUEUE_SHRUNK); - sk->sk_wmem_queued -= skb->truesize; + sk_wmem_queued_add(sk, -skb->truesize); sk_mem_uncharge(sk, skb->truesize); if (static_branch_unlikely(&tcp_tx_skb_cache_key) && !sk->sk_tx_skb_cache && !skb_cloned(skb)) { @@ -1822,7 +1827,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent) { WARN_ON(parent->sk); write_lock_bh(&sk->sk_callback_lock); - rcu_assign_pointer(sk->sk_wq, parent->wq); + rcu_assign_pointer(sk->sk_wq, &parent->wq); parent->sk = sk; sk_set_socket(sk, parent); sk->sk_uid = SOCK_INODE(parent)->i_uid; @@ -2014,7 +2019,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro skb->len += copy; skb->data_len += copy; skb->truesize += copy; - sk->sk_wmem_queued += copy; + sk_wmem_queued_add(sk, copy); sk_mem_charge(sk, copy); return 0; } @@ -2100,7 +2105,7 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock, poll_table *p) { if (!poll_does_not_wait(p)) { - poll_wait(filp, &sock->wq->wait, p); + poll_wait(filp, &sock->wq.wait, p); /* We need to be sure we are in sync with the * socket flags modification. 
* @@ -2220,10 +2225,14 @@ static inline void sk_wake_async(const struct sock *sk, int how, int band) static inline void sk_stream_moderate_sndbuf(struct sock *sk) { - if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { - sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); - sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF); - } + u32 val; + + if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) + return; + + val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); + + WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF)); } struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, @@ -2233,12 +2242,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, * sk_page_frag - return an appropriate page_frag * @sk: socket * - * If socket allocation mode allows current thread to sleep, it means its - * safe to use the per task page_frag instead of the per socket one. + * Use the per task page_frag instead of the per socket one for + * optimization when we know that we're in the normal context and owns + * everything that's associated with %current. + * + * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest + * inside other socket operations and end up recursing into sk_page_frag() + * while it's already in use. */ static inline struct page_frag *sk_page_frag(struct sock *sk) { - if (gfpflags_allow_blocking(sk->sk_allocation)) + if (gfpflags_normal_context(sk->sk_allocation)) return &current->task_frag; return &sk->sk_frag; @@ -2251,7 +2265,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag); */ static inline bool sock_writeable(const struct sock *sk) { - return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); + return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1); } static inline gfp_t gfp_any(void) @@ -2271,7 +2285,9 @@ static inline long sock_sndtimeo(const struct sock *sk, bool noblock) static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) { - return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1; + int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len); + + return v ?: 1; } /* Alas, with timeout socket operations are not restartable. @@ -2482,6 +2498,7 @@ static inline bool sk_fullsock(const struct sock *sk) /* Checks if this SKB belongs to an HW offloaded socket * and whether any SW fallbacks are required based on dev. + * Check decrypted mark in case skb_orphan() cleared socket. */ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) @@ -2489,8 +2506,15 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, #ifdef CONFIG_SOCK_VALIDATE_XMIT struct sock *sk = skb->sk; - if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) + if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { skb = sk->sk_validate_xmit_skb(sk, dev, skb); +#ifdef CONFIG_TLS_DEVICE + } else if (unlikely(skb->decrypted)) { + pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n"); + kfree_skb(skb); + skb = NULL; +#endif + } #endif return skb; diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 8a5f70c7cdf2..43f4a818d88f 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -21,7 +21,8 @@ struct sock_reuseport { unsigned int synq_overflow_ts; /* ID stays the same even after the size of socks[] grows.
*/ unsigned int reuseport_id; - bool bind_inany; + unsigned int bind_inany:1; + unsigned int has_conns:1; struct bpf_prog __rcu *prog; /* optional BPF sock selector */ struct sock *socks[0]; /* array of sock pointers */ }; @@ -35,6 +36,25 @@ extern struct sock *reuseport_select_sock(struct sock *sk, struct sk_buff *skb, int hdr_len); extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); +extern int reuseport_detach_prog(struct sock *sk); + +static inline bool reuseport_has_conns(struct sock *sk, bool set) +{ + struct sock_reuseport *reuse; + bool ret = false; + + rcu_read_lock(); + reuse = rcu_dereference(sk->sk_reuseport_cb); + if (reuse) { + if (set) + reuse->has_conns = 1; + ret = reuse->has_conns; + } + rcu_read_unlock(); + + return ret; +} + int reuseport_get_id(struct sock_reuseport *reuse); #endif /* _SOCK_REUSEPORT_H */ diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h new file mode 100644 index 000000000000..bdc20ab3b88d --- /dev/null +++ b/include/net/tc_act/tc_ct.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NET_TC_CT_H +#define __NET_TC_CT_H + +#include <net/act_api.h> +#include <uapi/linux/tc_act/tc_ct.h> + +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include <net/netfilter/nf_nat.h> +#include <net/netfilter/nf_conntrack_labels.h> + +struct tcf_ct_params { + struct nf_conn *tmpl; + u16 zone; + + u32 mark; + u32 mark_mask; + + u32 labels[NF_CT_LABELS_MAX_SIZE / sizeof(u32)]; + u32 labels_mask[NF_CT_LABELS_MAX_SIZE / sizeof(u32)]; + + struct nf_nat_range2 range; + bool ipv4_range; + + u16 ct_action; + + struct rcu_head rcu; +}; + +struct tcf_ct { + struct tc_action common; + struct tcf_ct_params __rcu *params; +}; + +#define to_ct(a) ((struct tcf_ct *)a) +#define to_ct_params(a) ((struct tcf_ct_params *) \ + rtnl_dereference((to_ct(a)->params))) + +static inline uint16_t tcf_ct_zone(const struct tc_action *a) +{ + return to_ct_params(a)->zone; +} + +static inline int tcf_ct_action(const struct tc_action *a) +{ + return to_ct_params(a)->ct_action; +} + +#else +static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; } +static inline int tcf_ct_action(const struct tc_action *a) { return 0; } +#endif /* CONFIG_NF_CONNTRACK */ + +static inline bool is_tcf_ct(const struct tc_action *a) +{ +#if defined(CONFIG_NET_CLS_ACT) && IS_ENABLED(CONFIG_NF_CONNTRACK) + if (a->ops && a->ops->id == TCA_ID_CT) + return true; +#endif + return false; +} + +#endif /* __NET_TC_CT_H */ diff --git a/include/net/tc_act/tc_ctinfo.h b/include/net/tc_act/tc_ctinfo.h new file mode 100644 index 000000000000..f071c1d70a25 --- /dev/null +++ b/include/net/tc_act/tc_ctinfo.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NET_TC_CTINFO_H +#define __NET_TC_CTINFO_H + +#include <net/act_api.h> + +struct tcf_ctinfo_params { + struct rcu_head rcu; + struct net *net; + u32 dscpmask; + u32 dscpstatemask; + u32 cpmarkmask; + u16 zone; + u8 mode; + u8 dscpmaskshift; +}; + +struct tcf_ctinfo { + struct tc_action common; + struct tcf_ctinfo_params __rcu *params; + u64 stats_dscp_set; + u64 stats_dscp_error; + u64 stats_cpmark_set; +}; + +enum { + CTINFO_MODE_DSCP = BIT(0), + CTINFO_MODE_CPMARK = BIT(1) +}; + +#define to_ctinfo(a) ((struct tcf_ctinfo *)a) + +#endif /* __NET_TC_CTINFO_H */ diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h index c757585a05b0..1cace4c69e44 100644 --- a/include/net/tc_act/tc_mirred.h +++ b/include/net/tc_act/tc_mirred.h @@ -32,6 +32,24 @@ static inline bool 
is_tcf_mirred_egress_mirror(const struct tc_action *a) return false; } +static inline bool is_tcf_mirred_ingress_redirect(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->id == TCA_ID_MIRRED) + return to_mirred(a)->tcfm_eaction == TCA_INGRESS_REDIR; +#endif + return false; +} + +static inline bool is_tcf_mirred_ingress_mirror(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->id == TCA_ID_MIRRED) + return to_mirred(a)->tcfm_eaction == TCA_INGRESS_MIRROR; +#endif + return false; +} + static inline struct net_device *tcf_mirred_dev(const struct tc_action *a) { return rtnl_dereference(to_mirred(a)->tcfm_dev); diff --git a/include/net/tc_act/tc_mpls.h b/include/net/tc_act/tc_mpls.h new file mode 100644 index 000000000000..721de4f5733a --- /dev/null +++ b/include/net/tc_act/tc_mpls.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2019 Netronome Systems, Inc. */ + +#ifndef __NET_TC_MPLS_H +#define __NET_TC_MPLS_H + +#include <linux/tc_act/tc_mpls.h> +#include <net/act_api.h> + +struct tcf_mpls_params { + int tcfm_action; + u32 tcfm_label; + u8 tcfm_tc; + u8 tcfm_ttl; + u8 tcfm_bos; + __be16 tcfm_proto; + struct rcu_head rcu; +}; + +#define ACT_MPLS_TC_NOT_SET 0xff +#define ACT_MPLS_BOS_NOT_SET 0xff +#define ACT_MPLS_LABEL_NOT_SET 0xffffffff + +struct tcf_mpls { + struct tc_action common; + struct tcf_mpls_params __rcu *mpls_p; +}; +#define to_mpls(a) ((struct tcf_mpls *)a) + +static inline bool is_tcf_mpls(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->id == TCA_ID_MPLS) + return true; +#endif + return false; +} + +static inline u32 tcf_mpls_action(const struct tc_action *a) +{ + u32 tcfm_action; + + rcu_read_lock(); + tcfm_action = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_action; + rcu_read_unlock(); + + return tcfm_action; +} + +static inline __be16 tcf_mpls_proto(const struct tc_action *a) +{ + __be16 tcfm_proto; + + rcu_read_lock(); + tcfm_proto = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_proto; + rcu_read_unlock(); + + return tcfm_proto; +} + +static inline u32 tcf_mpls_label(const struct tc_action *a) +{ + u32 tcfm_label; + + rcu_read_lock(); + tcfm_label = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_label; + rcu_read_unlock(); + + return tcfm_label; +} + +static inline u8 tcf_mpls_tc(const struct tc_action *a) +{ + u8 tcfm_tc; + + rcu_read_lock(); + tcfm_tc = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_tc; + rcu_read_unlock(); + + return tcfm_tc; +} + +static inline u8 tcf_mpls_bos(const struct tc_action *a) +{ + u8 tcfm_bos; + + rcu_read_lock(); + tcfm_bos = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_bos; + rcu_read_unlock(); + + return tcfm_bos; +} + +static inline u8 tcf_mpls_ttl(const struct tc_action *a) +{ + u8 tcfm_ttl; + + rcu_read_lock(); + tcfm_ttl = rcu_dereference(to_mpls(a)->mpls_p)->tcfm_ttl; + rcu_read_unlock(); + + return tcfm_ttl; +} + +#endif /* __NET_TC_MPLS_H */ diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h index 8b9ef3664262..cfdc7cb82cad 100644 --- a/include/net/tc_act/tc_police.h +++ b/include/net/tc_act/tc_police.h @@ -54,7 +54,7 @@ static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act) struct tcf_police *police = to_police(act); struct tcf_police_params *params; - params = rcu_dereference_bh(police->params); + params = rcu_dereference_bh_rtnl(police->params); return params->rate.rate_bytes_ps; } @@ -63,7 +63,7 @@ static inline s64 tcf_police_tcfp_burst(const struct tc_action 
*act) struct tcf_police *police = to_police(act); struct tcf_police_params *params; - params = rcu_dereference_bh(police->params); + params = rcu_dereference_bh_rtnl(police->params); return params->tcfp_burst; } diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h index 0a559d4b6f0f..b5d76305e854 100644 --- a/include/net/tc_act/tc_sample.h +++ b/include/net/tc_act/tc_sample.h @@ -41,10 +41,4 @@ static inline int tcf_sample_trunc_size(const struct tc_action *a) return to_sample(a)->trunc_size; } -static inline struct psample_group * -tcf_sample_psample_group(const struct tc_action *a) -{ - return rcu_dereference(to_sample(a)->psample_group); -} - #endif /* __NET_TC_SAMPLE_H */ diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h index 4c04e2985508..b22a1f641f02 100644 --- a/include/net/tc_act/tc_skbedit.h +++ b/include/net/tc_act/tc_skbedit.h @@ -54,4 +54,31 @@ static inline u32 tcf_skbedit_mark(const struct tc_action *a) return mark; } +/* Return true iff action is ptype */ +static inline bool is_tcf_skbedit_ptype(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + u32 flags; + + if (a->ops && a->ops->id == TCA_ID_SKBEDIT) { + rcu_read_lock(); + flags = rcu_dereference(to_skbedit(a)->params)->flags; + rcu_read_unlock(); + return flags == SKBEDIT_F_PTYPE; + } +#endif + return false; +} + +static inline u32 tcf_skbedit_ptype(const struct tc_action *a) +{ + u16 ptype; + + rcu_read_lock(); + ptype = rcu_dereference(to_skbedit(a)->params)->ptype; + rcu_read_unlock(); + + return ptype; +} + #endif /* __NET_TC_SKBEDIT_H */ diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h index 7c3f777c168c..0689d9bcdf84 100644 --- a/include/net/tc_act/tc_tunnel_key.h +++ b/include/net/tc_act/tc_tunnel_key.h @@ -59,4 +59,21 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a) return NULL; #endif } + +static inline struct ip_tunnel_info * +tcf_tunnel_info_copy(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + struct ip_tunnel_info *tun = tcf_tunnel_info(a); + + if (tun) { + size_t tun_size = sizeof(*tun) + tun->options_len; + struct ip_tunnel_info *tun_copy = kmemdup(tun, tun_size, + GFP_KERNEL); + + return tun_copy; + } +#endif + return NULL; +} #endif /* __NET_TC_TUNNEL_KEY_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 582c0caa9811..ab4eb5eb5d07 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -43,6 +43,7 @@ #include <linux/seq_file.h> #include <linux/memcontrol.h> #include <linux/bpf-cgroup.h> +#include <linux/siphash.h> extern struct inet_hashinfo tcp_hashinfo; @@ -63,7 +64,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); /* Minimal accepted MSS. It is (60+60+8) - (20+20). 
*/ #define TCP_MIN_MSS 88U -/* The least MTU to use for probing */ +/* The initial MTU to use for probing */ #define TCP_BASE_MSS 1024 /* probing interval, default to 10 minutes as per RFC4821 */ @@ -257,7 +258,7 @@ static inline bool tcp_under_memory_pressure(const struct sock *sk) mem_cgroup_under_socket_pressure(sk->sk_memcg)) return true; - return tcp_memory_pressure; + return READ_ONCE(tcp_memory_pressure); } /* * The next routines deal with comparing 32 bit unsigned ints @@ -414,6 +415,16 @@ void tcp_parse_options(const struct net *net, const struct sk_buff *skb, const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); /* + * BPF SKB-less helpers + */ +u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph, + struct tcphdr *th, u32 *cookie); +u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph, + struct tcphdr *th, u32 *cookie); +u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops, + const struct tcp_request_sock_ops *af_ops, + struct sock *sk, struct tcphdr *th); +/* * TCP v4 functions exported for the inet6 API */ @@ -1063,7 +1074,8 @@ void tcp_get_default_congestion_control(struct net *net, char *name); void tcp_get_available_congestion_control(char *buf, size_t len); void tcp_get_allowed_congestion_control(char *buf, size_t len); int tcp_set_allowed_congestion_control(char *allowed); -int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit); +int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, + bool reinit, bool cap_net_admin); u32 tcp_slow_start(struct tcp_sock *tp, u32 acked); void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked); @@ -1368,13 +1380,14 @@ static inline int tcp_win_from_space(const struct sock *sk, int space) /* Note: caller must be prepared to deal with negative returns */ static inline int tcp_space(const struct sock *sk) { - return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len - + return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - + READ_ONCE(sk->sk_backlog.len) - atomic_read(&sk->sk_rmem_alloc)); } static inline int tcp_full_space(const struct sock *sk) { - return tcp_win_from_space(sk, sk->sk_rcvbuf); + return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); } extern void tcp_openreq_init_rwin(struct request_sock *req, @@ -1612,7 +1625,7 @@ void tcp_free_fastopen_req(struct tcp_sock *tp); void tcp_fastopen_destroy_cipher(struct sock *sk); void tcp_fastopen_ctx_destroy(struct net *net); int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk, - void *key, unsigned int len); + void *primary_key, void *backup_key); void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb); struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct request_sock *req, @@ -1622,13 +1635,16 @@ void tcp_fastopen_init_key_once(struct net *net); bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, struct tcp_fastopen_cookie *cookie); bool tcp_fastopen_defer_connect(struct sock *sk, int *err); -#define TCP_FASTOPEN_KEY_LENGTH 16 +#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t) +#define TCP_FASTOPEN_KEY_MAX 2 +#define TCP_FASTOPEN_KEY_BUF_LENGTH \ + (TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX) /* Fastopen key context */ struct tcp_fastopen_context { - struct crypto_cipher *tfm; - __u8 key[TCP_FASTOPEN_KEY_LENGTH]; - struct rcu_head rcu; + siphash_key_t key[TCP_FASTOPEN_KEY_MAX]; + int num; + struct rcu_head rcu; }; extern unsigned int sysctl_tcp_fastopen_blackhole_timeout; @@ -1637,6 +1653,35 @@ bool 
tcp_fastopen_active_should_disable(struct sock *sk); void tcp_fastopen_active_disable_ofo_check(struct sock *sk); void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired); +/* Caller needs to wrap with rcu_read_(un)lock() */ +static inline +struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk) +{ + struct tcp_fastopen_context *ctx; + + ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx); + if (!ctx) + ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx); + return ctx; +} + +static inline +bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc, + const struct tcp_fastopen_cookie *orig) +{ + if (orig->len == TCP_FASTOPEN_COOKIE_SIZE && + orig->len == foc->len && + !memcmp(orig->val, foc->val, foc->len)) + return true; + return false; +} + +static inline +int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx) +{ + return ctx->num; +} + /* Latencies incurred by various limits for a sender. They are * chronograph-like stats that are mutually exclusive. */ @@ -1675,6 +1720,11 @@ static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk) return skb_rb_first(&sk->tcp_rtx_queue); } +static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk) +{ + return skb_rb_last(&sk->tcp_rtx_queue); +} + static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) { return skb_peek(&sk->sk_write_queue); @@ -1867,7 +1917,8 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) static inline bool tcp_stream_memory_free(const struct sock *sk, int wake) { const struct tcp_sock *tp = tcp_sk(sk); - u32 notsent_bytes = tp->write_seq - tp->snd_nxt; + u32 notsent_bytes = READ_ONCE(tp->write_seq) - + READ_ONCE(tp->snd_nxt); return (notsent_bytes << wake) < tcp_notsent_lowat(tp); } @@ -2069,8 +2120,13 @@ struct tcp_ulp_ops { /* initialize ulp */ int (*init)(struct sock *sk); + /* update ulp */ + void (*update)(struct sock *sk, struct proto *p); /* cleanup ulp */ void (*release)(struct sock *sk); + /* diagnostic */ + int (*get_info)(const struct sock *sk, struct sk_buff *skb); + size_t (*get_info_size)(const struct sock *sk); char name[TCP_ULP_NAME_MAX]; struct module *owner; @@ -2080,6 +2136,7 @@ void tcp_unregister_ulp(struct tcp_ulp_ops *type); int tcp_set_ulp(struct sock *sk, const char *name); void tcp_get_available_ulp(char *buf, size_t len); void tcp_cleanup_ulp(struct sock *sk); +void tcp_update_ulp(struct sock *sk, struct proto *p); #define MODULE_ALIAS_TCP_ULP(name) \ __MODULE_INFO(alias, alias_userspace, name); \ @@ -2188,6 +2245,12 @@ static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1); } +static inline void tcp_bpf_rtt(struct sock *sk) +{ + if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG)) + tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL); +} + #if IS_ENABLED(CONFIG_SMC) extern struct static_key_false tcp_have_smc; #endif @@ -2199,4 +2262,26 @@ void clean_acked_data_disable(struct inet_connection_sock *icsk); void clean_acked_data_flush(void); #endif +DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); +static inline void tcp_add_tx_delay(struct sk_buff *skb, + const struct tcp_sock *tp) +{ + if (static_branch_unlikely(&tcp_tx_delay_enabled)) + skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC; +} + +/* Compute Earliest Departure Time for some control packets + * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets. 
+ */ +static inline u64 tcp_transmit_time(const struct sock *sk) +{ + if (static_branch_unlikely(&tcp_tx_delay_enabled)) { + u32 delay = (sk->sk_state == TCP_TIME_WAIT) ? + tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay; + + return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC; + } + return 0; +} + #endif /* _TCP_H */ diff --git a/include/net/tls.h b/include/net/tls.h index 53d96bca220d..c664e6dba0d1 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -40,6 +40,8 @@ #include <linux/socket.h> #include <linux/tcp.h> #include <linux/skmsg.h> +#include <linux/netdevice.h> +#include <linux/rcupdate.h> #include <net/tcp.h> #include <net/strparser.h> @@ -61,6 +63,7 @@ #define TLS_DEVICE_NAME_MAX 32 #define MAX_IV_SIZE 16 +#define TLS_MAX_REC_SEQ_SIZE 8 /* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes. * @@ -105,9 +108,7 @@ struct tls_device { enum { TLS_BASE, TLS_SW, -#ifdef CONFIG_TLS_DEVICE TLS_HW, -#endif TLS_HW_RECORD, TLS_NUM_CONFIG, }; @@ -160,6 +161,7 @@ struct tls_sw_context_tx { int async_capable; #define BIT_TX_SCHEDULED 0 +#define BIT_TX_CLOSING 1 unsigned long tx_bitmask; }; @@ -197,20 +199,24 @@ struct tls_offload_context_tx { struct scatterlist sg_tx_data[MAX_SKB_FRAGS]; void (*sk_destruct)(struct sock *sk); - u8 driver_state[]; + u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state * Currently the belief is that there is not enough * driver specific state to justify another layer of indirection */ -#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *))) +#define TLS_DRIVER_STATE_SIZE_TX 16 }; #define TLS_OFFLOAD_CONTEXT_SIZE_TX \ - (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \ - TLS_DRIVER_STATE_SIZE) + (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX) enum tls_context_flags { TLS_RX_SYNC_RUNNING = 0, + /* Unlike RX where resync is driven entirely by the core in TX only + * the driver knows when things went out of sync, so we need the flag + * to be atomic. 
+ */ + TLS_TX_SYNC_SCHED = 1, }; struct cipher_context { @@ -240,45 +246,42 @@ struct tls_prot_info { }; struct tls_context { + /* read-only cache line */ struct tls_prot_info prot_info; - union tls_crypto_context crypto_send; - union tls_crypto_context crypto_recv; + u8 tx_conf:3; + u8 rx_conf:3; - struct list_head list; - struct net_device *netdev; - refcount_t refcount; + int (*push_pending_record)(struct sock *sk, int flags); + void (*sk_write_space)(struct sock *sk); void *priv_ctx_tx; void *priv_ctx_rx; - u8 tx_conf:3; - u8 rx_conf:3; + struct net_device *netdev; + /* rw cache line */ struct cipher_context tx; struct cipher_context rx; struct scatterlist *partially_sent_record; u16 partially_sent_offset; - unsigned long flags; bool in_tcp_sendpages; bool pending_open_record_frags; + unsigned long flags; - int (*push_pending_record)(struct sock *sk, int flags); + /* cache cold stuff */ + struct proto *sk_proto; - void (*sk_write_space)(struct sock *sk); void (*sk_destruct)(struct sock *sk); - void (*sk_proto_close)(struct sock *sk, long timeout); - - int (*setsockopt)(struct sock *sk, int level, - int optname, char __user *optval, - unsigned int optlen); - int (*getsockopt)(struct sock *sk, int level, - int optname, char __user *optval, - int __user *optlen); - int (*hash)(struct sock *sk); - void (*unhash)(struct sock *sk); + + union tls_crypto_context crypto_send; + union tls_crypto_context crypto_recv; + + struct list_head list; + refcount_t refcount; + struct rcu_head rcu; }; enum tls_offload_ctx_dir { @@ -294,25 +297,50 @@ struct tlsdev_ops { void (*tls_dev_del)(struct net_device *netdev, struct tls_context *ctx, enum tls_offload_ctx_dir direction); - void (*tls_dev_resync_rx)(struct net_device *netdev, - struct sock *sk, u32 seq, u64 rcd_sn); + int (*tls_dev_resync)(struct net_device *netdev, + struct sock *sk, u32 seq, u8 *rcd_sn, + enum tls_offload_ctx_dir direction); +}; + +enum tls_offload_sync_type { + TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0, + TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1, }; +#define TLS_DEVICE_RESYNC_NH_START_IVAL 2 +#define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128 + struct tls_offload_context_rx { /* sw must be the first member of tls_offload_context_rx */ struct tls_sw_context_rx sw; - atomic64_t resync_req; - u8 driver_state[]; + enum tls_offload_sync_type resync_type; + /* this member is set regardless of resync_type, to avoid branches */ + u8 resync_nh_reset:1; + /* CORE_NEXT_HINT-only member, but use the hole here */ + u8 resync_nh_do_now:1; + union { + /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */ + struct { + atomic64_t resync_req; + }; + /* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */ + struct { + u32 decrypted_failed; + u32 decrypted_tgt; + } resync_nh; + }; + u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state * Currently the belief is that there is not enough * driver specific state to justify another layer of indirection */ +#define TLS_DRIVER_STATE_SIZE_RX 8 }; #define TLS_OFFLOAD_CONTEXT_SIZE_RX \ - (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \ - TLS_DRIVER_STATE_SIZE) + (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX) +void tls_ctx_free(struct sock *sk, struct tls_context *ctx); int wait_on_pending_writer(struct sock *sk, long *timeo); int tls_sk_query(struct sock *sk, int optname, char __user *optval, int __user *optlen); @@ -320,13 +348,17 @@ int tls_sk_attach(struct sock *sk, int optname, char __user *optval, unsigned int optlen); int tls_set_sw_offload(struct sock *sk, struct tls_context 
*ctx, int tx); +void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx); +void tls_sw_strparser_done(struct tls_context *tls_ctx); int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tls_sw_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); -void tls_sw_close(struct sock *sk, long timeout); -void tls_sw_free_resources_tx(struct sock *sk); +void tls_sw_cancel_work_tx(struct tls_context *tls_ctx); +void tls_sw_release_resources_tx(struct sock *sk); +void tls_sw_free_ctx_tx(struct tls_context *tls_ctx); void tls_sw_free_resources_rx(struct sock *sk); void tls_sw_release_resources_rx(struct sock *sk); +void tls_sw_free_ctx_rx(struct tls_context *tls_ctx); int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); bool tls_sw_stream_read(const struct sock *sk); @@ -334,13 +366,9 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); -int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tls_device_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); -void tls_device_free_resources_tx(struct sock *sk); -void tls_device_init(void); -void tls_device_cleanup(void); int tls_tx_records(struct sock *sk, int flags); struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, @@ -389,6 +417,23 @@ static inline bool is_tx_ready(struct tls_sw_context_tx *ctx) return READ_ONCE(rec->tx_ready); } +static inline u16 tls_user_config(struct tls_context *ctx, bool tx) +{ + u16 config = tx ? ctx->tx_conf : ctx->rx_conf; + + switch (config) { + case TLS_BASE: + return TLS_CONF_BASE; + case TLS_SW: + return TLS_CONF_SW; + case TLS_HW: + return TLS_CONF_HW; + case TLS_HW_RECORD: + return TLS_CONF_HW_RECORD; + } + return 0; +} + struct sk_buff * tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, struct sk_buff *skb); @@ -427,23 +472,22 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); - return icsk->icsk_ulp_data; + /* Use RCU on icsk_ulp_data only for sock diag code, + * TLS data path doesn't need rcu_dereference(). 
+ */ + return (__force void *)icsk->icsk_ulp_data; } static inline void tls_advance_record_sn(struct sock *sk, - struct cipher_context *ctx, - int version) + struct tls_prot_info *prot, + struct cipher_context *ctx) { - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_prot_info *prot = &tls_ctx->prot_info; - if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size)) tls_err_abort(sk, EBADMSG); - if (version != TLS_1_3_VERSION) { + if (prot->version != TLS_1_3_VERSION) tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, prot->iv_size); - } } static inline void tls_fill_prepend(struct tls_context *ctx, @@ -545,6 +589,23 @@ tls_offload_ctx_rx(const struct tls_context *tls_ctx) return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx; } +#if IS_ENABLED(CONFIG_TLS_DEVICE) +static inline void *__tls_driver_ctx(struct tls_context *tls_ctx, + enum tls_offload_ctx_dir direction) +{ + if (direction == TLS_OFFLOAD_CTX_DIR_TX) + return tls_offload_ctx_tx(tls_ctx)->driver_state; + else + return tls_offload_ctx_rx(tls_ctx)->driver_state; +} + +static inline void * +tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction) +{ + return __tls_driver_ctx(tls_get_ctx(sk), direction); +} +#endif + /* The TLS context is valid until sk_destruct is called */ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) { @@ -554,14 +615,39 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1); } +static inline void +tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + + tls_offload_ctx_rx(tls_ctx)->resync_type = type; +} + +static inline void tls_offload_tx_resync_request(struct sock *sk) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + + WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags)); +} + +/* Driver's seq tracking has to be disabled until resync succeeded */ +static inline bool tls_offload_tx_resync_pending(struct sock *sk) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + bool ret; + + ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags); + smp_mb__after_atomic(); + return ret; +} int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, unsigned char *record_type); void tls_register_device(struct tls_device *device); void tls_unregister_device(struct tls_device *device); -int tls_device_decrypted(struct sock *sk, struct sk_buff *skb); int decrypt_skb(struct sock *sk, struct sk_buff *skb, struct scatterlist *sgout); +struct sk_buff *tls_encrypt_skb(struct sk_buff *skb); struct sk_buff *tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, @@ -571,9 +657,40 @@ int tls_sw_fallback_init(struct sock *sk, struct tls_offload_context_tx *offload_ctx, struct tls_crypto_info *crypto_info); +#ifdef CONFIG_TLS_DEVICE +void tls_device_init(void); +void tls_device_cleanup(void); +int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); +void tls_device_free_resources_tx(struct sock *sk); int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx); - void tls_device_offload_cleanup_rx(struct sock *sk); -void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn); +void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq); +int tls_device_decrypted(struct sock *sk, struct sk_buff *skb); +#else +static inline void tls_device_init(void) {} +static inline void tls_device_cleanup(void) {} +static inline int 
+tls_set_device_offload(struct sock *sk, struct tls_context *ctx) +{ + return -EOPNOTSUPP; +} + +static inline void tls_device_free_resources_tx(struct sock *sk) {} + +static inline int +tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) +{ + return -EOPNOTSUPP; +} + +static inline void tls_device_offload_cleanup_rx(struct sock *sk) {} +static inline void +tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {} + +static inline int tls_device_decrypted(struct sock *sk, struct sk_buff *skb) +{ + return 0; +} +#endif #endif /* _TLS_OFFLOAD_H */ diff --git a/include/net/udp.h b/include/net/udp.h index 79d141d2103b..bad74f780831 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -480,7 +480,7 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk, * CB fragment */ segs = __skb_gso_segment(skb, features, false); - if (unlikely(IS_ERR_OR_NULL(segs))) { + if (IS_ERR_OR_NULL(segs)) { int segs_nr = skb_shinfo(skb)->gso_segs; atomic_add(segs_nr, &sk->sk_drops); diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 83b5999a2587..373aadcfea21 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -197,6 +197,7 @@ struct vxlan_rdst { u8 offloaded:1; __be32 remote_vni; u32 remote_ifindex; + struct net_device *remote_dev; struct list_head list; struct rcu_head rcu; struct dst_cache dst_cache; @@ -242,7 +243,7 @@ struct vxlan_dev { struct vxlan_rdst default_dst; /* default destination */ struct timer_list age_timer; - spinlock_t hash_lock; + spinlock_t hash_lock[FDB_HASH_SIZE]; unsigned int addrcnt; struct gro_cells gro_cells; @@ -391,7 +392,7 @@ static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) if (ipa->sa.sa_family == AF_INET6) return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr); else - return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); + return ipv4_is_multicast(ipa->sin.sin_addr.s_addr); } #else /* !IS_ENABLED(CONFIG_IPV6) */ @@ -403,7 +404,7 @@ static inline bool vxlan_addr_any(const union vxlan_addr *ipa) static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) { - return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); + return ipv4_is_multicast(ipa->sin.sin_addr.s_addr); } #endif /* IS_ENABLED(CONFIG_IPV6) */ diff --git a/include/net/xdp.h b/include/net/xdp.h index 8e0deddef35c..40c6d3398458 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -129,6 +129,21 @@ void xdp_return_frame(struct xdp_frame *xdpf); void xdp_return_frame_rx_napi(struct xdp_frame *xdpf); void xdp_return_buff(struct xdp_buff *xdp); +/* When sending xdp_frame into the network stack, then there is no + * return point callback, which is needed to release e.g. DMA-mapping + * resources with page_pool. Thus, have explicit function to release + * frame resources. 
+ */ +void __xdp_release_frame(void *data, struct xdp_mem_info *mem); +static inline void xdp_release_frame(struct xdp_frame *xdpf) +{ + struct xdp_mem_info *mem = &xdpf->mem; + + /* Curr only page_pool needs this */ + if (mem->type == MEM_TYPE_PAGE_POOL) + __xdp_release_frame(xdpf->data, mem); +} + int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, u32 queue_index); void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq); diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h new file mode 100644 index 000000000000..6a8cba6ea79a --- /dev/null +++ b/include/net/xdp_priv.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_NET_XDP_PRIV_H__ +#define __LINUX_NET_XDP_PRIV_H__ + +#include <linux/rhashtable.h> + +/* Private to net/core/xdp.c, but used by trace/events/xdp.h */ +struct xdp_mem_allocator { + struct xdp_mem_info mem; + union { + void *allocator; + struct page_pool *page_pool; + struct zero_copy_allocator *zc_alloc; + }; + int disconnect_cnt; + unsigned long defer_start; + struct rhash_head node; + struct rcu_head rcu; + struct delayed_work defer_wq; + unsigned long defer_warn; +}; + +#endif /* __LINUX_NET_XDP_PRIV_H__ */ diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index d074b6d60f8a..c9398ce7960f 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -16,6 +16,13 @@ struct net_device; struct xsk_queue; +/* Masks for xdp_umem_page flags. + * The low 12-bits of the addr will be 0 since this is the page address, so we + * can use them for flags. + */ +#define XSK_NEXT_PG_CONTIG_SHIFT 0 +#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT) + struct xdp_umem_page { void *addr; dma_addr_t dma; @@ -27,6 +34,13 @@ struct xdp_umem_fq_reuse { u64 handles[]; }; +/* Flags for the umem flags field. + * + * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public + * flags. See inlude/uapi/include/linux/if_xdp.h. + */ +#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1) + struct xdp_umem { struct xsk_queue *fq; struct xsk_queue *cq; @@ -41,15 +55,27 @@ struct xdp_umem { struct work_struct work; struct page **pgs; u32 npgs; + u16 queue_id; + u8 need_wakeup; + u8 flags; int id; struct net_device *dev; struct xdp_umem_fq_reuse *fq_reuse; - u16 queue_id; bool zc; spinlock_t xsk_list_lock; struct list_head xsk_list; }; +/* Nodes are linked in the struct xdp_sock map_list field, and used to + * track which maps a certain socket reside in. + */ +struct xsk_map; +struct xsk_map_node { + struct list_head node; + struct xsk_map *map; + struct xdp_sock **map_entry; +}; + struct xdp_sock { /* struct sock must be the first member of struct xdp_sock */ struct sock sk; @@ -58,16 +84,26 @@ struct xdp_sock { struct xdp_umem *umem; struct list_head flush_node; u16 queue_id; - struct xsk_queue *tx ____cacheline_aligned_in_smp; - struct list_head list; bool zc; + enum { + XSK_READY = 0, + XSK_BOUND, + XSK_UNBOUND, + } state; /* Protects multiple processes in the control path */ struct mutex mutex; + struct xsk_queue *tx ____cacheline_aligned_in_smp; + struct list_head list; /* Mutual exclusion of NAPI TX thread and sendmsg error paths * in the SKB destructor callback. */ spinlock_t tx_completion_lock; + /* Protects generic receive. 
*/ + spinlock_t rx_lock; u64 rx_dropped; + struct list_head map_list; + /* Protects map_list */ + spinlock_t map_list_lock; }; struct xdp_buff; @@ -77,28 +113,71 @@ int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); void xsk_flush(struct xdp_sock *xs); bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs); /* Used from netdev driver */ +bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt); u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr); void xsk_umem_discard_addr(struct xdp_umem *umem); void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); -bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len); +bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc); void xsk_umem_consume_tx_done(struct xdp_umem *umem); struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries); struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem, struct xdp_umem_fq_reuse *newq); void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq); struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id); +void xsk_set_rx_need_wakeup(struct xdp_umem *umem); +void xsk_set_tx_need_wakeup(struct xdp_umem *umem); +void xsk_clear_rx_need_wakeup(struct xdp_umem *umem); +void xsk_clear_tx_need_wakeup(struct xdp_umem *umem); +bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem); + +void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, + struct xdp_sock **map_entry); +int xsk_map_inc(struct xsk_map *map); +void xsk_map_put(struct xsk_map *map); + +static inline u64 xsk_umem_extract_addr(u64 addr) +{ + return addr & XSK_UNALIGNED_BUF_ADDR_MASK; +} + +static inline u64 xsk_umem_extract_offset(u64 addr) +{ + return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT; +} + +static inline u64 xsk_umem_add_offset_to_addr(u64 addr) +{ + return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr); +} static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) { - return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1)); + unsigned long page_addr; + + addr = xsk_umem_add_offset_to_addr(addr); + page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr; + + return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK); } static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) { - return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); + addr = xsk_umem_add_offset_to_addr(addr); + + return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK); } /* Reuse-queue aware version of FILL queue helpers */ +static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt) +{ + struct xdp_umem_fq_reuse *rq = umem->fq_reuse; + + if (rq->length >= cnt) + return true; + + return xsk_umem_has_addrs(umem, cnt - rq->length); +} + static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) { struct xdp_umem_fq_reuse *rq = umem->fq_reuse; @@ -126,6 +205,19 @@ static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) rq->handles[rq->length++] = addr; } + +/* Handle the offset appropriately depending on aligned or unaligned mode. + * For unaligned mode, we store the offset in the upper 16-bits of the address. + * For aligned mode, we simply add the offset to the address. 
+ */ +static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address, + u64 offset) +{ + if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) + return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); + else + return address + offset; +} #else static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) { @@ -146,6 +238,11 @@ static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs) return false; } +static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt) +{ + return false; +} + static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr) { return NULL; @@ -159,8 +256,8 @@ static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries) { } -static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, - u32 *len) +static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, + struct xdp_desc *desc) { return false; } @@ -190,6 +287,21 @@ static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, return NULL; } +static inline u64 xsk_umem_extract_addr(u64 addr) +{ + return 0; +} + +static inline u64 xsk_umem_extract_offset(u64 addr) +{ + return 0; +} + +static inline u64 xsk_umem_add_offset_to_addr(u64 addr) +{ + return 0; +} + static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) { return NULL; @@ -200,6 +312,11 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) return 0; } +static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt) +{ + return false; +} + static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) { return NULL; @@ -213,6 +330,33 @@ static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) { } +static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) +{ + return false; +} + +static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle, + u64 offset) +{ + return 0; +} + #endif /* CONFIG_XDP_SOCKETS */ #endif /* _LINUX_XDP_SOCK_H */ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index a2907873ed56..aa08a7a5f6ac 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -346,22 +346,19 @@ void km_state_expired(struct xfrm_state *x, int hard, u32 portid); int __xfrm_state_delete(struct xfrm_state *x); struct xfrm_state_afinfo { - unsigned int family; - unsigned int proto; - __be16 eth_proto; - struct module *owner; - const struct xfrm_type *type_map[IPPROTO_MAX]; - const struct xfrm_type_offload *type_offload_map[IPPROTO_MAX]; - - int (*init_flags)(struct xfrm_state *x); - void (*init_tempsel)(struct xfrm_selector *sel, - const struct flowi *fl); - void (*init_temprop)(struct xfrm_state *x, - const struct xfrm_tmpl *tmpl, - const xfrm_address_t *daddr, - const xfrm_address_t *saddr); - int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); - int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); + u8 family; + u8 proto; + + const struct xfrm_type_offload *type_offload_esp; + + const struct xfrm_type *type_esp; + const struct xfrm_type *type_ipip; + const struct xfrm_type *type_ipip6; + const struct xfrm_type *type_comp; + const struct xfrm_type *type_ah; + const struct xfrm_type *type_routing; + const struct xfrm_type 
*type_dstopts; + int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*output_finish)(struct sock *sk, struct sk_buff *skb); int (*extract_input)(struct xfrm_state *x, @@ -407,12 +404,10 @@ struct xfrm_type { int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *); int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **); - /* Estimate maximal size of result of transformation of a dgram */ - u32 (*get_mtu)(struct xfrm_state *, int size); }; int xfrm_register_type(const struct xfrm_type *type, unsigned short family); -int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family); +void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family); struct xfrm_type_offload { char *description; @@ -424,7 +419,7 @@ struct xfrm_type_offload { }; int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family); -int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family); +void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family); static inline int xfrm_af2proto(unsigned int family) { @@ -988,7 +983,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); struct xfrm_if_parms { - char name[IFNAMSIZ]; /* name of XFRM device */ int link; /* ifindex of underlying L2 interface */ u32 if_id; /* interface identifyer */ }; @@ -996,7 +990,6 @@ struct xfrm_if_parms { struct xfrm_if { struct xfrm_if __rcu *next; /* next interface in list */ struct net_device *dev; /* virtual device associated with interface */ - struct net_device *phydev; /* physical device */ struct net *net; /* netns for packet i/o */ struct xfrm_if_parms p; /* interface parms */ @@ -1508,21 +1501,19 @@ struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark, u8 proto, unsigned short family); #ifdef CONFIG_XFRM_SUB_POLICY -int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, - unsigned short family, struct net *net); -int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, +void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, unsigned short family); +void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, + unsigned short family); #else -static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, - int n, unsigned short family, struct net *net) +static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s, + int n, unsigned short family) { - return -ENOSYS; } -static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, - int n, unsigned short family) +static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s, + int n, unsigned short family) { - return -ENOSYS; } #endif @@ -1551,7 +1542,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); int xfrm_init_replay(struct xfrm_state *x); -int xfrm_state_mtu(struct xfrm_state *x, int mtu); +u32 xfrm_state_mtu(struct xfrm_state *x, int mtu); int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload); int xfrm_init_state(struct xfrm_state *x); int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index 0f42a7b82d18..b7a8de88b3c0 100644 
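/*
 * Illustrative sketch, not part of the patch: with xfrm_unregister_type()
 * now returning void, a protocol module's exit path no longer checks a
 * result. "example_type" and the protocol/family values are placeholders;
 * assumes a .c file that includes <net/xfrm.h> and <linux/module.h>.
 */
static const struct xfrm_type example_type = {
	.owner	= THIS_MODULE,
	.proto	= IPPROTO_ESP,		/* placeholder protocol number */
};

static int __init example_xfrm_init(void)
{
	return xfrm_register_type(&example_type, AF_INET);
}

static void __exit example_xfrm_exit(void)
{
	xfrm_unregister_type(&example_type, AF_INET);	/* now returns void */
}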
--- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h @@ -36,7 +36,7 @@ struct config_t; struct net_device; /* dynamic device IDs for PCMCIA device drivers. See - * Documentation/pcmcia/driver.txt for details. + * Documentation/pcmcia/driver.rst for details. */ struct pcmcia_dynids { struct mutex lock; diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index 4039cb117733..7cf7dbbfa131 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h @@ -187,7 +187,7 @@ struct pcmcia_socket { unsigned int sysfs_events; /* For the non-trivial interaction between these locks, - * see Documentation/pcmcia/locking.txt */ + * see Documentation/pcmcia/locking.rst */ struct mutex skt_mutex; struct mutex ops_mutex; diff --git a/include/rdma/ib.h b/include/rdma/ib.h index 4f385ec54f80..fe2fc9e91588 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -36,6 +36,8 @@ #include <linux/types.h> #include <linux/sched.h> #include <linux/cred.h> +#include <linux/uaccess.h> +#include <linux/fs.h> struct ib_addr { union { diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 040d853077c6..a91b2af64ec4 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -42,11 +42,10 @@ struct ib_ucontext; struct ib_umem_odp; struct ib_umem { - struct ib_ucontext *context; + struct ib_device *ibdev; struct mm_struct *owning_mm; size_t length; unsigned long address; - int page_shift; u32 writable : 1; u32 is_odp : 1; struct work_struct work; @@ -58,24 +57,14 @@ struct ib_umem { /* Returns the offset of the umem start relative to the first page. */ static inline int ib_umem_offset(struct ib_umem *umem) { - return umem->address & (BIT(umem->page_shift) - 1); -} - -/* Returns the first page of an ODP umem. */ -static inline unsigned long ib_umem_start(struct ib_umem *umem) -{ - return umem->address - ib_umem_offset(umem); -} - -/* Returns the address of the page after the last one of an ODP umem. */ -static inline unsigned long ib_umem_end(struct ib_umem *umem) -{ - return ALIGN(umem->address + umem->length, BIT(umem->page_shift)); + return umem->address & ~PAGE_MASK; } static inline size_t ib_umem_num_pages(struct ib_umem *umem) { - return (ib_umem_end(umem) - ib_umem_start(umem)) >> umem->page_shift; + return (ALIGN(umem->address + umem->length, PAGE_SIZE) - + ALIGN_DOWN(umem->address, PAGE_SIZE)) >> + PAGE_SHIFT; } #ifdef CONFIG_INFINIBAND_USER_MEM diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h index eeec4e53c448..253df1a1fa54 100644 --- a/include/rdma/ib_umem_odp.h +++ b/include/rdma/ib_umem_odp.h @@ -37,11 +37,6 @@ #include <rdma/ib_verbs.h> #include <linux/interval_tree.h> -struct umem_odp_node { - u64 __subtree_last; - struct rb_node rb; -}; - struct ib_umem_odp { struct ib_umem umem; struct ib_ucontext_per_mm *per_mm; @@ -72,10 +67,19 @@ struct ib_umem_odp { int npages; /* Tree tracking */ - struct umem_odp_node interval_tree; + struct interval_tree_node interval_tree; + + /* + * An implicit odp umem cannot be DMA mapped, has 0 length, and serves + * only as an anchor for the driver to hold onto the per_mm. FIXME: + * This should be removed and drivers should work with the per_mm + * directly. + */ + bool is_implicit_odp; struct completion notifier_completion; int dying; + unsigned int page_shift; struct work_struct work; }; @@ -84,6 +88,24 @@ static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) return container_of(umem, struct ib_umem_odp, umem); } +/* Returns the first page of an ODP umem. 
*/ +static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp) +{ + return umem_odp->interval_tree.start; +} + +/* Returns the address of the page after the last one of an ODP umem. */ +static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp) +{ + return umem_odp->interval_tree.last + 1; +} + +static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp) +{ + return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >> + umem_odp->page_shift; +} + /* * The lower 2 bits of the DMA address signal the R/W permissions for * the entry. To upgrade the permissions, provide the appropriate @@ -100,25 +122,20 @@ static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING struct ib_ucontext_per_mm { - struct ib_ucontext *context; - struct mm_struct *mm; + struct mmu_notifier mn; struct pid *tgid; - bool active; struct rb_root_cached umem_tree; /* Protects umem_tree */ struct rw_semaphore umem_rwsem; - - struct mmu_notifier mn; - unsigned int odp_mrs_count; - - struct list_head ucontext_list; - struct rcu_head rcu; }; -int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access); -struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root_umem, - unsigned long addr, size_t size); +struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, + size_t size, int access); +struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata, + int access); +struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, + unsigned long addr, size_t size); void ib_umem_odp_release(struct ib_umem_odp *umem_odp); int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset, @@ -143,8 +160,17 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root, * Find first region intersecting with address range. * Return NULL if not found */ -struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root, - u64 addr, u64 length); +static inline struct ib_umem_odp * +rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length) +{ + struct interval_tree_node *node; + + node = interval_tree_iter_first(root, addr, addr + length - 1); + if (!node) + return NULL; + return container_of(node, struct ib_umem_odp, interval_tree); + +} static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp, unsigned long mmu_seq) @@ -165,9 +191,11 @@ static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp, #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ -static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access) +static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, + unsigned long addr, + size_t size, int access) { - return -EINVAL; + return ERR_PTR(-EINVAL); } static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {} diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 54873085f2da..e7e733add99f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -61,8 +61,11 @@ #include <linux/cgroup_rdma.h> #include <linux/irqflags.h> #include <linux/preempt.h> +#include <linux/dim.h> #include <uapi/rdma/ib_user_verbs.h> +#include <rdma/rdma_counter.h> #include <rdma/restrack.h> +#include <rdma/signature.h> #include <uapi/rdma/rdma_user_ioctl.h> #include <uapi/rdma/ib_user_ioctl_verbs.h> @@ -95,15 +98,54 @@ void ibdev_info(const struct ib_device *ibdev, const char *format, ...); #if defined(CONFIG_DYNAMIC_DEBUG) #define ibdev_dbg(__dev, format, args...) 
\ dynamic_ibdev_dbg(__dev, format, ##args) -#elif defined(DEBUG) -#define ibdev_dbg(__dev, format, args...) \ - ibdev_printk(KERN_DEBUG, __dev, format, ##args) #else __printf(2, 3) __cold static inline void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {} #endif +#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + ibdev_level(ibdev, fmt, ##__VA_ARGS__); \ +} while (0) + +#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_alert_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_crit_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_err_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_warn_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_notice_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__) +#define ibdev_info_ratelimited(ibdev, fmt, ...) \ + ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__) + +#if defined(CONFIG_DYNAMIC_DEBUG) +/* descriptor check is first to prevent flooding with "callbacks suppressed" */ +#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \ + __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \ + ##__VA_ARGS__); \ +} while (0) +#else +__printf(2, 3) __cold +static inline +void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {} +#endif + union ib_gid { u8 raw[16]; struct { @@ -132,17 +174,6 @@ struct ib_gid_attr { u8 port_num; }; -enum rdma_node_type { - /* IB values map to NodeInfo:NodeType. 
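/*
 * Illustrative sketch, not part of the patch: the new ratelimited wrappers
 * take the ib_device directly, which suits hot error paths. The QP number
 * and error code are assumed caller-provided.
 */
static void example_report_cqe_error(struct ib_device *ibdev, u32 qpn, int err)
{
	ibdev_warn_ratelimited(ibdev, "QP %u: completion error %d\n", qpn, err);
}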
*/ - RDMA_NODE_IB_CA = 1, - RDMA_NODE_IB_SWITCH, - RDMA_NODE_IB_ROUTER, - RDMA_NODE_RNIC, - RDMA_NODE_USNIC, - RDMA_NODE_USNIC_UDP, - RDMA_NODE_UNSPECIFIED, -}; - enum { /* set the local administered indication */ IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2, @@ -164,7 +195,7 @@ enum rdma_protocol_type { }; __attribute_const__ enum rdma_transport_type -rdma_node_get_transport(enum rdma_node_type node_type); +rdma_node_get_transport(unsigned int node_type); enum rdma_network_type { RDMA_NETWORK_IB, @@ -263,7 +294,7 @@ enum ib_device_cap_flags { */ IB_DEVICE_CROSS_CHANNEL = (1 << 27), IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), - IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), + IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30), IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), IB_DEVICE_SG_GAPS_REG = (1ULL << 32), IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), @@ -275,17 +306,6 @@ enum ib_device_cap_flags { IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37), }; -enum ib_signature_prot_cap { - IB_PROT_T10DIF_TYPE_1 = 1, - IB_PROT_T10DIF_TYPE_2 = 1 << 1, - IB_PROT_T10DIF_TYPE_3 = 1 << 2, -}; - -enum ib_signature_guard_cap { - IB_GUARD_T10DIF_CRC = 1, - IB_GUARD_T10DIF_CSUM = 1 << 1, -}; - enum ib_atomic_cap { IB_ATOMIC_NONE, IB_ATOMIC_HCA, @@ -327,8 +347,8 @@ struct ib_rss_caps { }; enum ib_tm_cap_flags { - /* Support tag matching on RC transport */ - IB_TM_CAP_RC = 1 << 0, + /* Support tag matching with rendezvous offload for RC transport */ + IB_TM_CAP_RNDV_RC = 1 << 0, }; struct ib_tm_caps { @@ -346,7 +366,7 @@ struct ib_tm_caps { struct ib_cq_init_attr { unsigned int cqe; - int comp_vector; + u32 comp_vector; u32 flags; }; @@ -411,6 +431,7 @@ struct ib_device_attr { int max_srq_wr; int max_srq_sge; unsigned int max_fast_reg_page_list_len; + unsigned int max_pi_fast_reg_page_list_len; u16 max_pkeys; u8 local_ca_ack_delay; int sig_prot_cap; @@ -469,6 +490,16 @@ enum ib_port_state { IB_PORT_ACTIVE_DEFER = 5 }; +enum ib_port_phys_state { + IB_PORT_PHYS_STATE_SLEEP = 1, + IB_PORT_PHYS_STATE_POLLING = 2, + IB_PORT_PHYS_STATE_DISABLED = 3, + IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, + IB_PORT_PHYS_STATE_LINK_UP = 5, + IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, + IB_PORT_PHYS_STATE_PHY_TEST = 7, +}; + enum ib_port_width { IB_WIDTH_1X = 1, IB_WIDTH_2X = 16, @@ -796,118 +827,26 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); * enum ib_mr_type - memory region type * @IB_MR_TYPE_MEM_REG: memory region that is used for * normal registration - * @IB_MR_TYPE_SIGNATURE: memory region that is used for - * signature operations (data-integrity - * capable regions) * @IB_MR_TYPE_SG_GAPS: memory region that is capable to * register any arbitrary sg lists (without * the normal mr constraints - see * ib_map_mr_sg) + * @IB_MR_TYPE_DM: memory region that is used for device + * memory registration + * @IB_MR_TYPE_USER: memory region that is used for the user-space + * application + * @IB_MR_TYPE_DMA: memory region that is used for DMA operations + * without address translations (VA=PA) + * @IB_MR_TYPE_INTEGRITY: memory region that is used for + * data integrity operations */ enum ib_mr_type { IB_MR_TYPE_MEM_REG, - IB_MR_TYPE_SIGNATURE, IB_MR_TYPE_SG_GAPS, -}; - -/** - * Signature types - * IB_SIG_TYPE_NONE: Unprotected. - * IB_SIG_TYPE_T10_DIF: Type T10-DIF - */ -enum ib_signature_type { - IB_SIG_TYPE_NONE, - IB_SIG_TYPE_T10_DIF, -}; - -/** - * Signature T10-DIF block-guard types - * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. - * IB_T10DIF_CSUM: Corresponds to IP checksum rules. 
- */ -enum ib_t10_dif_bg_type { - IB_T10DIF_CRC, - IB_T10DIF_CSUM -}; - -/** - * struct ib_t10_dif_domain - Parameters specific for T10-DIF - * domain. - * @bg_type: T10-DIF block guard type (CRC|CSUM) - * @pi_interval: protection information interval. - * @bg: seed of guard computation. - * @app_tag: application tag of guard block - * @ref_tag: initial guard block reference tag. - * @ref_remap: Indicate wethear the reftag increments each block - * @app_escape: Indicate to skip block check if apptag=0xffff - * @ref_escape: Indicate to skip block check if reftag=0xffffffff - * @apptag_check_mask: check bitmask of application tag. - */ -struct ib_t10_dif_domain { - enum ib_t10_dif_bg_type bg_type; - u16 pi_interval; - u16 bg; - u16 app_tag; - u32 ref_tag; - bool ref_remap; - bool app_escape; - bool ref_escape; - u16 apptag_check_mask; -}; - -/** - * struct ib_sig_domain - Parameters for signature domain - * @sig_type: specific signauture type - * @sig: union of all signature domain attributes that may - * be used to set domain layout. - */ -struct ib_sig_domain { - enum ib_signature_type sig_type; - union { - struct ib_t10_dif_domain dif; - } sig; -}; - -/** - * struct ib_sig_attrs - Parameters for signature handover operation - * @check_mask: bitmask for signature byte check (8 bytes) - * @mem: memory domain layout desciptor. - * @wire: wire domain layout desciptor. - */ -struct ib_sig_attrs { - u8 check_mask; - struct ib_sig_domain mem; - struct ib_sig_domain wire; -}; - -enum ib_sig_err_type { - IB_SIG_BAD_GUARD, - IB_SIG_BAD_REFTAG, - IB_SIG_BAD_APPTAG, -}; - -/** - * Signature check masks (8 bytes in total) according to the T10-PI standard: - * -------- -------- ------------ - * | GUARD | APPTAG | REFTAG | - * | 2B | 2B | 4B | - * -------- -------- ------------ - */ -enum { - IB_SIG_CHECK_GUARD = 0xc0, - IB_SIG_CHECK_APPTAG = 0x30, - IB_SIG_CHECK_REFTAG = 0x0f, -}; - -/** - * struct ib_sig_err - signature error descriptor - */ -struct ib_sig_err { - enum ib_sig_err_type err_type; - u32 expected; - u32 actual; - u64 sig_err_offset; - u32 key; + IB_MR_TYPE_DM, + IB_MR_TYPE_USER, + IB_MR_TYPE_DMA, + IB_MR_TYPE_INTEGRITY, }; enum ib_mr_status_check { @@ -1164,7 +1103,7 @@ enum ib_qp_create_flags { IB_QP_CREATE_MANAGED_SEND = 1 << 3, IB_QP_CREATE_MANAGED_RECV = 1 << 4, IB_QP_CREATE_NETIF_QP = 1 << 5, - IB_QP_CREATE_SIGNATURE_EN = 1 << 6, + IB_QP_CREATE_INTEGRITY_EN = 1 << 6, /* FREE = 1 << 7, */ IB_QP_CREATE_SCATTER_FCS = 1 << 8, IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9, @@ -1343,7 +1282,7 @@ enum ib_wr_opcode { /* These are kernel only and can not be issued by userspace */ IB_WR_REG_MR = 0x20, - IB_WR_REG_SIG_MR, + IB_WR_REG_MR_INTEGRITY, /* reserve values for low level drivers' internal use. * These values will not be used at all in the ib core layer. 
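/*
 * Illustrative sketch, not part of the patch: allocating a PI-capable MR with
 * ib_alloc_mr_integrity() and mapping separate data and metadata scatterlists
 * with ib_map_mr_sg_pi(), both declared further down in this header. The pd
 * and scatterlists are assumed caller-provided; the MR would then be posted
 * with a registration WR using the new IB_WR_REG_MR_INTEGRITY opcode.
 */
static struct ib_mr *example_alloc_pi_mr(struct ib_pd *pd,
					 struct scatterlist *data_sg, int data_nents,
					 struct scatterlist *meta_sg, int meta_nents)
{
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr_integrity(pd, data_nents, meta_nents);
	if (IS_ERR(mr))
		return mr;

	/* mr->sig_attrs is only valid for IB_MR_TYPE_INTEGRITY MRs. */
	n = ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
			    meta_sg, meta_nents, NULL, PAGE_SIZE);
	if (n < data_nents + meta_nents) {
		ib_dereg_mr(mr);
		return ERR_PTR(n < 0 ? n : -EINVAL);
	}

	return mr;
}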
@@ -1453,20 +1392,6 @@ static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr) return container_of(wr, struct ib_reg_wr, wr); } -struct ib_sig_handover_wr { - struct ib_send_wr wr; - struct ib_sig_attrs *sig_attrs; - struct ib_mr *sig_mr; - int access_flags; - struct ib_sge *prot; -}; - -static inline const struct ib_sig_handover_wr * -sig_handover_wr(const struct ib_send_wr *wr) -{ - return container_of(wr, struct ib_sig_handover_wr, wr); -} - struct ib_recv_wr { struct ib_recv_wr *next; union { @@ -1541,11 +1466,6 @@ struct ib_ucontext { bool cleanup_retryable; - void (*invalidate_range)(struct ib_umem_odp *umem_odp, - unsigned long start, unsigned long end); - struct mutex per_mm_list_lock; - struct list_head per_mm_list; - struct ib_rdmacg_object cg_obj; /* * Implementation details of the RDMA core, don't use in drivers: @@ -1634,6 +1554,7 @@ struct ib_cq { struct work_struct work; }; struct workqueue_struct *comp_wq; + struct dim *dim; /* * Implementation details of the RDMA core, don't use in drivers: */ @@ -1818,10 +1739,14 @@ struct ib_qp { struct ib_qp_security *qp_sec; u8 port; + bool integrity_en; /* * Implementation details of the RDMA core, don't use in drivers: */ struct rdma_restrack_entry res; + + /* The counter the qp is bind to */ + struct rdma_counter *counter; }; struct ib_dm { @@ -1840,6 +1765,7 @@ struct ib_mr { u64 iova; u64 length; unsigned int page_size; + enum ib_mr_type type; bool need_inval; union { struct ib_uobject *uobject; /* user */ @@ -1847,7 +1773,7 @@ struct ib_mr { }; struct ib_dm *dm; - + struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */ /* * Implementation details of the RDMA core, don't use in drivers: */ @@ -2243,6 +2169,8 @@ struct ib_port_data { spinlock_t netdev_lock; struct net_device __rcu *netdev; struct hlist_node ndev_hash_link; + struct rdma_port_counter port_counter; + struct rdma_hw_stats *hw_stats; }; /* rdma netdev type - specifies protocol type */ @@ -2329,6 +2257,11 @@ struct iw_cm_conn_param; * need to define the supported operations, otherwise they will be set to null. 
*/ struct ib_device_ops { + struct module *owner; + enum rdma_driver_id driver_id; + u32 uverbs_abi_ver; + unsigned int uverbs_no_driver_id_binding:1; + int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr, const struct ib_send_wr **bad_send_wr); int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, @@ -2454,11 +2387,10 @@ struct ib_device_ops { int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata); - struct ib_cq *(*create_cq)(struct ib_device *device, - const struct ib_cq_init_attr *attr, - struct ib_udata *udata); + int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata); int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); - int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); + void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, @@ -2470,6 +2402,9 @@ struct ib_device_ops { int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata); struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg, struct ib_udata *udata); + struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd, + u32 max_num_data_sg, + u32 max_num_meta_sg); int (*advise_mr)(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, u32 flags, struct ib_sge *sg_list, u32 num_sge, @@ -2487,6 +2422,8 @@ struct ib_device_ops { u64 iova); int (*unmap_fmr)(struct list_head *fmr_list); int (*dealloc_fmr)(struct ib_fmr *fmr); + void (*invalidate_range)(struct ib_umem_odp *umem_odp, + unsigned long start, unsigned long end); int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device, @@ -2516,7 +2453,7 @@ struct ib_device_ops { struct ib_wq *(*create_wq)(struct ib_pd *pd, struct ib_wq_init_attr *init_attr, struct ib_udata *udata); - int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); + void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr, u32 wq_attr_mask, struct ib_udata *udata); struct ib_rwq_ind_table *(*create_rwq_ind_table)( @@ -2538,6 +2475,11 @@ struct ib_device_ops { int (*read_counters)(struct ib_counters *counters, struct ib_counters_read_attr *counters_read_attr, struct uverbs_attr_bundle *attrs); + int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg, + int data_sg_nents, unsigned int *data_sg_offset, + struct scatterlist *meta_sg, int meta_sg_nents, + unsigned int *meta_sg_offset); + /** * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the * driver initialized data. The struct is kfree()'ed by the sysfs @@ -2595,8 +2537,34 @@ struct ib_device_ops { u8 pdata_len); int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog); int (*iw_destroy_listen)(struct iw_cm_id *cm_id); + /** + * counter_bind_qp - Bind a QP to a counter. + * @counter - The counter to be bound. 
If counter->id is zero then + * the driver needs to allocate a new counter and set counter->id + */ + int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp); + /** + * counter_unbind_qp - Unbind the qp from the dynamically-allocated + * counter and bind it onto the default one + */ + int (*counter_unbind_qp)(struct ib_qp *qp); + /** + * counter_dealloc -De-allocate the hw counter + */ + int (*counter_dealloc)(struct rdma_counter *counter); + /** + * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in + * the driver initialized data. + */ + struct rdma_hw_stats *(*counter_alloc_stats)( + struct rdma_counter *counter); + /** + * counter_update_stats - Query the stats value of this counter + */ + int (*counter_update_stats)(struct rdma_counter *counter); DECLARE_RDMA_OBJ_SIZE(ib_ah); + DECLARE_RDMA_OBJ_SIZE(ib_cq); DECLARE_RDMA_OBJ_SIZE(ib_pd); DECLARE_RDMA_OBJ_SIZE(ib_srq); DECLARE_RDMA_OBJ_SIZE(ib_ucontext); @@ -2636,7 +2604,6 @@ struct ib_device { int num_comp_vectors; - struct module *owner; union { struct device dev; struct ib_core_device coredev; @@ -2648,7 +2615,6 @@ struct ib_device { */ const struct attribute_group *groups[3]; - int uverbs_abi_ver; u64 uverbs_cmd_mask; u64 uverbs_ex_cmd_mask; @@ -2658,6 +2624,8 @@ struct ib_device { u16 is_switch:1; /* Indicates kernel verbs support, should not be used in drivers */ u16 kverbs_provider:1; + /* CQ adaptive moderation (RDMA DIM) */ + u16 use_cq_dim:1; u8 node_type; u8 phys_port_cnt; struct ib_device_attr attrs; @@ -2672,7 +2640,6 @@ struct ib_device { struct rdma_restrack_root *res; const struct uapi_definition *driver_def; - enum rdma_driver_id driver_id; /* * Positive refcount indicates that the device is currently @@ -2694,11 +2661,15 @@ struct ib_device { u32 iw_driver_flags; }; +struct ib_client_nl_info; struct ib_client { const char *name; void (*add) (struct ib_device *); void (*remove)(struct ib_device *, void *client_data); void (*rename)(struct ib_device *dev, void *client_data); + int (*get_nl_info)(struct ib_device *ibdev, void *client_data, + struct ib_client_nl_info *res); + int (*get_global_nl_info)(struct ib_client_nl_info *res); /* Returns the net_dev belonging to this ib_client and matching the * given parameters. @@ -2722,7 +2693,9 @@ struct ib_client { const union ib_gid *gid, const struct sockaddr *addr, void *client_data); - struct list_head list; + + refcount_t uses; + struct completion uses_zero; u32 client_id; /* kverbs are not required by the client */ @@ -3786,6 +3759,25 @@ static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, NULL); } +struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, + int nr_cqe, enum ib_poll_context poll_ctx, + const char *caller); + +/** + * ib_alloc_cq_any: Allocate kernel CQ + * @dev: The IB device + * @private: Private data attached to the CQE + * @nr_cqe: Number of CQEs in the CQ + * @poll_ctx: Context used for polling the CQ + */ +static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, + void *private, int nr_cqe, + enum ib_poll_context poll_ctx) +{ + return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx, + KBUILD_MODNAME); +} + /** * ib_free_cq_user - Free kernel/user CQ * @cq: The CQ to free @@ -3859,9 +3851,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata); * * NOTE: for user cq use ib_destroy_cq_user with valid udata! 
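/*
 * Illustrative sketch, not part of the patch: a kernel ULP allocating a CQ
 * with the new ib_alloc_cq_any() wrapper, which records KBUILD_MODNAME as
 * the caller and lets the core pick the completion vector. The device and
 * context pointers and the CQE count are assumed caller-provided.
 */
static struct ib_cq *example_create_cq(struct ib_device *dev, void *ctx,
				       int nr_cqe)
{
	/* Returns an ERR_PTR() on failure; pair with ib_free_cq() on teardown. */
	return ib_alloc_cq_any(dev, ctx, nr_cqe, IB_POLL_SOFTIRQ);
}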
*/ -static inline int ib_destroy_cq(struct ib_cq *cq) +static inline void ib_destroy_cq(struct ib_cq *cq) { - return ib_destroy_cq_user(cq, NULL); + ib_destroy_cq_user(cq, NULL); } /** @@ -4148,6 +4140,10 @@ static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd, return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL); } +struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, + u32 max_num_data_sg, + u32 max_num_meta_sg); + /** * ib_update_fast_reg_key - updates the key portion of the fast_reg MR * R_Key and L_Key. @@ -4332,6 +4328,10 @@ int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset, unsigned int page_size); +int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg, + int data_sg_nents, unsigned int *data_sg_offset, + struct scatterlist *meta_sg, int meta_sg_nents, + unsigned int *meta_sg_offset, unsigned int page_size); static inline int ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h index b9fee7feeeb5..c89535047c42 100644 --- a/include/rdma/iw_portmap.h +++ b/include/rdma/iw_portmap.h @@ -33,6 +33,9 @@ #ifndef _IW_PORTMAP_H #define _IW_PORTMAP_H +#include <linux/socket.h> +#include <linux/netlink.h> + #define IWPM_ULIBNAME_SIZE 32 #define IWPM_DEVNAME_SIZE 32 #define IWPM_IFNAME_SIZE 16 diff --git a/include/rdma/mr_pool.h b/include/rdma/mr_pool.h index 83763ef82354..e77123bcb43b 100644 --- a/include/rdma/mr_pool.h +++ b/include/rdma/mr_pool.h @@ -11,7 +11,7 @@ struct ib_mr *ib_mr_pool_get(struct ib_qp *qp, struct list_head *list); void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr); int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr, - enum ib_mr_type type, u32 max_num_sg); + enum ib_mr_type type, u32 max_num_sg, u32 max_num_meta_sg); void ib_mr_pool_destroy(struct ib_qp *qp, struct list_head *list); #endif /* _RDMA_MR_POOL_H */ diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index 7147a9263011..bdbfe25d3854 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h @@ -33,6 +33,8 @@ #if !defined(OPA_PORT_INFO_H) #define OPA_PORT_INFO_H +#include <rdma/opa_smi.h> + #define OPA_PORT_LINK_MODE_NOP 0 /* No change */ #define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */ diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h new file mode 100644 index 000000000000..eb99856e8b30 --- /dev/null +++ b/include/rdma/rdma_counter.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* + * Copyright (c) 2019 Mellanox Technologies. All rights reserved. 
+ */ + +#ifndef _RDMA_COUNTER_H_ +#define _RDMA_COUNTER_H_ + +#include <linux/mutex.h> +#include <linux/pid_namespace.h> + +#include <rdma/restrack.h> +#include <rdma/rdma_netlink.h> + +struct ib_device; +struct ib_qp; + +struct auto_mode_param { + int qp_type; +}; + +struct rdma_counter_mode { + enum rdma_nl_counter_mode mode; + enum rdma_nl_counter_mask mask; + struct auto_mode_param param; +}; + +struct rdma_port_counter { + struct rdma_counter_mode mode; + struct rdma_hw_stats *hstats; + unsigned int num_counters; + struct mutex lock; +}; + +struct rdma_counter { + struct rdma_restrack_entry res; + struct ib_device *device; + uint32_t id; + struct kref kref; + struct rdma_counter_mode mode; + struct mutex lock; + struct rdma_hw_stats *stats; + u8 port; +}; + +void rdma_counter_init(struct ib_device *dev); +void rdma_counter_release(struct ib_device *dev); +int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port, + bool on, enum rdma_nl_counter_mask mask); +int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port); +int rdma_counter_unbind_qp(struct ib_qp *qp, bool force); + +int rdma_counter_query_stats(struct rdma_counter *counter); +u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index); +int rdma_counter_bind_qpn(struct ib_device *dev, u8 port, + u32 qp_num, u32 counter_id); +int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, + u32 qp_num, u32 *counter_id); +int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port, + u32 qp_num, u32 counter_id); +int rdma_counter_get_mode(struct ib_device *dev, u8 port, + enum rdma_nl_counter_mode *mode, + enum rdma_nl_counter_mask *mask); + +#endif /* _RDMA_COUNTER_H_ */ diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index 10732ab31ba2..ab22759de7ea 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h @@ -6,6 +6,12 @@ #include <linux/netlink.h> #include <uapi/rdma/rdma_netlink.h> +enum { + RDMA_NLDEV_ATTR_EMPTY_STRING = 1, + RDMA_NLDEV_ATTR_ENTRY_STRLEN = 16, + RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE = 32, +}; + struct rdma_nl_cbs { int (*doit)(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack); @@ -70,28 +76,32 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, /** * Send the supplied skb to a specific userspace PID. + * @net: Net namespace in which to send the skb * @skb: The netlink skb * @pid: Userspace netlink process ID * Returns 0 on success or a negative error code. */ -int rdma_nl_unicast(struct sk_buff *skb, u32 pid); +int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid); /** * Send, with wait/1 retry, the supplied skb to a specific userspace PID. + * @net: Net namespace in which to send the skb * @skb: The netlink skb * @pid: Userspace netlink process ID * Returns 0 on success or a negative error code. */ -int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid); +int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid); /** * Send the supplied skb to a netlink group. + * @net: Net namespace in which to send the skb * @skb: The netlink skb * @group: Netlink group ID * @flags: allocation flags * Returns 0 on success or a negative error code. 
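/*
 * Illustrative sketch, not part of the patch, showing the call signatures of
 * the manual-mode counter helpers declared above: bind a QP number to a
 * newly allocated statistics counter, then unbind it again. dev, port and
 * qpn are assumed inputs.
 */
static int example_counter_roundtrip(struct ib_device *dev, u8 port, u32 qpn)
{
	u32 counter_id;
	int ret;

	ret = rdma_counter_bind_qpn_alloc(dev, port, qpn, &counter_id);
	if (ret)
		return ret;

	/* ... hardware statistics now accumulate on counter_id ... */

	return rdma_counter_unbind_qpn(dev, port, qpn, counter_id);
}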
*/ -int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags); +int rdma_nl_multicast(struct net *net, struct sk_buff *skb, + unsigned int group, gfp_t flags); /** * Check if there are any listeners to the netlink group @@ -110,4 +120,6 @@ void rdma_link_register(struct rdma_link_ops *ops); void rdma_link_unregister(struct rdma_link_ops *ops); #define MODULE_ALIAS_RDMA_LINK(type) MODULE_ALIAS("rdma-link-" type) +#define MODULE_ALIAS_RDMA_CLIENT(type) MODULE_ALIAS("rdma-client-" type) + #endif /* _RDMA_NETLINK_H */ diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index b9cd06db1a71..ac5a9430abb6 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -2,7 +2,7 @@ #define DEF_RDMA_VT_H /* - * Copyright(c) 2016 - 2018 Intel Corporation. + * Copyright(c) 2016 - 2019 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -116,6 +116,7 @@ struct rvt_ibport { u64 n_unaligned; u64 n_rc_dupreq; u64 n_rc_seqnak; + u64 n_rc_crwaits; u16 pkey_violations; u16 qkey_violations; u16 mkey_violations; @@ -202,7 +203,6 @@ struct rvt_pd { struct rvt_ah { struct ib_ah ibah; struct rdma_ah_attr attr; - atomic_t refcount; u8 vl; u8 log_pmtu; }; @@ -555,7 +555,7 @@ static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi, struct rvt_dev_info *rvt_alloc_device(size_t size, int nports); void rvt_dealloc_device(struct rvt_dev_info *rdi); -int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id); +int rvt_register_device(struct rvt_dev_info *rvd); void rvt_unregister_device(struct rvt_dev_info *rvd); int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr); int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port, diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h index 75dc65c0bfb8..574eb7278f46 100644 --- a/include/rdma/rdmavt_cq.h +++ b/include/rdma/rdmavt_cq.h @@ -53,6 +53,7 @@ #include <linux/kthread.h> #include <rdma/ib_user_verbs.h> +#include <rdma/ib_verbs.h> /* * Define an ib_cq_notify value that is not valid so we know when CQ @@ -61,18 +62,27 @@ #define RVT_CQ_NONE (IB_CQ_NEXT_COMP + 1) /* + * Define read macro that apply smp_load_acquire memory barrier + * when reading indice of circular buffer that mmaped to user space. + */ +#define RDMA_READ_UAPI_ATOMIC(member) smp_load_acquire(&(member).val) + +/* + * Define write macro that uses smp_store_release memory barrier + * when writing indice of circular buffer that mmaped to user space. + */ +#define RDMA_WRITE_UAPI_ATOMIC(member, x) smp_store_release(&(member).val, x) +#include <rdma/rvt-abi.h> + +/* * This structure is used to contain the head pointer, tail pointer, * and completion queue entries as a single memory allocation so * it can be mmap'ed into user space. 
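/*
 * Illustrative sketch, not part of the patch: the UAPI atomic macros defined
 * above are the required way to touch ring indices that are mmap'ed to user
 * space. "cq" is an assumed pointer to an rvt_cq whose queue is shared with
 * user space.
 */
static u32 example_cq_publish_head(struct rvt_cq *cq, u32 next_head)
{
	/* Acquire pairs with the store-release done on the consumer side. */
	u32 tail = RDMA_READ_UAPI_ATOMIC(cq->queue->tail);

	/* Release makes the new head visible only after the CQE writes. */
	RDMA_WRITE_UAPI_ATOMIC(cq->queue->head, next_head);

	return tail;
}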
*/ -struct rvt_cq_wc { +struct rvt_k_cq_wc { u32 head; /* index of next entry to fill */ u32 tail; /* index of next ib_poll_cq() entry */ - union { - /* these are actually size ibcq.cqe + 1 */ - struct ib_uverbs_wc uqueue[0]; - struct ib_wc kqueue[0]; - }; + struct ib_wc kqueue[]; }; /* @@ -84,10 +94,12 @@ struct rvt_cq { spinlock_t lock; /* protect changes in this struct */ u8 notify; u8 triggered; + u8 cq_full; int comp_vector_cpu; struct rvt_dev_info *rdi; struct rvt_cq_wc *queue; struct rvt_mmap_info *ip; + struct rvt_k_cq_wc *kqueue; }; static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq) @@ -95,6 +107,6 @@ static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq) return container_of(ibcq, struct rvt_cq, ibcq); } -void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited); +bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited); #endif /* DEF_RDMAVT_INCCQH */ diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 68e38c20afc0..b550ae89bf85 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -2,7 +2,7 @@ #define DEF_RDMAVT_INCQP_H /* - * Copyright(c) 2016 - 2018 Intel Corporation. + * Copyright(c) 2016 - 2019 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -52,6 +52,7 @@ #include <rdma/ib_pack.h> #include <rdma/ib_verbs.h> #include <rdma/rdmavt_cq.h> +#include <rdma/rvt-abi.h> /* * Atomic bit definitions for r_aflags. */ @@ -156,6 +157,22 @@ #define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START #define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1) +/** + * rvt_ud_wr - IB UD work plus AH cache + * @wr: valid IB work request + * @attr: pointer to an allocated AH attribute + * + * Special case the UD WR so we can keep track of the AH attributes. + * + * NOTE: This data structure is stricly ordered wr then attr. I.e the attr + * MUST come after wr. The ib_ud_wr is sized and copied in rvt_post_one_wr. + * The copy assumes that wr is first. + */ +struct rvt_ud_wr { + struct ib_ud_wr wr; + struct rdma_ah_attr *attr; +}; + /* * Send work request queue entry. * The size of the sg_list is determined when the QP is created and stored @@ -164,7 +181,7 @@ struct rvt_swqe { union { struct ib_send_wr wr; /* don't use wr.sg_list */ - struct ib_ud_wr ud_wr; + struct rvt_ud_wr ud_wr; struct ib_reg_wr reg_wr; struct ib_rdma_wr rdma_wr; struct ib_atomic_wr atomic_wr; @@ -177,33 +194,84 @@ struct rvt_swqe { struct rvt_sge sg_list[0]; }; -/* - * Receive work request queue entry. - * The size of the sg_list is determined when the QP (or SRQ) is created - * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). +/** + * struct rvt_krwq - kernel struct receive work request + * @p_lock: lock to protect producer of the kernel buffer + * @head: index of next entry to fill + * @c_lock:lock to protect consumer of the kernel buffer + * @tail: index of next entry to pull + * @count: count is aproximate of total receive enteries posted + * @rvt_rwqe: struct of receive work request queue entry + * + * This structure is used to contain the head pointer, + * tail pointer and receive work queue entries for kernel + * mode user. */ -struct rvt_rwqe { - u64 wr_id; - u8 num_sge; - struct ib_sge sg_list[0]; -}; - -/* - * This structure is used to contain the head pointer, tail pointer, - * and receive work queue entries as a single memory allocation so - * it can be mmap'ed into user space. 
- * Note that the wq array elements are variable size so you can't - * just index into the array to get the N'th element; - * use get_rwqe_ptr() instead. - */ -struct rvt_rwq { +struct rvt_krwq { + spinlock_t p_lock; /* protect producer */ u32 head; /* new work requests posted to the head */ + + /* protect consumer */ + spinlock_t c_lock ____cacheline_aligned_in_smp; u32 tail; /* receives pull requests from here. */ - struct rvt_rwqe wq[0]; + u32 count; /* approx count of receive entries posted */ + struct rvt_rwqe *curr_wq; + struct rvt_rwqe wq[]; }; +/* + * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah + * @swqe: valid Send WQE + * + */ +static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe) +{ + return ibah_to_rvtah(swqe->ud_wr.wr.ah); +} + +/** + * rvt_get_swqe_ah_attr - Return the cached ah attribute information + * @swqe: valid Send WQE + * + */ +static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe) +{ + return swqe->ud_wr.attr; +} + +/** + * rvt_get_swqe_remote_qpn - Access the remote QPN value + * @swqe: valid Send WQE + * + */ +static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe) +{ + return swqe->ud_wr.wr.remote_qpn; +} + +/** + * rvt_get_swqe_remote_qkey - Acces the remote qkey value + * @swqe: valid Send WQE + * + */ +static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe) +{ + return swqe->ud_wr.wr.remote_qkey; +} + +/** + * rvt_get_swqe_pkey_index - Access the pkey index + * @swqe: valid Send WQE + * + */ +static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe) +{ + return swqe->ud_wr.wr.pkey_index; +} + struct rvt_rq { struct rvt_rwq *wq; + struct rvt_krwq *kwq; u32 size; /* size of RWQE array */ u8 max_sge; /* protect changes in this struct */ @@ -472,7 +540,7 @@ static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp, static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n) { return (struct rvt_rwqe *) - ((char *)rq->wq->wq + + ((char *)rq->kwq->curr_wq + (sizeof(struct rvt_rwqe) + rq->max_sge * sizeof(struct ib_sge)) * n); } @@ -540,7 +608,7 @@ static inline void rvt_qp_wqe_reserve( /** * rvt_qp_wqe_unreserve - clean reserved operation * @qp - the rvt qp - * @wqe - the send wqe + * @flags - send wqe flags * * This decrements the reserve use count. * @@ -552,11 +620,9 @@ static inline void rvt_qp_wqe_reserve( * the compiler does not juggle the order of the s_last * ring index and the decrementing of s_reserved_used. */ -static inline void rvt_qp_wqe_unreserve( - struct rvt_qp *qp, - struct rvt_swqe *wqe) +static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags) { - if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) { + if (unlikely(flags & RVT_SEND_RESERVE_USED)) { atomic_dec(&qp->s_reserved_used); /* insure no compiler re-order up to s_last change */ smp_mb__after_atomic(); @@ -565,42 +631,6 @@ static inline void rvt_qp_wqe_unreserve( extern const enum ib_wc_opcode ib_rvt_wc_opcode[]; -/** - * rvt_qp_swqe_complete() - insert send completion - * @qp - the qp - * @wqe - the send wqe - * @status - completion status - * - * Insert a send completion into the completion - * queue if the qp indicates it should be done. - * - * See IBTA 10.7.3.1 for info on completion - * control. 
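/*
 * Illustrative sketch, not part of the patch: with the AH attribute now
 * cached in rvt_ud_wr, drivers are expected to read UD work-request fields
 * through the accessors above rather than via wqe->ud_wr directly. The
 * output parameters are assumed to feed a driver's UD header builder.
 */
static void example_unpack_ud_wqe(struct rvt_swqe *wqe,
				  struct rdma_ah_attr **ah_attr,
				  u32 *dqpn, u32 *qkey, u16 *pkey_index)
{
	*ah_attr = rvt_get_swqe_ah_attr(wqe);
	*dqpn = rvt_get_swqe_remote_qpn(wqe);
	*qkey = rvt_get_swqe_remote_qkey(wqe);
	*pkey_index = rvt_get_swqe_pkey_index(wqe);
}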
- */ -static inline void rvt_qp_swqe_complete( - struct rvt_qp *qp, - struct rvt_swqe *wqe, - enum ib_wc_opcode opcode, - enum ib_wc_status status) -{ - if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) - return; - if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || - (wqe->wr.send_flags & IB_SEND_SIGNALED) || - status != IB_WC_SUCCESS) { - struct ib_wc wc; - - memset(&wc, 0, sizeof(wc)); - wc.wr_id = wqe->wr.wr_id; - wc.status = status; - wc.opcode = opcode; - wc.qp = &qp->ibqp; - wc.byte_len = wqe->length; - rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, - status != IB_WC_SUCCESS); - } -} - /* * Compare the lower 24 bits of the msn values. * Returns an integer <, ==, or > than zero. @@ -734,7 +764,120 @@ static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe) { rvt_put_swqe(wqe); if (qp->allowed_ops == IB_OPCODE_UD) - atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); + rdma_destroy_ah_attr(wqe->ud_wr.attr); +} + +/** + * rvt_qp_sqwe_incr - increment ring index + * @qp: the qp + * @val: the starting value + * + * Return: the new value wrapping as appropriate + */ +static inline u32 +rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val) +{ + if (++val >= qp->s_size) + val = 0; + return val; +} + +int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err); + +/** + * rvt_recv_cq - add a new entry to completion queue + * by receive queue + * @qp: receive queue + * @wc: work completion entry to add + * @solicited: true if @entry is solicited + * + * This is wrapper function for rvt_enter_cq function call by + * receive queue. If rvt_cq_enter return false, it means cq is + * full and the qp is put into error state. + */ +static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc, + bool solicited) +{ + struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq); + + if (unlikely(!rvt_cq_enter(cq, wc, solicited))) + rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR); +} + +/** + * rvt_send_cq - add a new entry to completion queue + * by send queue + * @qp: send queue + * @wc: work completion entry to add + * @solicited: true if @entry is solicited + * + * This is wrapper function for rvt_enter_cq function call by + * send queue. If rvt_cq_enter return false, it means cq is + * full and the qp is put into error state. + */ +static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc, + bool solicited) +{ + struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq); + + if (unlikely(!rvt_cq_enter(cq, wc, solicited))) + rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR); +} + +/** + * rvt_qp_complete_swqe - insert send completion + * @qp - the qp + * @wqe - the send wqe + * @opcode - wc operation (driver dependent) + * @status - completion status + * + * Update the s_last information, and then insert a send + * completion into the completion + * queue if the qp indicates it should be done. + * + * See IBTA 10.7.3.1 for info on completion + * control. 
+ * + * Return: new last + */ +static inline u32 +rvt_qp_complete_swqe(struct rvt_qp *qp, + struct rvt_swqe *wqe, + enum ib_wc_opcode opcode, + enum ib_wc_status status) +{ + bool need_completion; + u64 wr_id; + u32 byte_len, last; + int flags = wqe->wr.send_flags; + + rvt_qp_wqe_unreserve(qp, flags); + rvt_put_qp_swqe(qp, wqe); + + need_completion = + !(flags & RVT_SEND_RESERVE_USED) && + (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || + (flags & IB_SEND_SIGNALED) || + status != IB_WC_SUCCESS); + if (need_completion) { + wr_id = wqe->wr.wr_id; + byte_len = wqe->length; + /* above fields required before writing s_last */ + } + last = rvt_qp_swqe_incr(qp, qp->s_last); + /* see rvt_qp_is_avail() */ + smp_store_release(&qp->s_last, last); + if (need_completion) { + struct ib_wc w = { + .wr_id = wr_id, + .status = status, + .opcode = opcode, + .qp = &qp->ibqp, + .byte_len = byte_len, + }; + rvt_send_cq(qp, &w, status != IB_WC_SUCCESS); + } + return last; } extern const int ib_rvt_state_ops[]; @@ -742,7 +885,6 @@ extern const int ib_rvt_state_ops[]; struct rvt_dev_info; int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only); void rvt_comm_est(struct rvt_qp *qp); -int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err); void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err); unsigned long rvt_rnr_tbl_to_usec(u32 index); enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t); @@ -784,6 +926,88 @@ struct rvt_qp_iter { int n; }; +/** + * ib_cq_tail - Return tail index of cq buffer + * @send_cq - The cq for send + * + * This is called in qp_iter_print to get tail + * of cq buffer. + */ +static inline u32 ib_cq_tail(struct ib_cq *send_cq) +{ + struct rvt_cq *cq = ibcq_to_rvtcq(send_cq); + + return ibcq_to_rvtcq(send_cq)->ip ? + RDMA_READ_UAPI_ATOMIC(cq->queue->tail) : + ibcq_to_rvtcq(send_cq)->kqueue->tail; +} + +/** + * ib_cq_head - Return head index of cq buffer + * @send_cq - The cq for send + * + * This is called in qp_iter_print to get head + * of cq buffer. + */ +static inline u32 ib_cq_head(struct ib_cq *send_cq) +{ + struct rvt_cq *cq = ibcq_to_rvtcq(send_cq); + + return ibcq_to_rvtcq(send_cq)->ip ? + RDMA_READ_UAPI_ATOMIC(cq->queue->head) : + ibcq_to_rvtcq(send_cq)->kqueue->head; +} + +/** + * rvt_free_rq - free memory allocated for rvt_rq struct + * @rvt_rq: request queue data structure + * + * This function should only be called if the rvt_mmap_info() + * has not succeeded. + */ +static inline void rvt_free_rq(struct rvt_rq *rq) +{ + kvfree(rq->kwq); + rq->kwq = NULL; + vfree(rq->wq); + rq->wq = NULL; +} + +/** + * rvt_to_iport - Get the ibport pointer + * @qp: the qp pointer + * + * This function returns the ibport pointer from the qp pointer. + */ +static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp) +{ + struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); + + return rdi->ports[qp->port_num - 1]; +} + +/** + * rvt_rc_credit_avail - Check if there are enough RC credits for the request + * @qp: the qp + * @wqe: the request + * + * This function returns false when there are not enough credits for the given + * request and true otherwise. 
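/*
 * Illustrative sketch, not part of the patch: an RC request path using
 * rvt_rc_credit_avail(), whose kernel-doc is just above and whose body
 * follows below, before building a packet for a WQE. qp->s_lock is assumed
 * held, as the helper's lockdep assertion requires.
 */
static int example_make_rc_req(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	if (!rvt_rc_credit_avail(qp, wqe))
		return -EBUSY;	/* RVT_S_WAIT_SSN_CREDIT set; resumes on ACK */

	/*
	 * ... build and queue the packet, then later complete it with
	 * rvt_qp_complete_swqe(qp, wqe, IB_WC_SEND, IB_WC_SUCCESS) ...
	 */
	return 0;
}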
+ */ +static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe) +{ + lockdep_assert_held(&qp->s_lock); + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && + rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { + struct rvt_ibport *rvp = rvt_to_iport(qp); + + qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; + rvp->n_rc_crwaits++; + return false; + } + return true; +} + struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi, u64 v, void (*cb)(struct rvt_qp *qp, u64 v)); diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index ecf3c7702a4f..83df1ec6664e 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h @@ -14,6 +14,9 @@ #include <uapi/rdma/rdma_netlink.h> #include <linux/xarray.h> +struct ib_device; +struct sk_buff; + /** * enum rdma_restrack_type - HW objects to track */ @@ -43,13 +46,15 @@ enum rdma_restrack_type { */ RDMA_RESTRACK_CTX, /** + * @RDMA_RESTRACK_COUNTER: Statistic Counter + */ + RDMA_RESTRACK_COUNTER, + /** * @RDMA_RESTRACK_MAX: Last entry, used for array dclarations */ RDMA_RESTRACK_MAX }; -struct ib_device; - /** * struct rdma_restrack_entry - metadata per-entry */ @@ -100,8 +105,7 @@ struct rdma_restrack_entry { }; int rdma_restrack_count(struct ib_device *dev, - enum rdma_restrack_type type, - struct pid_namespace *ns); + enum rdma_restrack_type type); void rdma_restrack_kadd(struct rdma_restrack_entry *res); void rdma_restrack_uadd(struct rdma_restrack_entry *res); diff --git a/include/rdma/rw.h b/include/rdma/rw.h index 494f79ca3e62..6ad9dc836c10 100644 --- a/include/rdma/rw.h +++ b/include/rdma/rw.h @@ -39,15 +39,6 @@ struct rdma_rw_ctx { struct ib_send_wr inv_wr; struct ib_mr *mr; } *reg; - - struct { - struct rdma_rw_reg_ctx data; - struct rdma_rw_reg_ctx prot; - struct ib_send_wr sig_inv_wr; - struct ib_mr *sig_mr; - struct ib_sge sig_sge; - struct ib_sig_handover_wr sig_wr; - } *sig; }; }; diff --git a/include/rdma/signature.h b/include/rdma/signature.h new file mode 100644 index 000000000000..d16b0fcc8344 --- /dev/null +++ b/include/rdma/signature.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) */ +/* + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + */ + +#ifndef _RDMA_SIGNATURE_H_ +#define _RDMA_SIGNATURE_H_ + +#include <linux/types.h> + +enum ib_signature_prot_cap { + IB_PROT_T10DIF_TYPE_1 = 1, + IB_PROT_T10DIF_TYPE_2 = 1 << 1, + IB_PROT_T10DIF_TYPE_3 = 1 << 2, +}; + +enum ib_signature_guard_cap { + IB_GUARD_T10DIF_CRC = 1, + IB_GUARD_T10DIF_CSUM = 1 << 1, +}; + +/** + * enum ib_signature_type - Signature types + * @IB_SIG_TYPE_NONE: Unprotected. + * @IB_SIG_TYPE_T10_DIF: Type T10-DIF + */ +enum ib_signature_type { + IB_SIG_TYPE_NONE, + IB_SIG_TYPE_T10_DIF, +}; + +/** + * enum ib_t10_dif_bg_type - Signature T10-DIF block-guard types + * @IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. + * @IB_T10DIF_CSUM: Corresponds to IP checksum rules. + */ +enum ib_t10_dif_bg_type { + IB_T10DIF_CRC, + IB_T10DIF_CSUM, +}; + +/** + * struct ib_t10_dif_domain - Parameters specific for T10-DIF + * domain. + * @bg_type: T10-DIF block guard type (CRC|CSUM) + * @pi_interval: protection information interval. + * @bg: seed of guard computation. + * @app_tag: application tag of guard block + * @ref_tag: initial guard block reference tag. 
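/*
 * Illustrative sketch, not part of the patch: filling ib_sig_attrs for a
 * T10-DIF layout that is unprotected in memory and CRC-protected on the
 * wire, using only the types and constants defined in this new header. The
 * 512-byte interval and the lba seed are assumed inputs.
 */
static void example_set_t10dif(struct ib_sig_attrs *sig, u32 lba)
{
	memset(sig, 0, sizeof(*sig));
	sig->check_mask = IB_SIG_CHECK_GUARD | IB_SIG_CHECK_REFTAG;
	sig->mem.sig_type = IB_SIG_TYPE_NONE;
	sig->wire.sig_type = IB_SIG_TYPE_T10_DIF;
	sig->wire.sig.dif.bg_type = IB_T10DIF_CRC;
	sig->wire.sig.dif.pi_interval = 512;
	sig->wire.sig.dif.ref_tag = lba;
	sig->wire.sig.dif.ref_remap = true;
	sig->wire.sig.dif.app_escape = true;
	sig->wire.sig.dif.apptag_check_mask = 0xffff;
}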
+ * @ref_remap: Indicate wethear the reftag increments each block + * @app_escape: Indicate to skip block check if apptag=0xffff + * @ref_escape: Indicate to skip block check if reftag=0xffffffff + * @apptag_check_mask: check bitmask of application tag. + */ +struct ib_t10_dif_domain { + enum ib_t10_dif_bg_type bg_type; + u16 pi_interval; + u16 bg; + u16 app_tag; + u32 ref_tag; + bool ref_remap; + bool app_escape; + bool ref_escape; + u16 apptag_check_mask; +}; + +/** + * struct ib_sig_domain - Parameters for signature domain + * @sig_type: specific signauture type + * @sig: union of all signature domain attributes that may + * be used to set domain layout. + */ +struct ib_sig_domain { + enum ib_signature_type sig_type; + union { + struct ib_t10_dif_domain dif; + } sig; +}; + +/** + * struct ib_sig_attrs - Parameters for signature handover operation + * @check_mask: bitmask for signature byte check (8 bytes) + * @mem: memory domain layout descriptor. + * @wire: wire domain layout descriptor. + * @meta_length: metadata length + */ +struct ib_sig_attrs { + u8 check_mask; + struct ib_sig_domain mem; + struct ib_sig_domain wire; + int meta_length; +}; + +enum ib_sig_err_type { + IB_SIG_BAD_GUARD, + IB_SIG_BAD_REFTAG, + IB_SIG_BAD_APPTAG, +}; + +/* + * Signature check masks (8 bytes in total) according to the T10-PI standard: + * -------- -------- ------------ + * | GUARD | APPTAG | REFTAG | + * | 2B | 2B | 4B | + * -------- -------- ------------ + */ +enum { + IB_SIG_CHECK_GUARD = 0xc0, + IB_SIG_CHECK_APPTAG = 0x30, + IB_SIG_CHECK_REFTAG = 0x0f, +}; + +/* + * struct ib_sig_err - signature error descriptor + */ +struct ib_sig_err { + enum ib_sig_err_type err_type; + u32 expected; + u32 actual; + u64 sig_err_offset; + u32 key; +}; + +#endif /* _RDMA_SIGNATURE_H_ */ diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h index 9710254fd98c..e0a3423ba09e 100644 --- a/include/scsi/fc/fc_fip.h +++ b/include/scsi/fc/fc_fip.h @@ -1,18 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2008 Cisco Systems, Inc. All rights reserved. - * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _FC_FIP_H_ #define _FC_FIP_H_ diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h index b1424dccf426..800d53dc9470 100644 --- a/include/scsi/fc/fc_ms.h +++ b/include/scsi/fc/fc_ms.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* * Copyright(c) 2011 Intel Corporation. All rights reserved. +/* + * Copyright(c) 2011 Intel Corporation. All rights reserved. 
* * Maintained at www.Open-FCoE.org */ diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index 8b31588460d5..92b11c7e0b4f 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h @@ -5,8 +5,6 @@ * Copyright (C) 2005 Dmitry Yusupov * Copyright (C) 2005 Alex Aizman * maintained by open-iscsi@googlegroups.com - * - * See the file COPYING included with this distribution for more details. */ #ifndef ISCSI_IF_H diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h index aeb4980745ca..b71b5c4f418c 100644 --- a/include/scsi/iscsi_proto.h +++ b/include/scsi/iscsi_proto.h @@ -5,8 +5,6 @@ * Copyright (C) 2005 Dmitry Yusupov * Copyright (C) 2005 Alex Aizman * maintained by open-iscsi@googlegroups.com - * - * See the file COPYING included with this distribution for more details. */ #ifndef ISCSI_PROTO_H diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 2d64b53f947c..9b87e1a1c646 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -115,7 +115,7 @@ struct fc_disc_port { struct fc_lport *lp; struct list_head peers; struct work_struct rport_work; - u32 port_id; + u32 port_id; }; /** @@ -155,14 +155,14 @@ struct fc_rport_operations { */ struct fc_rport_libfc_priv { struct fc_lport *local_port; - enum fc_rport_state rp_state; + enum fc_rport_state rp_state; u16 flags; #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0) #define FC_RP_FLAGS_RETRY (1 << 1) #define FC_RP_STARTED (1 << 2) #define FC_RP_FLAGS_CONF_REQ (1 << 3) - unsigned int e_d_tov; - unsigned int r_a_tov; + unsigned int e_d_tov; + unsigned int r_a_tov; }; /** @@ -191,24 +191,24 @@ struct fc_rport_priv { struct fc_lport *local_port; struct fc_rport *rport; struct kref kref; - enum fc_rport_state rp_state; + enum fc_rport_state rp_state; struct fc_rport_identifiers ids; u16 flags; - u16 max_seq; + u16 max_seq; u16 disc_id; u16 maxframe_size; - unsigned int retries; - unsigned int major_retries; - unsigned int e_d_tov; - unsigned int r_a_tov; - struct mutex rp_mutex; + unsigned int retries; + unsigned int major_retries; + unsigned int e_d_tov; + unsigned int r_a_tov; + struct mutex rp_mutex; struct delayed_work retry_work; - enum fc_rport_event event; + enum fc_rport_event event; struct fc_rport_operations *ops; - struct list_head peers; - struct work_struct event_work; + struct list_head peers; + struct work_struct event_work; u32 supported_classes; - u16 prli_count; + u16 prli_count; struct rcu_head rcu; u16 sp_features; u8 spp_type; @@ -618,12 +618,12 @@ struct libfc_function_template { * @disc_callback: Callback routine called when discovery completes */ struct fc_disc { - unsigned char retry_count; - unsigned char pending; - unsigned char requested; - unsigned short seq_count; - unsigned char buf_len; - u16 disc_id; + unsigned char retry_count; + unsigned char pending; + unsigned char requested; + unsigned short seq_count; + unsigned char buf_len; + u16 disc_id; struct list_head rports; void *priv; @@ -697,7 +697,7 @@ struct fc_lport { struct fc_rport_priv *ms_rdata; struct fc_rport_priv *ptp_rdata; void *scsi_priv; - struct fc_disc disc; + struct fc_disc disc; /* Virtual port information */ struct list_head vports; @@ -715,7 +715,7 @@ struct fc_lport { u8 retry_count; /* Fabric information */ - u32 port_id; + u32 port_id; u64 wwpn; u64 wwnn; unsigned int service_params; @@ -743,11 +743,11 @@ struct fc_lport { struct fc_ns_fts fcts; /* Miscellaneous */ - struct mutex lp_mutex; - struct list_head list; + struct mutex lp_mutex; + struct list_head list; struct delayed_work retry_work; void 
*prov[FC_FC4_PROV_SIZE]; - struct list_head lport_list; + struct list_head lport_list; }; /** diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index c50fb297e265..2568cb0627ec 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -31,7 +31,7 @@ * FIP tunable parameters. */ #define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */ -#define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */ +#define FCOE_CTLR_SOL_TOV 2000 /* min. solicitation interval (mS) */ #define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */ #define FCOE_CTLR_VN2VN_LOGIN_LIMIT 3 /* max. VN2VN rport login retries */ @@ -229,6 +229,7 @@ struct fcoe_fcf { * @vn_mac: VN_Node assigned MAC address for data */ struct fcoe_rport { + struct fc_rport_priv rdata; unsigned long time; u16 fcoe_len; u16 flags; diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h index 172f15e3dfd6..7c8ba9d7378b 100644 --- a/include/scsi/libiscsi_tcp.h +++ b/include/scsi/libiscsi_tcp.h @@ -5,8 +5,6 @@ * Copyright (C) 2008 Mike Christie * Copyright (C) 2008 Red Hat, Inc. All rights reserved. * maintained by open-iscsi@googlegroups.com - * - * See the file COPYING included with this distribution for more details. */ #ifndef LIBISCSI_TCP_H diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index e9664bb7d188..4e2d61e8fb1e 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * SAS host prototypes and structures header file * @@ -207,8 +207,7 @@ struct sas_work { struct work_struct work; }; -/* Lots of code duplicates this in the SCSI tree, which can be factored out */ -static inline bool sas_dev_type_is_expander(enum sas_device_type type) +static inline bool dev_is_expander(enum sas_device_type type) { return type == SAS_EDGE_EXPANDER_DEVICE || type == SAS_FANOUT_EXPANDER_DEVICE; diff --git a/include/scsi/sas.h b/include/scsi/sas.h index 97a0f6bd201c..a5d8ae49198c 100644 --- a/include/scsi/sas.h +++ b/include/scsi/sas.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * SAS structures and definitions header file * diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 76ed5e4acd38..91bd749a02f7 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -57,6 +57,7 @@ struct scsi_pointer { #define SCMD_TAGGED (1 << 0) #define SCMD_UNCHECKED_ISA_DMA (1 << 1) #define SCMD_INITIALIZED (1 << 2) +#define SCMD_LAST (1 << 3) /* flags preserved across unprep / reprep */ #define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED) diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h index e03bd9d41fa8..7b196d234626 100644 --- a/include/scsi/scsi_dbg.h +++ b/include/scsi/scsi_dbg.h @@ -6,8 +6,6 @@ struct scsi_cmnd; struct scsi_device; struct scsi_sense_hdr; -#define SCSI_LOG_BUFSIZE 128 - extern void scsi_print_command(struct scsi_cmnd *); extern size_t __scsi_format_command(char *, size_t, const unsigned char *, size_t); diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h index 3810b340551c..6bd5ed695a5e 100644 --- a/include/scsi/scsi_eh.h +++ b/include/scsi/scsi_eh.h @@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *); struct scsi_eh_save { /* saved state */ int result; + unsigned int resid_len; int eh_eflags; enum dma_data_direction data_direction; unsigned underflow; diff --git a/include/scsi/scsi_host.h 
b/include/scsi/scsi_host.h index a5fcdad4a03e..31e0d6ca1eba 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -80,8 +80,10 @@ struct scsi_host_template { * command block to the LLDD. When the driver finished * processing the command the done callback is invoked. * - * If queuecommand returns 0, then the HBA has accepted the - * command. The done() function must be called on the command + * If queuecommand returns 0, then the driver has accepted the + * command. It must also push it to the HBA if the scsi_cmnd + * flag SCMD_LAST is set, or if the driver does not implement + * commit_rqs. The done() function must be called on the command * when the driver has finished with it. (you may call done on the * command before queuecommand returns, but in this case you * *must* return 0 from queuecommand). @@ -110,6 +112,16 @@ struct scsi_host_template { int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); /* + * The commit_rqs function is used to trigger a hardware + * doorbell after some requests have been queued with + * queuecommand, when an error is encountered before sending + * the request with SCMD_LAST set. + * + * STATUS: OPTIONAL + */ + void (*commit_rqs)(struct Scsi_Host *, u16); + + /* * This is an error handling strategy routine. You don't need to * define one of these if you don't want to - there is a default * routine that is present that should work in most cases. For those @@ -369,6 +381,8 @@ struct scsi_host_template { */ unsigned long dma_boundary; + unsigned long virt_boundary_mask; + /* * This specifies "machine infinity" for host templates which don't * limit the transfer size. Note this limit represents an absolute @@ -587,6 +601,7 @@ struct Scsi_Host { unsigned int max_sectors; unsigned int max_segment_size; unsigned long dma_boundary; + unsigned long virt_boundary_mask; /* * In scsi-mq mode, the number of hardware queues supported by the LLD. * diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h index 0580dce280a1..a0458bda3148 100644 --- a/include/scsi/scsi_transport.h +++ b/include/scsi/scsi_transport.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Transport specific attributes. * diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 43f09c7c25a2..7db2dd783834 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -3,9 +3,6 @@ * FiberChannel transport specific attributes exported to sysfs. * * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. - * - * ======== - * * Copyright (C) 2004-2007 James Smart, Emulex Corporation * Rewrite for host, target, device, and remote port attributes, * statistics, and service functions... 
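To illustrate the queuecommand()/commit_rqs() contract described in the scsi_host.h hunk above, here is a hypothetical low-level-driver sketch; the foo_* names and the ring/doorbell helpers are invented for illustration, and the real batching details are up to each driver.

static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	foo_post_to_ring(shost, cmd);		/* accept and queue, no doorbell yet */

	if (cmd->flags & SCMD_LAST)		/* end of the batch: push to the HBA */
		foo_ring_doorbell(shost);

	return 0;
}

/* called when a batch ended without an SCMD_LAST command, e.g. on error */
static void foo_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
	foo_ring_doorbell(shost);
}

static struct scsi_host_template foo_sht = {
	.queuecommand	= foo_queuecommand,
	.commit_rqs	= foo_commit_rqs,
	/* ... */
};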
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h index 50f49e043668..d1a93c73f006 100644 --- a/include/soc/arc/mcip.h +++ b/include/soc/arc/mcip.h @@ -46,7 +46,9 @@ struct mcip_cmd { #define CMD_IDU_ENABLE 0x71 #define CMD_IDU_DISABLE 0x72 #define CMD_IDU_SET_MODE 0x74 +#define CMD_IDU_READ_MODE 0x75 #define CMD_IDU_SET_DEST 0x76 +#define CMD_IDU_ACK_CIRQ 0x79 #define CMD_IDU_SET_MASK 0x7C #define IDU_M_TRIG_LEVEL 0x0 @@ -119,4 +121,13 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param, __mcip_cmd(cmd, param); } +/* + * Read MCIP register + */ +static inline unsigned int __mcip_cmd_read(unsigned int cmd, unsigned int param) +{ + __mcip_cmd(cmd, param); + return read_aux_reg(ARC_REG_MCIP_READBACK); +} + #endif diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h index 5b99cb2ea5ef..173e4049d963 100644 --- a/include/soc/fsl/bman.h +++ b/include/soc/fsl/bman.h @@ -133,5 +133,13 @@ int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num); * failed to probe or 0 if the bman driver did not probed yet. */ int bman_is_probed(void); +/** + * bman_portals_probed - Check if all cpu bound bman portals are probed + * + * Returns 1 if all the required cpu bound bman portals successfully probed, + * -1 if probe errors appeared or 0 if the bman portals did not yet finished + * probing. + */ +int bman_portals_probed(void); #endif /* __FSL_BMAN_H */ diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index 3f9d6b6a5691..c1036d16ed03 100644 --- a/include/soc/fsl/qe/qe.h +++ b/include/soc/fsl/qe/qe.h @@ -259,7 +259,7 @@ static inline int qe_alive_during_sleep(void) /* Structure that defines QE firmware binary files. * - * See Documentation/powerpc/qe_firmware.txt for a description of these + * See Documentation/powerpc/qe_firmware.rst for a description of these * fields. */ struct qe_firmware { diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index 5cc7af06c1ba..aa31c05a103a 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -1195,6 +1195,15 @@ int qman_release_cgrid(u32 id); int qman_is_probed(void); /** + * qman_portals_probed - Check if all cpu bound qman portals are probed + * + * Returns 1 if all the required cpu bound qman portals successfully probed, + * -1 if probe errors appeared or 0 if the qman portals did not yet finished + * probing. + */ +int qman_portals_probed(void); + +/** * qman_dqrr_get_ithresh - Get coalesce interrupt threshold * @portal: portal to get the value for * @ithresh: threshold pointer diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h index 79b74ced9d91..5a34b87d89e3 100644 --- a/include/soc/mediatek/smi.h +++ b/include/soc/mediatek/smi.h @@ -20,11 +20,6 @@ struct mtk_smi_larb_iommu { unsigned int mmu; }; -struct mtk_smi_iommu { - unsigned int larb_nr; - struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; -}; - /* * mtk_smi_larb_get: Enable the power domain and clocks for this local arbiter. * It also initialize some basic setting(like iommu). diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h index 262876a59e86..7a2a055ba6b0 100644 --- a/include/soc/qcom/tcs.h +++ b/include/soc/qcom/tcs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. 
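The bman_portals_probed()/qman_portals_probed() helpers added above return 1 when all cpu-bound portals have probed, -1 on portal probe errors and 0 while probing is still pending; a dependent driver would typically translate that into probe deferral, roughly as in this hypothetical sketch (foo_probe and the surrounding driver are made up):

static int foo_probe(struct platform_device *pdev)
{
	int ret = qman_portals_probed();

	if (ret < 0)
		return -ENODEV;		/* portal probing failed */
	if (ret == 0)
		return -EPROBE_DEFER;	/* portals not all probed yet, retry later */

	/* ... portals are ready, continue with normal device setup ... */
	return 0;
}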
*/ #ifndef __SOC_QCOM_TCS_H__ @@ -53,4 +53,22 @@ struct tcs_request { struct tcs_cmd *cmds; }; +#define BCM_TCS_CMD_COMMIT_SHFT 30 +#define BCM_TCS_CMD_COMMIT_MASK 0x40000000 +#define BCM_TCS_CMD_VALID_SHFT 29 +#define BCM_TCS_CMD_VALID_MASK 0x20000000 +#define BCM_TCS_CMD_VOTE_X_SHFT 14 +#define BCM_TCS_CMD_VOTE_MASK 0x3fff +#define BCM_TCS_CMD_VOTE_Y_SHFT 0 +#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000 + +/* Construct a Bus Clock Manager (BCM) specific TCS command */ +#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \ + (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \ + ((valid) << BCM_TCS_CMD_VALID_SHFT) | \ + ((cpu_to_le32(vote_x) & \ + BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \ + ((cpu_to_le32(vote_y) & \ + BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT)) + #endif /* __SOC_QCOM_TCS_H__ */ diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index c5188ff724d1..bc88d6f964da 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -173,10 +173,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) if (snd_BUG_ON(!stream)) return; - if (stream->direction == SND_COMPRESS_PLAYBACK) - stream->runtime->state = SNDRV_PCM_STATE_SETUP; - else - stream->runtime->state = SNDRV_PCM_STATE_PREPARED; + stream->runtime->state = SNDRV_PCM_STATE_SETUP; wake_up(&stream->runtime->sleep); } diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index a7c602576b68..9a0393cf024c 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -18,6 +18,9 @@ #include <sound/hda_verbs.h> #include <sound/hda_regmap.h> +#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) +#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348) + /* * Structures */ @@ -56,6 +59,7 @@ struct hda_bus { unsigned int in_reset:1; /* during reset operation */ unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ unsigned int bus_probing :1; /* during probing process */ + unsigned int keep_power:1; /* keep power up for notification */ int primary_dig_out_type; /* primary digital out PCM type */ unsigned int mixer_assigned; /* codec addr for mixer name */ @@ -249,6 +253,8 @@ struct hda_codec { unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */ unsigned int force_pin_prefix:1; /* Add location prefix */ unsigned int link_down_at_suspend:1; /* link down at runtime suspend */ + unsigned int relaxed_resume:1; /* don't resume forcibly for jack */ + #ifdef CONFIG_PM unsigned long power_on_acct; unsigned long power_off_acct; @@ -268,9 +274,6 @@ struct hda_codec { unsigned long jackpoll_interval; /* In jiffies. 
Zero means no poll, rely on unsol events */ struct delayed_work jackpoll_work; - /* jack detection */ - struct snd_array jacks; - int depop_delay; /* depop delay in ms, -1 for default delay time */ /* fix-up list */ diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h index 0fd39295b426..057d2a2d0bd0 100644 --- a/include/sound/hda_register.h +++ b/include/sound/hda_register.h @@ -264,6 +264,9 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; #define AZX_REG_ML_LOUTPAY 0x20 #define AZX_REG_ML_LINPAY 0x30 +/* bit0 is reserved, with BIT(1) mapping to stream1 */ +#define ML_LOSIDV_STREAM_MASK 0xFFFE + #define ML_LCTL_SCF_MASK 0xF #define AZX_MLCTL_SPA (0x1 << 16) #define AZX_MLCTL_CPA (0x1 << 23) diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index e8346784cf3f..b260c5fd2337 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -120,12 +120,8 @@ void snd_hdac_device_unregister(struct hdac_device *codec); int snd_hdac_device_set_chip_name(struct hdac_device *codec, const char *name); int snd_hdac_codec_modalias(struct hdac_device *hdac, char *buf, size_t size); -int snd_hdac_refresh_widgets(struct hdac_device *codec, bool sysfs); +int snd_hdac_refresh_widgets(struct hdac_device *codec); -unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid, - unsigned int verb, unsigned int parm); -int snd_hdac_exec_verb(struct hdac_device *codec, unsigned int cmd, - unsigned int flags, unsigned int *res); int snd_hdac_read(struct hdac_device *codec, hda_nid_t nid, unsigned int verb, unsigned int parm, unsigned int *res); int _snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid, int parm, @@ -253,24 +249,6 @@ struct hdac_ext_bus_ops { int (*hdev_detach)(struct hdac_device *hdev); }; -/* - * Lowlevel I/O operators - */ -struct hdac_io_ops { - /* mapped register accesses */ - void (*reg_writel)(u32 value, u32 __iomem *addr); - u32 (*reg_readl)(u32 __iomem *addr); - void (*reg_writew)(u16 value, u16 __iomem *addr); - u16 (*reg_readw)(u16 __iomem *addr); - void (*reg_writeb)(u8 value, u8 __iomem *addr); - u8 (*reg_readb)(u8 __iomem *addr); - /* Allocation ops */ - int (*dma_alloc_pages)(struct hdac_bus *bus, int type, size_t size, - struct snd_dma_buffer *buf); - void (*dma_free_pages)(struct hdac_bus *bus, - struct snd_dma_buffer *buf); -}; - #define HDA_UNSOL_QUEUE_SIZE 64 #define HDA_MAX_CODECS 8 /* limit by controller side */ @@ -304,7 +282,6 @@ struct hdac_rb { struct hdac_bus { struct device *dev; const struct hdac_bus_ops *ops; - const struct hdac_io_ops *io_ops; const struct hdac_ext_bus_ops *ext_ops; /* h/w resources */ @@ -344,6 +321,7 @@ struct hdac_bus { /* CORB/RIRB and position buffers */ struct snd_dma_buffer rb; struct snd_dma_buffer posbuf; + int dma_type; /* SNDRV_DMA_TYPE_XXX for CORB/RIRB */ /* hdac_stream linked list */ struct list_head stream_list; @@ -358,6 +336,9 @@ struct hdac_bus { bool align_bdle_4k:1; /* BDLE align 4K boundary */ bool reverse_assign:1; /* assign devices in reverse order */ bool corbrp_self_clear:1; /* CORBRP clears itself after reset */ + bool polling_mode:1; + + int poll_count; int bdl_pos_adj; /* BDL position adjustment */ @@ -381,8 +362,7 @@ struct hdac_bus { }; int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev, - const struct hdac_bus_ops *ops, - const struct hdac_io_ops *io_ops); + const struct hdac_bus_ops *ops); void snd_hdac_bus_exit(struct hdac_bus *bus); int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr, unsigned int cmd, unsigned int *res); @@ 
-390,11 +370,6 @@ int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr, unsigned int cmd, unsigned int *res); void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex); -int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec); -void snd_hdac_bus_remove_device(struct hdac_bus *bus, - struct hdac_device *codec); -void snd_hdac_bus_process_unsol_events(struct work_struct *work); - static inline void snd_hdac_codec_link_up(struct hdac_device *codec) { set_bit(codec->addr, &codec->bus->codec_powered); @@ -426,21 +401,38 @@ int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, int snd_hdac_bus_alloc_stream_pages(struct hdac_bus *bus); void snd_hdac_bus_free_stream_pages(struct hdac_bus *bus); +#ifdef CONFIG_SND_HDA_ALIGNED_MMIO +unsigned int snd_hdac_aligned_read(void __iomem *addr, unsigned int mask); +void snd_hdac_aligned_write(unsigned int val, void __iomem *addr, + unsigned int mask); +#define snd_hdac_reg_writeb(v, addr) snd_hdac_aligned_write(v, addr, 0xff) +#define snd_hdac_reg_writew(v, addr) snd_hdac_aligned_write(v, addr, 0xffff) +#define snd_hdac_reg_readb(addr) snd_hdac_aligned_read(addr, 0xff) +#define snd_hdac_reg_readw(addr) snd_hdac_aligned_read(addr, 0xffff) +#else /* CONFIG_SND_HDA_ALIGNED_MMIO */ +#define snd_hdac_reg_writeb(val, addr) writeb(val, addr) +#define snd_hdac_reg_writew(val, addr) writew(val, addr) +#define snd_hdac_reg_readb(addr) readb(addr) +#define snd_hdac_reg_readw(addr) readw(addr) +#endif /* CONFIG_SND_HDA_ALIGNED_MMIO */ +#define snd_hdac_reg_writel(val, addr) writel(val, addr) +#define snd_hdac_reg_readl(addr) readl(addr) + /* * macros for easy use */ #define _snd_hdac_chip_writeb(chip, reg, value) \ - ((chip)->io_ops->reg_writeb(value, (chip)->remap_addr + (reg))) + snd_hdac_reg_writeb(value, (chip)->remap_addr + (reg)) #define _snd_hdac_chip_readb(chip, reg) \ - ((chip)->io_ops->reg_readb((chip)->remap_addr + (reg))) + snd_hdac_reg_readb((chip)->remap_addr + (reg)) #define _snd_hdac_chip_writew(chip, reg, value) \ - ((chip)->io_ops->reg_writew(value, (chip)->remap_addr + (reg))) + snd_hdac_reg_writew(value, (chip)->remap_addr + (reg)) #define _snd_hdac_chip_readw(chip, reg) \ - ((chip)->io_ops->reg_readw((chip)->remap_addr + (reg))) + snd_hdac_reg_readw((chip)->remap_addr + (reg)) #define _snd_hdac_chip_writel(chip, reg, value) \ - ((chip)->io_ops->reg_writel(value, (chip)->remap_addr + (reg))) + snd_hdac_reg_writel(value, (chip)->remap_addr + (reg)) #define _snd_hdac_chip_readl(chip, reg) \ - ((chip)->io_ops->reg_readl((chip)->remap_addr + (reg))) + snd_hdac_reg_readl((chip)->remap_addr + (reg)) /* read/write a register, pass without AZX_REG_ prefix */ #define snd_hdac_chip_writel(chip, reg, value) \ @@ -545,24 +537,19 @@ int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus, /* * macros for easy use */ -#define _snd_hdac_stream_write(type, dev, reg, value) \ - ((dev)->bus->io_ops->reg_write ## type(value, (dev)->sd_addr + (reg))) -#define _snd_hdac_stream_read(type, dev, reg) \ - ((dev)->bus->io_ops->reg_read ## type((dev)->sd_addr + (reg))) - /* read/write a register, pass without AZX_REG_ prefix */ #define snd_hdac_stream_writel(dev, reg, value) \ - _snd_hdac_stream_write(l, dev, AZX_REG_ ## reg, value) + snd_hdac_reg_writel(value, (dev)->sd_addr + AZX_REG_ ## reg) #define snd_hdac_stream_writew(dev, reg, value) \ - _snd_hdac_stream_write(w, dev, AZX_REG_ ## reg, value) + snd_hdac_reg_writew(value, (dev)->sd_addr + AZX_REG_ ## reg) #define 
snd_hdac_stream_writeb(dev, reg, value) \ - _snd_hdac_stream_write(b, dev, AZX_REG_ ## reg, value) + snd_hdac_reg_writeb(value, (dev)->sd_addr + AZX_REG_ ## reg) #define snd_hdac_stream_readl(dev, reg) \ - _snd_hdac_stream_read(l, dev, AZX_REG_ ## reg) + snd_hdac_reg_readl((dev)->sd_addr + AZX_REG_ ## reg) #define snd_hdac_stream_readw(dev, reg) \ - _snd_hdac_stream_read(w, dev, AZX_REG_ ## reg) + snd_hdac_reg_readw((dev)->sd_addr + AZX_REG_ ## reg) #define snd_hdac_stream_readb(dev, reg) \ - _snd_hdac_stream_read(b, dev, AZX_REG_ ## reg) + snd_hdac_reg_readb((dev)->sd_addr + AZX_REG_ ## reg) /* update a register, pass without AZX_REG_ prefix */ #define snd_hdac_stream_updatel(dev, reg, mask, val) \ diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index f34aced69ca8..ef88b20c7b0a 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -6,7 +6,6 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev, const struct hdac_bus_ops *ops, - const struct hdac_io_ops *io_ops, const struct hdac_ext_bus_ops *ext_ops); void snd_hdac_ext_bus_exit(struct hdac_bus *bus); diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h index 7fea496f1f34..83b17682e01c 100644 --- a/include/sound/hdmi-codec.h +++ b/include/sound/hdmi-codec.h @@ -47,6 +47,9 @@ struct hdmi_codec_params { int channels; }; +typedef void (*hdmi_codec_plugged_cb)(struct device *dev, + bool plugged); + struct hdmi_codec_pdata; struct hdmi_codec_ops { /* @@ -88,6 +91,14 @@ struct hdmi_codec_ops { */ int (*get_dai_id)(struct snd_soc_component *comment, struct device_node *endpoint); + + /* + * Hook callback function to handle connector plug event. + * Optional + */ + int (*hook_plugged_cb)(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev); }; /* HDMI codec initalization data */ @@ -99,6 +110,12 @@ struct hdmi_codec_pdata { void *data; }; +struct snd_soc_component; +struct snd_soc_jack; + +int hdmi_codec_set_jack_detect(struct snd_soc_component *component, + struct snd_soc_jack *jack); + #define HDMI_CODEC_DRV_NAME "hdmi-audio-codec" #endif /* __HDMI_CODEC_H__ */ diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h new file mode 100644 index 000000000000..f657fd8fc0ad --- /dev/null +++ b/include/sound/intel-nhlt.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * intel-nhlt.h - Intel HDA Platform NHLT header + * + * Copyright (c) 2015-2019 Intel Corporation + */ + +#ifndef __INTEL_NHLT_H__ +#define __INTEL_NHLT_H__ + +#include <linux/acpi.h> + +#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT) + +struct wav_fmt { + u16 fmt_tag; + u16 channels; + u32 samples_per_sec; + u32 avg_bytes_per_sec; + u16 block_align; + u16 bits_per_sample; + u16 cb_size; +} __packed; + +struct wav_fmt_ext { + struct wav_fmt fmt; + union samples { + u16 valid_bits_per_sample; + u16 samples_per_block; + u16 reserved; + } sample; + u32 channel_mask; + u8 sub_fmt[16]; +} __packed; + +enum nhlt_link_type { + NHLT_LINK_HDA = 0, + NHLT_LINK_DSP = 1, + NHLT_LINK_DMIC = 2, + NHLT_LINK_SSP = 3, + NHLT_LINK_INVALID +}; + +enum nhlt_device_type { + NHLT_DEVICE_BT = 0, + NHLT_DEVICE_DMIC = 1, + NHLT_DEVICE_I2S = 4, + NHLT_DEVICE_INVALID +}; + +struct nhlt_specific_cfg { + u32 size; + u8 caps[0]; +} __packed; + +struct nhlt_fmt_cfg { + struct wav_fmt_ext fmt_ext; + struct nhlt_specific_cfg config; +} __packed; + +struct nhlt_fmt { + u8 fmt_count; + struct nhlt_fmt_cfg fmt_config[0]; +} __packed; + +struct nhlt_endpoint { + 
u32 length; + u8 linktype; + u8 instance_id; + u16 vendor_id; + u16 device_id; + u16 revision_id; + u32 subsystem_id; + u8 device_type; + u8 direction; + u8 virtual_bus_id; + struct nhlt_specific_cfg config; +} __packed; + +struct nhlt_acpi_table { + struct acpi_table_header header; + u8 endpoint_count; + struct nhlt_endpoint desc[0]; +} __packed; + +struct nhlt_resource_desc { + u32 extra; + u16 flags; + u64 addr_spc_gra; + u64 min_addr; + u64 max_addr; + u64 addr_trans_offset; + u64 length; +} __packed; + +#define MIC_ARRAY_2CH 2 +#define MIC_ARRAY_4CH 4 + +struct nhlt_device_specific_config { + u8 virtual_slot; + u8 config_type; +} __packed; + +struct nhlt_dmic_array_config { + struct nhlt_device_specific_config device_config; + u8 array_type; +} __packed; + +struct nhlt_vendor_dmic_array_config { + struct nhlt_dmic_array_config dmic_config; + u8 nb_mics; + /* TODO add vendor mic config */ +} __packed; + +enum { + NHLT_MIC_ARRAY_2CH_SMALL = 0xa, + NHLT_MIC_ARRAY_2CH_BIG = 0xb, + NHLT_MIC_ARRAY_4CH_1ST_GEOM = 0xc, + NHLT_MIC_ARRAY_4CH_L_SHAPED = 0xd, + NHLT_MIC_ARRAY_4CH_2ND_GEOM = 0xe, + NHLT_MIC_ARRAY_VENDOR_DEFINED = 0xf, +}; + +struct nhlt_acpi_table *intel_nhlt_init(struct device *dev); + +void intel_nhlt_free(struct nhlt_acpi_table *addr); + +int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt); + +#else + +struct nhlt_acpi_table; + +static inline struct nhlt_acpi_table *intel_nhlt_init(struct device *dev) +{ + return NULL; +} + +static inline void intel_nhlt_free(struct nhlt_acpi_table *addr) +{ +} + +static inline int intel_nhlt_get_dmic_geo(struct device *dev, + struct nhlt_acpi_table *nhlt) +{ + return 0; +} +#endif + +#endif diff --git a/include/sound/madera-pdata.h b/include/sound/madera-pdata.h new file mode 100644 index 000000000000..e3060f48f108 --- /dev/null +++ b/include/sound/madera-pdata.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Platform data for Madera codec driver + * + * Copyright (C) 2016-2019 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. + */ + +#ifndef MADERA_CODEC_PDATA_H +#define MADERA_CODEC_PDATA_H + +#include <linux/kernel.h> + +#define MADERA_MAX_INPUT 6 +#define MADERA_MAX_MUXED_CHANNELS 4 +#define MADERA_MAX_OUTPUT 6 +#define MADERA_MAX_AIF 4 +#define MADERA_MAX_PDM_SPK 2 +#define MADERA_MAX_DSP 7 + +/** + * struct madera_codec_pdata + * + * @max_channels_clocked: Maximum number of channels that I2S clocks will be + * generated for. Useful when clock master for systems + * where the I2S bus has multiple data lines. + * @dmic_ref: Indicates how the MICBIAS pins have been externally + * connected to DMICs on each input. A value of 0 + * indicates MICVDD and is the default. Other values are: + * For CS47L35 one of the CS47L35_DMIC_REF_xxx values + * For all other codecs one of the MADERA_DMIC_REF_xxx + * Also see the datasheet for a description of the + * INn_DMIC_SUP field. + * @inmode: Mode for the ADC inputs. One of the MADERA_INMODE_xxx + * values. Two-dimensional array + * [input_number][channel number], with four slots per + * input in the order + * [n][0]=INnAL [n][1]=INnAR [n][2]=INnBL [n][3]=INnBR + * @out_mono: For each output set the value to TRUE to indicate that + * the output is mono. [0]=OUT1, [1]=OUT2, ... + * @pdm_fmt: PDM speaker data format. See the PDM_SPKn_FMT field in + * the datasheet for a description of this value. + * @pdm_mute: PDM mute format. See the PDM_SPKn_CTRL_1 register + * in the datasheet for a description of this value. 
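A minimal sketch of how a consumer might use the new intel-nhlt.h helpers declared above (the foo_ wrapper is hypothetical): map the NHLT ACPI table, derive the DMIC count from the microphone geometry, then release the mapping.

static int foo_get_dmic_count(struct device *dev)
{
	struct nhlt_acpi_table *nhlt;
	int dmic_num;

	nhlt = intel_nhlt_init(dev);
	if (!nhlt)
		return -ENODEV;		/* no NHLT table or !CONFIG_SND_INTEL_NHLT */

	dmic_num = intel_nhlt_get_dmic_geo(dev, nhlt);
	intel_nhlt_free(nhlt);

	return dmic_num;
}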
+ */ +struct madera_codec_pdata { + u32 max_channels_clocked[MADERA_MAX_AIF]; + + u32 dmic_ref[MADERA_MAX_INPUT]; + + u32 inmode[MADERA_MAX_INPUT][MADERA_MAX_MUXED_CHANNELS]; + + bool out_mono[MADERA_MAX_OUTPUT]; + + u32 pdm_fmt[MADERA_MAX_PDM_SPK]; + u32 pdm_mute[MADERA_MAX_PDM_SPK]; +}; + +#endif diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 1e9bb1c91770..bbe6eb1ff5d2 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -117,6 +117,8 @@ struct snd_pcm_ops { #define SNDRV_PCM_RATE_96000 (1<<10) /* 96000Hz */ #define SNDRV_PCM_RATE_176400 (1<<11) /* 176400Hz */ #define SNDRV_PCM_RATE_192000 (1<<12) /* 192000Hz */ +#define SNDRV_PCM_RATE_352800 (1<<13) /* 352800Hz */ +#define SNDRV_PCM_RATE_384000 (1<<14) /* 384000Hz */ #define SNDRV_PCM_RATE_CONTINUOUS (1<<30) /* continuous range */ #define SNDRV_PCM_RATE_KNOT (1<<31) /* supports more non-continuos rates */ @@ -129,6 +131,9 @@ struct snd_pcm_ops { SNDRV_PCM_RATE_88200|SNDRV_PCM_RATE_96000) #define SNDRV_PCM_RATE_8000_192000 (SNDRV_PCM_RATE_8000_96000|SNDRV_PCM_RATE_176400|\ SNDRV_PCM_RATE_192000) +#define SNDRV_PCM_RATE_8000_384000 (SNDRV_PCM_RATE_8000_192000|\ + SNDRV_PCM_RATE_352800|\ + SNDRV_PCM_RATE_384000) #define _SNDRV_PCM_FMTBIT(fmt) (1ULL << (__force int)SNDRV_PCM_FORMAT_##fmt) #define SNDRV_PCM_FMTBIT_S8 _SNDRV_PCM_FMTBIT(S8) #define SNDRV_PCM_FMTBIT_U8 _SNDRV_PCM_FMTBIT(U8) diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index 3429888347e7..31f76b6abf71 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -42,6 +42,7 @@ struct asoc_simple_priv { struct simple_dai_props { struct asoc_simple_dai *cpu_dai; struct asoc_simple_dai *codec_dai; + struct snd_soc_dai_link_component cpus; /* single cpu */ struct snd_soc_dai_link_component codecs; /* single codec */ struct snd_soc_dai_link_component platforms; struct asoc_simple_data adata; @@ -80,16 +81,12 @@ int asoc_simple_parse_card_name(struct snd_soc_card *card, char *prefix); #define asoc_simple_parse_clk_cpu(dev, node, dai_link, simple_dai) \ - asoc_simple_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai, \ - dai_link->cpu_dai_name, NULL) + asoc_simple_parse_clk(dev, node, simple_dai, dai_link->cpus) #define asoc_simple_parse_clk_codec(dev, node, dai_link, simple_dai) \ - asoc_simple_parse_clk(dev, node, dai_link->codec_of_node, simple_dai,\ - dai_link->codec_dai_name, dai_link->codecs) + asoc_simple_parse_clk(dev, node, simple_dai, dai_link->codecs) int asoc_simple_parse_clk(struct device *dev, struct device_node *node, - struct device_node *dai_of_node, struct asoc_simple_dai *simple_dai, - const char *dai_name, struct snd_soc_dai_link_component *dlc); int asoc_simple_startup(struct snd_pcm_substream *substream); void asoc_simple_shutdown(struct snd_pcm_substream *substream); @@ -100,16 +97,11 @@ int asoc_simple_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params); #define asoc_simple_parse_cpu(node, dai_link, is_single_link) \ - asoc_simple_parse_dai(node, NULL, \ - &dai_link->cpu_of_node, \ - &dai_link->cpu_dai_name, is_single_link) + asoc_simple_parse_dai(node, dai_link->cpus, is_single_link) #define asoc_simple_parse_codec(node, dai_link) \ - asoc_simple_parse_dai(node, dai_link->codecs, \ - &dai_link->codec_of_node, \ - &dai_link->codec_dai_name, NULL) + asoc_simple_parse_dai(node, dai_link->codecs, NULL) #define asoc_simple_parse_platform(node, dai_link) \ - asoc_simple_parse_dai(node, dai_link->platforms, \ - &dai_link->platform_of_node, 
NULL, NULL) + asoc_simple_parse_dai(node, dai_link->platforms, NULL) #define asoc_simple_parse_tdm(np, dai) \ snd_soc_of_parse_tdm_slot(np, &(dai)->tx_slot_mask, \ @@ -143,12 +135,16 @@ int asoc_simple_init_priv(struct asoc_simple_priv *priv, struct link_info *li); #ifdef DEBUG -inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv, - char *name, - struct asoc_simple_dai *dai) +static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv, + char *name, + struct asoc_simple_dai *dai) { struct device *dev = simple_priv_to_dev(priv); + /* dai might be NULL */ + if (!dai) + return; + if (dai->name) dev_dbg(dev, "%s dai name = %s\n", name, dai->name); @@ -171,7 +167,7 @@ inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv, dev_dbg(dev, "%s clk %luHz\n", name, clk_get_rate(dai->clk)); } -inline void asoc_simple_debug_info(struct asoc_simple_priv *priv) +static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv) { struct snd_soc_card *card = simple_priv_to_card(priv); struct device *dev = simple_priv_to_dev(priv); diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h index bb5e1e4ce8bf..6c9929abd90b 100644 --- a/include/sound/soc-acpi-intel-match.h +++ b/include/sound/soc-acpi-intel-match.h @@ -25,6 +25,8 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[]; +extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[]; +extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ehl_machines[]; /* * generic table used for HDA codec-based platforms, possibly with diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h new file mode 100644 index 000000000000..5d80b2eef525 --- /dev/null +++ b/include/sound/soc-component.h @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * soc-component.h + * + * Copyright (c) 2019 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __SOC_COMPONENT_H +#define __SOC_COMPONENT_H + +#include <sound/soc.h> + +/* + * Component probe and remove ordering levels for components with runtime + * dependencies. 
+ */ +#define SND_SOC_COMP_ORDER_FIRST -2 +#define SND_SOC_COMP_ORDER_EARLY -1 +#define SND_SOC_COMP_ORDER_NORMAL 0 +#define SND_SOC_COMP_ORDER_LATE 1 +#define SND_SOC_COMP_ORDER_LAST 2 + +#define for_each_comp_order(order) \ + for (order = SND_SOC_COMP_ORDER_FIRST; \ + order <= SND_SOC_COMP_ORDER_LAST; \ + order++) + +/* component interface */ +struct snd_soc_component_driver { + const char *name; + + /* Default control and setup, added after probe() is run */ + const struct snd_kcontrol_new *controls; + unsigned int num_controls; + const struct snd_soc_dapm_widget *dapm_widgets; + unsigned int num_dapm_widgets; + const struct snd_soc_dapm_route *dapm_routes; + unsigned int num_dapm_routes; + + int (*probe)(struct snd_soc_component *component); + void (*remove)(struct snd_soc_component *component); + int (*suspend)(struct snd_soc_component *component); + int (*resume)(struct snd_soc_component *component); + + unsigned int (*read)(struct snd_soc_component *component, + unsigned int reg); + int (*write)(struct snd_soc_component *component, + unsigned int reg, unsigned int val); + + /* pcm creation and destruction */ + int (*pcm_new)(struct snd_soc_pcm_runtime *rtd); + void (*pcm_free)(struct snd_pcm *pcm); + + /* component wide operations */ + int (*set_sysclk)(struct snd_soc_component *component, + int clk_id, int source, unsigned int freq, int dir); + int (*set_pll)(struct snd_soc_component *component, int pll_id, + int source, unsigned int freq_in, unsigned int freq_out); + int (*set_jack)(struct snd_soc_component *component, + struct snd_soc_jack *jack, void *data); + + /* DT */ + int (*of_xlate_dai_name)(struct snd_soc_component *component, + struct of_phandle_args *args, + const char **dai_name); + int (*of_xlate_dai_id)(struct snd_soc_component *comment, + struct device_node *endpoint); + void (*seq_notifier)(struct snd_soc_component *component, + enum snd_soc_dapm_type type, int subseq); + int (*stream_event)(struct snd_soc_component *component, int event); + int (*set_bias_level)(struct snd_soc_component *component, + enum snd_soc_bias_level level); + + const struct snd_pcm_ops *ops; + const struct snd_compr_ops *compr_ops; + + /* probe ordering - for components with runtime dependencies */ + int probe_order; + int remove_order; + + /* + * signal if the module handling the component should not be removed + * if a pcm is open. Setting this would prevent the module + * refcount being incremented in probe() but allow it be incremented + * when a pcm is opened and decremented when it is closed. 
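For orientation, a minimal, made-up component driver built against the struct snd_soc_component_driver layout now carried in soc-component.h; only a few of the optional callbacks and flags are filled in.

static int foo_component_probe(struct snd_soc_component *component)
{
	/* one-time setup: add controls, cache driver state, etc. */
	return 0;
}

static const struct snd_soc_component_driver foo_component_driver = {
	.name			= "foo",
	.probe			= foo_component_probe,
	.probe_order		= SND_SOC_COMP_ORDER_NORMAL,
	.idle_bias_on		= 1,
	.non_legacy_dai_naming	= 1,
};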
+ */ + unsigned int module_get_upon_open:1; + + /* bits */ + unsigned int idle_bias_on:1; + unsigned int suspend_bias_off:1; + unsigned int use_pmdown_time:1; /* care pmdown_time at stop */ + unsigned int endianness:1; + unsigned int non_legacy_dai_naming:1; + + /* this component uses topology and ignore machine driver FEs */ + const char *ignore_machine; + const char *topology_name_prefix; + int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_hw_params *params); + bool use_dai_pcm_id; /* use DAI link PCM ID as PCM device number */ + int be_pcm_base; /* base device ID for all BE PCMs */ +}; + +struct snd_soc_component { + const char *name; + int id; + const char *name_prefix; + struct device *dev; + struct snd_soc_card *card; + + unsigned int active; + + unsigned int suspended:1; /* is in suspend PM state */ + + struct list_head list; + struct list_head card_aux_list; /* for auxiliary bound components */ + struct list_head card_list; + + const struct snd_soc_component_driver *driver; + + struct list_head dai_list; + int num_dai; + + struct regmap *regmap; + int val_bytes; + + struct mutex io_mutex; + + /* attached dynamic objects */ + struct list_head dobj_list; + + /* + * DO NOT use any of the fields below in drivers, they are temporary and + * are going to be removed again soon. If you use them in driver code + * the driver will be marked as BROKEN when these fields are removed. + */ + + /* Don't use these, use snd_soc_component_get_dapm() */ + struct snd_soc_dapm_context dapm; + + /* machine specific init */ + int (*init)(struct snd_soc_component *component); + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root; + const char *debugfs_prefix; +#endif +}; + +#define for_each_component_dais(component, dai)\ + list_for_each_entry(dai, &(component)->dai_list, list) +#define for_each_component_dais_safe(component, dai, _dai)\ + list_for_each_entry_safe(dai, _dai, &(component)->dai_list, list) + +/** + * snd_soc_dapm_to_component() - Casts a DAPM context to the component it is + * embedded in + * @dapm: The DAPM context to cast to the component + * + * This function must only be used on DAPM contexts that are known to be part of + * a component (e.g. in a component driver). Otherwise the behavior is + * undefined. + */ +static inline struct snd_soc_component *snd_soc_dapm_to_component( + struct snd_soc_dapm_context *dapm) +{ + return container_of(dapm, struct snd_soc_component, dapm); +} + +/** + * snd_soc_component_get_dapm() - Returns the DAPM context associated with a + * component + * @component: The component for which to get the DAPM context + */ +static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm( + struct snd_soc_component *component) +{ + return &component->dapm; +} + +/** + * snd_soc_component_init_bias_level() - Initialize COMPONENT DAPM bias level + * @component: The COMPONENT for which to initialize the DAPM bias level + * @level: The DAPM level to initialize to + * + * Initializes the COMPONENT DAPM bias level. See snd_soc_dapm_init_bias_level() + */ +static inline void +snd_soc_component_init_bias_level(struct snd_soc_component *component, + enum snd_soc_bias_level level) +{ + snd_soc_dapm_init_bias_level( + snd_soc_component_get_dapm(component), level); +} + +/** + * snd_soc_component_get_bias_level() - Get current COMPONENT DAPM bias level + * @component: The COMPONENT for which to get the DAPM bias level + * + * Returns: The current DAPM bias level of the COMPONENT. 
+ */ +static inline enum snd_soc_bias_level +snd_soc_component_get_bias_level(struct snd_soc_component *component) +{ + return snd_soc_dapm_get_bias_level( + snd_soc_component_get_dapm(component)); +} + +/** + * snd_soc_component_force_bias_level() - Set the COMPONENT DAPM bias level + * @component: The COMPONENT for which to set the level + * @level: The level to set to + * + * Forces the COMPONENT bias level to a specific state. See + * snd_soc_dapm_force_bias_level(). + */ +static inline int +snd_soc_component_force_bias_level(struct snd_soc_component *component, + enum snd_soc_bias_level level) +{ + return snd_soc_dapm_force_bias_level( + snd_soc_component_get_dapm(component), + level); +} + +/** + * snd_soc_dapm_kcontrol_component() - Returns the component associated to a + * kcontrol + * @kcontrol: The kcontrol + * + * This function must only be used on DAPM contexts that are known to be part of + * a COMPONENT (e.g. in a COMPONENT driver). Otherwise the behavior is undefined + */ +static inline struct snd_soc_component *snd_soc_dapm_kcontrol_component( + struct snd_kcontrol *kcontrol) +{ + return snd_soc_dapm_to_component(snd_soc_dapm_kcontrol_dapm(kcontrol)); +} + +/** + * snd_soc_component_cache_sync() - Sync the register cache with the hardware + * @component: COMPONENT to sync + * + * Note: This function will call regcache_sync() + */ +static inline int snd_soc_component_cache_sync( + struct snd_soc_component *component) +{ + return regcache_sync(component->regmap); +} + +/* component IO */ +int snd_soc_component_read(struct snd_soc_component *component, + unsigned int reg, unsigned int *val); +unsigned int snd_soc_component_read32(struct snd_soc_component *component, + unsigned int reg); +int snd_soc_component_write(struct snd_soc_component *component, + unsigned int reg, unsigned int val); +int snd_soc_component_update_bits(struct snd_soc_component *component, + unsigned int reg, unsigned int mask, + unsigned int val); +int snd_soc_component_update_bits_async(struct snd_soc_component *component, + unsigned int reg, unsigned int mask, + unsigned int val); +void snd_soc_component_async_complete(struct snd_soc_component *component); +int snd_soc_component_test_bits(struct snd_soc_component *component, + unsigned int reg, unsigned int mask, + unsigned int value); + +/* component wide operations */ +int snd_soc_component_set_sysclk(struct snd_soc_component *component, + int clk_id, int source, + unsigned int freq, int dir); +int snd_soc_component_set_pll(struct snd_soc_component *component, int pll_id, + int source, unsigned int freq_in, + unsigned int freq_out); +int snd_soc_component_set_jack(struct snd_soc_component *component, + struct snd_soc_jack *jack, void *data); + +void snd_soc_component_seq_notifier(struct snd_soc_component *component, + enum snd_soc_dapm_type type, int subseq); +int snd_soc_component_stream_event(struct snd_soc_component *component, + int event); +int snd_soc_component_set_bias_level(struct snd_soc_component *component, + enum snd_soc_bias_level level); + +#ifdef CONFIG_REGMAP +void snd_soc_component_init_regmap(struct snd_soc_component *component, + struct regmap *regmap); +void snd_soc_component_exit_regmap(struct snd_soc_component *component); +#endif + +#define snd_soc_component_module_get_when_probe(component)\ + snd_soc_component_module_get(component, 0) +#define snd_soc_component_module_get_when_open(component) \ + snd_soc_component_module_get(component, 1) +int snd_soc_component_module_get(struct snd_soc_component *component, + int upon_open); 
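Illustrative codec-side use of the component I/O helpers declared above; the FOO_* register and mask are placeholders standing in for a real register map.

#define FOO_POWER_CTL	0x02		/* placeholder register */
#define FOO_OUT_EN_MASK	BIT(0)		/* placeholder bit mask */

static int foo_enable_output(struct snd_soc_component *component, bool on)
{
	/* returns 1 if the bits changed, 0 if unchanged, negative on error */
	return snd_soc_component_update_bits(component, FOO_POWER_CTL,
					     FOO_OUT_EN_MASK,
					     on ? FOO_OUT_EN_MASK : 0);
}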
+#define snd_soc_component_module_put_when_remove(component) \ + snd_soc_component_module_put(component, 0) +#define snd_soc_component_module_put_when_close(component) \ + snd_soc_component_module_put(component, 1) +void snd_soc_component_module_put(struct snd_soc_component *component, + int upon_open); + +static inline void snd_soc_component_set_drvdata(struct snd_soc_component *c, + void *data) +{ + dev_set_drvdata(c->dev, data); +} + +static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c) +{ + return dev_get_drvdata(c->dev); +} + +static inline bool snd_soc_component_is_active( + struct snd_soc_component *component) +{ + return component->active != 0; +} + +/* component pin */ +int snd_soc_component_enable_pin(struct snd_soc_component *component, + const char *pin); +int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component, + const char *pin); +int snd_soc_component_disable_pin(struct snd_soc_component *component, + const char *pin); +int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component, + const char *pin); +int snd_soc_component_nc_pin(struct snd_soc_component *component, + const char *pin); +int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component, + const char *pin); +int snd_soc_component_get_pin_status(struct snd_soc_component *component, + const char *pin); +int snd_soc_component_force_enable_pin(struct snd_soc_component *component, + const char *pin); +int snd_soc_component_force_enable_pin_unlocked( + struct snd_soc_component *component, + const char *pin); + +/* component driver ops */ +int snd_soc_component_open(struct snd_soc_component *component, + struct snd_pcm_substream *substream); +int snd_soc_component_close(struct snd_soc_component *component, + struct snd_pcm_substream *substream); +int snd_soc_component_prepare(struct snd_soc_component *component, + struct snd_pcm_substream *substream); +int snd_soc_component_hw_params(struct snd_soc_component *component, + struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params); +int snd_soc_component_hw_free(struct snd_soc_component *component, + struct snd_pcm_substream *substream); +int snd_soc_component_trigger(struct snd_soc_component *component, + struct snd_pcm_substream *substream, + int cmd); +void snd_soc_component_suspend(struct snd_soc_component *component); +void snd_soc_component_resume(struct snd_soc_component *component); +int snd_soc_component_is_suspended(struct snd_soc_component *component); +int snd_soc_component_probe(struct snd_soc_component *component); +void snd_soc_component_remove(struct snd_soc_component *component); +int snd_soc_component_of_xlate_dai_id(struct snd_soc_component *component, + struct device_node *ep); +int snd_soc_component_of_xlate_dai_name(struct snd_soc_component *component, + struct of_phandle_args *args, + const char **dai_name); + +int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream); +int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream, + unsigned int cmd, void *arg); +int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream, + int channel, unsigned long pos, + void __user *buf, unsigned long bytes); +struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream, + unsigned long offset); +int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream, + struct vm_area_struct *vma); +int snd_soc_pcm_component_new(struct snd_pcm *pcm); +void snd_soc_pcm_component_free(struct snd_pcm *pcm); + +#endif /* 
__SOC_COMPONENT_H */ diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index f5d70041108f..939c73db6a03 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -145,6 +145,31 @@ int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai, int snd_soc_dai_is_dummy(struct snd_soc_dai *dai); +int snd_soc_dai_hw_params(struct snd_soc_dai *dai, + struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params); +void snd_soc_dai_hw_free(struct snd_soc_dai *dai, + struct snd_pcm_substream *substream); +int snd_soc_dai_startup(struct snd_soc_dai *dai, + struct snd_pcm_substream *substream); +void snd_soc_dai_shutdown(struct snd_soc_dai *dai, + struct snd_pcm_substream *substream); +int snd_soc_dai_prepare(struct snd_soc_dai *dai, + struct snd_pcm_substream *substream); +int snd_soc_dai_trigger(struct snd_soc_dai *dai, + struct snd_pcm_substream *substream, int cmd); +int snd_soc_dai_bespoke_trigger(struct snd_soc_dai *dai, + struct snd_pcm_substream *substream, int cmd); +snd_pcm_sframes_t snd_soc_dai_delay(struct snd_soc_dai *dai, + struct snd_pcm_substream *substream); +void snd_soc_dai_suspend(struct snd_soc_dai *dai); +void snd_soc_dai_resume(struct snd_soc_dai *dai); +int snd_soc_dai_probe(struct snd_soc_dai *dai); +int snd_soc_dai_remove(struct snd_soc_dai *dai); +int snd_soc_dai_compress_new(struct snd_soc_dai *dai, + struct snd_soc_pcm_runtime *rtd, int num); +bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream); + struct snd_soc_dai_ops { /* * DAI clocking configuration, all optional. @@ -268,8 +293,6 @@ struct snd_soc_dai_driver { /* Optional Callback used at pcm creation*/ int (*pcm_new)(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); - /* DAI is also used for the control bus */ - bool bus_control; /* ops */ const struct snd_soc_dai_ops *ops; @@ -281,6 +304,7 @@ struct snd_soc_dai_driver { unsigned int symmetric_rates:1; unsigned int symmetric_channels:1; unsigned int symmetric_samplebits:1; + unsigned int bus_control:1; /* DAI is also used for the control bus */ /* probe ordering - for components with runtime dependencies */ int probe_order; diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index c00a0b8ade08..6e8a31225383 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -353,6 +353,8 @@ struct device; #define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */ #define SND_SOC_DAPM_PRE_POST_PMD \ (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD) +#define SND_SOC_DAPM_PRE_POST_PMU \ + (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU) /* convenience event type detection */ #define SND_SOC_DAPM_EVENT_ON(e) \ @@ -402,6 +404,9 @@ int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm, struct snd_soc_dapm_widget *snd_soc_dapm_new_control( struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_widget *widget); +struct snd_soc_dapm_widget *snd_soc_dapm_new_control_unlocked( + struct snd_soc_dapm_context *dapm, + const struct snd_soc_dapm_widget *widget); int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, struct snd_soc_dai *dai); int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card); @@ -414,6 +419,9 @@ int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream, /* dapm path setup */ int snd_soc_dapm_new_widgets(struct snd_soc_card *card); void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm); +void snd_soc_dapm_init(struct snd_soc_dapm_context *dapm, + struct snd_soc_card *card, + struct snd_soc_component *component); int 
snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route, int num); int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm, @@ -659,8 +667,6 @@ struct snd_soc_dapm_context { unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */ /* Go to BIAS_OFF in suspend if the DAPM context is idle */ unsigned int suspend_bias_off:1; - void (*seq_notifier)(struct snd_soc_dapm_context *, - enum snd_soc_dapm_type, int); struct device *dev; /* from parent - for debug */ struct snd_soc_component *component; /* parent component */ @@ -670,10 +676,6 @@ struct snd_soc_dapm_context { enum snd_soc_bias_level target_bias_level; struct list_head list; - int (*stream_event)(struct snd_soc_dapm_context *dapm, int event); - int (*set_bias_level)(struct snd_soc_dapm_context *dapm, - enum snd_soc_bias_level level); - struct snd_soc_dapm_wcache path_sink_cache; struct snd_soc_dapm_wcache path_source_cache; diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h index 4be3a2b7c106..e55aeb00ce2d 100644 --- a/include/sound/soc-dpcm.h +++ b/include/sound/soc-dpcm.h @@ -142,9 +142,16 @@ void snd_soc_dpcm_be_set_state(struct snd_soc_pcm_runtime *be, int stream, /* internal use only */ int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute); -void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd); int soc_dpcm_runtime_update(struct snd_soc_card *); +#ifdef CONFIG_DEBUG_FS +void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd); +#else +static inline void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd) +{ +} +#endif + int dpcm_path_get(struct snd_soc_pcm_runtime *fe, int stream, struct snd_soc_dapm_widget_list **list_); int dpcm_process_paths(struct snd_soc_pcm_runtime *fe, diff --git a/include/sound/soc.h b/include/sound/soc.h index 482b4ea87c3c..f264c6509f00 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -363,21 +363,6 @@ const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts) /* - * Component probe and remove ordering levels for components with runtime - * dependencies. - */ -#define SND_SOC_COMP_ORDER_FIRST -2 -#define SND_SOC_COMP_ORDER_EARLY -1 -#define SND_SOC_COMP_ORDER_NORMAL 0 -#define SND_SOC_COMP_ORDER_LATE 1 -#define SND_SOC_COMP_ORDER_LAST 2 - -#define for_each_comp_order(order) \ - for (order = SND_SOC_COMP_ORDER_FIRST; \ - order <= SND_SOC_COMP_ORDER_LAST; \ - order++) - -/* * Bias levels * * @ON: Bias is fully on for audio playback and capture operations. 
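The new SND_SOC_DAPM_PRE_POST_PMU convenience mask pairs with the existing PRE_POST_PMD one; a hypothetical supply widget using it could look like this (the names and the empty event bodies are illustrative only):

static int foo_pll_event(struct snd_soc_dapm_widget *w,
			 struct snd_kcontrol *kcontrol, int event)
{
	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* e.g. enable the reference clock before power up */
		break;
	case SND_SOC_DAPM_POST_PMU:
		/* e.g. wait for the PLL to lock after power up */
		break;
	}
	return 0;
}

static const struct snd_soc_dapm_widget foo_widgets[] = {
	SND_SOC_DAPM_SUPPLY("PLL", SND_SOC_NOPM, 0, 0, foo_pll_event,
			    SND_SOC_DAPM_PRE_POST_PMU),
};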
@@ -505,10 +490,6 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *parms); int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream, const struct snd_pcm_hardware *hw); -int soc_dai_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params, - struct snd_soc_dai *dai); - /* Jack reporting */ int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type, struct snd_soc_jack *jack, struct snd_soc_jack_pin *pins, @@ -751,132 +732,6 @@ struct snd_soc_compr_ops { int (*trigger)(struct snd_compr_stream *); }; -/* component interface */ -struct snd_soc_component_driver { - const char *name; - - /* Default control and setup, added after probe() is run */ - const struct snd_kcontrol_new *controls; - unsigned int num_controls; - const struct snd_soc_dapm_widget *dapm_widgets; - unsigned int num_dapm_widgets; - const struct snd_soc_dapm_route *dapm_routes; - unsigned int num_dapm_routes; - - int (*probe)(struct snd_soc_component *); - void (*remove)(struct snd_soc_component *); - int (*suspend)(struct snd_soc_component *); - int (*resume)(struct snd_soc_component *); - - unsigned int (*read)(struct snd_soc_component *, unsigned int); - int (*write)(struct snd_soc_component *, unsigned int, unsigned int); - - /* pcm creation and destruction */ - int (*pcm_new)(struct snd_soc_pcm_runtime *); - void (*pcm_free)(struct snd_pcm *); - - /* component wide operations */ - int (*set_sysclk)(struct snd_soc_component *component, - int clk_id, int source, unsigned int freq, int dir); - int (*set_pll)(struct snd_soc_component *component, int pll_id, - int source, unsigned int freq_in, unsigned int freq_out); - int (*set_jack)(struct snd_soc_component *component, - struct snd_soc_jack *jack, void *data); - - /* DT */ - int (*of_xlate_dai_name)(struct snd_soc_component *component, - struct of_phandle_args *args, - const char **dai_name); - int (*of_xlate_dai_id)(struct snd_soc_component *comment, - struct device_node *endpoint); - void (*seq_notifier)(struct snd_soc_component *, enum snd_soc_dapm_type, - int subseq); - int (*stream_event)(struct snd_soc_component *, int event); - int (*set_bias_level)(struct snd_soc_component *component, - enum snd_soc_bias_level level); - - const struct snd_pcm_ops *ops; - const struct snd_compr_ops *compr_ops; - - /* probe ordering - for components with runtime dependencies */ - int probe_order; - int remove_order; - - /* - * signal if the module handling the component should not be removed - * if a pcm is open. Setting this would prevent the module - * refcount being incremented in probe() but allow it be incremented - * when a pcm is opened and decremented when it is closed. 
- */ - unsigned int module_get_upon_open:1; - - /* bits */ - unsigned int idle_bias_on:1; - unsigned int suspend_bias_off:1; - unsigned int use_pmdown_time:1; /* care pmdown_time at stop */ - unsigned int endianness:1; - unsigned int non_legacy_dai_naming:1; - - /* this component uses topology and ignore machine driver FEs */ - const char *ignore_machine; - const char *topology_name_prefix; - int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd, - struct snd_pcm_hw_params *params); - bool use_dai_pcm_id; /* use the DAI link PCM ID as PCM device number */ - int be_pcm_base; /* base device ID for all BE PCMs */ -}; - -struct snd_soc_component { - const char *name; - int id; - const char *name_prefix; - struct device *dev; - struct snd_soc_card *card; - - unsigned int active; - - unsigned int suspended:1; /* is in suspend PM state */ - - struct list_head list; - struct list_head card_aux_list; /* for auxiliary bound components */ - struct list_head card_list; - - const struct snd_soc_component_driver *driver; - - struct list_head dai_list; - int num_dai; - - struct regmap *regmap; - int val_bytes; - - struct mutex io_mutex; - - /* attached dynamic objects */ - struct list_head dobj_list; - - /* - * DO NOT use any of the fields below in drivers, they are temporary and - * are going to be removed again soon. If you use them in driver code the - * driver will be marked as BROKEN when these fields are removed. - */ - - /* Don't use these, use snd_soc_component_get_dapm() */ - struct snd_soc_dapm_context dapm; - - /* machine specific init */ - int (*init)(struct snd_soc_component *component); - -#ifdef CONFIG_DEBUG_FS - struct dentry *debugfs_root; - const char *debugfs_prefix; -#endif -}; - -#define for_each_component_dais(component, dai)\ - list_for_each_entry(dai, &(component)->dai_list, list) -#define for_each_component_dais_safe(component, dai, _dai)\ - list_for_each_entry_safe(dai, _dai, &(component)->dai_list, list) - struct snd_soc_rtdcom_list { struct snd_soc_component *component; struct list_head list; /* rtd::component_list */ @@ -901,74 +756,33 @@ struct snd_soc_dai_link { const char *stream_name; /* Stream name */ /* - * cpu_name - * cpu_of_node - * cpu_dai_name - * - * These are legacy style, and will be replaced to - * modern style (= snd_soc_dai_link_component) in the future, - * but, not yet supported so far. - * If modern style was supported for CPU, all driver will switch - * to use it, and, legacy style code will be removed from ALSA SoC. - */ - /* * You MAY specify the link's CPU-side device, either by device name, * or by DT/OF node, but not both. If this information is omitted, * the CPU-side DAI is matched using .cpu_dai_name only, which hence * must be globally unique. These fields are currently typically used * only for codec to codec links, or systems using device tree. */ - const char *cpu_name; - struct device_node *cpu_of_node; /* * You MAY specify the DAI name of the CPU DAI. If this information is * omitted, the CPU-side DAI is matched using .cpu_name/.cpu_of_node * only, which only works well when that device exposes a single DAI. */ - const char *cpu_dai_name; + struct snd_soc_dai_link_component *cpus; + unsigned int num_cpus; /* - * codec_name - * codec_of_node - * codec_dai_name - * - * These are legacy style, it will be converted to modern style - * (= snd_soc_dai_link_component) automatically in soc-core - * if driver is using legacy style. - * Driver shouldn't use both legacy and modern style in the same time. 
- * If modern style was supported for CPU, all driver will switch - * to use it, and, legacy style code will be removed from ALSA SoC. - */ - /* * You MUST specify the link's codec, either by device name, or by * DT/OF node, but not both. */ - const char *codec_name; - struct device_node *codec_of_node; /* You MUST specify the DAI name within the codec */ - const char *codec_dai_name; - struct snd_soc_dai_link_component *codecs; unsigned int num_codecs; /* - * platform_name - * platform_of_node - * - * These are legacy style, it will be converted to modern style - * (= snd_soc_dai_link_component) automatically in soc-core - * if driver is using legacy style. - * Driver shouldn't use both legacy and modern style in the same time. - * If modern style was supported for CPU, all driver will switch - * to use it, and, legacy style code will be removed from ALSA SoC. - */ - /* * You MAY specify the link's platform/PCM/DMA driver, either by * device name, or by DT/OF node, but not both. Some forms of link - * do not need a platform. + * do not need a platform. In such case, platforms are not mandatory. */ - const char *platform_name; - struct device_node *platform_of_node; struct snd_soc_dai_link_component *platforms; unsigned int num_platforms; @@ -1030,12 +844,6 @@ struct snd_soc_dai_link { /* Do not create a PCM for this DAI link (Backend link) */ unsigned int ignore:1; - /* - * This driver uses legacy platform naming. Set by the core, machine - * drivers should not modify this value. - */ - unsigned int legacy_platform:1; - struct list_head list; /* DAI link list of the soc card */ struct snd_soc_dobj dobj; /* For topology */ }; @@ -1044,6 +852,101 @@ struct snd_soc_dai_link { ((i) < link->num_codecs) && ((codec) = &link->codecs[i]); \ (i)++) +#define for_each_link_platforms(link, i, platform) \ + for ((i) = 0; \ + ((i) < link->num_platforms) && \ + ((platform) = &link->platforms[i]); \ + (i)++) + +/* + * Sample 1 : Single CPU/Codec/Platform + * + * SND_SOC_DAILINK_DEFS(test, + * DAILINK_COMP_ARRAY(COMP_CPU("cpu_dai")), + * DAILINK_COMP_ARRAY(COMP_CODEC("codec", "codec_dai")), + * DAILINK_COMP_ARRAY(COMP_PLATFORM("platform"))); + * + * struct snd_soc_dai_link link = { + * ... + * SND_SOC_DAILINK_REG(test), + * }; + * + * Sample 2 : Multi CPU/Codec, no Platform + * + * SND_SOC_DAILINK_DEFS(test, + * DAILINK_COMP_ARRAY(COMP_CPU("cpu_dai1"), + * COMP_CPU("cpu_dai2")), + * DAILINK_COMP_ARRAY(COMP_CODEC("codec1", "codec_dai1"), + * COMP_CODEC("codec2", "codec_dai2"))); + * + * struct snd_soc_dai_link link = { + * ... + * SND_SOC_DAILINK_REG(test), + * }; + * + * Sample 3 : Define each CPU/Codec/Platform manually + * + * SND_SOC_DAILINK_DEF(test_cpu, + * DAILINK_COMP_ARRAY(COMP_CPU("cpu_dai1"), + * COMP_CPU("cpu_dai2"))); + * SND_SOC_DAILINK_DEF(test_codec, + * DAILINK_COMP_ARRAY(COMP_CODEC("codec1", "codec_dai1"), + * COMP_CODEC("codec2", "codec_dai2"))); + * SND_SOC_DAILINK_DEF(test_platform, + * DAILINK_COMP_ARRAY(COMP_PLATFORM("platform"))); + * + * struct snd_soc_dai_link link = { + * ... + * SND_SOC_DAILINK_REG(test_cpu, + * test_codec, + * test_platform), + * }; + * + * Sample 4 : Sample3 without platform + * + * struct snd_soc_dai_link link = { + * ... 
+ * SND_SOC_DAILINK_REG(test_cpu, + * test_codec); + * }; + */ + +#define SND_SOC_DAILINK_REG1(name) SND_SOC_DAILINK_REG3(name##_cpus, name##_codecs, name##_platforms) +#define SND_SOC_DAILINK_REG2(cpu, codec) SND_SOC_DAILINK_REG3(cpu, codec, null_dailink_component) +#define SND_SOC_DAILINK_REG3(cpu, codec, platform) \ + .cpus = cpu, \ + .num_cpus = ARRAY_SIZE(cpu), \ + .codecs = codec, \ + .num_codecs = ARRAY_SIZE(codec), \ + .platforms = platform, \ + .num_platforms = ARRAY_SIZE(platform) + +#define SND_SOC_DAILINK_REGx(_1, _2, _3, func, ...) func +#define SND_SOC_DAILINK_REG(...) \ + SND_SOC_DAILINK_REGx(__VA_ARGS__, \ + SND_SOC_DAILINK_REG3, \ + SND_SOC_DAILINK_REG2, \ + SND_SOC_DAILINK_REG1)(__VA_ARGS__) + +#define SND_SOC_DAILINK_DEF(name, def...) \ + static struct snd_soc_dai_link_component name[] = { def } + +#define SND_SOC_DAILINK_DEFS(name, cpu, codec, platform...) \ + SND_SOC_DAILINK_DEF(name##_cpus, cpu); \ + SND_SOC_DAILINK_DEF(name##_codecs, codec); \ + SND_SOC_DAILINK_DEF(name##_platforms, platform) + +#define DAILINK_COMP_ARRAY(param...) param +#define COMP_EMPTY() { } +#define COMP_CPU(_dai) { .dai_name = _dai, } +#define COMP_CODEC(_name, _dai) { .name = _name, .dai_name = _dai, } +#define COMP_PLATFORM(_name) { .name = _name } +#define COMP_AUX(_name) { .name = _name } +#define COMP_DUMMY() { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai", } + +extern struct snd_soc_dai_link_component null_dailink_component[0]; + + struct snd_soc_codec_conf { /* * specify device either by device name, or by @@ -1060,14 +963,11 @@ struct snd_soc_codec_conf { }; struct snd_soc_aux_dev { - const char *name; /* Codec name */ - /* * specify multi-codec either by device name, or by * DT/OF node, but not both. */ - const char *codec_name; - struct device_node *codec_of_node; + struct snd_soc_dai_link_component dlc; /* codec/machine specific init - e.g. 
add machine controls */ int (*init)(struct snd_soc_component *component); @@ -1088,6 +988,10 @@ struct snd_soc_card { struct mutex mutex; struct mutex dapm_mutex; + /* Mutex for PCM operations */ + struct mutex pcm_mutex; + enum snd_soc_pcm_subclass pcm_subclass; + spinlock_t dpcm_lock; bool instantiated; @@ -1156,8 +1060,6 @@ struct snd_soc_card { int num_of_dapm_routes; bool fully_routed; - struct work_struct deferred_resume_work; - /* lists of probed devices belonging to this card */ struct list_head component_dev_list; struct list_head list; @@ -1177,7 +1079,9 @@ struct snd_soc_card { #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_card_root; - struct dentry *debugfs_pop_time; +#endif +#ifdef CONFIG_PM_SLEEP + struct work_struct deferred_resume_work; #endif u32 pop_time; @@ -1187,9 +1091,13 @@ struct snd_soc_card { for ((i) = 0; \ ((i) < (card)->num_links) && ((link) = &(card)->dai_link[i]); \ (i)++) +#define for_each_card_pre_auxs(card, i, aux) \ + for ((i) = 0; \ + ((i) < (card)->num_aux_devs) && ((aux) = &(card)->aux_dev[i]); \ + (i)++) #define for_each_card_links(card, link) \ - list_for_each_entry(dai_link, &(card)->dai_link_list, list) + list_for_each_entry(link, &(card)->dai_link_list, list) #define for_each_card_links_safe(card, link, _link) \ list_for_each_entry_safe(link, _link, &(card)->dai_link_list, list) @@ -1198,6 +1106,12 @@ struct snd_soc_card { #define for_each_card_rtds_safe(card, rtd, _rtd) \ list_for_each_entry_safe(rtd, _rtd, &(card)->rtd_list, list) +#define for_each_card_auxs(card, component) \ + list_for_each_entry(component, &card->aux_comp_list, card_aux_list) +#define for_each_card_auxs_safe(card, component, _comp) \ + list_for_each_entry_safe(component, _comp, \ + &card->aux_comp_list, card_aux_list) + #define for_each_card_components(card, component) \ list_for_each_entry(component, &(card)->component_dev_list, card_list) @@ -1206,15 +1120,12 @@ struct snd_soc_pcm_runtime { struct device *dev; struct snd_soc_card *card; struct snd_soc_dai_link *dai_link; - struct mutex pcm_mutex; - enum snd_soc_pcm_subclass pcm_subclass; struct snd_pcm_ops ops; unsigned int params_select; /* currently selected param for dai link */ /* Dynamic PCM BE runtime data */ struct snd_soc_dpcm_runtime dpcm[2]; - int fe_compr; long pmdown_time; @@ -1239,6 +1150,7 @@ struct snd_soc_pcm_runtime { /* bit field */ unsigned int dev_registered:1; unsigned int pop_wait:1; + unsigned int fe_compr:1; /* for Dynamic PCM */ }; #define for_each_rtd_codec_dai(rtd, i, dai)\ for ((i) = 0; \ @@ -1295,134 +1207,6 @@ struct soc_enum { struct snd_soc_dobj dobj; }; -/** - * snd_soc_dapm_to_component() - Casts a DAPM context to the component it is - * embedded in - * @dapm: The DAPM context to cast to the component - * - * This function must only be used on DAPM contexts that are known to be part of - * a component (e.g. in a component driver). Otherwise the behavior is - * undefined. 
- */ -static inline struct snd_soc_component *snd_soc_dapm_to_component( - struct snd_soc_dapm_context *dapm) -{ - return container_of(dapm, struct snd_soc_component, dapm); -} - -/** - * snd_soc_component_get_dapm() - Returns the DAPM context associated with a - * component - * @component: The component for which to get the DAPM context - */ -static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm( - struct snd_soc_component *component) -{ - return &component->dapm; -} - -/** - * snd_soc_component_init_bias_level() - Initialize COMPONENT DAPM bias level - * @component: The COMPONENT for which to initialize the DAPM bias level - * @level: The DAPM level to initialize to - * - * Initializes the COMPONENT DAPM bias level. See snd_soc_dapm_init_bias_level(). - */ -static inline void -snd_soc_component_init_bias_level(struct snd_soc_component *component, - enum snd_soc_bias_level level) -{ - snd_soc_dapm_init_bias_level( - snd_soc_component_get_dapm(component), level); -} - -/** - * snd_soc_component_get_bias_level() - Get current COMPONENT DAPM bias level - * @component: The COMPONENT for which to get the DAPM bias level - * - * Returns: The current DAPM bias level of the COMPONENT. - */ -static inline enum snd_soc_bias_level -snd_soc_component_get_bias_level(struct snd_soc_component *component) -{ - return snd_soc_dapm_get_bias_level( - snd_soc_component_get_dapm(component)); -} - -/** - * snd_soc_component_force_bias_level() - Set the COMPONENT DAPM bias level - * @component: The COMPONENT for which to set the level - * @level: The level to set to - * - * Forces the COMPONENT bias level to a specific state. See - * snd_soc_dapm_force_bias_level(). - */ -static inline int -snd_soc_component_force_bias_level(struct snd_soc_component *component, - enum snd_soc_bias_level level) -{ - return snd_soc_dapm_force_bias_level( - snd_soc_component_get_dapm(component), - level); -} - -/** - * snd_soc_dapm_kcontrol_component() - Returns the component associated to a kcontrol - * @kcontrol: The kcontrol - * - * This function must only be used on DAPM contexts that are known to be part of - * a COMPONENT (e.g. in a COMPONENT driver). Otherwise the behavior is undefined. 
- */ -static inline struct snd_soc_component *snd_soc_dapm_kcontrol_component( - struct snd_kcontrol *kcontrol) -{ - return snd_soc_dapm_to_component(snd_soc_dapm_kcontrol_dapm(kcontrol)); -} - -/** - * snd_soc_component_cache_sync() - Sync the register cache with the hardware - * @component: COMPONENT to sync - * - * Note: This function will call regcache_sync() - */ -static inline int snd_soc_component_cache_sync( - struct snd_soc_component *component) -{ - return regcache_sync(component->regmap); -} - -/* component IO */ -int snd_soc_component_read(struct snd_soc_component *component, - unsigned int reg, unsigned int *val); -unsigned int snd_soc_component_read32(struct snd_soc_component *component, - unsigned int reg); -int snd_soc_component_write(struct snd_soc_component *component, - unsigned int reg, unsigned int val); -int snd_soc_component_update_bits(struct snd_soc_component *component, - unsigned int reg, unsigned int mask, unsigned int val); -int snd_soc_component_update_bits_async(struct snd_soc_component *component, - unsigned int reg, unsigned int mask, unsigned int val); -void snd_soc_component_async_complete(struct snd_soc_component *component); -int snd_soc_component_test_bits(struct snd_soc_component *component, - unsigned int reg, unsigned int mask, unsigned int value); - -/* component wide operations */ -int snd_soc_component_set_sysclk(struct snd_soc_component *component, - int clk_id, int source, unsigned int freq, int dir); -int snd_soc_component_set_pll(struct snd_soc_component *component, int pll_id, - int source, unsigned int freq_in, - unsigned int freq_out); -int snd_soc_component_set_jack(struct snd_soc_component *component, - struct snd_soc_jack *jack, void *data); - -#ifdef CONFIG_REGMAP - -void snd_soc_component_init_regmap(struct snd_soc_component *component, - struct regmap *regmap); -void snd_soc_component_exit_regmap(struct snd_soc_component *component); - -#endif - /* device driver data */ static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card, @@ -1436,27 +1220,6 @@ static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card) return card->drvdata; } -static inline void snd_soc_component_set_drvdata(struct snd_soc_component *c, - void *data) -{ - dev_set_drvdata(c->dev, data); -} - -static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c) -{ - return dev_get_drvdata(c->dev); -} - -static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card) -{ - INIT_LIST_HEAD(&card->widgets); - INIT_LIST_HEAD(&card->paths); - INIT_LIST_HEAD(&card->dapm_list); - INIT_LIST_HEAD(&card->aux_comp_list); - INIT_LIST_HEAD(&card->component_dev_list); - INIT_LIST_HEAD(&card->list); -} - static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc) { if (mc->reg == mc->rreg && mc->shift == mc->rshift) @@ -1493,12 +1256,6 @@ static inline unsigned int snd_soc_enum_item_to_val(struct soc_enum *e, return e->values[item]; } -static inline bool snd_soc_component_is_active( - struct snd_soc_component *component) -{ - return component->active != 0; -} - /** * snd_soc_kcontrol_component() - Returns the component that registered the * control @@ -1607,15 +1364,11 @@ int snd_soc_fixup_dai_links_platform_name(struct snd_soc_card *card, if (!name) return -ENOMEM; - if (dai_link->platforms) - /* only single platform is supported for now */ - dai_link->platforms->name = name; - else - /* - * legacy mode, this case will be removed when all - * derivers are switched to modern style dai_link. 
- */ - dai_link->platform_name = name; + if (!dai_link->platforms) + return -EINVAL; + + /* only single platform is supported for now */ + dai_link->platforms->name = name; } return 0; @@ -1638,24 +1391,6 @@ static inline void snd_soc_dapm_mutex_unlock(struct snd_soc_dapm_context *dapm) mutex_unlock(&dapm->card->dapm_mutex); } -int snd_soc_component_enable_pin(struct snd_soc_component *component, - const char *pin); -int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component, - const char *pin); -int snd_soc_component_disable_pin(struct snd_soc_component *component, - const char *pin); -int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component, - const char *pin); -int snd_soc_component_nc_pin(struct snd_soc_component *component, - const char *pin); -int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component, - const char *pin); -int snd_soc_component_get_pin_status(struct snd_soc_component *component, - const char *pin); -int snd_soc_component_force_enable_pin(struct snd_soc_component *component, - const char *pin); -int snd_soc_component_force_enable_pin_unlocked( - struct snd_soc_component *component, - const char *pin); +#include <sound/soc-component.h> #endif diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h index bded69e696d4..6080ea0facd7 100644 --- a/include/sound/sof/control.h +++ b/include/sound/sof/control.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h index 4bd83f7adddf..5f1ef5565be6 100644 --- a/include/sound/sof/dai-intel.h +++ b/include/sound/sof/dai-intel.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -76,6 +76,9 @@ struct sof_ipc_dai_ssp_params { uint16_t tdm_per_slot_padding_flag; uint32_t clks_control; uint32_t quirks; + uint32_t bclk_delay; /* guaranteed time (ms) for which BCLK + * will be driven, before sending data + */ } __packed; /* HDA Configuration Request - SOF_IPC_DAI_HDA_CONFIG */ @@ -167,12 +170,22 @@ struct sof_ipc_dai_dmic_params { uint32_t wake_up_time; /**< Time from clock start to data (us) */ uint32_t min_clock_on_time; /**< Min. time that clk is kept on (us) */ + uint32_t unmute_ramp_time; /**< Length of logarithmic gain ramp (ms) */ /* reserved for future use */ - uint32_t reserved[6]; + uint32_t reserved[5]; /**< variable number of pdm controller config */ struct sof_ipc_dai_dmic_pdm_ctrl pdm[0]; } __packed; +/* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */ +struct sof_ipc_dai_alh_params { + struct sof_ipc_hdr hdr; + uint32_t stream_id; + + /* reserved for future use */ + uint32_t reserved[15]; +} __packed; + #endif diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h index 3d174e20aa53..0f1235022146 100644 --- a/include/sound/sof/dai.h +++ b/include/sound/sof/dai.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. 
When using or * redistributing this file, you may do so under either license. @@ -49,7 +49,9 @@ enum sof_ipc_dai_type { SOF_DAI_INTEL_SSP, /**< Intel SSP */ SOF_DAI_INTEL_DMIC, /**< Intel DMIC */ SOF_DAI_INTEL_HDA, /**< Intel HD/A */ - SOF_DAI_INTEL_SOUNDWIRE, /**< Intel SoundWire */ + SOF_DAI_INTEL_ALH, /**< Intel ALH */ + SOF_DAI_IMX_SAI, /**< i.MX SAI */ + SOF_DAI_IMX_ESAI, /**< i.MX ESAI */ }; /* general purpose DAI configuration */ @@ -70,6 +72,7 @@ struct sof_ipc_dai_config { struct sof_ipc_dai_ssp_params ssp; struct sof_ipc_dai_dmic_params dmic; struct sof_ipc_dai_hda_params hda; + struct sof_ipc_dai_alh_params alh; }; } __packed; diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h index 1efcf7b18ec2..10f00c08dbb7 100644 --- a/include/sound/sof/header.h +++ b/include/sound/sof/header.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -49,6 +49,7 @@ #define SOF_IPC_GLB_DAI_MSG SOF_GLB_TYPE(0x8U) #define SOF_IPC_GLB_TRACE_MSG SOF_GLB_TYPE(0x9U) #define SOF_IPC_GLB_GDB_DEBUG SOF_GLB_TYPE(0xAU) +#define SOF_IPC_GLB_TEST_MSG SOF_GLB_TYPE(0xBU) /* * DSP Command Message Types @@ -99,9 +100,13 @@ #define SOF_IPC_STREAM_VORBIS_PARAMS SOF_CMD_TYPE(0x010) #define SOF_IPC_STREAM_VORBIS_FREE SOF_CMD_TYPE(0x011) -/* trace and debug */ +/* trace */ #define SOF_IPC_TRACE_DMA_PARAMS SOF_CMD_TYPE(0x001) #define SOF_IPC_TRACE_DMA_POSITION SOF_CMD_TYPE(0x002) +#define SOF_IPC_TRACE_DMA_PARAMS_EXT SOF_CMD_TYPE(0x003) + +/* debug */ +#define SOF_IPC_TEST_IPC_FLOOD SOF_CMD_TYPE(0x001) /* Get message component id */ #define SOF_IPC_MESSAGE_ID(x) ((x) & 0xffff) diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h index 16528d2b4a50..a9156b4a062c 100644 --- a/include/sound/sof/info.h +++ b/include/sound/sof/info.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h index 8ae3ad45bdf7..003879401d63 100644 --- a/include/sound/sof/pm.h +++ b/include/sound/sof/pm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h index 643f175cb479..0b71b381b952 100644 --- a/include/sound/sof/stream.h +++ b/include/sound/sof/stream.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h index 46b2a7e63167..c47b36240920 100644 --- a/include/sound/sof/topology.h +++ b/include/sound/sof/topology.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -35,6 +35,7 @@ enum sof_comp_type { SOF_COMP_KEYWORD_DETECT, SOF_COMP_KPB, /* A key phrase buffer component */ SOF_COMP_SELECTOR, /**< channel selector component */ + SOF_COMP_DEMUX, /* keep FILEREAD/FILEWRITE as the last ones */ SOF_COMP_FILEREAD = 10000, /**< host test based file IO */ SOF_COMP_FILEWRITE = 10001, /**< host test based file IO */ @@ -83,9 +84,9 @@ struct sof_ipc_buffer { struct sof_ipc_comp_config { struct sof_ipc_cmd_hdr hdr; uint32_t periods_sink; /**< 0 means variable */ - uint32_t periods_source; /**< 0 means variable */ + uint32_t periods_source;/**< 0 means variable */ uint32_t reserved1; /**< reserved */ - uint32_t frame_fmt; /**< SOF_IPC_FRAME_ */ + uint32_t frame_fmt; /**< SOF_IPC_FRAME_ */ uint32_t xrun_action; /* reserved for future use */ @@ -175,6 +176,8 @@ enum sof_ipc_process_type { SOF_PROCESS_KEYWORD_DETECT, /**< Keyword Detection */ SOF_PROCESS_KPB, /**< KeyPhrase Buffer Manager */ SOF_PROCESS_CHAN_SELECTOR, /**< Channel Selector */ + SOF_PROCESS_MUX, + SOF_PROCESS_DEMUX, }; /* generic "effect", "codec" or proprietary processing component */ diff --git a/include/sound/sof/trace.h b/include/sound/sof/trace.h index 7d211f319a92..fda6e8f6ead4 100644 --- a/include/sound/sof/trace.h +++ b/include/sound/sof/trace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -19,12 +19,22 @@ #define SOF_TRACE_FILENAME_SIZE 32 /* DMA for Trace params info - SOF_IPC_DEBUG_DMA_PARAMS */ +/* Deprecated - use sof_ipc_dma_trace_params_ext */ struct sof_ipc_dma_trace_params { struct sof_ipc_cmd_hdr hdr; struct sof_ipc_host_buffer buffer; uint32_t stream_tag; } __packed; +/* DMA for Trace params info - SOF_IPC_DEBUG_DMA_PARAMS_EXT */ +struct sof_ipc_dma_trace_params_ext { + struct sof_ipc_cmd_hdr hdr; + struct sof_ipc_host_buffer buffer; + uint32_t stream_tag; + uint64_t timestamp_ns; /* in nanosecond */ + uint32_t reserved[8]; +} __packed; + /* DMA for Trace params info - SOF_IPC_DEBUG_DMA_PARAMS */ struct sof_ipc_dma_trace_posn { struct sof_ipc_reply rhdr; @@ -56,7 +66,9 @@ struct sof_ipc_dma_trace_posn { #define SOF_IPC_PANIC_WFI (SOF_IPC_PANIC_MAGIC | 0xa) #define SOF_IPC_PANIC_ASSERT (SOF_IPC_PANIC_MAGIC | 0xb) -/* panic info include filename and line number */ +/* panic info include filename and line number + * filename array will not include null terminator if fully filled + */ struct sof_ipc_panic_info { struct sof_ipc_hdr hdr; uint32_t code; /* SOF_IPC_PANIC_ */ diff --git a/include/sound/sof/xtensa.h b/include/sound/sof/xtensa.h index d25c764b10e8..dd53d36b34e1 100644 --- a/include/sound/sof/xtensa.h +++ b/include/sound/sof/xtensa.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. 
When using or * redistributing this file, you may do so under either license. diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 51b1e0da2efc..d5ec4fac82ae 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -27,6 +27,26 @@ enum afs_call_trace { afs_call_trace_work, }; +enum afs_server_trace { + afs_server_trace_alloc, + afs_server_trace_callback, + afs_server_trace_destroy, + afs_server_trace_free, + afs_server_trace_gc, + afs_server_trace_get_by_uuid, + afs_server_trace_get_caps, + afs_server_trace_get_install, + afs_server_trace_get_new_cbi, + afs_server_trace_give_up_cb, + afs_server_trace_put_call, + afs_server_trace_put_cbi, + afs_server_trace_put_find_rsq, + afs_server_trace_put_slist, + afs_server_trace_put_slist_isort, + afs_server_trace_put_uuid_rsq, + afs_server_trace_update, +}; + enum afs_fs_operation { afs_FS_FetchData = 130, /* AFS Fetch file data */ afs_FS_FetchACL = 131, /* AFS Fetch file ACL */ @@ -191,6 +211,17 @@ enum afs_flock_operation { afs_flock_op_wake, }; +enum afs_cb_break_reason { + afs_cb_break_no_break, + afs_cb_break_for_callback, + afs_cb_break_for_deleted, + afs_cb_break_for_lapsed, + afs_cb_break_for_unlink, + afs_cb_break_for_vsbreak, + afs_cb_break_for_volume_callback, + afs_cb_break_for_zap, +}; + #endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */ /* @@ -204,6 +235,25 @@ enum afs_flock_operation { EM(afs_call_trace_wake, "WAKE ") \ E_(afs_call_trace_work, "WORK ") +#define afs_server_traces \ + EM(afs_server_trace_alloc, "ALLOC ") \ + EM(afs_server_trace_callback, "CALLBACK ") \ + EM(afs_server_trace_destroy, "DESTROY ") \ + EM(afs_server_trace_free, "FREE ") \ + EM(afs_server_trace_gc, "GC ") \ + EM(afs_server_trace_get_by_uuid, "GET uuid ") \ + EM(afs_server_trace_get_caps, "GET caps ") \ + EM(afs_server_trace_get_install, "GET inst ") \ + EM(afs_server_trace_get_new_cbi, "GET cbi ") \ + EM(afs_server_trace_give_up_cb, "giveup-cb") \ + EM(afs_server_trace_put_call, "PUT call ") \ + EM(afs_server_trace_put_cbi, "PUT cbi ") \ + EM(afs_server_trace_put_find_rsq, "PUT f-rsq") \ + EM(afs_server_trace_put_slist, "PUT slist") \ + EM(afs_server_trace_put_slist_isort, "PUT isort") \ + EM(afs_server_trace_put_uuid_rsq, "PUT u-req") \ + E_(afs_server_trace_update, "UPDATE") + #define afs_fs_operations \ EM(afs_FS_FetchData, "FS.FetchData") \ EM(afs_FS_FetchStatus, "FS.FetchStatus") \ @@ -370,6 +420,16 @@ enum afs_flock_operation { EM(afs_flock_op_unlock, "UNLOCK ") \ E_(afs_flock_op_wake, "WAKE ") +#define afs_cb_break_reasons \ + EM(afs_cb_break_no_break, "no-break") \ + EM(afs_cb_break_for_callback, "break-cb") \ + EM(afs_cb_break_for_deleted, "break-del") \ + EM(afs_cb_break_for_lapsed, "break-lapsed") \ + EM(afs_cb_break_for_unlink, "break-unlink") \ + EM(afs_cb_break_for_vsbreak, "break-vs") \ + EM(afs_cb_break_for_volume_callback, "break-v-cb") \ + E_(afs_cb_break_for_zap, "break-zap") + /* * Export enum symbols via userspace. 
*/ @@ -379,6 +439,7 @@ enum afs_flock_operation { #define E_(a, b) TRACE_DEFINE_ENUM(a); afs_call_traces; +afs_server_traces; afs_fs_operations; afs_vl_operations; afs_edit_dir_ops; @@ -388,6 +449,7 @@ afs_io_errors; afs_file_errors; afs_flock_types; afs_flock_operations; +afs_cb_break_reasons; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -1167,6 +1229,76 @@ TRACE_EVENT(afs_get_tree, __entry->cell, __entry->volume, __entry->vid) ); +TRACE_EVENT(afs_cb_break, + TP_PROTO(struct afs_fid *fid, unsigned int cb_break, + enum afs_cb_break_reason reason, bool skipped), + + TP_ARGS(fid, cb_break, reason, skipped), + + TP_STRUCT__entry( + __field_struct(struct afs_fid, fid ) + __field(unsigned int, cb_break ) + __field(enum afs_cb_break_reason, reason ) + __field(bool, skipped ) + ), + + TP_fast_assign( + __entry->fid = *fid; + __entry->cb_break = cb_break; + __entry->reason = reason; + __entry->skipped = skipped; + ), + + TP_printk("%llx:%llx:%x b=%x s=%u %s", + __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique, + __entry->cb_break, + __entry->skipped, + __print_symbolic(__entry->reason, afs_cb_break_reasons)) + ); + +TRACE_EVENT(afs_cb_miss, + TP_PROTO(struct afs_fid *fid, enum afs_cb_break_reason reason), + + TP_ARGS(fid, reason), + + TP_STRUCT__entry( + __field_struct(struct afs_fid, fid ) + __field(enum afs_cb_break_reason, reason ) + ), + + TP_fast_assign( + __entry->fid = *fid; + __entry->reason = reason; + ), + + TP_printk(" %llx:%llx:%x %s", + __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique, + __print_symbolic(__entry->reason, afs_cb_break_reasons)) + ); + +TRACE_EVENT(afs_server, + TP_PROTO(struct afs_server *server, int usage, enum afs_server_trace reason), + + TP_ARGS(server, usage, reason), + + TP_STRUCT__entry( + __field(unsigned int, server ) + __field(int, usage ) + __field(int, reason ) + ), + + TP_fast_assign( + __entry->server = server->debug_id; + __entry->usage = usage; + __entry->reason = reason; + ), + + TP_printk("s=%08x %s u=%d", + __entry->server, + __print_symbolic(__entry->reason, afs_server_traces), + __entry->usage) + ); + #endif /* _TRACE_AFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index f9eff010fc7e..75ae1899452b 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -29,6 +29,7 @@ struct btrfs_qgroup_extent_record; struct btrfs_qgroup; struct extent_io_tree; struct prelim_ref; +struct btrfs_space_info; TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS_NR); TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS); @@ -1087,6 +1088,7 @@ TRACE_EVENT(btrfs_trigger_flush, { FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS"}, \ { ALLOC_CHUNK, "ALLOC_CHUNK"}, \ { ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE"}, \ + { RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS"}, \ { COMMIT_TRANS, "COMMIT_TRANS"}) TRACE_EVENT(btrfs_flush_space, @@ -1686,6 +1688,7 @@ TRACE_EVENT(qgroup_update_reserve, __entry->qgid = qgroup->qgroupid; __entry->cur_reserved = qgroup->rsv.values[type]; __entry->diff = diff; + __entry->type = type; ), TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld", @@ -1708,6 +1711,7 @@ TRACE_EVENT(qgroup_meta_reserve, TP_fast_assign_btrfs(root->fs_info, __entry->refroot = root->root_key.objectid; __entry->diff = diff; + __entry->type = type; ), TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld", @@ -1724,7 +1728,6 @@ TRACE_EVENT(qgroup_meta_convert, TP_STRUCT__entry_btrfs( __field( u64, refroot ) __field( s64, diff ) - __field( int, type ) ), 
TP_fast_assign_btrfs(root->fs_info, @@ -2085,12 +2088,49 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock); DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking); DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read); DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write); -DEFINE_BTRFS_LOCK_EVENT(btrfs_clear_lock_blocking_read); -DEFINE_BTRFS_LOCK_EVENT(btrfs_clear_lock_blocking_write); DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock); DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_write_lock); DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic); +DECLARE_EVENT_CLASS(btrfs__space_info_update, + + TP_PROTO(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *sinfo, u64 old, s64 diff), + + TP_ARGS(fs_info, sinfo, old, diff), + + TP_STRUCT__entry_btrfs( + __field( u64, type ) + __field( u64, old ) + __field( s64, diff ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->type = sinfo->flags; + __entry->old = old; + __entry->diff = diff; + ), + TP_printk_btrfs("type=%s old=%llu diff=%lld", + __print_flags(__entry->type, "|", BTRFS_GROUP_FLAGS), + __entry->old, __entry->diff) +); + +DEFINE_EVENT(btrfs__space_info_update, update_bytes_may_use, + + TP_PROTO(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *sinfo, u64 old, s64 diff), + + TP_ARGS(fs_info, sinfo, old, diff) +); + +DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned, + + TP_PROTO(struct btrfs_fs_info *fs_info, + struct btrfs_space_info *sinfo, u64 old, s64 diff), + + TP_ARGS(fs_info, sinfo, old, diff) +); + #endif /* _TRACE_BTRFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h index 2212adda8f77..64e92d56c6a8 100644 --- a/include/trace/events/dma_fence.h +++ b/include/trace/events/dma_fence.h @@ -2,7 +2,7 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM dma_fence -#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_DMA_FENCE_H #include <linux/tracepoint.h> diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h new file mode 100644 index 000000000000..27f5caa6299a --- /dev/null +++ b/include/trace/events/erofs.h @@ -0,0 +1,259 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM erofs + +#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EROFS_H + +#include <linux/tracepoint.h> +#include <linux/fs.h> + +struct erofs_map_blocks; + +#define show_dev(dev) MAJOR(dev), MINOR(dev) +#define show_dev_nid(entry) show_dev(entry->dev), entry->nid + +#define show_file_type(type) \ + __print_symbolic(type, \ + { 0, "FILE" }, \ + { 1, "DIR" }) + +#define show_map_flags(flags) __print_flags(flags, "|", \ + { EROFS_GET_BLOCKS_RAW, "RAW" }) + +#define show_mflags(flags) __print_flags(flags, "", \ + { EROFS_MAP_MAPPED, "M" }, \ + { EROFS_MAP_META, "I" }, \ + { EROFS_MAP_ZIPPED, "Z" }) + +TRACE_EVENT(erofs_lookup, + + TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), + + TP_ARGS(dir, dentry, flags), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(const char *, name ) + __field(unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->nid = EROFS_I(dir)->nid; + __entry->name = dentry->d_name.name; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x", + show_dev_nid(__entry), + __entry->name, + __entry->flags) +); + +TRACE_EVENT(erofs_fill_inode, + 
TP_PROTO(struct inode *inode, int isdir), + TP_ARGS(inode, isdir), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(erofs_blk_t, blkaddr ) + __field(unsigned int, ofs ) + __field(int, isdir ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_I(inode)->nid; + __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid)); + __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid)); + __entry->isdir = isdir; + ), + + TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d", + show_dev_nid(__entry), + __entry->blkaddr, __entry->ofs, + __entry->isdir) +); + +TRACE_EVENT(erofs_readpage, + + TP_PROTO(struct page *page, bool raw), + + TP_ARGS(page, raw), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(int, dir ) + __field(pgoff_t, index ) + __field(int, uptodate) + __field(bool, raw ) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->nid = EROFS_I(page->mapping->host)->nid; + __entry->dir = S_ISDIR(page->mapping->host->i_mode); + __entry->index = page->index; + __entry->uptodate = PageUptodate(page); + __entry->raw = raw; + ), + + TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d " + "raw = %d", + show_dev_nid(__entry), + show_file_type(__entry->dir), + (unsigned long)__entry->index, + __entry->uptodate, + __entry->raw) +); + +TRACE_EVENT(erofs_readpages, + + TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage, + bool raw), + + TP_ARGS(inode, page, nrpage, raw), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(pgoff_t, start ) + __field(unsigned int, nrpage ) + __field(bool, raw ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_I(inode)->nid; + __entry->start = page->index; + __entry->nrpage = nrpage; + __entry->raw = raw; + ), + + TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d", + show_dev_nid(__entry), + (unsigned long)__entry->start, + __entry->nrpage, + __entry->raw) +); + +DECLARE_EVENT_CLASS(erofs__map_blocks_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags), + + TP_ARGS(inode, map, flags), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + __field( erofs_off_t, la ) + __field( u64, llen ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_I(inode)->nid; + __entry->la = map->m_la; + __entry->llen = map->m_llen; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s", + show_dev_nid(__entry), + __entry->la, __entry->llen, + __entry->flags ? 
show_map_flags(__entry->flags) : "NULL") +); + +DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned flags), + + TP_ARGS(inode, map, flags) +); + +DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags), + + TP_ARGS(inode, map, flags) +); + +DECLARE_EVENT_CLASS(erofs__map_blocks_exit, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags, int ret), + + TP_ARGS(inode, map, flags, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + __field( unsigned int, flags ) + __field( erofs_off_t, la ) + __field( erofs_off_t, pa ) + __field( u64, llen ) + __field( u64, plen ) + __field( unsigned int, mflags ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_I(inode)->nid; + __entry->flags = flags; + __entry->la = map->m_la; + __entry->pa = map->m_pa; + __entry->llen = map->m_llen; + __entry->plen = map->m_plen; + __entry->mflags = map->m_flags; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), nid = %llu, flags %s " + "la %llu pa %llu llen %llu plen %llu mflags %s ret %d", + show_dev_nid(__entry), + __entry->flags ? show_map_flags(__entry->flags) : "NULL", + __entry->la, __entry->pa, __entry->llen, __entry->plen, + show_mflags(__entry->mflags), __entry->ret) +); + +DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned flags, int ret), + + TP_ARGS(inode, map, flags, ret) +); + +DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags, int ret), + + TP_ARGS(inode, map, flags, ret) +); + +TRACE_EVENT(erofs_destroy_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_I(inode)->nid; + ), + + TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry)) +); + +#endif /* _TRACE_EROFS_H */ + + /* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 53b96f12300c..1796ff99c3e9 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -76,16 +76,7 @@ TRACE_DEFINE_ENUM(CP_TRIMMED); #define show_bio_type(op,op_flags) show_bio_op(op), \ show_bio_op_flags(op_flags) -#define show_bio_op(op) \ - __print_symbolic(op, \ - { REQ_OP_READ, "READ" }, \ - { REQ_OP_WRITE, "WRITE" }, \ - { REQ_OP_FLUSH, "FLUSH" }, \ - { REQ_OP_DISCARD, "DISCARD" }, \ - { REQ_OP_SECURE_ERASE, "SECURE_ERASE" }, \ - { REQ_OP_ZONE_RESET, "ZONE_RESET" }, \ - { REQ_OP_WRITE_SAME, "WRITE_SAME" }, \ - { REQ_OP_WRITE_ZEROES, "WRITE_ZEROES" }) +#define show_bio_op(op) blk_op_str(op) #define show_bio_op_flags(flags) \ __print_flags(F2FS_BIO_FLAG_MASK(flags), "|", \ @@ -1028,8 +1019,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, ), TP_fast_assign( - __entry->dev = page->mapping->host->i_sb->s_dev; - __entry->ino = page->mapping->host->i_ino; + __entry->dev = page_file_mapping(page)->host->i_sb->s_dev; + __entry->ino = page_file_mapping(page)->host->i_ino; __entry->index = page->index; __entry->old_blkaddr = fio->old_blkaddr; __entry->new_blkaddr = fio->new_blkaddr; @@ -1216,10 +1207,11 @@ 
DECLARE_EVENT_CLASS(f2fs__page, ), TP_fast_assign( - __entry->dev = page->mapping->host->i_sb->s_dev; - __entry->ino = page->mapping->host->i_ino; + __entry->dev = page_file_mapping(page)->host->i_sb->s_dev; + __entry->ino = page_file_mapping(page)->host->i_ino; __entry->type = type; - __entry->dir = S_ISDIR(page->mapping->host->i_mode); + __entry->dir = + S_ISDIR(page_file_mapping(page)->host->i_mode); __entry->index = page->index; __entry->dirty = PageDirty(page); __entry->uptodate = PageUptodate(page); diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h index fad7befa612d..c705e4944a50 100644 --- a/include/trace/events/filelock.h +++ b/include/trace/events/filelock.h @@ -176,7 +176,7 @@ TRACE_EVENT(generic_add_lease, TP_STRUCT__entry( __field(unsigned long, i_ino) __field(int, wcount) - __field(int, dcount) + __field(int, rcount) __field(int, icount) __field(dev_t, s_dev) __field(fl_owner_t, fl_owner) @@ -188,21 +188,56 @@ TRACE_EVENT(generic_add_lease, __entry->s_dev = inode->i_sb->s_dev; __entry->i_ino = inode->i_ino; __entry->wcount = atomic_read(&inode->i_writecount); - __entry->dcount = d_count(fl->fl_file->f_path.dentry); + __entry->rcount = atomic_read(&inode->i_readcount); __entry->icount = atomic_read(&inode->i_count); - __entry->fl_owner = fl ? fl->fl_owner : NULL; - __entry->fl_flags = fl ? fl->fl_flags : 0; - __entry->fl_type = fl ? fl->fl_type : 0; + __entry->fl_owner = fl->fl_owner; + __entry->fl_flags = fl->fl_flags; + __entry->fl_type = fl->fl_type; ), - TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d dcount=%d icount=%d fl_owner=0x%p fl_flags=%s fl_type=%s", + TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d rcount=%d icount=%d fl_owner=0x%p fl_flags=%s fl_type=%s", MAJOR(__entry->s_dev), MINOR(__entry->s_dev), - __entry->i_ino, __entry->wcount, __entry->dcount, + __entry->i_ino, __entry->wcount, __entry->rcount, __entry->icount, __entry->fl_owner, show_fl_flags(__entry->fl_flags), show_fl_type(__entry->fl_type)) ); +TRACE_EVENT(leases_conflict, + TP_PROTO(bool conflict, struct file_lock *lease, struct file_lock *breaker), + + TP_ARGS(conflict, lease, breaker), + + TP_STRUCT__entry( + __field(void *, lease) + __field(void *, breaker) + __field(unsigned int, l_fl_flags) + __field(unsigned int, b_fl_flags) + __field(unsigned char, l_fl_type) + __field(unsigned char, b_fl_type) + __field(bool, conflict) + ), + + TP_fast_assign( + __entry->lease = lease; + __entry->l_fl_flags = lease->fl_flags; + __entry->l_fl_type = lease->fl_type; + __entry->breaker = breaker; + __entry->b_fl_flags = breaker->fl_flags; + __entry->b_fl_type = breaker->fl_type; + __entry->conflict = conflict; + ), + + TP_printk("conflict %d: lease=0x%p fl_flags=%s fl_type=%s; breaker=0x%p fl_flags=%s fl_type=%s", + __entry->conflict, + __entry->lease, + show_fl_flags(__entry->l_fl_flags), + show_fl_type(__entry->l_fl_type), + __entry->breaker, + show_fl_flags(__entry->b_fl_flags), + show_fl_type(__entry->b_fl_type)) +); + #endif /* _TRACE_FILELOCK_H */ /* This part must be outside protection */ diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h new file mode 100644 index 000000000000..54e61d456cdf --- /dev/null +++ b/include/trace/events/intel_iommu.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Intel IOMMU trace support + * + * Copyright (C) 2019 Intel Corporation + * + * Author: Lu Baolu <baolu.lu@linux.intel.com> + */ +#ifdef CONFIG_INTEL_IOMMU +#undef TRACE_SYSTEM +#define TRACE_SYSTEM intel_iommu + +#if 
!defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_INTEL_IOMMU_H + +#include <linux/tracepoint.h> +#include <linux/intel-iommu.h> + +DECLARE_EVENT_CLASS(dma_map, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, + size_t size), + + TP_ARGS(dev, dev_addr, phys_addr, size), + + TP_STRUCT__entry( + __string(dev_name, dev_name(dev)) + __field(dma_addr_t, dev_addr) + __field(phys_addr_t, phys_addr) + __field(size_t, size) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name(dev)); + __entry->dev_addr = dev_addr; + __entry->phys_addr = phys_addr; + __entry->size = size; + ), + + TP_printk("dev=%s dev_addr=0x%llx phys_addr=0x%llx size=%zu", + __get_str(dev_name), + (unsigned long long)__entry->dev_addr, + (unsigned long long)__entry->phys_addr, + __entry->size) +); + +DEFINE_EVENT(dma_map, map_single, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, + size_t size), + TP_ARGS(dev, dev_addr, phys_addr, size) +); + +DEFINE_EVENT(dma_map, map_sg, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, + size_t size), + TP_ARGS(dev, dev_addr, phys_addr, size) +); + +DEFINE_EVENT(dma_map, bounce_map_single, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, + size_t size), + TP_ARGS(dev, dev_addr, phys_addr, size) +); + +DECLARE_EVENT_CLASS(dma_unmap, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), + + TP_ARGS(dev, dev_addr, size), + + TP_STRUCT__entry( + __string(dev_name, dev_name(dev)) + __field(dma_addr_t, dev_addr) + __field(size_t, size) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name(dev)); + __entry->dev_addr = dev_addr; + __entry->size = size; + ), + + TP_printk("dev=%s dev_addr=0x%llx size=%zu", + __get_str(dev_name), + (unsigned long long)__entry->dev_addr, + __entry->size) +); + +DEFINE_EVENT(dma_unmap, unmap_single, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), + TP_ARGS(dev, dev_addr, size) +); + +DEFINE_EVENT(dma_unmap, unmap_sg, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), + TP_ARGS(dev, dev_addr, size) +); + +DEFINE_EVENT(dma_unmap, bounce_unmap_single, + TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), + TP_ARGS(dev, dev_addr, size) +); + +#endif /* _TRACE_INTEL_IOMMU_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> +#endif /* CONFIG_INTEL_IOMMU */ diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h new file mode 100644 index 000000000000..7ecaa65b7106 --- /dev/null +++ b/include/trace/events/iocost.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iocost + +struct ioc; +struct ioc_now; +struct ioc_gq; + +#if !defined(_TRACE_BLK_IOCOST_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_BLK_IOCOST_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(iocost_iocg_activate, + + TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, + u64 last_period, u64 cur_period, u64 vtime), + + TP_ARGS(iocg, path, now, last_period, cur_period, vtime), + + TP_STRUCT__entry ( + __string(devname, ioc_name(iocg->ioc)) + __string(cgroup, path) + __field(u64, now) + __field(u64, vnow) + __field(u64, vrate) + __field(u64, last_period) + __field(u64, cur_period) + __field(u64, last_vtime) + __field(u64, vtime) + __field(u32, weight) + __field(u32, inuse) + __field(u64, hweight_active) + __field(u64, hweight_inuse) + ), + + TP_fast_assign( + __assign_str(devname, 
ioc_name(iocg->ioc)); + __assign_str(cgroup, path); + __entry->now = now->now; + __entry->vnow = now->vnow; + __entry->vrate = now->vrate; + __entry->last_period = last_period; + __entry->cur_period = cur_period; + __entry->last_vtime = iocg->last_vtime; + __entry->vtime = vtime; + __entry->weight = iocg->weight; + __entry->inuse = iocg->inuse; + __entry->hweight_active = iocg->hweight_active; + __entry->hweight_inuse = iocg->hweight_inuse; + ), + + TP_printk("[%s:%s] now=%llu:%llu vrate=%llu " + "period=%llu->%llu vtime=%llu->%llu " + "weight=%u/%u hweight=%llu/%llu", + __get_str(devname), __get_str(cgroup), + __entry->now, __entry->vnow, __entry->vrate, + __entry->last_period, __entry->cur_period, + __entry->last_vtime, __entry->vtime, + __entry->inuse, __entry->weight, + __entry->hweight_inuse, __entry->hweight_active + ) +); + +DECLARE_EVENT_CLASS(iocg_inuse_update, + + TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, + u32 old_inuse, u32 new_inuse, + u64 old_hw_inuse, u64 new_hw_inuse), + + TP_ARGS(iocg, path, now, old_inuse, new_inuse, + old_hw_inuse, new_hw_inuse), + + TP_STRUCT__entry ( + __string(devname, ioc_name(iocg->ioc)) + __string(cgroup, path) + __field(u64, now) + __field(u32, old_inuse) + __field(u32, new_inuse) + __field(u64, old_hweight_inuse) + __field(u64, new_hweight_inuse) + ), + + TP_fast_assign( + __assign_str(devname, ioc_name(iocg->ioc)); + __assign_str(cgroup, path); + __entry->now = now->now; + __entry->old_inuse = old_inuse; + __entry->new_inuse = new_inuse; + __entry->old_hweight_inuse = old_hw_inuse; + __entry->new_hweight_inuse = new_hw_inuse; + ), + + TP_printk("[%s:%s] now=%llu inuse=%u->%u hw_inuse=%llu->%llu", + __get_str(devname), __get_str(cgroup), __entry->now, + __entry->old_inuse, __entry->new_inuse, + __entry->old_hweight_inuse, __entry->new_hweight_inuse + ) +); + +DEFINE_EVENT(iocg_inuse_update, iocost_inuse_takeback, + + TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, + u32 old_inuse, u32 new_inuse, + u64 old_hw_inuse, u64 new_hw_inuse), + + TP_ARGS(iocg, path, now, old_inuse, new_inuse, + old_hw_inuse, new_hw_inuse) +); + +DEFINE_EVENT(iocg_inuse_update, iocost_inuse_giveaway, + + TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, + u32 old_inuse, u32 new_inuse, + u64 old_hw_inuse, u64 new_hw_inuse), + + TP_ARGS(iocg, path, now, old_inuse, new_inuse, + old_hw_inuse, new_hw_inuse) +); + +DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset, + + TP_PROTO(struct ioc_gq *iocg, const char *path, struct ioc_now *now, + u32 old_inuse, u32 new_inuse, + u64 old_hw_inuse, u64 new_hw_inuse), + + TP_ARGS(iocg, path, now, old_inuse, new_inuse, + old_hw_inuse, new_hw_inuse) +); + +TRACE_EVENT(iocost_ioc_vrate_adj, + + TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 (*missed_ppm)[2], + u32 rq_wait_pct, int nr_lagging, int nr_shortages, + int nr_surpluses), + + TP_ARGS(ioc, new_vrate, missed_ppm, rq_wait_pct, nr_lagging, nr_shortages, + nr_surpluses), + + TP_STRUCT__entry ( + __string(devname, ioc_name(ioc)) + __field(u64, old_vrate) + __field(u64, new_vrate) + __field(int, busy_level) + __field(u32, read_missed_ppm) + __field(u32, write_missed_ppm) + __field(u32, rq_wait_pct) + __field(int, nr_lagging) + __field(int, nr_shortages) + __field(int, nr_surpluses) + ), + + TP_fast_assign( + __assign_str(devname, ioc_name(ioc)); + __entry->old_vrate = atomic64_read(&ioc->vtime_rate);; + __entry->new_vrate = new_vrate; + __entry->busy_level = ioc->busy_level; + __entry->read_missed_ppm = (*missed_ppm)[READ]; + 
__entry->write_missed_ppm = (*missed_ppm)[WRITE]; + __entry->rq_wait_pct = rq_wait_pct; + __entry->nr_lagging = nr_lagging; + __entry->nr_shortages = nr_shortages; + __entry->nr_surpluses = nr_surpluses; + ), + + TP_printk("[%s] vrate=%llu->%llu busy=%d missed_ppm=%u:%u rq_wait_pct=%u lagging=%d shortages=%d surpluses=%d", + __get_str(devname), __entry->old_vrate, __entry->new_vrate, + __entry->busy_level, + __entry->read_missed_ppm, __entry->write_missed_ppm, + __entry->rq_wait_pct, __entry->nr_lagging, __entry->nr_shortages, + __entry->nr_surpluses + ) +); + +#endif /* _TRACE_BLK_IOCOST_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index eb57e3037deb..69e8bb8963db 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -35,8 +35,8 @@ DECLARE_EVENT_CLASS(kmem_alloc, __entry->gfp_flags = gfp_flags; ), - TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", - __entry->call_site, + TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", + (void *)__entry->call_site, __entry->ptr, __entry->bytes_req, __entry->bytes_alloc, @@ -131,7 +131,8 @@ DECLARE_EVENT_CLASS(kmem_free, __entry->ptr = ptr; ), - TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) + TP_printk("call_site=%pS ptr=%p", + (void *)__entry->call_site, __entry->ptr) ); DEFINE_EVENT(kmem_free, kfree, diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h index f3a12566bed0..6678cf8b235b 100644 --- a/include/trace/events/napi.h +++ b/include/trace/events/napi.h @@ -3,7 +3,7 @@ #define TRACE_SYSTEM napi #if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_NAPI_H_ +#define _TRACE_NAPI_H #include <linux/netdevice.h> #include <linux/tracepoint.h> @@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll, #undef NO_DEV -#endif /* _TRACE_NAPI_H_ */ +#endif /* _TRACE_NAPI_H */ /* This part must be outside protection */ #include <trace/define_trace.h> diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h index 0bdb08557763..62bb17516713 100644 --- a/include/trace/events/neigh.h +++ b/include/trace/events/neigh.h @@ -20,6 +20,55 @@ { NUD_NOARP, "noarp" }, \ { NUD_PERMANENT, "permanent"}) +TRACE_EVENT(neigh_create, + + TP_PROTO(struct neigh_table *tbl, struct net_device *dev, + const void *pkey, const struct neighbour *n, + bool exempt_from_gc), + + TP_ARGS(tbl, dev, pkey, n, exempt_from_gc), + + TP_STRUCT__entry( + __field(u32, family) + __dynamic_array(char, dev, IFNAMSIZ ) + __field(int, entries) + __field(u8, created) + __field(u8, gc_exempt) + __array(u8, primary_key4, 4) + __array(u8, primary_key6, 16) + ), + + TP_fast_assign( + struct in6_addr *pin6; + __be32 *p32; + + __entry->family = tbl->family; + __assign_str(dev, (dev ? 
dev->name : "NULL")); + __entry->entries = atomic_read(&tbl->gc_entries); + __entry->created = n != NULL; + __entry->gc_exempt = exempt_from_gc; + pin6 = (struct in6_addr *)__entry->primary_key6; + p32 = (__be32 *)__entry->primary_key4; + + if (tbl->family == AF_INET) + *p32 = *(__be32 *)pkey; + else + *p32 = 0; + +#if IS_ENABLED(CONFIG_IPV6) + if (tbl->family == AF_INET6) { + pin6 = (struct in6_addr *)__entry->primary_key6; + *pin6 = *(struct in6_addr *)pkey; + } +#endif + ), + + TP_printk("family %d dev %s entries %d primary_key4 %pI4 primary_key6 %pI6c created %d gc_exempt %d", + __entry->family, __get_str(dev), __entry->entries, + __entry->primary_key4, __entry->primary_key6, + __entry->created, __entry->gc_exempt) +); + TRACE_EVENT(neigh_update, TP_PROTO(struct neighbour *n, const u8 *lladdr, u8 new, diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h new file mode 100644 index 000000000000..47b5ee880aa9 --- /dev/null +++ b/include/trace/events/page_pool.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM page_pool + +#if !defined(_TRACE_PAGE_POOL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PAGE_POOL_H + +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <net/page_pool.h> + +TRACE_EVENT(page_pool_inflight, + + TP_PROTO(const struct page_pool *pool, + s32 inflight, u32 hold, u32 release), + + TP_ARGS(pool, inflight, hold, release), + + TP_STRUCT__entry( + __field(const struct page_pool *, pool) + __field(s32, inflight) + __field(u32, hold) + __field(u32, release) + ), + + TP_fast_assign( + __entry->pool = pool; + __entry->inflight = inflight; + __entry->hold = hold; + __entry->release = release; + ), + + TP_printk("page_pool=%p inflight=%d hold=%u release=%u", + __entry->pool, __entry->inflight, __entry->hold, __entry->release) +); + +TRACE_EVENT(page_pool_state_release, + + TP_PROTO(const struct page_pool *pool, + const struct page *page, u32 release), + + TP_ARGS(pool, page, release), + + TP_STRUCT__entry( + __field(const struct page_pool *, pool) + __field(const struct page *, page) + __field(u32, release) + ), + + TP_fast_assign( + __entry->pool = pool; + __entry->page = page; + __entry->release = release; + ), + + TP_printk("page_pool=%p page=%p release=%u", + __entry->pool, __entry->page, __entry->release) +); + +TRACE_EVENT(page_pool_state_hold, + + TP_PROTO(const struct page_pool *pool, + const struct page *page, u32 hold), + + TP_ARGS(pool, page, hold), + + TP_STRUCT__entry( + __field(const struct page_pool *, pool) + __field(const struct page *, page) + __field(u32, hold) + ), + + TP_fast_assign( + __entry->pool = pool; + __entry->page = page; + __entry->hold = hold; + ), + + TP_printk("page_pool=%p page=%p hold=%u", + __entry->pool, __entry->page, __entry->hold) +); + +#endif /* _TRACE_PAGE_POOL_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/power.h b/include/trace/events/power.h index f7aece721aed..7457e238e1b7 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -379,9 +379,7 @@ DECLARE_EVENT_CLASS(pm_qos_request, TP_printk("pm_qos_class=%s value=%d", __print_symbolic(__entry->pm_qos_class, - { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }, - { PM_QOS_NETWORK_LATENCY, "NETWORK_LATENCY" }, - { PM_QOS_NETWORK_THROUGHPUT, "NETWORK_THROUGHPUT" }), + { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }), __entry->value) ); @@ -426,9 +424,7 @@ TRACE_EVENT(pm_qos_update_request_timeout, 
TP_printk("pm_qos_class=%s value=%d, timeout_us=%ld", __print_symbolic(__entry->pm_qos_class, - { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }, - { PM_QOS_NETWORK_LATENCY, "NETWORK_LATENCY" }, - { PM_QOS_NETWORK_THROUGHPUT, "NETWORK_THROUGHPUT" }), + { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }), __entry->value, __entry->timeout_us) ); diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h index 60d0d8bd336d..0d1a9ebf55ba 100644 --- a/include/trace/events/qdisc.h +++ b/include/trace/events/qdisc.h @@ -2,7 +2,7 @@ #define TRACE_SYSTEM qdisc #if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_QDISC_H_ +#define _TRACE_QDISC_H #include <linux/skbuff.h> #include <linux/netdevice.h> @@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue, __entry->txq_state, __entry->packets, __entry->skbaddr ) ); -#endif /* _TRACE_QDISC_H_ */ +#endif /* _TRACE_QDISC_H */ /* This part must be outside protection */ #include <trace/define_trace.h> diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 02a3f78f7cd8..694bd040cf51 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -100,7 +100,6 @@ TRACE_EVENT_RCU(rcu_grace_period, * "Startedroot": Requested a nocb grace period based on root-node data. * "NoGPkthread": The RCU grace-period kthread has not yet started. * "StartWait": Start waiting for the requested grace period. - * "ResumeWait": Resume waiting after signal. * "EndWait": Complete wait. * "Cleanup": Clean up rcu_node structure after previous GP. * "CleanupMore": Clean up, and another GP is needed. @@ -267,7 +266,8 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock, * "WakeNotPoll": Don't wake rcuo kthread because it is polling. * "DeferredWake": Carried out the "IsDeferred" wakeup. * "Poll": Start of new polling cycle for rcu_nocb_poll. - * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll. + * "Sleep": Sleep waiting for GP for !rcu_nocb_poll. + * "CBSleep": Sleep waiting for CBs for !rcu_nocb_poll. * "WokeEmpty": rcuo kthread woke to find empty list. * "WokeNonEmpty": rcuo kthread woke to find non-empty list. * "WaitQueue": Enqueue partially done, timed wait for it to complete. diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index df9851cb82b2..a13830616107 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -181,18 +181,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event, ), \ TP_ARGS(task, mr, nsegs)) -TRACE_DEFINE_ENUM(FRWR_IS_INVALID); -TRACE_DEFINE_ENUM(FRWR_IS_VALID); -TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR); -TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI); - -#define xprtrdma_show_frwr_state(x) \ - __print_symbolic(x, \ - { FRWR_IS_INVALID, "INVALID" }, \ - { FRWR_IS_VALID, "VALID" }, \ - { FRWR_FLUSHED_FR, "FLUSHED_FR" }, \ - { FRWR_FLUSHED_LI, "FLUSHED_LI" }) - DECLARE_EVENT_CLASS(xprtrdma_frwr_done, TP_PROTO( const struct ib_wc *wc, @@ -203,22 +191,19 @@ DECLARE_EVENT_CLASS(xprtrdma_frwr_done, TP_STRUCT__entry( __field(const void *, mr) - __field(unsigned int, state) __field(unsigned int, status) __field(unsigned int, vendor_err) ), TP_fast_assign( __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr); - __entry->state = frwr->fr_state; __entry->status = wc->status; __entry->vendor_err = __entry->status ? 
wc->vendor_err : 0; ), TP_printk( - "mr=%p state=%s: %s (%u/0x%x)", - __entry->mr, xprtrdma_show_frwr_state(__entry->state), - rdma_show_wc_status(__entry->status), + "mr=%p: %s (%u/0x%x)", + __entry->mr, rdma_show_wc_status(__entry->status), __entry->status, __entry->vendor_err ) ); @@ -390,6 +375,37 @@ DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); DEFINE_RXPRT_EVENT(xprtrdma_op_close); DEFINE_RXPRT_EVENT(xprtrdma_op_connect); +TRACE_EVENT(xprtrdma_op_set_cto, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt, + unsigned long connect, + unsigned long reconnect + ), + + TP_ARGS(r_xprt, connect, reconnect), + + TP_STRUCT__entry( + __field(const void *, r_xprt) + __field(unsigned long, connect) + __field(unsigned long, reconnect) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) + ), + + TP_fast_assign( + __entry->r_xprt = r_xprt; + __entry->connect = connect; + __entry->reconnect = reconnect; + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); + ), + + TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu", + __get_str(addr), __get_str(port), __entry->r_xprt, + __entry->connect / HZ, __entry->reconnect / HZ + ) +); + TRACE_EVENT(xprtrdma_qp_event, TP_PROTO( const struct rpcrdma_xprt *r_xprt, @@ -435,20 +451,81 @@ TRACE_EVENT(xprtrdma_createmrs, TP_STRUCT__entry( __field(const void *, r_xprt) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) __field(unsigned int, count) ), TP_fast_assign( __entry->r_xprt = r_xprt; __entry->count = count; + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("r_xprt=%p: created %u MRs", - __entry->r_xprt, __entry->count + TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs", + __get_str(addr), __get_str(port), __entry->r_xprt, + __entry->count ) ); -DEFINE_RXPRT_EVENT(xprtrdma_nomrs); +TRACE_EVENT(xprtrdma_mr_get, + TP_PROTO( + const struct rpcrdma_req *req + ), + + TP_ARGS(req), + + TP_STRUCT__entry( + __field(const void *, req) + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + ), + + TP_fast_assign( + const struct rpc_rqst *rqst = &req->rl_slot; + + __entry->req = req; + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + ), + + TP_printk("task:%u@%u xid=0x%08x req=%p", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->req + ) +); + +TRACE_EVENT(xprtrdma_nomrs, + TP_PROTO( + const struct rpcrdma_req *req + ), + + TP_ARGS(req), + + TP_STRUCT__entry( + __field(const void *, req) + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + ), + + TP_fast_assign( + const struct rpc_rqst *rqst = &req->rl_slot; + + __entry->req = req; + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + ), + + TP_printk("task:%u@%u xid=0x%08x req=%p", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->req + ) +); DEFINE_RDCH_EVENT(read); DEFINE_WRCH_EVENT(write); @@ -470,13 +547,12 @@ TRACE_DEFINE_ENUM(rpcrdma_replych); TRACE_EVENT(xprtrdma_marshal, TP_PROTO( - const struct rpc_rqst *rqst, - unsigned int hdrlen, + const struct rpcrdma_req *req, unsigned int rtype, unsigned int wtype ), - TP_ARGS(rqst, hdrlen, rtype, wtype), + TP_ARGS(req, rtype, wtype), TP_STRUCT__entry( __field(unsigned int, task_id) @@ -491,10 +567,12 @@ 
TRACE_EVENT(xprtrdma_marshal, ), TP_fast_assign( + const struct rpc_rqst *rqst = &req->rl_slot; + __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client->cl_clid; __entry->xid = be32_to_cpu(rqst->rq_xid); - __entry->hdrlen = hdrlen; + __entry->hdrlen = req->rl_hdrbuf.len; __entry->headlen = rqst->rq_snd_buf.head[0].iov_len; __entry->pagelen = rqst->rq_snd_buf.page_len; __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len; @@ -538,6 +616,33 @@ TRACE_EVENT(xprtrdma_marshal_failed, ) ); +TRACE_EVENT(xprtrdma_prepsend_failed, + TP_PROTO(const struct rpc_rqst *rqst, + int ret + ), + + TP_ARGS(rqst, ret), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + __field(int, ret) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->ret = ret; + ), + + TP_printk("task:%u@%u xid=0x%08x: ret=%d", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->ret + ) +); + TRACE_EVENT(xprtrdma_post_send, TP_PROTO( const struct rpcrdma_req *req, @@ -559,7 +664,8 @@ TRACE_EVENT(xprtrdma_post_send, const struct rpc_rqst *rqst = &req->rl_slot; __entry->task_id = rqst->rq_task->tk_pid; - __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->client_id = rqst->rq_task->tk_client ? + rqst->rq_task->tk_client->cl_clid : -1; __entry->req = req; __entry->num_sge = req->rl_sendctx->sc_wr.num_sge; __entry->signaled = req->rl_sendctx->sc_wr.send_flags & @@ -578,21 +684,21 @@ TRACE_EVENT(xprtrdma_post_send, TRACE_EVENT(xprtrdma_post_recv, TP_PROTO( - const struct ib_cqe *cqe + const struct rpcrdma_rep *rep ), - TP_ARGS(cqe), + TP_ARGS(rep), TP_STRUCT__entry( - __field(const void *, cqe) + __field(const void *, rep) ), TP_fast_assign( - __entry->cqe = cqe; + __entry->rep = rep; ), - TP_printk("cqe=%p", - __entry->cqe + TP_printk("rep=%p", + __entry->rep ) ); @@ -670,14 +776,15 @@ TRACE_EVENT(xprtrdma_wc_receive, TP_ARGS(wc), TP_STRUCT__entry( - __field(const void *, cqe) + __field(const void *, rep) __field(u32, byte_len) __field(unsigned int, status) __field(u32, vendor_err) ), TP_fast_assign( - __entry->cqe = wc->wr_cqe; + __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep, + rr_cqe); __entry->status = wc->status; if (wc->status) { __entry->byte_len = 0; @@ -688,8 +795,8 @@ TRACE_EVENT(xprtrdma_wc_receive, } ), - TP_printk("cqe=%p %u bytes: %s (%u/0x%x)", - __entry->cqe, __entry->byte_len, + TP_printk("rep=%p %u bytes: %s (%u/0x%x)", + __entry->rep, __entry->byte_len, rdma_show_wc_status(__entry->status), __entry->status, __entry->vendor_err ) @@ -698,6 +805,7 @@ TRACE_EVENT(xprtrdma_wc_receive, DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); +DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done); TRACE_EVENT(xprtrdma_frwr_alloc, TP_PROTO( diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index d85816878a52..191fe447f990 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -23,20 +23,17 @@ #define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY enum rxrpc_skb_trace { - rxrpc_skb_rx_cleaned, - rxrpc_skb_rx_freed, - rxrpc_skb_rx_got, - rxrpc_skb_rx_lost, - rxrpc_skb_rx_purged, - rxrpc_skb_rx_received, - rxrpc_skb_rx_rotated, - rxrpc_skb_rx_seen, - rxrpc_skb_tx_cleaned, - rxrpc_skb_tx_freed, - rxrpc_skb_tx_got, - rxrpc_skb_tx_new, - rxrpc_skb_tx_rotated, - 
rxrpc_skb_tx_seen, + rxrpc_skb_cleaned, + rxrpc_skb_freed, + rxrpc_skb_got, + rxrpc_skb_lost, + rxrpc_skb_new, + rxrpc_skb_purged, + rxrpc_skb_received, + rxrpc_skb_rotated, + rxrpc_skb_seen, + rxrpc_skb_unshared, + rxrpc_skb_unshared_nomem, }; enum rxrpc_local_trace { @@ -228,20 +225,17 @@ enum rxrpc_tx_point { * Declare tracing information enums and their string mappings for display. */ #define rxrpc_skb_traces \ - EM(rxrpc_skb_rx_cleaned, "Rx CLN") \ - EM(rxrpc_skb_rx_freed, "Rx FRE") \ - EM(rxrpc_skb_rx_got, "Rx GOT") \ - EM(rxrpc_skb_rx_lost, "Rx *L*") \ - EM(rxrpc_skb_rx_purged, "Rx PUR") \ - EM(rxrpc_skb_rx_received, "Rx RCV") \ - EM(rxrpc_skb_rx_rotated, "Rx ROT") \ - EM(rxrpc_skb_rx_seen, "Rx SEE") \ - EM(rxrpc_skb_tx_cleaned, "Tx CLN") \ - EM(rxrpc_skb_tx_freed, "Tx FRE") \ - EM(rxrpc_skb_tx_got, "Tx GOT") \ - EM(rxrpc_skb_tx_new, "Tx NEW") \ - EM(rxrpc_skb_tx_rotated, "Tx ROT") \ - E_(rxrpc_skb_tx_seen, "Tx SEE") + EM(rxrpc_skb_cleaned, "CLN") \ + EM(rxrpc_skb_freed, "FRE") \ + EM(rxrpc_skb_got, "GOT") \ + EM(rxrpc_skb_lost, "*L*") \ + EM(rxrpc_skb_new, "NEW") \ + EM(rxrpc_skb_purged, "PUR") \ + EM(rxrpc_skb_received, "RCV") \ + EM(rxrpc_skb_rotated, "ROT") \ + EM(rxrpc_skb_seen, "SEE") \ + EM(rxrpc_skb_unshared, "UNS") \ + E_(rxrpc_skb_unshared_nomem, "US0") #define rxrpc_local_traces \ EM(rxrpc_local_got, "GOT") \ @@ -498,10 +492,10 @@ rxrpc_tx_points; #define E_(a, b) { a, b } TRACE_EVENT(rxrpc_local, - TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op, + TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op, int usage, const void *where), - TP_ARGS(local, op, usage, where), + TP_ARGS(local_debug_id, op, usage, where), TP_STRUCT__entry( __field(unsigned int, local ) @@ -511,7 +505,7 @@ TRACE_EVENT(rxrpc_local, ), TP_fast_assign( - __entry->local = local->debug_id; + __entry->local = local_debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; @@ -525,10 +519,10 @@ TRACE_EVENT(rxrpc_local, ); TRACE_EVENT(rxrpc_peer, - TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op, + TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op, int usage, const void *where), - TP_ARGS(peer, op, usage, where), + TP_ARGS(peer_debug_id, op, usage, where), TP_STRUCT__entry( __field(unsigned int, peer ) @@ -538,7 +532,7 @@ TRACE_EVENT(rxrpc_peer, ), TP_fast_assign( - __entry->peer = peer->debug_id; + __entry->peer = peer_debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; @@ -552,10 +546,10 @@ TRACE_EVENT(rxrpc_peer, ); TRACE_EVENT(rxrpc_conn, - TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op, + TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op, int usage, const void *where), - TP_ARGS(conn, op, usage, where), + TP_ARGS(conn_debug_id, op, usage, where), TP_STRUCT__entry( __field(unsigned int, conn ) @@ -565,7 +559,7 @@ TRACE_EVENT(rxrpc_conn, ), TP_fast_assign( - __entry->conn = conn->debug_id; + __entry->conn = conn_debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; @@ -612,10 +606,10 @@ TRACE_EVENT(rxrpc_client, ); TRACE_EVENT(rxrpc_call, - TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op, + TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op, int usage, const void *where, const void *aux), - TP_ARGS(call, op, usage, where, aux), + TP_ARGS(call_debug_id, op, usage, where, aux), TP_STRUCT__entry( __field(unsigned int, call ) @@ -626,7 +620,7 @@ TRACE_EVENT(rxrpc_call, ), TP_fast_assign( - __entry->call = call->debug_id; + __entry->call = 
call_debug_id; __entry->op = op; __entry->usage = usage; __entry->where = where; @@ -643,13 +637,14 @@ TRACE_EVENT(rxrpc_call, TRACE_EVENT(rxrpc_skb, TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op, - int usage, int mod_count, const void *where), + int usage, int mod_count, u8 flags, const void *where), - TP_ARGS(skb, op, usage, mod_count, where), + TP_ARGS(skb, op, usage, mod_count, flags, where), TP_STRUCT__entry( __field(struct sk_buff *, skb ) __field(enum rxrpc_skb_trace, op ) + __field(u8, flags ) __field(int, usage ) __field(int, mod_count ) __field(const void *, where ) @@ -657,14 +652,16 @@ TRACE_EVENT(rxrpc_skb, TP_fast_assign( __entry->skb = skb; + __entry->flags = flags; __entry->op = op; __entry->usage = usage; __entry->mod_count = mod_count; __entry->where = where; ), - TP_printk("s=%p %s u=%d m=%d p=%pSR", + TP_printk("s=%p %cx %s u=%d m=%d p=%pSR", __entry->skb, + __entry->flags & RXRPC_SKB_TX_BUFFER ? 'T' : 'R', __print_symbolic(__entry->op, rxrpc_skb_traces), __entry->usage, __entry->mod_count, @@ -1071,7 +1068,7 @@ TRACE_EVENT(rxrpc_recvmsg, ), TP_fast_assign( - __entry->call = call->debug_id; + __entry->call = call ? call->debug_id : 0; __entry->why = why; __entry->seq = seq; __entry->offset = offset; @@ -1379,7 +1376,7 @@ TRACE_EVENT(rxrpc_rx_eproto, ), TP_fast_assign( - __entry->call = call->debug_id; + __entry->call = call ? call->debug_id : 0; __entry->serial = serial; __entry->why = why; ), diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index c8c7c7efb487..420e80e56e55 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -594,6 +594,37 @@ TRACE_EVENT(sched_wake_idle_without_ipi, TP_printk("cpu=%d", __entry->cpu) ); + +/* + * Following tracepoints are not exported in tracefs and provide hooking + * mechanisms only for testing and debugging purposes. + * + * Postfixed with _tp to make them easily identifiable in the code. 
+ */ +DECLARE_TRACE(pelt_cfs_tp, + TP_PROTO(struct cfs_rq *cfs_rq), + TP_ARGS(cfs_rq)); + +DECLARE_TRACE(pelt_rt_tp, + TP_PROTO(struct rq *rq), + TP_ARGS(rq)); + +DECLARE_TRACE(pelt_dl_tp, + TP_PROTO(struct rq *rq), + TP_ARGS(rq)); + +DECLARE_TRACE(pelt_irq_tp, + TP_PROTO(struct rq *rq), + TP_ARGS(rq)); + +DECLARE_TRACE(pelt_se_tp, + TP_PROTO(struct sched_entity *se), + TP_ARGS(se)); + +DECLARE_TRACE(sched_overutilized_tp, + TP_PROTO(struct root_domain *rd, bool overutilized), + TP_ARGS(rd, overutilized)); + #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index a0c4b8a30966..51fe9f6719eb 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -82,7 +82,7 @@ TRACE_EVENT(sock_rcvqueue_full, TP_fast_assign( __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); __entry->truesize = skb->truesize; - __entry->sk_rcvbuf = sk->sk_rcvbuf; + __entry->sk_rcvbuf = READ_ONCE(sk->sk_rcvbuf); ), TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d", @@ -115,7 +115,7 @@ TRACE_EVENT(sock_exceed_buf_limit, __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); __entry->sysctl_wmem = sk_get_wmem0(sk, prot); __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc); - __entry->wmem_queued = sk->sk_wmem_queued; + __entry->wmem_queued = READ_ONCE(sk->sk_wmem_queued); __entry->kind = kind; ), diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h index 0818f6286110..971cd02d2daf 100644 --- a/include/trace/events/tegra_apb_dma.h +++ b/include/trace/events/tegra_apb_dma.h @@ -1,5 +1,5 @@ #if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_TEGRA_APM_DMA_H +#define _TRACE_TEGRA_APB_DMA_H #include <linux/tracepoint.h> #include <linux/dmaengine.h> @@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr, TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq) ); -#endif /* _TRACE_TEGRADMA_H */ +#endif /* _TRACE_TEGRA_APB_DMA_H */ /* This part must be outside protection */ #include <trace/define_trace.h> diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index aa7f3aeac740..c2ce6480b4b1 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -66,8 +66,9 @@ DECLARE_EVENT_CLASS(writeback_page_template, ), TP_fast_assign( - strncpy(__entry->name, - mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32); + strscpy_pad(__entry->name, + mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", + 32); __entry->ino = mapping ? mapping->host->i_ino : 0; __entry->index = page->index; ), @@ -110,8 +111,8 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template, struct backing_dev_info *bdi = inode_to_bdi(inode); /* may be called for files on pseudo FSes w/ unregistered bdi */ - strncpy(__entry->name, - bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); + strscpy_pad(__entry->name, + bdi->dev ? 
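The bare DECLARE_TRACE() hooks added to sched.h above are meant to be attached to from test or debug code rather than read through tracefs. A minimal sketch of a debugging module that hooks one of them (an editor's illustration, not part of the patch; it assumes the scheduler side exports the tracepoint symbol, which lies outside this header diff, and uses pelt_se_tp because struct sched_entity is a public type):

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

/* Probe invoked when the PELT signal of a sched_entity is updated. */
static void probe_pelt_se(void *data, struct sched_entity *se)
{
        /* Inspect se->avg here; keep this short, it runs in scheduler paths. */
}

static int __init pelt_hook_init(void)
{
        return register_trace_pelt_se_tp(probe_pelt_se, NULL);
}

static void __exit pelt_hook_exit(void)
{
        unregister_trace_pelt_se_tp(probe_pelt_se, NULL);
        tracepoint_synchronize_unregister();
}

module_init(pelt_hook_init);
module_exit(pelt_hook_exit);
MODULE_LICENSE("GPL");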
dev_name(bdi->dev) : "(unknown)", 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->flags = flags; @@ -176,6 +177,132 @@ static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *w #endif /* CONFIG_CGROUP_WRITEBACK */ #endif /* CREATE_TRACE_POINTS */ +#ifdef CONFIG_CGROUP_WRITEBACK +TRACE_EVENT(inode_foreign_history, + + TP_PROTO(struct inode *inode, struct writeback_control *wbc, + unsigned int history), + + TP_ARGS(inode, wbc, history), + + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned long, ino) + __field(unsigned int, cgroup_ino) + __field(unsigned int, history) + ), + + TP_fast_assign( + strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32); + __entry->ino = inode->i_ino; + __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); + __entry->history = history; + ), + + TP_printk("bdi %s: ino=%lu cgroup_ino=%u history=0x%x", + __entry->name, + __entry->ino, + __entry->cgroup_ino, + __entry->history + ) +); + +TRACE_EVENT(inode_switch_wbs, + + TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb, + struct bdi_writeback *new_wb), + + TP_ARGS(inode, old_wb, new_wb), + + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned long, ino) + __field(unsigned int, old_cgroup_ino) + __field(unsigned int, new_cgroup_ino) + ), + + TP_fast_assign( + strncpy(__entry->name, dev_name(old_wb->bdi->dev), 32); + __entry->ino = inode->i_ino; + __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb); + __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb); + ), + + TP_printk("bdi %s: ino=%lu old_cgroup_ino=%u new_cgroup_ino=%u", + __entry->name, + __entry->ino, + __entry->old_cgroup_ino, + __entry->new_cgroup_ino + ) +); + +TRACE_EVENT(track_foreign_dirty, + + TP_PROTO(struct page *page, struct bdi_writeback *wb), + + TP_ARGS(page, wb), + + TP_STRUCT__entry( + __array(char, name, 32) + __field(u64, bdi_id) + __field(unsigned long, ino) + __field(unsigned int, memcg_id) + __field(unsigned int, cgroup_ino) + __field(unsigned int, page_cgroup_ino) + ), + + TP_fast_assign( + struct address_space *mapping = page_mapping(page); + struct inode *inode = mapping ? mapping->host : NULL; + + strncpy(__entry->name, dev_name(wb->bdi->dev), 32); + __entry->bdi_id = wb->bdi->id; + __entry->ino = inode ? 
inode->i_ino : 0; + __entry->memcg_id = wb->memcg_css->id; + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); + __entry->page_cgroup_ino = page->mem_cgroup->css.cgroup->kn->id.ino; + ), + + TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%u page_cgroup_ino=%u", + __entry->name, + __entry->bdi_id, + __entry->ino, + __entry->memcg_id, + __entry->cgroup_ino, + __entry->page_cgroup_ino + ) +); + +TRACE_EVENT(flush_foreign, + + TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id, + unsigned int frn_memcg_id), + + TP_ARGS(wb, frn_bdi_id, frn_memcg_id), + + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned int, cgroup_ino) + __field(unsigned int, frn_bdi_id) + __field(unsigned int, frn_memcg_id) + ), + + TP_fast_assign( + strncpy(__entry->name, dev_name(wb->bdi->dev), 32); + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); + __entry->frn_bdi_id = frn_bdi_id; + __entry->frn_memcg_id = frn_memcg_id; + ), + + TP_printk("bdi %s: cgroup_ino=%u frn_bdi_id=%u frn_memcg_id=%u", + __entry->name, + __entry->cgroup_ino, + __entry->frn_bdi_id, + __entry->frn_memcg_id + ) +); +#endif + DECLARE_EVENT_CLASS(writeback_write_inode_template, TP_PROTO(struct inode *inode, struct writeback_control *wbc), @@ -190,8 +317,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template, ), TP_fast_assign( - strncpy(__entry->name, - dev_name(inode_to_bdi(inode)->dev), 32); + strscpy_pad(__entry->name, + dev_name(inode_to_bdi(inode)->dev), 32); __entry->ino = inode->i_ino; __entry->sync_mode = wbc->sync_mode; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); @@ -234,8 +361,9 @@ DECLARE_EVENT_CLASS(writeback_work_class, __field(unsigned int, cgroup_ino) ), TP_fast_assign( - strncpy(__entry->name, - wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32); + strscpy_pad(__entry->name, + wb->bdi->dev ? dev_name(wb->bdi->dev) : + "(unknown)", 32); __entry->nr_pages = work->nr_pages; __entry->sb_dev = work->sb ? work->sb->s_dev : 0; __entry->sync_mode = work->sync_mode; @@ -288,7 +416,7 @@ DECLARE_EVENT_CLASS(writeback_class, __field(unsigned int, cgroup_ino) ), TP_fast_assign( - strncpy(__entry->name, dev_name(wb->bdi->dev), 32); + strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: cgroup_ino=%u", @@ -310,7 +438,7 @@ TRACE_EVENT(writeback_bdi_register, __array(char, name, 32) ), TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); + strscpy_pad(__entry->name, dev_name(bdi->dev), 32); ), TP_printk("bdi %s", __entry->name @@ -335,7 +463,7 @@ DECLARE_EVENT_CLASS(wbc_class, ), TP_fast_assign( - strncpy(__entry->name, dev_name(bdi->dev), 32); + strscpy_pad(__entry->name, dev_name(bdi->dev), 32); __entry->nr_to_write = wbc->nr_to_write; __entry->pages_skipped = wbc->pages_skipped; __entry->sync_mode = wbc->sync_mode; @@ -386,7 +514,7 @@ TRACE_EVENT(writeback_queue_io, ), TP_fast_assign( unsigned long *older_than_this = work->older_than_this; - strncpy(__entry->name, dev_name(wb->bdi->dev), 32); + strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32); __entry->older = older_than_this ? *older_than_this : 0; __entry->age = older_than_this ? 
(jiffies - *older_than_this) * 1000 / HZ : -1; @@ -472,7 +600,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, ), TP_fast_assign( - strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32); + strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32); __entry->write_bw = KBps(wb->write_bandwidth); __entry->avg_write_bw = KBps(wb->avg_write_bandwidth); __entry->dirty_rate = KBps(dirty_rate); @@ -537,7 +665,7 @@ TRACE_EVENT(balance_dirty_pages, TP_fast_assign( unsigned long freerun = (thresh + bg_thresh) / 2; - strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32); + strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32); __entry->limit = global_wb_domain.dirty_limit; __entry->setpoint = (global_wb_domain.dirty_limit + @@ -597,8 +725,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue, ), TP_fast_assign( - strncpy(__entry->name, - dev_name(inode_to_bdi(inode)->dev), 32); + strscpy_pad(__entry->name, + dev_name(inode_to_bdi(inode)->dev), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->dirtied_when = inode->dirtied_when; @@ -671,8 +799,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, ), TP_fast_assign( - strncpy(__entry->name, - dev_name(inode_to_bdi(inode)->dev), 32); + strscpy_pad(__entry->name, + dev_name(inode_to_bdi(inode)->dev), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->dirtied_when = inode->dirtied_when; diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index e95cb86b65cf..8c8420230a10 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -50,6 +50,35 @@ TRACE_EVENT(xdp_exception, __entry->ifindex) ); +TRACE_EVENT(xdp_bulk_tx, + + TP_PROTO(const struct net_device *dev, + int sent, int drops, int err), + + TP_ARGS(dev, sent, drops, err), + + TP_STRUCT__entry( + __field(int, ifindex) + __field(u32, act) + __field(int, drops) + __field(int, sent) + __field(int, err) + ), + + TP_fast_assign( + __entry->ifindex = dev->ifindex; + __entry->act = XDP_TX; + __entry->drops = drops; + __entry->sent = sent; + __entry->err = err; + ), + + TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d", + __entry->ifindex, + __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), + __entry->sent, __entry->drops, __entry->err) +); + DECLARE_EVENT_CLASS(xdp_redirect_template, TP_PROTO(const struct net_device *dev, @@ -146,9 +175,9 @@ struct _bpf_dtab_netdev { #endif /* __DEVMAP_OBJ_TYPE */ #define devmap_ifindex(fwd, map) \ - (!fwd ? 0 : \ - ((map->map_type == BPF_MAP_TYPE_DEVMAP) ? \ - ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)) + ((map->map_type == BPF_MAP_TYPE_DEVMAP || \ + map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ? 
\ + ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0) #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \ trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \ @@ -269,6 +298,121 @@ TRACE_EVENT(xdp_devmap_xmit, __entry->from_ifindex, __entry->to_ifindex, __entry->err) ); +/* Expect users already include <net/xdp.h>, but not xdp_priv.h */ +#include <net/xdp_priv.h> + +#define __MEM_TYPE_MAP(FN) \ + FN(PAGE_SHARED) \ + FN(PAGE_ORDER0) \ + FN(PAGE_POOL) \ + FN(ZERO_COPY) + +#define __MEM_TYPE_TP_FN(x) \ + TRACE_DEFINE_ENUM(MEM_TYPE_##x); +#define __MEM_TYPE_SYM_FN(x) \ + { MEM_TYPE_##x, #x }, +#define __MEM_TYPE_SYM_TAB \ + __MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 } +__MEM_TYPE_MAP(__MEM_TYPE_TP_FN) + +TRACE_EVENT(mem_disconnect, + + TP_PROTO(const struct xdp_mem_allocator *xa, + bool safe_to_remove, bool force), + + TP_ARGS(xa, safe_to_remove, force), + + TP_STRUCT__entry( + __field(const struct xdp_mem_allocator *, xa) + __field(u32, mem_id) + __field(u32, mem_type) + __field(const void *, allocator) + __field(bool, safe_to_remove) + __field(bool, force) + __field(int, disconnect_cnt) + ), + + TP_fast_assign( + __entry->xa = xa; + __entry->mem_id = xa->mem.id; + __entry->mem_type = xa->mem.type; + __entry->allocator = xa->allocator; + __entry->safe_to_remove = safe_to_remove; + __entry->force = force; + __entry->disconnect_cnt = xa->disconnect_cnt; + ), + + TP_printk("mem_id=%d mem_type=%s allocator=%p" + " safe_to_remove=%s force=%s disconnect_cnt=%d", + __entry->mem_id, + __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), + __entry->allocator, + __entry->safe_to_remove ? "true" : "false", + __entry->force ? "true" : "false", + __entry->disconnect_cnt + ) +); + +TRACE_EVENT(mem_connect, + + TP_PROTO(const struct xdp_mem_allocator *xa, + const struct xdp_rxq_info *rxq), + + TP_ARGS(xa, rxq), + + TP_STRUCT__entry( + __field(const struct xdp_mem_allocator *, xa) + __field(u32, mem_id) + __field(u32, mem_type) + __field(const void *, allocator) + __field(const struct xdp_rxq_info *, rxq) + __field(int, ifindex) + ), + + TP_fast_assign( + __entry->xa = xa; + __entry->mem_id = xa->mem.id; + __entry->mem_type = xa->mem.type; + __entry->allocator = xa->allocator; + __entry->rxq = rxq; + __entry->ifindex = rxq->dev->ifindex; + ), + + TP_printk("mem_id=%d mem_type=%s allocator=%p" + " ifindex=%d", + __entry->mem_id, + __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), + __entry->allocator, + __entry->ifindex + ) +); + +TRACE_EVENT(mem_return_failed, + + TP_PROTO(const struct xdp_mem_info *mem, + const struct page *page), + + TP_ARGS(mem, page), + + TP_STRUCT__entry( + __field(const struct page *, page) + __field(u32, mem_id) + __field(u32, mem_type) + ), + + TP_fast_assign( + __entry->page = page; + __entry->mem_id = mem->id; + __entry->mem_type = mem->type; + ), + + TP_printk("mem_id=%d mem_type=%s page=%p", + __entry->mem_id, + __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), + __entry->page + ) +); + #endif /* _TRACE_XDP_H */ #include <trace/define_trace.h> diff --git a/include/uapi/linux/Kbuild b/include/uapi/Kbuild index 34711c5d6968..61ee6e59c930 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/Kbuild @@ -1,14 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/a.out.h),) -no-export-headers += a.out.h +no-export-headers += linux/a.out.h endif ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm.h),) -no-export-headers += kvm.h +no-export-headers += linux/kvm.h endif ifeq ($(wildcard 
$(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),) ifeq ($(wildcard $(objtree)/arch/$(SRCARCH)/include/generated/uapi/asm/kvm_para.h),) -no-export-headers += kvm_para.h +no-export-headers += linux/kvm_para.h endif endif diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index abd238d0f7a4..c160a5354eb6 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -19,15 +19,18 @@ #define MAP_TYPE 0x0f /* Mask for type of mapping */ #define MAP_FIXED 0x10 /* Interpret addr exactly */ #define MAP_ANONYMOUS 0x20 /* don't use a file */ -#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED -# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */ -#else -# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ -#endif -/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */ +/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */ +#define MAP_POPULATE 0x008000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x010000 /* do not block on IO */ +#define MAP_STACK 0x020000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x040000 /* create a huge page mapping */ +#define MAP_SYNC 0x080000 /* perform synchronous page faults for the mapping */ #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ +#define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be + * uninitialized */ + /* * Flags for mlock */ @@ -64,6 +67,9 @@ #define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ +#define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h index 653687d9771b..57e8195d0b53 100644 --- a/include/uapi/asm-generic/mman.h +++ b/include/uapi/asm-generic/mman.h @@ -9,13 +9,11 @@ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ #define MAP_LOCKED 0x2000 /* pages are locked */ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ -#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ -#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ -#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */ -/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */ +/* + * Bits [26:31] are reserved, see asm-generic/hugetlb_encode.h + * for MAP_HUGETLB usage + */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 8c1391c89171..77f7c1638eb1 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -117,6 +117,8 @@ #define SO_RCVTIMEO_NEW 66 #define SO_SNDTIMEO_NEW 67 +#define SO_DETACH_REUSEPORT_BPF 68 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index a87904daf103..1fc8faa6e973 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -569,7 +569,7 @@ __SYSCALL(__NR_semget, sys_semget) __SC_COMP(__NR_semctl, 
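The new MADV_COLD and MADV_PAGEOUT advice values added to mman-common.h above are used through the ordinary madvise() call. A small userspace sketch (the fallback #defines assume a libc whose headers predate this patch):

#include <sys/mman.h>
#include <errno.h>
#include <stddef.h>

#ifndef MADV_COLD
#define MADV_COLD       20      /* deactivate these pages */
#endif
#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT    21      /* reclaim these pages */
#endif

/* Hint that [addr, addr + len) will not be needed soon. With reclaim_now
 * set, ask the kernel to page the range out immediately instead of merely
 * deactivating it. Older kernels reject the new advice with EINVAL. */
static int drop_cold_range(void *addr, size_t len, int reclaim_now)
{
        int advice = reclaim_now ? MADV_PAGEOUT : MADV_COLD;

        if (madvise(addr, len, advice) == 0)
                return 0;
        return errno == EINVAL ? 1 : -1;        /* 1: kernel lacks support */
}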
sys_semctl, compat_sys_semctl) #if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_semtimedop 192 -__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32) +__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop) #endif #define __NR_semop 193 __SYSCALL(__NR_semop, sys_semop) @@ -844,9 +844,15 @@ __SYSCALL(__NR_fsconfig, sys_fsconfig) __SYSCALL(__NR_fsmount, sys_fsmount) #define __NR_fspick 433 __SYSCALL(__NR_fspick, sys_fspick) +#define __NR_pidfd_open 434 +__SYSCALL(__NR_pidfd_open, sys_pidfd_open) +#ifdef __ARCH_WANT_SYS_CLONE3 +#define __NR_clone3 435 +__SYSCALL(__NR_clone3, sys_clone3) +#endif #undef __NR_syscalls -#define __NR_syscalls 434 +#define __NR_syscalls 436 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 4788730dbe78..4fe35d600ab8 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -128,6 +128,10 @@ extern "C" { * for the second page onward should be set to NC. */ #define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8) +/* Flag that BO may contain sensitive data that must be wiped before + * releasing the memory + */ +#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9) struct drm_amdgpu_gem_create_in { /** the requested memory size */ @@ -219,7 +223,10 @@ union drm_amdgpu_bo_list { #define AMDGPU_CTX_PRIORITY_VERY_LOW -1023 #define AMDGPU_CTX_PRIORITY_LOW -512 #define AMDGPU_CTX_PRIORITY_NORMAL 0 -/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */ +/* + * When used in struct drm_amdgpu_ctx_in, a priority above NORMAL requires + * CAP_SYS_NICE or DRM_MASTER +*/ #define AMDGPU_CTX_PRIORITY_HIGH 512 #define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023 @@ -229,6 +236,7 @@ struct drm_amdgpu_ctx_in { /** For future use, no flags defined so far */ __u32 flags; __u32 ctx_id; + /** AMDGPU_CTX_PRIORITY_* */ __s32 priority; }; @@ -281,6 +289,7 @@ struct drm_amdgpu_sched_in { /* AMDGPU_SCHED_OP_* */ __u32 op; __u32 fd; + /** AMDGPU_CTX_PRIORITY_* */ __s32 priority; __u32 ctx_id; }; @@ -912,6 +921,7 @@ struct drm_amdgpu_info_firmware { #define AMDGPU_VRAM_TYPE_HBM 6 #define AMDGPU_VRAM_TYPE_DDR3 7 #define AMDGPU_VRAM_TYPE_DDR4 8 +#define AMDGPU_VRAM_TYPE_GDDR6 9 struct drm_amdgpu_info_device { /** PCI Device ID */ @@ -991,6 +1001,10 @@ struct drm_amdgpu_info_device { __u64 high_va_offset; /** The maximum high virtual address */ __u64 high_va_max; + /* gfx10 pa_sc_tile_steering_override */ + __u32 pa_sc_tile_steering_override; + /* disabled TCCs */ + __u64 tcc_disabled_mask; }; struct drm_amdgpu_info_hw_ip { @@ -1044,6 +1058,7 @@ struct drm_amdgpu_info_vce_clock_table { #define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */ #define AMDGPU_FAMILY_AI 141 /* Vega10 */ #define AMDGPU_FAMILY_RV 142 /* Raven */ +#define AMDGPU_FAMILY_NV 143 /* Navi10 */ #if defined(__cplusplus) } diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 661d73f9a919..8a5b2f8f8eb9 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -50,6 +50,7 @@ typedef unsigned int drm_handle_t; #else /* One of the BSDs */ +#include <stdint.h> #include <sys/ioccom.h> #include <sys/types.h> typedef int8_t __s8; diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 83cd1636b9be..735c8cfdaaa1 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -33,6 +33,15 @@ extern "C" { #endif +/** + * DOC: overview + * + * DRM exposes many UAPI and structure definition to have a consistent + * and standardized 
interface with user. + * Userspace can refer to these structure definitions and UAPI formats + * to communicate to driver + */ + #define DRM_CONNECTOR_NAME_LEN 32 #define DRM_DISPLAY_MODE_LEN 32 #define DRM_PROP_NAME_LEN 32 @@ -352,6 +361,7 @@ enum drm_mode_subconnector { #define DRM_MODE_CONNECTOR_DSI 16 #define DRM_MODE_CONNECTOR_DPI 17 #define DRM_MODE_CONNECTOR_WRITEBACK 18 +#define DRM_MODE_CONNECTOR_SPI 19 struct drm_mode_get_connector { @@ -630,6 +640,92 @@ struct drm_color_lut { __u16 reserved; }; +/** + * struct hdr_metadata_infoframe - HDR Metadata Infoframe Data. + * + * HDR Metadata Infoframe as per CTA 861.G spec. This is expected + * to match exactly with the spec. + * + * Userspace is expected to pass the metadata information as per + * the format described in this structure. + */ +struct hdr_metadata_infoframe { + /** + * @eotf: Electro-Optical Transfer Function (EOTF) + * used in the stream. + */ + __u8 eotf; + /** + * @metadata_type: Static_Metadata_Descriptor_ID. + */ + __u8 metadata_type; + /** + * @display_primaries: Color Primaries of the Data. + * These are coded as unsigned 16-bit values in units of + * 0.00002, where 0x0000 represents zero and 0xC350 + * represents 1.0000. + * @display_primaries.x: X cordinate of color primary. + * @display_primaries.y: Y cordinate of color primary. + */ + struct { + __u16 x, y; + } display_primaries[3]; + /** + * @white_point: White Point of Colorspace Data. + * These are coded as unsigned 16-bit values in units of + * 0.00002, where 0x0000 represents zero and 0xC350 + * represents 1.0000. + * @white_point.x: X cordinate of whitepoint of color primary. + * @white_point.y: Y cordinate of whitepoint of color primary. + */ + struct { + __u16 x, y; + } white_point; + /** + * @max_display_mastering_luminance: Max Mastering Display Luminance. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + __u16 max_display_mastering_luminance; + /** + * @min_display_mastering_luminance: Min Mastering Display Luminance. + * This value is coded as an unsigned 16-bit value in units of + * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF + * represents 6.5535 cd/m2. + */ + __u16 min_display_mastering_luminance; + /** + * @max_cll: Max Content Light Level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + __u16 max_cll; + /** + * @max_fall: Max Frame Average Light Level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + __u16 max_fall; +}; + +/** + * struct hdr_output_metadata - HDR output metadata + * + * Metadata Information to be passed from userspace + */ +struct hdr_output_metadata { + /** + * @metadata_type: Static_Metadata_Descriptor_ID. + */ + __u32 metadata_type; + /** + * @hdmi_metadata_type1: HDR Metadata Infoframe. + */ + union { + struct hdr_metadata_infoframe hdmi_metadata_type1; + }; +}; + #define DRM_MODE_PAGE_FLIP_EVENT 0x01 #define DRM_MODE_PAGE_FLIP_ASYNC 0x02 #define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4 @@ -803,6 +899,10 @@ struct drm_format_modifier { }; /** + * struct drm_mode_create_blob - Create New block property + * @data: Pointer to data to copy. + * @length: Length of data to copy. + * @blob_id: new property ID. * Create a new 'blob' data property, copying length bytes from data pointer, * and returning new blob ID. 
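The hdr_output_metadata structure documented above reaches the kernel as a blob property attached to a connector. A minimal libdrm sketch (illustration only: the lookup of the connector's "HDR_OUTPUT_METADATA" property id is omitted, and the numeric EOTF/metadata-type values are assumptions to be checked against CTA-861.G):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm/drm_mode.h>

/* Build ST.2084 (PQ) static metadata and attach it to a connector. */
static int set_hdr_metadata(int fd, uint32_t connector_id, uint32_t prop_id)
{
        struct hdr_output_metadata meta;
        uint32_t blob_id;
        int ret;

        memset(&meta, 0, sizeof(meta));
        meta.metadata_type = 0;                         /* Static Metadata Type 1 */
        meta.hdmi_metadata_type1.metadata_type = 0;
        meta.hdmi_metadata_type1.eotf = 2;              /* SMPTE ST.2084, per CTA-861.G */
        meta.hdmi_metadata_type1.max_display_mastering_luminance = 1000; /* cd/m2 */
        meta.hdmi_metadata_type1.min_display_mastering_luminance = 50;   /* 0.0001 cd/m2 units */
        meta.hdmi_metadata_type1.max_cll = 1000;
        meta.hdmi_metadata_type1.max_fall = 400;

        ret = drmModeCreatePropertyBlob(fd, &meta, sizeof(meta), &blob_id);
        if (ret)
                return ret;

        /* prop_id is the connector's "HDR_OUTPUT_METADATA" property,
         * discovered beforehand via drmModeObjectGetProperties(). */
        return drmModeObjectSetProperty(fd, connector_id,
                                        DRM_MODE_OBJECT_CONNECTOR,
                                        prop_id, blob_id);
}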
*/ @@ -816,6 +916,8 @@ struct drm_mode_create_blob { }; /** + * struct drm_mode_destroy_blob - Destroy user blob + * @blob_id: blob_id to destroy * Destroy a user-created blob property. */ struct drm_mode_destroy_blob { @@ -823,6 +925,12 @@ struct drm_mode_destroy_blob { }; /** + * struct drm_mode_create_lease - Create lease + * @object_ids: Pointer to array of object ids. + * @object_count: Number of object ids. + * @flags: flags for new FD. + * @lessee_id: unique identifier for lessee. + * @fd: file descriptor to new drm_master file. * Lease mode resources, creating another drm_master. */ struct drm_mode_create_lease { @@ -840,6 +948,10 @@ struct drm_mode_create_lease { }; /** + * struct drm_mode_list_lessees - List lessees + * @count_lessees: Number of lessees. + * @pad: pad. + * @lessees_ptr: Pointer to lessess. * List lesses from a drm_master */ struct drm_mode_list_lessees { @@ -860,6 +972,10 @@ struct drm_mode_list_lessees { }; /** + * struct drm_mode_get_lease - Get Lease + * @count_objects: Number of leased objects. + * @pad: pad. + * @objects_ptr: Pointer to objects. * Get leased objects */ struct drm_mode_get_lease { @@ -880,6 +996,8 @@ struct drm_mode_get_lease { }; /** + * struct drm_mode_revoke_lease - Revoke lease + * @lessee_id: Unique ID of lessee. * Revoke lease */ struct drm_mode_revoke_lease { diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h index 0d5c49dc478c..09d0df8b71c5 100644 --- a/include/uapi/drm/etnaviv_drm.h +++ b/include/uapi/drm/etnaviv_drm.h @@ -73,6 +73,7 @@ struct drm_etnaviv_timespec { #define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18 #define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19 #define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a +#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b #define ETNA_MAX_PIPES 4 @@ -148,6 +149,11 @@ struct drm_etnaviv_gem_submit_reloc { * then patching the cmdstream for this entry is skipped. This can * avoid kernel needing to map/access the cmdstream bo in the common * case. + * If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN) the 'presumed' + * field is interpreted as the fixed location to map the bo into the gpu + * virtual address space. If the kernel is unable to map the buffer at + * this location the submit will fail. This means userspace is responsible + * for the whole gpu virtual address management. 
*/ #define ETNA_SUBMIT_BO_READ 0x0001 #define ETNA_SUBMIT_BO_WRITE 0x0002 @@ -177,9 +183,11 @@ struct drm_etnaviv_gem_submit_pmr { #define ETNA_SUBMIT_NO_IMPLICIT 0x0001 #define ETNA_SUBMIT_FENCE_FD_IN 0x0002 #define ETNA_SUBMIT_FENCE_FD_OUT 0x0004 +#define ETNA_SUBMIT_SOFTPIN 0x0008 #define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \ ETNA_SUBMIT_FENCE_FD_IN | \ - ETNA_SUBMIT_FENCE_FD_OUT) + ETNA_SUBMIT_FENCE_FD_OUT| \ + ETNA_SUBMIT_SOFTPIN) #define ETNA_PIPE_3D 0x00 #define ETNA_PIPE_2D 0x01 #define ETNA_PIPE_VG 0x02 diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 3a73f5316766..469dc512cca3 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -136,6 +136,8 @@ enum drm_i915_gem_engine_class { struct i915_engine_class_instance { __u16 engine_class; /* see enum drm_i915_gem_engine_class */ __u16 engine_instance; +#define I915_ENGINE_CLASS_INVALID_NONE -1 +#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2 }; /** @@ -355,6 +357,8 @@ typedef struct _drm_i915_sarea { #define DRM_I915_PERF_ADD_CONFIG 0x37 #define DRM_I915_PERF_REMOVE_CONFIG 0x38 #define DRM_I915_QUERY 0x39 +#define DRM_I915_GEM_VM_CREATE 0x3a +#define DRM_I915_GEM_VM_DESTROY 0x3b /* Must be kept compact -- no holes */ #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) @@ -415,6 +419,8 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) #define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query) +#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control) +#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -515,6 +521,7 @@ typedef struct drm_i915_irq_wait { #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) #define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3) +#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4) #define I915_PARAM_HUC_STATUS 42 @@ -598,6 +605,12 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_MMAP_GTT_COHERENT 52 +/* + * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel + * execution through use of explicit fence support. + * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT. + */ +#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53 /* Must be kept compact -- no holes and well documented */ typedef struct drm_i915_getparam { @@ -1120,7 +1133,16 @@ struct drm_i915_gem_execbuffer2 { */ #define I915_EXEC_FENCE_ARRAY (1<<19) -#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1)) +/* + * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent + * a sync_file fd to wait upon (in a nonblocking manner) prior to executing + * the batch. + * + * Returns -EINVAL if the sync_file fd cannot be found. 
+ */ +#define I915_EXEC_FENCE_SUBMIT (1 << 20) + +#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1)) #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) #define i915_execbuffer2_set_context_id(eb2, context) \ @@ -1464,8 +1486,9 @@ struct drm_i915_gem_context_create_ext { __u32 ctx_id; /* output: id of new context*/ __u32 flags; #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0) +#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1) #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \ - (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1)) + (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1)) __u64 extensions; }; @@ -1507,6 +1530,41 @@ struct drm_i915_gem_context_param { * On creation, all new contexts are marked as recoverable. */ #define I915_CONTEXT_PARAM_RECOVERABLE 0x8 + + /* + * The id of the associated virtual memory address space (ppGTT) of + * this context. Can be retrieved and passed to another context + * (on the same fd) for both to use the same ppGTT and so share + * address layouts, and avoid reloading the page tables on context + * switches between themselves. + * + * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY. + */ +#define I915_CONTEXT_PARAM_VM 0x9 + +/* + * I915_CONTEXT_PARAM_ENGINES: + * + * Bind this context to operate on this subset of available engines. Henceforth, + * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as + * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0] + * and upwards. Slots 0...N are filled in using the specified (class, instance). + * Use + * engine_class: I915_ENGINE_CLASS_INVALID, + * engine_instance: I915_ENGINE_CLASS_INVALID_NONE + * to specify a gap in the array that can be filled in later, e.g. by a + * virtual engine used for load balancing. + * + * Setting the number of engines bound to the context to 0, by passing a zero + * sized argument, will revert back to default settings. + * + * See struct i915_context_param_engines. + * + * Extensions: + * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE) + * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND) + */ +#define I915_CONTEXT_PARAM_ENGINES 0xa /* Must be kept compact -- no holes and well documented */ __u64 value; @@ -1540,9 +1598,10 @@ struct drm_i915_gem_context_param_sseu { struct i915_engine_class_instance engine; /* - * Unused for now. Must be cleared to zero. + * Unknown flags must be cleared to zero. */ __u32 flags; +#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0) /* * Mask of slices to enable for the context. Valid values are a subset @@ -1570,12 +1629,115 @@ struct drm_i915_gem_context_param_sseu { __u32 rsvd; }; +/* + * i915_context_engines_load_balance: + * + * Enable load balancing across this set of engines. + * + * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when + * used will proxy the execbuffer request onto one of the set of engines + * in such a way as to distribute the load evenly across the set. + * + * The set of engines must be compatible (e.g. the same HW class) as they + * will share the same logical GPU context and ring. + * + * To intermix rendering with the virtual engine and direct rendering onto + * the backing engines (bypassing the load balancing proxy), the context must + * be defined to use a single timeline for all engines. 
+ */ +struct i915_context_engines_load_balance { + struct i915_user_extension base; + + __u16 engine_index; + __u16 num_siblings; + __u32 flags; /* all undefined flags must be zero */ + + __u64 mbz64; /* reserved for future use; must be zero */ + + struct i915_engine_class_instance engines[0]; +} __attribute__((packed)); + +#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \ + struct i915_user_extension base; \ + __u16 engine_index; \ + __u16 num_siblings; \ + __u32 flags; \ + __u64 mbz64; \ + struct i915_engine_class_instance engines[N__]; \ +} __attribute__((packed)) name__ + +/* + * i915_context_engines_bond: + * + * Constructed bonded pairs for execution within a virtual engine. + * + * All engines are equal, but some are more equal than others. Given + * the distribution of resources in the HW, it may be preferable to run + * a request on a given subset of engines in parallel to a request on a + * specific engine. We enable this selection of engines within a virtual + * engine by specifying bonding pairs, for any given master engine we will + * only execute on one of the corresponding siblings within the virtual engine. + * + * To execute a request in parallel on the master engine and a sibling requires + * coordination with a I915_EXEC_FENCE_SUBMIT. + */ +struct i915_context_engines_bond { + struct i915_user_extension base; + + struct i915_engine_class_instance master; + + __u16 virtual_index; /* index of virtual engine in ctx->engines[] */ + __u16 num_bonds; + + __u64 flags; /* all undefined flags must be zero */ + __u64 mbz64[4]; /* reserved for future use; must be zero */ + + struct i915_engine_class_instance engines[0]; +} __attribute__((packed)); + +#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \ + struct i915_user_extension base; \ + struct i915_engine_class_instance master; \ + __u16 virtual_index; \ + __u16 num_bonds; \ + __u64 flags; \ + __u64 mbz64[4]; \ + struct i915_engine_class_instance engines[N__]; \ +} __attribute__((packed)) name__ + +struct i915_context_param_engines { + __u64 extensions; /* linked chain of extension blocks, 0 terminates */ +#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */ +#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */ + struct i915_engine_class_instance engines[0]; +} __attribute__((packed)); + +#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \ + __u64 extensions; \ + struct i915_engine_class_instance engines[N__]; \ +} __attribute__((packed)) name__ + struct drm_i915_gem_context_create_ext_setparam { #define I915_CONTEXT_CREATE_EXT_SETPARAM 0 struct i915_user_extension base; struct drm_i915_gem_context_param param; }; +struct drm_i915_gem_context_create_ext_clone { +#define I915_CONTEXT_CREATE_EXT_CLONE 1 + struct i915_user_extension base; + __u32 clone_id; + __u32 flags; +#define I915_CONTEXT_CLONE_ENGINES (1u << 0) +#define I915_CONTEXT_CLONE_FLAGS (1u << 1) +#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2) +#define I915_CONTEXT_CLONE_SSEU (1u << 3) +#define I915_CONTEXT_CLONE_TIMELINE (1u << 4) +#define I915_CONTEXT_CLONE_VM (1u << 5) +#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1) + __u64 rsvd; +}; + struct drm_i915_gem_context_destroy { __u32 ctx_id; __u32 pad; @@ -1821,6 +1983,7 @@ struct drm_i915_perf_oa_config { struct drm_i915_query_item { __u64 query_id; #define DRM_I915_QUERY_TOPOLOGY_INFO 1 +#define DRM_I915_QUERY_ENGINE_INFO 2 /* Must be kept compact -- no holes and well documented */ /* @@ 
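A userspace sketch of binding a context to an explicit engine set with the I915_CONTEXT_PARAM_ENGINES layout above, using the existing context set-param ioctl (assumes libdrm for drmIoctl and uapi headers that already carry this change; not taken from the patch itself):

#include <stdint.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Bind ctx_id to two explicit engines: one render and one copy engine.
 * Afterwards, execbuf ring index 0 selects the render engine and index 1
 * selects the copy engine. */
static int set_two_engines(int fd, uint32_t ctx_id)
{
        I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = { .extensions = 0 };
        struct drm_i915_gem_context_param arg = {
                .ctx_id = ctx_id,
                .param = I915_CONTEXT_PARAM_ENGINES,
                .size = sizeof(engines),
                .value = (uintptr_t)&engines,
        };

        engines.engines[0].engine_class = I915_ENGINE_CLASS_RENDER;
        engines.engines[0].engine_instance = 0;
        engines.engines[1].engine_class = I915_ENGINE_CLASS_COPY;
        engines.engines[1].engine_instance = 0;

        return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
}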
-1919,6 +2082,47 @@ struct drm_i915_query_topology_info { __u8 data[]; }; +/** + * struct drm_i915_engine_info + * + * Describes one engine and it's capabilities as known to the driver. + */ +struct drm_i915_engine_info { + /** Engine class and instance. */ + struct i915_engine_class_instance engine; + + /** Reserved field. */ + __u32 rsvd0; + + /** Engine flags. */ + __u64 flags; + + /** Capabilities of this engine. */ + __u64 capabilities; +#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0) +#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1) + + /** Reserved fields. */ + __u64 rsvd1[4]; +}; + +/** + * struct drm_i915_query_engine_info + * + * Engine info query enumerates all engines known to the driver by filling in + * an array of struct drm_i915_engine_info structures. + */ +struct drm_i915_query_engine_info { + /** Number of struct drm_i915_engine_info structs following. */ + __u32 num_engines; + + /** MBZ */ + __u32 rsvd[3]; + + /** Marker for drm_i915_engine_info structures. */ + struct drm_i915_engine_info engines[]; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h index a52e0283b90d..ec19db1eead8 100644 --- a/include/uapi/drm/panfrost_drm.h +++ b/include/uapi/drm/panfrost_drm.h @@ -18,6 +18,9 @@ extern "C" { #define DRM_PANFROST_MMAP_BO 0x03 #define DRM_PANFROST_GET_PARAM 0x04 #define DRM_PANFROST_GET_BO_OFFSET 0x05 +#define DRM_PANFROST_PERFCNT_ENABLE 0x06 +#define DRM_PANFROST_PERFCNT_DUMP 0x07 +#define DRM_PANFROST_MADVISE 0x08 #define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit) #define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo) @@ -25,6 +28,16 @@ extern "C" { #define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo) #define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param) #define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset) +#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise) + +/* + * Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module + * param is set to true. + * All these ioctl(s) are subject to deprecation, so please don't rely on + * them for anything but debugging purpose. + */ +#define DRM_IOCTL_PANFROST_PERFCNT_ENABLE DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_PERFCNT_ENABLE, struct drm_panfrost_perfcnt_enable) +#define DRM_IOCTL_PANFROST_PERFCNT_DUMP DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_PERFCNT_DUMP, struct drm_panfrost_perfcnt_dump) #define PANFROST_JD_REQ_FS (1 << 0) /** @@ -71,6 +84,9 @@ struct drm_panfrost_wait_bo { __s64 timeout_ns; /* absolute */ }; +#define PANFROST_BO_NOEXEC 1 +#define PANFROST_BO_HEAP 2 + /** * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs. 
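The new engine-info query above is consumed through the existing DRM_IOCTL_I915_QUERY two-pass pattern. A minimal sketch follows (not part of the patch; fd is an already-open DRM device node, the <drm/i915_drm.h> path is assumed, and error handling is kept to the bare minimum):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void print_engines(int fd)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_ENGINE_INFO,
        };
        struct drm_i915_query query = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_engine_info *info;
        uint32_t i;

        /* First pass: item.length is 0, the kernel reports the buffer size. */
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
                return;

        info = calloc(1, item.length);
        if (!info)
                return;

        /* Second pass: the kernel fills the caller-provided buffer. */
        item.data_ptr = (uintptr_t)info;
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0) {
                for (i = 0; i < info->num_engines; i++)
                        printf("engine: class %u instance %u\n",
                               info->engines[i].engine.engine_class,
                               info->engines[i].engine.engine_instance);
        }
        free(info);
}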
* @@ -116,6 +132,45 @@ struct drm_panfrost_mmap_bo { enum drm_panfrost_param { DRM_PANFROST_PARAM_GPU_PROD_ID, + DRM_PANFROST_PARAM_GPU_REVISION, + DRM_PANFROST_PARAM_SHADER_PRESENT, + DRM_PANFROST_PARAM_TILER_PRESENT, + DRM_PANFROST_PARAM_L2_PRESENT, + DRM_PANFROST_PARAM_STACK_PRESENT, + DRM_PANFROST_PARAM_AS_PRESENT, + DRM_PANFROST_PARAM_JS_PRESENT, + DRM_PANFROST_PARAM_L2_FEATURES, + DRM_PANFROST_PARAM_CORE_FEATURES, + DRM_PANFROST_PARAM_TILER_FEATURES, + DRM_PANFROST_PARAM_MEM_FEATURES, + DRM_PANFROST_PARAM_MMU_FEATURES, + DRM_PANFROST_PARAM_THREAD_FEATURES, + DRM_PANFROST_PARAM_MAX_THREADS, + DRM_PANFROST_PARAM_THREAD_MAX_WORKGROUP_SZ, + DRM_PANFROST_PARAM_THREAD_MAX_BARRIER_SZ, + DRM_PANFROST_PARAM_COHERENCY_FEATURES, + DRM_PANFROST_PARAM_TEXTURE_FEATURES0, + DRM_PANFROST_PARAM_TEXTURE_FEATURES1, + DRM_PANFROST_PARAM_TEXTURE_FEATURES2, + DRM_PANFROST_PARAM_TEXTURE_FEATURES3, + DRM_PANFROST_PARAM_JS_FEATURES0, + DRM_PANFROST_PARAM_JS_FEATURES1, + DRM_PANFROST_PARAM_JS_FEATURES2, + DRM_PANFROST_PARAM_JS_FEATURES3, + DRM_PANFROST_PARAM_JS_FEATURES4, + DRM_PANFROST_PARAM_JS_FEATURES5, + DRM_PANFROST_PARAM_JS_FEATURES6, + DRM_PANFROST_PARAM_JS_FEATURES7, + DRM_PANFROST_PARAM_JS_FEATURES8, + DRM_PANFROST_PARAM_JS_FEATURES9, + DRM_PANFROST_PARAM_JS_FEATURES10, + DRM_PANFROST_PARAM_JS_FEATURES11, + DRM_PANFROST_PARAM_JS_FEATURES12, + DRM_PANFROST_PARAM_JS_FEATURES13, + DRM_PANFROST_PARAM_JS_FEATURES14, + DRM_PANFROST_PARAM_JS_FEATURES15, + DRM_PANFROST_PARAM_NR_CORE_GROUPS, + DRM_PANFROST_PARAM_THREAD_TLS_ALLOC, }; struct drm_panfrost_get_param { @@ -135,6 +190,39 @@ struct drm_panfrost_get_bo_offset { __u64 offset; }; +struct drm_panfrost_perfcnt_enable { + __u32 enable; + /* + * On bifrost we have 2 sets of counters, this parameter defines the + * one to track. + */ + __u32 counterset; +}; + +struct drm_panfrost_perfcnt_dump { + __u64 buf_ptr; +}; + +/* madvise provides a way to tell the kernel in case a buffers contents + * can be discarded under memory pressure, which is useful for userspace + * bo cache where we want to optimistically hold on to buffer allocate + * and potential mmap, but allow the pages to be discarded under memory + * pressure. + * + * Typical usage would involve madvise(DONTNEED) when buffer enters BO + * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache. + * In the WILLNEED case, 'retained' indicates to userspace whether the + * backing pages still exist. 
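The BO-cache pattern described in the madvise comment above could, as a rough sketch outside the patch, look like the following. It uses the PANFROST_MADV_* values and struct drm_panfrost_madvise defined just below, assumes the header is installed as <drm/panfrost_drm.h>, and omits error handling beyond the ioctl return value.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

/* Buffer goes idle and enters the userspace BO cache. */
static void bo_cache_put(int fd, uint32_t handle)
{
        struct drm_panfrost_madvise req = {
                .handle = handle,
                .madv = PANFROST_MADV_DONTNEED,
        };

        ioctl(fd, DRM_IOCTL_PANFROST_MADVISE, &req);
}

/* Try to recycle a cached buffer; returns 1 if its pages were retained. */
static int bo_cache_get(int fd, uint32_t handle)
{
        struct drm_panfrost_madvise req = {
                .handle = handle,
                .madv = PANFROST_MADV_WILLNEED,
        };

        if (ioctl(fd, DRM_IOCTL_PANFROST_MADVISE, &req))
                return 0;
        return req.retained;
}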
+ */ +#define PANFROST_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */ +#define PANFROST_MADV_DONTNEED 1 /* backing pages not needed */ + +struct drm_panfrost_madvise { + __u32 handle; /* in, GEM handle */ + __u32 madv; /* in, PANFROST_MADV_x */ + __u32 retained; /* out, whether backing store still exists */ +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index ea70669d2138..58fbe48c91e9 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -37,6 +37,7 @@ extern "C" { #define DRM_V3D_GET_PARAM 0x04 #define DRM_V3D_GET_BO_OFFSET 0x05 #define DRM_V3D_SUBMIT_TFU 0x06 +#define DRM_V3D_SUBMIT_CSD 0x07 #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) @@ -45,6 +46,7 @@ extern "C" { #define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param) #define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset) #define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu) +#define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd) /** * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D @@ -190,6 +192,7 @@ enum drm_v3d_param { DRM_V3D_PARAM_V3D_CORE0_IDENT1, DRM_V3D_PARAM_V3D_CORE0_IDENT2, DRM_V3D_PARAM_SUPPORTS_TFU, + DRM_V3D_PARAM_SUPPORTS_CSD, }; struct drm_v3d_get_param { @@ -230,6 +233,31 @@ struct drm_v3d_submit_tfu { __u32 out_sync; }; +/* Submits a compute shader for dispatch. This job will block on any + * previous compute shaders submitted on this fd, and any other + * synchronization must be performed with in_sync/out_sync. + */ +struct drm_v3d_submit_csd { + __u32 cfg[7]; + __u32 coef[4]; + + /* Pointer to a u32 array of the BOs that are referenced by the job. + */ + __u64 bo_handles; + + /* Number of BO handles passed in (size is that times 4). */ + __u32 bo_handle_count; + + /* sync object to block on before running the CSD job. Each + * CSD job will execute in the order submitted to its FD. + * Synchronization against rendering/TFU jobs or CSD from + * other fds requires using sync objects. + */ + __u32 in_sync; + /* Sync object to signal when the CSD job is done. 
*/ + __u32 out_sync; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/linux/adfs_fs.h b/include/uapi/linux/adfs_fs.h index 151d93e27ed4..f1a7d67a7323 100644 --- a/include/uapi/linux/adfs_fs.h +++ b/include/uapi/linux/adfs_fs.h @@ -29,17 +29,17 @@ struct adfs_discrecord { __u8 log2sharesize:4; __u8 unused40:4; __u8 big_flag:1; - __u8 unused41:1; + __u8 unused41:7; __u8 nzones_high; + __u8 reserved43; __le32 format_version; __le32 root_size; __u8 unused52[60 - 52]; -}; +} __attribute__((packed, aligned(4))); #define ADFS_DISCRECORD (0xc00) #define ADFS_DR_OFFSET (0x1c0) #define ADFS_DR_SIZE 60 #define ADFS_DR_SIZE_BITS (ADFS_DR_SIZE << 3) - #endif /* _UAPI_ADFS_FS_H */ diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index a1280af20336..c89c6495983d 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -281,6 +281,7 @@ #define AUDIT_OBJ_GID 110 #define AUDIT_FIELD_COMPARE 111 #define AUDIT_EXE 112 +#define AUDIT_SADDR_FAM 113 #define AUDIT_ARG0 200 #define AUDIT_ARG1 (AUDIT_ARG0+1) diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h index 4ebc2135e950..2a15f01c2243 100644 --- a/include/uapi/linux/batadv_packet.h +++ b/include/uapi/linux/batadv_packet.h @@ -107,12 +107,20 @@ enum batadv_icmp_packettype { * @BATADV_MCAST_WANT_ALL_UNSNOOPABLES: we want all packets destined for * 224.0.0.0/24 or ff02::1 * @BATADV_MCAST_WANT_ALL_IPV4: we want all IPv4 multicast packets + * (both link-local and routable ones) * @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets + * (both link-local and routable ones) + * @BATADV_MCAST_WANT_NO_RTR4: we have no IPv4 multicast router and therefore + * only need routable IPv4 multicast packets we signed up for explicitly + * @BATADV_MCAST_WANT_NO_RTR6: we have no IPv6 multicast router and therefore + * only need routable IPv6 multicast packets we signed up for explicitly */ enum batadv_mcast_flags { BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0, BATADV_MCAST_WANT_ALL_IPV4 = 1UL << 1, BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2, + BATADV_MCAST_WANT_NO_RTR4 = 1UL << 3, + BATADV_MCAST_WANT_NO_RTR6 = 1UL << 4, }; /* tt data subtypes */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a8b823c30b43..77c6be96d676 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -106,6 +106,7 @@ enum bpf_cmd { BPF_TASK_FD_QUERY, BPF_MAP_LOOKUP_AND_DELETE_ELEM, BPF_MAP_FREEZE, + BPF_BTF_GET_NEXT_ID, }; enum bpf_map_type { @@ -134,6 +135,7 @@ enum bpf_map_type { BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK, BPF_MAP_TYPE_SK_STORAGE, + BPF_MAP_TYPE_DEVMAP_HASH, }; /* Note that tracing related programs such as @@ -170,6 +172,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_FLOW_DISSECTOR, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, + BPF_PROG_TYPE_CGROUP_SOCKOPT, }; enum bpf_attach_type { @@ -194,6 +197,8 @@ enum bpf_attach_type { BPF_CGROUP_SYSCTL, BPF_CGROUP_UDP4_RECVMSG, BPF_CGROUP_UDP6_RECVMSG, + BPF_CGROUP_GETSOCKOPT, + BPF_CGROUP_SETSOCKOPT, __MAX_BPF_ATTACH_TYPE }; @@ -262,6 +267,27 @@ enum bpf_attach_type { */ #define BPF_F_ANY_ALIGNMENT (1U << 1) +/* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose. + * Verifier does sub-register def/use analysis and identifies instructions whose + * def only matters for low 32-bit, high 32-bit is never referenced later + * through implicit zero extension. Therefore verifier notifies JIT back-ends + * that it is safe to ignore clearing high 32-bit for these instructions. 
This + * saves some back-ends a lot of code-gen. However such optimization is not + * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends + * hence hasn't used verifier's analysis result. But, we really want to have a + * way to be able to verify the correctness of the described optimization on + * x86_64 on which testsuites are frequently exercised. + * + * So, this flag is introduced. Once it is set, verifier will randomize high + * 32-bit for those instructions who has been identified as safe to ignore them. + * Then, if verifier is not doing correct analysis, such randomization will + * regress tests to expose bugs. + */ +#define BPF_F_TEST_RND_HI32 (1U << 2) + +/* The verifier internal test flag. Behavior is undefined */ +#define BPF_F_TEST_STATE_FREQ (1U << 3) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * two extensions: * @@ -315,6 +341,9 @@ enum bpf_attach_type { #define BPF_F_RDONLY_PROG (1U << 7) #define BPF_F_WRONLY_PROG (1U << 8) +/* Clone map from listener for newly accepted socket */ +#define BPF_F_CLONE (1U << 9) + /* flags for BPF_PROG_QUERY */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) @@ -554,6 +583,8 @@ union bpf_attr { * limited to five). * * Each time the helper is called, it appends a line to the trace. + * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is + * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. * The format of the trace is customizable, and the exact output * one will get depends on the options set in * *\/sys/kernel/debug/tracing/trace_options* (see also the @@ -785,7 +816,7 @@ union bpf_attr { * based on a user-provided identifier for all traffic coming from * the tasks belonging to the related cgroup. See also the related * kernel documentation, available from the Linux sources in file - * *Documentation/cgroup-v1/net_cls.txt*. + * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. * * The Linux kernel has two versions for cgroups: there are * cgroups v1 and cgroups v2. Both are available to users, who can @@ -992,7 +1023,7 @@ union bpf_attr { * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * - * int bpf_perf_event_output(struct pt_reg *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -1054,7 +1085,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stackid(struct pt_reg *ctx, struct bpf_map *map, u64 flags) + * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context @@ -1445,8 +1476,8 @@ union bpf_attr { * If no cookie has been set yet, generate a new cookie. Once * generated, the socket cookie remains stable for the life of the * socket. This helper can be useful for monitoring per socket - * networking traffic statistics as it provides a unique socket - * identifier per namespace. + * networking traffic statistics as it provides a global socket + * identifier that can be assumed unique. * Return * A 8-byte long non-decreasing number on success, or 0 if the * socket field is missing inside *skb*. 
@@ -1550,8 +1581,11 @@ union bpf_attr { * but this is only implemented for native XDP (with driver * support) as of this writing). * - * All values for *flags* are reserved for future usage, and must - * be left at zero. + * The lower two bits of *flags* are used as the return code if + * the map lookup fails. This is so that the return value can be + * one of the XDP program return codes up to XDP_TX, as chosen by + * the caller. Any higher bits in the *flags* argument must be + * unset. * * When used to redirect packets to net devices, this helper * provides a high performance increase over **bpf_redirect**\ (). @@ -1700,7 +1734,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_override_return(struct pt_reg *regs, u64 rc) + * int bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. @@ -1746,6 +1780,7 @@ union bpf_attr { * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) + * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) * * Therefore, this function can be used to clear a callback flag by * setting the appropriate bit to zero. e.g. to disable the RTO @@ -2674,6 +2709,47 @@ union bpf_attr { * 0 on success. * * **-ENOENT** if the bpf-local-storage cannot be found. + * + * int bpf_send_signal(u32 sig) + * Description + * Send signal *sig* to the current task. + * Return + * 0 on success or successfully queued. + * + * **-EBUSY** if work queue under nmi is full. + * + * **-EINVAL** if *sig* is invalid. + * + * **-EPERM** if no permission to send the *sig*. + * + * **-EAGAIN** if bpf program can try again. + * + * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) + * Description + * Try to issue a SYN cookie for the packet with corresponding + * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. + * + * *iph* points to the start of the IPv4 or IPv6 header, while + * *iph_len* contains **sizeof**\ (**struct iphdr**) or + * **sizeof**\ (**struct ip6hdr**). + * + * *th* points to the start of the TCP header, while *th_len* + * contains the length of the TCP header. + * + * Return + * On success, lower 32 bits hold the generated SYN cookie in + * followed by 16 bits which hold the MSS value for that cookie, + * and the top 16 bits are unused. + * + * On failure, the returned value is one of the following: + * + * **-EINVAL** SYN cookie cannot be issued due to error + * + * **-ENOENT** SYN cookie should not be issued (no SYN flood) + * + * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies + * + * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2784,7 +2860,9 @@ union bpf_attr { FN(strtol), \ FN(strtoul), \ FN(sk_storage_get), \ - FN(sk_storage_delete), + FN(sk_storage_delete), \ + FN(send_signal), \ + FN(tcp_gen_syncookie), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -3033,6 +3111,12 @@ struct bpf_tcp_sock { * sum(delta(snd_una)), or how many bytes * were acked. */ + __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups + * total number of DSACK blocks received + */ + __u32 delivered; /* Total data packets delivered incl. 
rexmits */ + __u32 delivered_ce; /* Like the above but only ECE marked packets */ + __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ }; struct bpf_sock_tuple { @@ -3052,6 +3136,10 @@ struct bpf_sock_tuple { }; }; +struct bpf_xdp_sock { + __u32 queue_id; +}; + #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type. @@ -3143,6 +3231,7 @@ struct bpf_prog_info { char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 gpl_compatible:1; + __u32 :31; /* alignment pad */ __u64 netns_dev; __u64 netns_ino; __u32 nr_jited_ksyms; @@ -3197,7 +3286,7 @@ struct bpf_sock_addr { __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ - __u32 user_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write. + __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. * Stored in network byte order. */ __u32 user_port; /* Allows 4-byte read and write. @@ -3206,12 +3295,13 @@ struct bpf_sock_addr { __u32 family; /* Allows 4-byte read, but no write */ __u32 type; /* Allows 4-byte read, but no write */ __u32 protocol; /* Allows 4-byte read, but no write */ - __u32 msg_src_ip4; /* Allows 1,2,4-byte read an 4-byte write. + __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ - __u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write. + __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. * Stored in network byte order. */ + __bpf_md_ptr(struct bpf_sock *, sk); }; /* User bpf_sock_ops struct to access socket values and specify request ops @@ -3263,13 +3353,15 @@ struct bpf_sock_ops { __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; + __bpf_md_ptr(struct bpf_sock *, sk); }; /* Definitions for bpf_sock_ops_cb_flags */ #define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0) #define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1) #define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2) -#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently +#define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3) +#define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently * supported cb flags */ @@ -3324,6 +3416,8 @@ enum { BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after * socket transition to LISTEN state. */ + BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. + */ }; /* List of TCP states. 
There is a build check in net/ipv4/tcp.c to detect @@ -3451,6 +3545,10 @@ enum bpf_task_fd_type { BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; +#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0) +#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1) +#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2) + struct bpf_flow_keys { __u16 nhoff; __u16 thoff; @@ -3472,6 +3570,8 @@ struct bpf_flow_keys { __u32 ipv6_dst[4]; /* in6_addr; network order */ }; }; + __u32 flags; + __be32 flow_label; }; struct bpf_func_info { @@ -3502,4 +3602,15 @@ struct bpf_sysctl { */ }; +struct bpf_sockopt { + __bpf_md_ptr(struct bpf_sock *, sk); + __bpf_md_ptr(void *, optval); + __bpf_md_ptr(void *, optval_end); + + __s32 level; + __s32 optname; + __s32 optlen; + __s32 retval; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/bpfilter.h b/include/uapi/linux/bpfilter.h index 2ec3cc99ea4c..cbc1f5813f50 100644 --- a/include/uapi/linux/bpfilter.h +++ b/include/uapi/linux/bpfilter.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_LINUX_BPFILTER_H #define _UAPI_LINUX_BPFILTER_H diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h index 63ae4a39e58b..c02dec97e1ce 100644 --- a/include/uapi/linux/btf.h +++ b/include/uapi/linux/btf.h @@ -22,9 +22,9 @@ struct btf_header { }; /* Max # of type identifier */ -#define BTF_MAX_TYPE 0x0000ffff +#define BTF_MAX_TYPE 0x000fffff /* Max offset into the string section */ -#define BTF_MAX_NAME_OFFSET 0x0000ffff +#define BTF_MAX_NAME_OFFSET 0x00ffffff /* Max # of struct/union/enum members or func args */ #define BTF_MAX_VLEN 0xffff diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index c195896d478f..3ee0678c0a83 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -665,7 +665,12 @@ struct btrfs_ioctl_get_dev_stats { /* out values: */ __u64 values[BTRFS_DEV_STAT_VALUES_MAX]; - __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */ + /* + * This pads the struct to 1032 bytes. It was originally meant to pad to + * 1024 bytes, but when adding the flags field, the padding calculation + * was not adjusted. 
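The new struct bpf_sockopt above is the context seen by BPF_PROG_TYPE_CGROUP_SOCKOPT programs attached at the BPF_CGROUP_GETSOCKOPT / BPF_CGROUP_SETSOCKOPT hooks added earlier in this file. A minimal sketch of such a program follows (not from the patch); the SEC() macro here is only an ELF-section attribute in the style that loaders such as libbpf expect.

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

SEC("cgroup/getsockopt")
int allow_all_getsockopt(struct bpf_sockopt *ctx)
{
        /* ctx->level, ctx->optname and the optval..optval_end window are
         * available for inspection or rewriting here. */
        return 1;       /* 1 = let the getsockopt() proceed, 0 = reject with EPERM */
}

SEC("license")
char _license[] = "GPL";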
+ */ + __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; }; #define BTRFS_QUOTA_CTL_ENABLE 1 @@ -917,10 +922,8 @@ enum btrfs_err_code { #define BTRFS_IOC_QUOTA_RESCAN_STATUS _IOR(BTRFS_IOCTL_MAGIC, 45, \ struct btrfs_ioctl_quota_rescan_args) #define BTRFS_IOC_QUOTA_RESCAN_WAIT _IO(BTRFS_IOCTL_MAGIC, 46) -#define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \ - char[BTRFS_LABEL_SIZE]) -#define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \ - char[BTRFS_LABEL_SIZE]) +#define BTRFS_IOC_GET_FSLABEL FS_IOC_GETFSLABEL +#define BTRFS_IOC_SET_FSLABEL FS_IOC_SETFSLABEL #define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \ struct btrfs_ioctl_get_dev_stats) #define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \ diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 421239b98db2..b65c7ee75bc7 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -300,7 +300,9 @@ #define BTRFS_CSUM_SIZE 32 /* csum types */ -#define BTRFS_CSUM_TYPE_CRC32 0 +enum btrfs_csum_type { + BTRFS_CSUM_TYPE_CRC32 = 0, +}; /* * flags definitions for directory entry item type @@ -806,11 +808,6 @@ struct btrfs_dev_stats_item { #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID 1 -#define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED 0 -#define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED 1 -#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2 -#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3 -#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4 struct btrfs_dev_replace_item { /* @@ -866,6 +863,8 @@ enum btrfs_raid_types { #define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \ BTRFS_BLOCK_GROUP_RAID6) +#define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1) + /* * We need a bit for restriper to be able to tell when chunks of type * SINGLE are available. This "extended" profile format is used in diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h index 0afb7d8e867f..1e988fdeba34 100644 --- a/include/uapi/linux/can.h +++ b/include/uapi/linux/can.h @@ -157,7 +157,8 @@ struct canfd_frame { #define CAN_TP20 4 /* VAG Transport Protocol v2.0 */ #define CAN_MCNET 5 /* Bosch MCNet */ #define CAN_ISOTP 6 /* ISO 15765-2 Transport Protocol */ -#define CAN_NPROTO 7 +#define CAN_J1939 7 /* SAE J1939 */ +#define CAN_NPROTO 8 #define SOL_CAN_BASE 100 @@ -174,6 +175,23 @@ struct sockaddr_can { /* transport protocol class address information (e.g. 
ISOTP) */ struct { canid_t rx_id, tx_id; } tp; + /* J1939 address information */ + struct { + /* 8 byte name when using dynamic addressing */ + __u64 name; + + /* pgn: + * 8 bit: PS in PDU2 case, else 0 + * 8 bit: PF + * 1 bit: DP + * 1 bit: reserved + */ + __u32 pgn; + + /* 1 byte address */ + __u8 addr; + } j1939; + /* reserved for future CAN protocols address information */ } can_addr; }; diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h index 7bee7a0b9800..3aea5388c8e4 100644 --- a/include/uapi/linux/can/gw.h +++ b/include/uapi/linux/can/gw.h @@ -80,6 +80,10 @@ enum { CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */ CGW_LIM_HOPS, /* limit the number of hops of this specific rule */ CGW_MOD_UID, /* user defined identifier for modification updates */ + CGW_FDMOD_AND, /* CAN FD frame modification binary AND */ + CGW_FDMOD_OR, /* CAN FD frame modification binary OR */ + CGW_FDMOD_XOR, /* CAN FD frame modification binary XOR */ + CGW_FDMOD_SET, /* CAN FD frame modification set alternate values */ __CGW_MAX }; @@ -88,15 +92,18 @@ enum { #define CGW_FLAGS_CAN_ECHO 0x01 #define CGW_FLAGS_CAN_SRC_TSTAMP 0x02 #define CGW_FLAGS_CAN_IIF_TX_OK 0x04 +#define CGW_FLAGS_CAN_FD 0x08 #define CGW_MOD_FUNCS 4 /* AND OR XOR SET */ /* CAN frame elements that are affected by curr. 3 CAN frame modifications */ #define CGW_MOD_ID 0x01 -#define CGW_MOD_DLC 0x02 +#define CGW_MOD_DLC 0x02 /* contains the data length in bytes */ +#define CGW_MOD_LEN CGW_MOD_DLC /* CAN FD length representation */ #define CGW_MOD_DATA 0x04 +#define CGW_MOD_FLAGS 0x08 /* CAN FD flags */ -#define CGW_FRAME_MODS 3 /* ID DLC DATA */ +#define CGW_FRAME_MODS 4 /* ID DLC/LEN DATA FLAGS */ #define MAX_MODFUNCTIONS (CGW_MOD_FUNCS * CGW_FRAME_MODS) @@ -105,7 +112,13 @@ struct cgw_frame_mod { __u8 modtype; } __attribute__((packed)); +struct cgw_fdframe_mod { + struct canfd_frame cf; + __u8 modtype; +} __attribute__((packed)); + #define CGW_MODATTR_LEN sizeof(struct cgw_frame_mod) +#define CGW_FDMODATTR_LEN sizeof(struct cgw_fdframe_mod) struct cgw_csum_xor { __s8 from_idx; diff --git a/include/uapi/linux/can/j1939.h b/include/uapi/linux/can/j1939.h new file mode 100644 index 000000000000..c32325342d30 --- /dev/null +++ b/include/uapi/linux/can/j1939.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * j1939.h + * + * Copyright (c) 2010-2011 EIA Electronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
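To illustrate the j1939 member added to sockaddr_can above together with the new <linux/can/j1939.h> header being introduced here, the sketch below (not part of the patch) opens a J1939 socket and binds it to a static source address. The interface name and the 0x20 source address are arbitrary example values; J1939_NO_NAME and J1939_NO_PGN are defined further down in this header.

#include <string.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/j1939.h>

static int open_j1939(const char *ifname)
{
        struct sockaddr_can addr;
        int sock;

        sock = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
        if (sock < 0)
                return -1;

        memset(&addr, 0, sizeof(addr));
        addr.can_family = AF_CAN;
        addr.can_ifindex = if_nametoindex(ifname);
        addr.can_addr.j1939.name = J1939_NO_NAME;  /* static addressing */
        addr.can_addr.j1939.addr = 0x20;           /* our source address */
        addr.can_addr.j1939.pgn = J1939_NO_PGN;    /* no PGN filter */

        if (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                return -1;
        return sock;
}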
+ */ + +#ifndef _UAPI_CAN_J1939_H_ +#define _UAPI_CAN_J1939_H_ + +#include <linux/types.h> +#include <linux/socket.h> +#include <linux/can.h> + +#define J1939_MAX_UNICAST_ADDR 0xfd +#define J1939_IDLE_ADDR 0xfe +#define J1939_NO_ADDR 0xff /* == broadcast or no addr */ +#define J1939_NO_NAME 0 +#define J1939_PGN_REQUEST 0x0ea00 /* Request PG */ +#define J1939_PGN_ADDRESS_CLAIMED 0x0ee00 /* Address Claimed */ +#define J1939_PGN_ADDRESS_COMMANDED 0x0fed8 /* Commanded Address */ +#define J1939_PGN_PDU1_MAX 0x3ff00 +#define J1939_PGN_MAX 0x3ffff +#define J1939_NO_PGN 0x40000 + +/* J1939 Parameter Group Number + * + * bit 0-7 : PDU Specific (PS) + * bit 8-15 : PDU Format (PF) + * bit 16 : Data Page (DP) + * bit 17 : Reserved (R) + * bit 19-31 : set to zero + */ +typedef __u32 pgn_t; + +/* J1939 Priority + * + * bit 0-2 : Priority (P) + * bit 3-7 : set to zero + */ +typedef __u8 priority_t; + +/* J1939 NAME + * + * bit 0-20 : Identity Number + * bit 21-31 : Manufacturer Code + * bit 32-34 : ECU Instance + * bit 35-39 : Function Instance + * bit 40-47 : Function + * bit 48 : Reserved + * bit 49-55 : Vehicle System + * bit 56-59 : Vehicle System Instance + * bit 60-62 : Industry Group + * bit 63 : Arbitrary Address Capable + */ +typedef __u64 name_t; + +/* J1939 socket options */ +#define SOL_CAN_J1939 (SOL_CAN_BASE + CAN_J1939) +enum { + SO_J1939_FILTER = 1, /* set filters */ + SO_J1939_PROMISC = 2, /* set/clr promiscuous mode */ + SO_J1939_SEND_PRIO = 3, + SO_J1939_ERRQUEUE = 4, +}; + +enum { + SCM_J1939_DEST_ADDR = 1, + SCM_J1939_DEST_NAME = 2, + SCM_J1939_PRIO = 3, + SCM_J1939_ERRQUEUE = 4, +}; + +enum { + J1939_NLA_PAD, + J1939_NLA_BYTES_ACKED, +}; + +enum { + J1939_EE_INFO_NONE, + J1939_EE_INFO_TX_ABORT, +}; + +struct j1939_filter { + name_t name; + name_t name_mask; + pgn_t pgn; + pgn_t pgn_mask; + __u8 addr; + __u8 addr_mask; +}; + +#define J1939_FILTER_MAX 512 /* maximum number of j1939_filter set via setsockopt() */ + +#endif /* !_UAPI_CAN_J1939_H_ */ diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h index 9f56fad4785b..1bc70d3a4d39 100644 --- a/include/uapi/linux/can/netlink.h +++ b/include/uapi/linux/can/netlink.h @@ -40,15 +40,15 @@ struct can_bittiming { }; /* - * CAN harware-dependent bit-timing constant + * CAN hardware-dependent bit-timing constant * * Used for calculating and checking bit-timing parameters */ struct can_bittiming_const { char name[16]; /* Name of the CAN controller hardware */ - __u32 tseg1_min; /* Time segement 1 = prop_seg + phase_seg1 */ + __u32 tseg1_min; /* Time segment 1 = prop_seg + phase_seg1 */ __u32 tseg1_max; - __u32 tseg2_min; /* Time segement 2 = phase_seg2 */ + __u32 tseg2_min; /* Time segment 2 = phase_seg2 */ __u32 tseg2_max; __u32 sjw_max; /* Synchronisation jump width */ __u32 brp_min; /* Bit-rate prescaler */ diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h index 3094af68b6e7..5704fa0292b5 100644 --- a/include/uapi/linux/cec.h +++ b/include/uapi/linux/cec.h @@ -144,6 +144,7 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg, /* cec_msg flags field */ #define CEC_MSG_FL_REPLY_TO_FOLLOWERS (1 << 0) +#define CEC_MSG_FL_RAW (1 << 1) /* cec_msg tx/rx_status field */ #define CEC_TX_STATUS_OK (1 << 0) diff --git a/include/uapi/linux/coda.h b/include/uapi/linux/coda.h index 695fade33c64..aa34c2dcae8d 100644 --- a/include/uapi/linux/coda.h +++ b/include/uapi/linux/coda.h @@ -86,10 +86,6 @@ typedef unsigned long long u_quad_t; #define inline -struct timespec { - long ts_sec; - long ts_nsec; -}; 
#else /* DJGPP but not KERNEL */ #include <sys/time.h> typedef unsigned long long u_quad_t; @@ -110,13 +106,6 @@ typedef unsigned long long u_quad_t; #define cdev_t dev_t #endif -#ifdef __CYGWIN32__ -struct timespec { - time_t tv_sec; /* seconds */ - long tv_nsec; /* nanoseconds */ -}; -#endif - #ifndef __BIT_TYPES_DEFINED__ #define __BIT_TYPES_DEFINED__ typedef signed char int8_t; @@ -211,6 +200,11 @@ struct CodaFid { */ enum coda_vtype { C_VNON, C_VREG, C_VDIR, C_VBLK, C_VCHR, C_VLNK, C_VSOCK, C_VFIFO, C_VBAD }; +struct coda_timespec { + int64_t tv_sec; /* seconds */ + long tv_nsec; /* nanoseconds */ +}; + struct coda_vattr { long va_type; /* vnode type (for create) */ u_short va_mode; /* files access mode and type */ @@ -220,9 +214,9 @@ struct coda_vattr { long va_fileid; /* file id */ u_quad_t va_size; /* file size in bytes */ long va_blocksize; /* blocksize preferred for i/o */ - struct timespec va_atime; /* time of last access */ - struct timespec va_mtime; /* time of last modification */ - struct timespec va_ctime; /* time file changed */ + struct coda_timespec va_atime; /* time of last access */ + struct coda_timespec va_mtime; /* time of last modification */ + struct coda_timespec va_ctime; /* time file changed */ u_long va_gen; /* generation number of file */ u_long va_flags; /* flags defined for file */ cdev_t va_rdev; /* device special file represents */ @@ -277,7 +271,8 @@ struct coda_statfs { #define CODA_STATFS 34 #define CODA_STORE 35 #define CODA_RELEASE 36 -#define CODA_NCALLS 37 +#define CODA_ACCESS_INTENT 37 +#define CODA_NCALLS 38 #define DOWNCALL(opcode) (opcode >= CODA_REPLACE && opcode <= CODA_PURGEFID) @@ -287,7 +282,12 @@ struct coda_statfs { #define CIOC_KERNEL_VERSION _IOWR('c', 10, size_t) -#define CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */ +// CODA_KERNEL_VERSION 0 /* don't care about kernel version number */ +// CODA_KERNEL_VERSION 1 /* The old venus 4.6 compatible interface */ +// CODA_KERNEL_VERSION 2 /* venus_lookup gets an extra parameter */ +// CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */ +// CODA_KERNEL_VERSION 4 /* 64-bit timespec */ +#define CODA_KERNEL_VERSION 5 /* access intent support */ /* * Venus <-> Coda RPC arguments @@ -295,8 +295,8 @@ struct coda_statfs { struct coda_in_hdr { u_int32_t opcode; u_int32_t unique; /* Keep multiple outstanding msgs distinct */ - pid_t pid; - pid_t pgid; + __kernel_pid_t pid; + __kernel_pid_t pgid; vuid_t uid; }; @@ -642,6 +642,25 @@ struct coda_statfs_out { struct coda_statfs stat; }; +#define CODA_ACCESS_TYPE_READ 1 +#define CODA_ACCESS_TYPE_WRITE 2 +#define CODA_ACCESS_TYPE_MMAP 3 +#define CODA_ACCESS_TYPE_READ_FINISH 4 +#define CODA_ACCESS_TYPE_WRITE_FINISH 5 + +/* coda_access_intent: NO_OUT */ +struct coda_access_intent_in { + struct coda_in_hdr ih; + struct CodaFid VFid; + int count; + int pos; + int type; +}; + +struct coda_access_intent_out { + struct coda_out_hdr out; +}; + /* * Occasionally, we don't cache the fid returned by CODA_LOOKUP. * For instance, if the fid is inconsistent. 
@@ -673,6 +692,7 @@ union inputArgs { struct coda_open_by_fd_in coda_open_by_fd; struct coda_open_by_path_in coda_open_by_path; struct coda_statfs_in coda_statfs; + struct coda_access_intent_in coda_access_intent; }; union outputArgs { diff --git a/include/uapi/linux/coda_psdev.h b/include/uapi/linux/coda_psdev.h deleted file mode 100644 index aa6623efd2dd..000000000000 --- a/include/uapi/linux/coda_psdev.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _UAPI__CODA_PSDEV_H -#define _UAPI__CODA_PSDEV_H - -#include <linux/magic.h> - -#define CODA_PSDEV_MAJOR 67 -#define MAX_CODADEVS 5 /* how many do we allow */ - - -/* messages between coda filesystem in kernel and Venus */ -struct upc_req { - struct list_head uc_chain; - caddr_t uc_data; - u_short uc_flags; - u_short uc_inSize; /* Size is at most 5000 bytes */ - u_short uc_outSize; - u_short uc_opcode; /* copied from data to save lookup */ - int uc_unique; - wait_queue_head_t uc_sleep; /* process' wait queue */ -}; - -#define CODA_REQ_ASYNC 0x1 -#define CODA_REQ_READ 0x2 -#define CODA_REQ_WRITE 0x4 -#define CODA_REQ_ABORT 0x8 - -#endif /* _UAPI__CODA_PSDEV_H */ diff --git a/include/uapi/linux/coff.h b/include/uapi/linux/coff.h index e4a79f80b9a0..ab5c7e847eed 100644 --- a/include/uapi/linux/coff.h +++ b/include/uapi/linux/coff.h @@ -11,6 +11,9 @@ more information about COFF, then O'Reilly has a very excellent book. */ +#ifndef _UAPI_LINUX_COFF_H +#define _UAPI_LINUX_COFF_H + #define E_SYMNMLEN 8 /* Number of characters in a symbol name */ #define E_FILNMLEN 14 /* Number of characters in a file name */ #define E_DIMNUM 4 /* Number of array dimensions in auxiliary entry */ @@ -350,3 +353,5 @@ struct COFF_reloc { /* For new sections we haven't heard of before */ #define COFF_DEF_SECTION_ALIGNMENT 4 + +#endif /* _UAPI_LINUX_COFF_H */ diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 4dc1603919ce..5730c67f0617 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -19,6 +19,9 @@ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ +#ifndef _UAPI_LINUX_CRYPTOUSER_H +#define _UAPI_LINUX_CRYPTOUSER_H + #include <linux/types.h> /* Netlink configuration messages. */ @@ -198,3 +201,5 @@ struct crypto_report_acomp { #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ sizeof(struct crypto_report_blkcipher)) + +#endif /* _UAPI_LINUX_CRYPTOUSER_H */ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 5bb4ea67d84f..580b7a2e40e1 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -104,6 +104,18 @@ enum devlink_command { DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR, DEVLINK_CMD_FLASH_UPDATE, + DEVLINK_CMD_FLASH_UPDATE_END, /* notification only */ + DEVLINK_CMD_FLASH_UPDATE_STATUS, /* notification only */ + + DEVLINK_CMD_TRAP_GET, /* can dump */ + DEVLINK_CMD_TRAP_SET, + DEVLINK_CMD_TRAP_NEW, + DEVLINK_CMD_TRAP_DEL, + + DEVLINK_CMD_TRAP_GROUP_GET, /* can dump */ + DEVLINK_CMD_TRAP_GROUP_SET, + DEVLINK_CMD_TRAP_GROUP_NEW, + DEVLINK_CMD_TRAP_GROUP_DEL, /* add new commands above here */ __DEVLINK_CMD_MAX, @@ -167,6 +179,14 @@ enum devlink_port_flavour { DEVLINK_PORT_FLAVOUR_DSA, /* Distributed switch architecture * interconnect port. */ + DEVLINK_PORT_FLAVOUR_PCI_PF, /* Represents eswitch port for + * the PCI PF. It is an internal + * port that faces the PCI PF. + */ + DEVLINK_PORT_FLAVOUR_PCI_VF, /* Represents eswitch port + * for the PCI VF. 
It is an internal + * port that faces the PCI VF. + */ }; enum devlink_param_cmode { @@ -182,6 +202,56 @@ enum devlink_param_cmode { enum devlink_param_fw_load_policy_value { DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER, DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH, + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK, + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_UNKNOWN, +}; + +enum devlink_param_reset_dev_on_drv_probe_value { + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_UNKNOWN, + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_ALWAYS, + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_NEVER, + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK, +}; + +enum { + DEVLINK_ATTR_STATS_RX_PACKETS, /* u64 */ + DEVLINK_ATTR_STATS_RX_BYTES, /* u64 */ + + __DEVLINK_ATTR_STATS_MAX, + DEVLINK_ATTR_STATS_MAX = __DEVLINK_ATTR_STATS_MAX - 1 +}; + +/** + * enum devlink_trap_action - Packet trap action. + * @DEVLINK_TRAP_ACTION_DROP: Packet is dropped by the device and a copy is not + * sent to the CPU. + * @DEVLINK_TRAP_ACTION_TRAP: The sole copy of the packet is sent to the CPU. + */ +enum devlink_trap_action { + DEVLINK_TRAP_ACTION_DROP, + DEVLINK_TRAP_ACTION_TRAP, +}; + +/** + * enum devlink_trap_type - Packet trap type. + * @DEVLINK_TRAP_TYPE_DROP: Trap reason is a drop. Trapped packets are only + * processed by devlink and not injected to the + * kernel's Rx path. + * @DEVLINK_TRAP_TYPE_EXCEPTION: Trap reason is an exception. Packet was not + * forwarded as intended due to an exception + * (e.g., missing neighbour entry) and trapped to + * control plane for resolution. Trapped packets + * are processed by devlink and injected to + * the kernel's Rx path. + */ +enum devlink_trap_type { + DEVLINK_TRAP_TYPE_DROP, + DEVLINK_TRAP_TYPE_EXCEPTION, +}; + +enum { + /* Trap can report input port as metadata */ + DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT, }; enum devlink_attr { @@ -331,6 +401,25 @@ enum devlink_attr { DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME, /* string */ DEVLINK_ATTR_FLASH_UPDATE_COMPONENT, /* string */ + DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG, /* string */ + DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE, /* u64 */ + DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL, /* u64 */ + + DEVLINK_ATTR_PORT_PCI_PF_NUMBER, /* u16 */ + DEVLINK_ATTR_PORT_PCI_VF_NUMBER, /* u16 */ + + DEVLINK_ATTR_STATS, /* nested */ + + DEVLINK_ATTR_TRAP_NAME, /* string */ + /* enum devlink_trap_action */ + DEVLINK_ATTR_TRAP_ACTION, /* u8 */ + /* enum devlink_trap_type */ + DEVLINK_ATTR_TRAP_TYPE, /* u8 */ + DEVLINK_ATTR_TRAP_GENERIC, /* flag */ + DEVLINK_ATTR_TRAP_METADATA, /* nested */ + DEVLINK_ATTR_TRAP_GROUP_NAME, /* string */ + + DEVLINK_ATTR_RELOAD_FAILED, /* u8 0 or 1 */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index f396a82dfd3e..2df8ceca1f9b 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -243,6 +243,7 @@ enum { DM_TARGET_MSG_CMD, DM_DEV_SET_GEOMETRY_CMD, DM_DEV_ARM_POLL_CMD, + DM_GET_TARGET_VERSION_CMD, }; #define DM_IOCTL 0xfd @@ -265,14 +266,15 @@ enum { #define DM_TABLE_STATUS _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, struct dm_ioctl) #define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl) +#define DM_GET_TARGET_VERSION _IOWR(DM_IOCTL, DM_GET_TARGET_VERSION_CMD, struct dm_ioctl) #define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 40 +#define DM_VERSION_MINOR 41 
#define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2019-01-18)" +#define DM_VERSION_EXTRA "-ioctl (2019-09-16)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h index d75df5210a4a..dbc7092e04b5 100644 --- a/include/uapi/linux/dma-buf.h +++ b/include/uapi/linux/dma-buf.h @@ -35,7 +35,10 @@ struct dma_buf_sync { #define DMA_BUF_SYNC_VALID_FLAGS_MASK \ (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END) +#define DMA_BUF_NAME_LEN 32 + #define DMA_BUF_BASE 'b' #define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) +#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *) #endif diff --git a/include/uapi/linux/dvb/audio.h b/include/uapi/linux/dvb/audio.h index afeae063e640..2f869da69171 100644 --- a/include/uapi/linux/dvb/audio.h +++ b/include/uapi/linux/dvb/audio.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */ /* - * audio.h + * audio.h - DEPRECATED MPEG-TS audio decoder API + * + * NOTE: should not be used on future drivers * * Copyright (C) 2000 Ralph Metzler <ralph@convergence.de> * & Marcus Metzler <marcus@convergence.de> @@ -52,7 +54,7 @@ typedef enum { typedef struct audio_mixer { unsigned int volume_left; unsigned int volume_right; - // what else do we need? bass, pass-through, ... + /* what else do we need? bass, pass-through, ... */ } audio_mixer_t; diff --git a/include/uapi/linux/dvb/osd.h b/include/uapi/linux/dvb/osd.h index e163508b9ae8..858997c74043 100644 --- a/include/uapi/linux/dvb/osd.h +++ b/include/uapi/linux/dvb/osd.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */ /* - * osd.h + * osd.h - DEPRECATED On Screen Display API + * + * NOTE: should not be used on future drivers * * Copyright (C) 2001 Ralph Metzler <ralph@convergence.de> * & Marcus Metzler <marcus@convergence.de> @@ -28,74 +30,108 @@ #include <linux/compiler.h> typedef enum { - // All functions return -2 on "not open" - OSD_Close=1, // () - // Disables OSD and releases the buffers - // returns 0 on success - OSD_Open, // (x0,y0,x1,y1,BitPerPixel[2/4/8](color&0x0F),mix[0..15](color&0xF0)) - // Opens OSD with this size and bit depth - // returns 0 on success, -1 on DRAM allocation error, -2 on "already open" - OSD_Show, // () - // enables OSD mode - // returns 0 on success - OSD_Hide, // () - // disables OSD mode - // returns 0 on success - OSD_Clear, // () - // Sets all pixel to color 0 - // returns 0 on success - OSD_Fill, // (color) - // Sets all pixel to color <col> - // returns 0 on success - OSD_SetColor, // (color,R{x0},G{y0},B{x1},opacity{y1}) - // set palette entry <num> to <r,g,b>, <mix> and <trans> apply - // R,G,B: 0..255 - // R=Red, G=Green, B=Blue - // opacity=0: pixel opacity 0% (only video pixel shows) - // opacity=1..254: pixel opacity as specified in header - // opacity=255: pixel opacity 100% (only OSD pixel shows) - // returns 0 on success, -1 on error - OSD_SetPalette, // (firstcolor{color},lastcolor{x0},data) - // Set a number of entries in the palette - // sets the entries "firstcolor" through "lastcolor" from the array "data" - // data has 4 byte for each color: - // R,G,B, and a opacity value: 0->transparent, 1..254->mix, 255->pixel - OSD_SetTrans, // (transparency{color}) - // Sets transparency of mixed pixel (0..15) - // returns 0 on success - OSD_SetPixel, // (x0,y0,color) - // sets pixel <x>,<y> to color number <col> - // returns 0 on success, -1 on error - OSD_GetPixel, // (x0,y0) - // returns color number of 
pixel <x>,<y>, or -1 - OSD_SetRow, // (x0,y0,x1,data) - // fills pixels x0,y through x1,y with the content of data[] - // returns 0 on success, -1 on clipping all pixel (no pixel drawn) - OSD_SetBlock, // (x0,y0,x1,y1,increment{color},data) - // fills pixels x0,y0 through x1,y1 with the content of data[] - // inc contains the width of one line in the data block, - // inc<=0 uses blockwidth as linewidth - // returns 0 on success, -1 on clipping all pixel - OSD_FillRow, // (x0,y0,x1,color) - // fills pixels x0,y through x1,y with the color <col> - // returns 0 on success, -1 on clipping all pixel - OSD_FillBlock, // (x0,y0,x1,y1,color) - // fills pixels x0,y0 through x1,y1 with the color <col> - // returns 0 on success, -1 on clipping all pixel - OSD_Line, // (x0,y0,x1,y1,color) - // draw a line from x0,y0 to x1,y1 with the color <col> - // returns 0 on success - OSD_Query, // (x0,y0,x1,y1,xasp{color}}), yasp=11 - // fills parameters with the picture dimensions and the pixel aspect ratio - // returns 0 on success - OSD_Test, // () - // draws a test picture. for debugging purposes only - // returns 0 on success -// TODO: remove "test" in final version - OSD_Text, // (x0,y0,size,color,text) - OSD_SetWindow, // (x0) set window with number 0<x0<8 as current - OSD_MoveWindow, // move current window to (x0, y0) - OSD_OpenRaw, // Open other types of OSD windows + /* All functions return -2 on "not open" */ + OSD_Close = 1, /* () */ + /* + * Disables OSD and releases the buffers + * returns 0 on success + */ + OSD_Open, /* (x0,y0,x1,y1,BitPerPixel[2/4/8](color&0x0F),mix[0..15](color&0xF0)) */ + /* + * Opens OSD with this size and bit depth + * returns 0 on success, -1 on DRAM allocation error, -2 on "already open" + */ + OSD_Show, /* () */ + /* + * enables OSD mode + * returns 0 on success + */ + OSD_Hide, /* () */ + /* + * disables OSD mode + * returns 0 on success + */ + OSD_Clear, /* () */ + /* + * Sets all pixel to color 0 + * returns 0 on success + */ + OSD_Fill, /* (color) */ + /* + * Sets all pixel to color <col> + * returns 0 on success + */ + OSD_SetColor, /* (color,R{x0},G{y0},B{x1},opacity{y1}) */ + /* + * set palette entry <num> to <r,g,b>, <mix> and <trans> apply + * R,G,B: 0..255 + * R=Red, G=Green, B=Blue + * opacity=0: pixel opacity 0% (only video pixel shows) + * opacity=1..254: pixel opacity as specified in header + * opacity=255: pixel opacity 100% (only OSD pixel shows) + * returns 0 on success, -1 on error + */ + OSD_SetPalette, /* (firstcolor{color},lastcolor{x0},data) */ + /* + * Set a number of entries in the palette + * sets the entries "firstcolor" through "lastcolor" from the array "data" + * data has 4 byte for each color: + * R,G,B, and a opacity value: 0->transparent, 1..254->mix, 255->pixel + */ + OSD_SetTrans, /* (transparency{color}) */ + /* + * Sets transparency of mixed pixel (0..15) + * returns 0 on success + */ + OSD_SetPixel, /* (x0,y0,color) */ + /* + * sets pixel <x>,<y> to color number <col> + * returns 0 on success, -1 on error + */ + OSD_GetPixel, /* (x0,y0) */ + /* returns color number of pixel <x>,<y>, or -1 */ + OSD_SetRow, /* (x0,y0,x1,data) */ + /* + * fills pixels x0,y through x1,y with the content of data[] + * returns 0 on success, -1 on clipping all pixel (no pixel drawn) + */ + OSD_SetBlock, /* (x0,y0,x1,y1,increment{color},data) */ + /* + * fills pixels x0,y0 through x1,y1 with the content of data[] + * inc contains the width of one line in the data block, + * inc<=0 uses blockwidth as linewidth + * returns 0 on success, -1 on clipping all pixel 
+ */ + OSD_FillRow, /* (x0,y0,x1,color) */ + /* + * fills pixels x0,y through x1,y with the color <col> + * returns 0 on success, -1 on clipping all pixel + */ + OSD_FillBlock, /* (x0,y0,x1,y1,color) */ + /* + * fills pixels x0,y0 through x1,y1 with the color <col> + * returns 0 on success, -1 on clipping all pixel + */ + OSD_Line, /* (x0,y0,x1,y1,color) */ + /* + * draw a line from x0,y0 to x1,y1 with the color <col> + * returns 0 on success + */ + OSD_Query, /* (x0,y0,x1,y1,xasp{color}}), yasp=11 */ + /* + * fills parameters with the picture dimensions and the pixel aspect ratio + * returns 0 on success + */ + OSD_Test, /* () */ + /* + * draws a test picture. for debugging purposes only + * returns 0 on success + * TODO: remove "test" in final version + */ + OSD_Text, /* (x0,y0,size,color,text) */ + OSD_SetWindow, /* (x0) set window with number 0<x0<8 as current */ + OSD_MoveWindow, /* move current window to (x0, y0) */ + OSD_OpenRaw, /* Open other types of OSD windows */ } OSD_Command; typedef struct osd_cmd_s { diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h index 43ba8b0a3d14..179f1ec60af6 100644 --- a/include/uapi/linux/dvb/video.h +++ b/include/uapi/linux/dvb/video.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */ /* - * video.h + * video.h - DEPRECATED MPEG-TS video decoder API + * + * NOTE: should not be used on future drivers * * Copyright (C) 2000 Marcus Metzler <marcus@convergence.de> * & Ralph Metzler <ralph@convergence.de> diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 3534ce157ae9..8938b76c4ee3 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -259,10 +259,32 @@ struct ethtool_tunable { #define ETHTOOL_PHY_FAST_LINK_DOWN_ON 0 #define ETHTOOL_PHY_FAST_LINK_DOWN_OFF 0xff +/* Energy Detect Power Down (EDPD) is a feature supported by some PHYs, where + * the PHY's RX & TX blocks are put into a low-power mode when there is no + * link detected (typically cable is un-plugged). For RX, only a minimal + * link-detection is available, and for TX the PHY wakes up to send link pulses + * to avoid any lock-ups in case the peer PHY may also be running in EDPD mode. + * + * Some PHYs may support configuration of the wake-up interval for TX pulses, + * and some PHYs may support only disabling TX pulses entirely. For the latter + * a special value is required (ETHTOOL_PHY_EDPD_NO_TX) so that this can be + * configured from userspace (should the user want it). 
+ * + * The interval units for TX wake-up are in milliseconds, since this should + * cover a reasonable range of intervals: + * - from 1 millisecond, which does not sound like much of a power-saver + * - to ~65 seconds which is quite a lot to wait for a link to come up when + * plugging a cable + */ +#define ETHTOOL_PHY_EDPD_DFLT_TX_MSECS 0xffff +#define ETHTOOL_PHY_EDPD_NO_TX 0xfffe +#define ETHTOOL_PHY_EDPD_DISABLE 0 + enum phy_tunable_id { ETHTOOL_PHY_ID_UNSPEC, ETHTOOL_PHY_DOWNSHIFT, ETHTOOL_PHY_FAST_LINK_DOWN, + ETHTOOL_PHY_EDPD, /* * Add your fresh new phy tunable attribute above and remember to update * phy_tunable_strings[] in net/core/ethtool.c @@ -1483,6 +1505,8 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, + ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS diff --git a/include/uapi/linux/flat.h b/include/uapi/linux/flat.h deleted file mode 100644 index 27e595e44fb7..000000000000 --- a/include/uapi/linux/flat.h +++ /dev/null @@ -1,59 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2002-2003 David McCullough <davidm@snapgear.com> - * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com> - * The Silver Hammer Group, Ltd. - * - * This file provides the definitions and structures needed to - * support uClinux flat-format executables. - */ - -#ifndef _UAPI_LINUX_FLAT_H -#define _UAPI_LINUX_FLAT_H - - -#define FLAT_VERSION 0x00000004L - -#ifdef CONFIG_BINFMT_SHARED_FLAT -#define MAX_SHARED_LIBS (4) -#else -#define MAX_SHARED_LIBS (1) -#endif - -/* - * To make everything easier to port and manage cross platform - * development, all fields are in network byte order. - */ - -struct flat_hdr { - char magic[4]; - unsigned long rev; /* version (as above) */ - unsigned long entry; /* Offset of first executable instruction - with text segment from beginning of file */ - unsigned long data_start; /* Offset of data segment from beginning of - file */ - unsigned long data_end; /* Offset of end of data segment - from beginning of file */ - unsigned long bss_end; /* Offset of end of bss segment from beginning - of file */ - - /* (It is assumed that data_end through bss_end forms the bss segment.) 
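Returning to the ETHTOOL_PHY_EDPD tunable added above: it is driven through the existing PHY-tunable ioctl path. The sketch below is an assumption-laden illustration only; it assumes ETHTOOL_PHY_STUNABLE and ETHTOOL_TUNABLE_U16 from the same header, that sock is any AF_INET datagram socket, and uses an example interval such as 1000 ms (ETHTOOL_PHY_EDPD_NO_TX would disable TX pulses entirely).

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_edpd_interval(int sock, const char *ifname, __u16 msecs)
{
        struct {
                struct ethtool_tunable hdr;
                __u16 value;
        } req;
        struct ifreq ifr;

        memset(&req, 0, sizeof(req));
        req.hdr.cmd = ETHTOOL_PHY_STUNABLE;
        req.hdr.id = ETHTOOL_PHY_EDPD;
        req.hdr.type_id = ETHTOOL_TUNABLE_U16;
        req.hdr.len = sizeof(req.value);
        req.value = msecs;      /* e.g. 1000, or ETHTOOL_PHY_EDPD_NO_TX */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&req;

        return ioctl(sock, SIOCETHTOOL, &ifr);
}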
*/ - - unsigned long stack_size; /* Size of stack, in bytes */ - unsigned long reloc_start; /* Offset of relocation records from - beginning of file */ - unsigned long reloc_count; /* Number of relocation records */ - unsigned long flags; - unsigned long build_date; /* When the program/library was built */ - unsigned long filler[5]; /* Reservered, set to zero */ -}; - -#define FLAT_FLAG_RAM 0x0001 /* load program entirely into RAM */ -#define FLAT_FLAG_GOTPIC 0x0002 /* program is PIC with GOT */ -#define FLAT_FLAG_GZIP 0x0004 /* all but the header is compressed */ -#define FLAT_FLAG_GZDATA 0x0008 /* only data/relocs are compressed (for XIP) */ -#define FLAT_FLAG_KTRACE 0x0010 /* output useful kernel trace for debugging */ - - - -#endif /* _UAPI_LINUX_FLAT_H */ diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h index 2e324e515c41..ec70a0746e59 100644 --- a/include/uapi/linux/fpga-dfl.h +++ b/include/uapi/linux/fpga-dfl.h @@ -176,4 +176,22 @@ struct dfl_fpga_fme_port_pr { #define DFL_FPGA_FME_PORT_PR _IO(DFL_FPGA_MAGIC, DFL_FME_BASE + 0) +/** + * DFL_FPGA_FME_PORT_RELEASE - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 1, + * int port_id) + * + * Driver releases the port per Port ID provided by caller. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_FME_PORT_RELEASE _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 1, int) + +/** + * DFL_FPGA_FME_PORT_ASSIGN - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 2, + * int port_id) + * + * Driver assigns the port back per Port ID provided by caller. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_FME_PORT_ASSIGN _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 2, int) + #endif /* _UAPI_LINUX_FPGA_DFL_H */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 59c71fa8c553..379a612f8f1d 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -13,6 +13,9 @@ #include <linux/limits.h> #include <linux/ioctl.h> #include <linux/types.h> +#ifndef __KERNEL__ +#include <linux/fscrypt.h> +#endif /* Use of MS_* flags within the kernel is restricted to core mount(2) code. */ #if !defined(__KERNEL__) @@ -213,57 +216,6 @@ struct fsxattr { #define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX]) /* - * File system encryption support - */ -/* Policy provided via an ioctl on the topmost directory */ -#define FS_KEY_DESCRIPTOR_SIZE 8 - -#define FS_POLICY_FLAGS_PAD_4 0x00 -#define FS_POLICY_FLAGS_PAD_8 0x01 -#define FS_POLICY_FLAGS_PAD_16 0x02 -#define FS_POLICY_FLAGS_PAD_32 0x03 -#define FS_POLICY_FLAGS_PAD_MASK 0x03 -#define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */ -#define FS_POLICY_FLAGS_VALID 0x07 - -/* Encryption algorithms */ -#define FS_ENCRYPTION_MODE_INVALID 0 -#define FS_ENCRYPTION_MODE_AES_256_XTS 1 -#define FS_ENCRYPTION_MODE_AES_256_GCM 2 -#define FS_ENCRYPTION_MODE_AES_256_CBC 3 -#define FS_ENCRYPTION_MODE_AES_256_CTS 4 -#define FS_ENCRYPTION_MODE_AES_128_CBC 5 -#define FS_ENCRYPTION_MODE_AES_128_CTS 6 -#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */ -#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. 
*/ -#define FS_ENCRYPTION_MODE_ADIANTUM 9 - -struct fscrypt_policy { - __u8 version; - __u8 contents_encryption_mode; - __u8 filenames_encryption_mode; - __u8 flags; - __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; -}; - -#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) -#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) -#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) - -/* Parameters for passing an encryption key into the kernel keyring */ -#define FS_KEY_DESC_PREFIX "fscrypt:" -#define FS_KEY_DESC_PREFIX_SIZE 8 - -/* Structure that userspace passes to the kernel keyring */ -#define FS_MAX_KEY_SIZE 64 - -struct fscrypt_key { - __u32 mode; - __u8 raw[FS_MAX_KEY_SIZE]; - __u32 size; -}; - -/* * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS) * * Note: for historical reasons, these flags were originally used and @@ -306,11 +258,13 @@ struct fscrypt_key { #define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ #define FS_HUGE_FILE_FL 0x00040000 /* Reserved for ext4 */ #define FS_EXTENT_FL 0x00080000 /* Extents */ +#define FS_VERITY_FL 0x00100000 /* Verity protected inode */ #define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ #define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ #define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ +#define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */ #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h new file mode 100644 index 000000000000..39ccfe9311c3 --- /dev/null +++ b/include/uapi/linux/fscrypt.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * fscrypt user API + * + * These ioctls can be used on filesystems that support fscrypt. See the + * "User API" section of Documentation/filesystems/fscrypt.rst. + */ +#ifndef _UAPI_LINUX_FSCRYPT_H +#define _UAPI_LINUX_FSCRYPT_H + +#include <linux/types.h> + +/* Encryption policy flags */ +#define FSCRYPT_POLICY_FLAGS_PAD_4 0x00 +#define FSCRYPT_POLICY_FLAGS_PAD_8 0x01 +#define FSCRYPT_POLICY_FLAGS_PAD_16 0x02 +#define FSCRYPT_POLICY_FLAGS_PAD_32 0x03 +#define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03 +#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04 +#define FSCRYPT_POLICY_FLAGS_VALID 0x07 + +/* Encryption algorithms */ +#define FSCRYPT_MODE_AES_256_XTS 1 +#define FSCRYPT_MODE_AES_256_CTS 4 +#define FSCRYPT_MODE_AES_128_CBC 5 +#define FSCRYPT_MODE_AES_128_CTS 6 +#define FSCRYPT_MODE_ADIANTUM 9 +#define __FSCRYPT_MODE_MAX 9 + +/* + * Legacy policy version; ad-hoc KDF and no key verification. + * For new encrypted directories, use fscrypt_policy_v2 instead. + * + * Careful: the .version field for this is actually 0, not 1. + */ +#define FSCRYPT_POLICY_V1 0 +#define FSCRYPT_KEY_DESCRIPTOR_SIZE 8 +struct fscrypt_policy_v1 { + __u8 version; + __u8 contents_encryption_mode; + __u8 filenames_encryption_mode; + __u8 flags; + __u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; +}; +#define fscrypt_policy fscrypt_policy_v1 + +/* + * Process-subscribed "logon" key description prefix and payload format. + * Deprecated; prefer FS_IOC_ADD_ENCRYPTION_KEY instead. 
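As a usage illustration for the v1 structures just defined, a minimal sketch of applying a policy to an empty directory. FS_IOC_SET_ENCRYPTION_POLICY is defined further down in this header; the mode and padding choices, and the caller-supplied key descriptor, are illustrative assumptions.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/fscrypt.h>

/* Apply an AES-256-XTS/CTS v1 policy to the directory open at dirfd.
 * The 8-byte descriptor must refer to a master key already made
 * available to the kernel (legacy "fscrypt:" logon key, or via
 * FS_IOC_ADD_ENCRYPTION_KEY). */
static int set_v1_policy(int dirfd,
			 const __u8 desc[FSCRYPT_KEY_DESCRIPTOR_SIZE])
{
	struct fscrypt_policy_v1 policy = {
		.version = FSCRYPT_POLICY_V1,	/* numeric value is 0 */
		.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS,
		.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS,
		.flags = FSCRYPT_POLICY_FLAGS_PAD_32,
	};

	memcpy(policy.master_key_descriptor, desc,
	       FSCRYPT_KEY_DESCRIPTOR_SIZE);
	return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}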
+ */ +#define FSCRYPT_KEY_DESC_PREFIX "fscrypt:" +#define FSCRYPT_KEY_DESC_PREFIX_SIZE 8 +#define FSCRYPT_MAX_KEY_SIZE 64 +struct fscrypt_key { + __u32 mode; + __u8 raw[FSCRYPT_MAX_KEY_SIZE]; + __u32 size; +}; + +/* + * New policy version with HKDF and key verification (recommended). + */ +#define FSCRYPT_POLICY_V2 2 +#define FSCRYPT_KEY_IDENTIFIER_SIZE 16 +struct fscrypt_policy_v2 { + __u8 version; + __u8 contents_encryption_mode; + __u8 filenames_encryption_mode; + __u8 flags; + __u8 __reserved[4]; + __u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; +}; + +/* Struct passed to FS_IOC_GET_ENCRYPTION_POLICY_EX */ +struct fscrypt_get_policy_ex_arg { + __u64 policy_size; /* input/output */ + union { + __u8 version; + struct fscrypt_policy_v1 v1; + struct fscrypt_policy_v2 v2; + } policy; /* output */ +}; + +/* + * v1 policy keys are specified by an arbitrary 8-byte key "descriptor", + * matching fscrypt_policy_v1::master_key_descriptor. + */ +#define FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR 1 + +/* + * v2 policy keys are specified by a 16-byte key "identifier" which the kernel + * calculates as a cryptographic hash of the key itself, + * matching fscrypt_policy_v2::master_key_identifier. + */ +#define FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER 2 + +/* + * Specifies a key, either for v1 or v2 policies. This doesn't contain the + * actual key itself; this is just the "name" of the key. + */ +struct fscrypt_key_specifier { + __u32 type; /* one of FSCRYPT_KEY_SPEC_TYPE_* */ + __u32 __reserved; + union { + __u8 __reserved[32]; /* reserve some extra space */ + __u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; + __u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; + } u; +}; + +/* Struct passed to FS_IOC_ADD_ENCRYPTION_KEY */ +struct fscrypt_add_key_arg { + struct fscrypt_key_specifier key_spec; + __u32 raw_size; + __u32 __reserved[9]; + __u8 raw[]; +}; + +/* Struct passed to FS_IOC_REMOVE_ENCRYPTION_KEY */ +struct fscrypt_remove_key_arg { + struct fscrypt_key_specifier key_spec; +#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY 0x00000001 +#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS 0x00000002 + __u32 removal_status_flags; /* output */ + __u32 __reserved[5]; +}; + +/* Struct passed to FS_IOC_GET_ENCRYPTION_KEY_STATUS */ +struct fscrypt_get_key_status_arg { + /* input */ + struct fscrypt_key_specifier key_spec; + __u32 __reserved[6]; + + /* output */ +#define FSCRYPT_KEY_STATUS_ABSENT 1 +#define FSCRYPT_KEY_STATUS_PRESENT 2 +#define FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED 3 + __u32 status; +#define FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF 0x00000001 + __u32 status_flags; + __u32 user_count; + __u32 __out_reserved[13]; +}; + +#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) +#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) +#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) +#define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */ +#define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg) +#define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) +#define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg) +#define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg) + +/**********************************************************************/ + +/* old names; don't add anything new here! 
*/ +#ifndef __KERNEL__ +#define FS_KEY_DESCRIPTOR_SIZE FSCRYPT_KEY_DESCRIPTOR_SIZE +#define FS_POLICY_FLAGS_PAD_4 FSCRYPT_POLICY_FLAGS_PAD_4 +#define FS_POLICY_FLAGS_PAD_8 FSCRYPT_POLICY_FLAGS_PAD_8 +#define FS_POLICY_FLAGS_PAD_16 FSCRYPT_POLICY_FLAGS_PAD_16 +#define FS_POLICY_FLAGS_PAD_32 FSCRYPT_POLICY_FLAGS_PAD_32 +#define FS_POLICY_FLAGS_PAD_MASK FSCRYPT_POLICY_FLAGS_PAD_MASK +#define FS_POLICY_FLAG_DIRECT_KEY FSCRYPT_POLICY_FLAG_DIRECT_KEY +#define FS_POLICY_FLAGS_VALID FSCRYPT_POLICY_FLAGS_VALID +#define FS_ENCRYPTION_MODE_INVALID 0 /* never used */ +#define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS +#define FS_ENCRYPTION_MODE_AES_256_GCM 2 /* never used */ +#define FS_ENCRYPTION_MODE_AES_256_CBC 3 /* never used */ +#define FS_ENCRYPTION_MODE_AES_256_CTS FSCRYPT_MODE_AES_256_CTS +#define FS_ENCRYPTION_MODE_AES_128_CBC FSCRYPT_MODE_AES_128_CBC +#define FS_ENCRYPTION_MODE_AES_128_CTS FSCRYPT_MODE_AES_128_CTS +#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* removed */ +#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* removed */ +#define FS_ENCRYPTION_MODE_ADIANTUM FSCRYPT_MODE_ADIANTUM +#define FS_KEY_DESC_PREFIX FSCRYPT_KEY_DESC_PREFIX +#define FS_KEY_DESC_PREFIX_SIZE FSCRYPT_KEY_DESC_PREFIX_SIZE +#define FS_MAX_KEY_SIZE FSCRYPT_MAX_KEY_SIZE +#endif /* !__KERNEL__ */ + +#endif /* _UAPI_LINUX_FSCRYPT_H */ diff --git a/include/uapi/linux/fsverity.h b/include/uapi/linux/fsverity.h new file mode 100644 index 000000000000..da0daf6c193b --- /dev/null +++ b/include/uapi/linux/fsverity.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * fs-verity user API + * + * These ioctls can be used on filesystems that support fs-verity. See the + * "User API" section of Documentation/filesystems/fsverity.rst. + * + * Copyright 2019 Google LLC + */ +#ifndef _UAPI_LINUX_FSVERITY_H +#define _UAPI_LINUX_FSVERITY_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +#define FS_VERITY_HASH_ALG_SHA256 1 +#define FS_VERITY_HASH_ALG_SHA512 2 + +struct fsverity_enable_arg { + __u32 version; + __u32 hash_algorithm; + __u32 block_size; + __u32 salt_size; + __u64 salt_ptr; + __u32 sig_size; + __u32 __reserved1; + __u64 sig_ptr; + __u64 __reserved2[11]; +}; + +struct fsverity_digest { + __u16 digest_algorithm; + __u16 digest_size; /* input/output */ + __u8 digest[]; +}; + +#define FS_IOC_ENABLE_VERITY _IOW('f', 133, struct fsverity_enable_arg) +#define FS_IOC_MEASURE_VERITY _IOWR('f', 134, struct fsverity_digest) + +#endif /* _UAPI_LINUX_FSVERITY_H */ diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 2971d29a42e4..373cada89815 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -38,6 +38,43 @@ * * Protocol changelog: * + * 7.1: + * - add the following messages: + * FUSE_SETATTR, FUSE_SYMLINK, FUSE_MKNOD, FUSE_MKDIR, FUSE_UNLINK, + * FUSE_RMDIR, FUSE_RENAME, FUSE_LINK, FUSE_OPEN, FUSE_READ, FUSE_WRITE, + * FUSE_RELEASE, FUSE_FSYNC, FUSE_FLUSH, FUSE_SETXATTR, FUSE_GETXATTR, + * FUSE_LISTXATTR, FUSE_REMOVEXATTR, FUSE_OPENDIR, FUSE_READDIR, + * FUSE_RELEASEDIR + * - add padding to messages to accommodate 32-bit servers on 64-bit kernels + * + * 7.2: + * - add FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE flags + * - add FUSE_FSYNCDIR message + * + * 7.3: + * - add FUSE_ACCESS message + * - add FUSE_CREATE message + * - add filehandle to fuse_setattr_in + * + * 7.4: + * - add frsize to fuse_kstatfs + * - clean up request size limit checking + * + * 7.5: + * - add flags and max_write to fuse_init_out + * + * 7.6: + * - add 
max_readahead to fuse_init_in and fuse_init_out + * + * 7.7: + * - add FUSE_INTERRUPT message + * - add POSIX file lock support + * + * 7.8: + * - add lock_owner and flags fields to fuse_release_in + * - add FUSE_BMAP message + * - add FUSE_DESTROY message + * * 7.9: * - new fuse_getattr_in input argument of GETATTR * - add lk_flags in fuse_lk_in @@ -133,6 +170,8 @@ * * 7.31 * - add FUSE_WRITE_KILL_PRIV flag + * - add FUSE_SETUPMAPPING and FUSE_REMOVEMAPPING + * - add map_alignment to fuse_init_out, add FUSE_MAP_ALIGNMENT flag */ #ifndef _LINUX_FUSE_H @@ -274,6 +313,7 @@ struct fuse_file_lock { * FUSE_CACHE_SYMLINKS: cache READLINK responses * FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir * FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request + * FUSE_MAP_ALIGNMENT: map_alignment field is valid */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -301,6 +341,7 @@ struct fuse_file_lock { #define FUSE_CACHE_SYMLINKS (1 << 23) #define FUSE_NO_OPENDIR_SUPPORT (1 << 24) #define FUSE_EXPLICIT_INVAL_DATA (1 << 25) +#define FUSE_MAP_ALIGNMENT (1 << 26) /** * CUSE INIT request/reply flags @@ -422,9 +463,15 @@ enum fuse_opcode { FUSE_RENAME2 = 45, FUSE_LSEEK = 46, FUSE_COPY_FILE_RANGE = 47, + FUSE_SETUPMAPPING = 48, + FUSE_REMOVEMAPPING = 49, /* CUSE specific operations */ CUSE_INIT = 4096, + + /* Reserved opcodes: helpful to detect structure endian-ness */ + CUSE_INIT_BSWAP_RESERVED = 1048576, /* CUSE_INIT << 8 */ + FUSE_INIT_BSWAP_RESERVED = 436207616, /* FUSE_INIT << 24 */ }; enum fuse_notify_code { @@ -652,7 +699,7 @@ struct fuse_init_out { uint32_t max_write; uint32_t time_gran; uint16_t max_pages; - uint16_t padding; + uint16_t map_alignment; uint32_t unused[8]; }; diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h index 101d3c469acb..cb8693b39cb7 100644 --- a/include/uapi/linux/gsmmux.h +++ b/include/uapi/linux/gsmmux.h @@ -37,5 +37,7 @@ struct gsm_netconfig { #define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig) #define GSMIOC_DISABLE_NET _IO('G', 3) +/* get the base tty number for a configured gsmmux tty */ +#define GSMIOC_GETFIRST _IOR('G', 4, __u32) #endif diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 773e476a8e54..1b3c2b643a02 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -237,6 +237,7 @@ struct br_mdb_entry { #define MDB_PERMANENT 1 __u8 state; #define MDB_FLAGS_OFFLOAD (1 << 0) +#define MDB_FLAGS_FAST_LEAVE (1 << 1) __u8 flags; __u16 vid; struct { diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 3158ba672b72..f6ceb2e63d1e 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -91,6 +91,7 @@ #define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. 
*/ #define ETH_P_PREAUTH 0x88C7 /* 802.11 Preauthentication */ #define ETH_P_TIPC 0x88CA /* TIPC */ +#define ETH_P_LLDP 0x88CC /* Link Layer Discovery Protocol */ #define ETH_P_MACSEC 0x88E5 /* 802.1ae MACsec */ #define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ #define ETH_P_MVRP 0x88F5 /* 802.1Q MVRP */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 5b225ff63b48..4a8c02cafa9a 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -636,6 +636,7 @@ enum { IFLA_BOND_AD_USER_PORT_KEY, IFLA_BOND_AD_ACTOR_SYSTEM, IFLA_BOND_TLB_DYNAMIC_LB, + IFLA_BOND_PEER_NOTIF_DELAY, __IFLA_BOND_MAX, }; @@ -694,6 +695,7 @@ enum { IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */ IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */ + IFLA_VF_BROADCAST, /* VF broadcast */ __IFLA_VF_MAX, }; @@ -704,6 +706,10 @@ struct ifla_vf_mac { __u8 mac[32]; /* MAX_ADDR_LEN */ }; +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + struct ifla_vf_vlan { __u32 vf; __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h index 467b654bd4c7..3d884d68eb30 100644 --- a/include/uapi/linux/if_packet.h +++ b/include/uapi/linux/if_packet.h @@ -123,7 +123,7 @@ struct tpacket_auxdata { /* Rx and Tx ring - header status */ #define TP_STATUS_TS_SOFTWARE (1 << 29) #define TP_STATUS_TS_SYS_HARDWARE (1 << 30) /* deprecated, never set */ -#define TP_STATUS_TS_RAW_HARDWARE (1 << 31) +#define TP_STATUS_TS_RAW_HARDWARE (1U << 31) /* Rx ring - feature request bits */ #define TP_FT_REQ_FILL_RXHASH 0x1 diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index caed8b1614ff..be328c59389d 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -16,6 +16,18 @@ #define XDP_SHARED_UMEM (1 << 0) #define XDP_COPY (1 << 1) /* Force copy-mode */ #define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */ +/* If this option is set, the driver might go sleep and in that case + * the XDP_RING_NEED_WAKEUP flag in the fill and/or Tx rings will be + * set. If it is set, the application need to explicitly wake up the + * driver with a poll() (Rx and Tx) or sendto() (Tx only). If you are + * running the driver and the application on the same core, you should + * use this option so that the kernel will yield to the user space + * application. 
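The comment above documents XDP_USE_NEED_WAKEUP (its #define follows immediately below). A minimal sketch of the TX-side pattern it implies, assuming the socket's TX ring has already been mmapped and ring_flags points at the ring's flags word exposed through xdp_ring_offset:

#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Kick the kernel only when it asked for it; with XDP_USE_NEED_WAKEUP
 * this replaces an unconditional sendto() per TX batch. */
static void xsk_kick_tx(int xsk_fd, const volatile __u32 *ring_flags)
{
	if (*ring_flags & XDP_RING_NEED_WAKEUP)
		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}

On the Rx/fill side the same flag check is followed by a poll() on the socket rather than a sendto().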
+ */ +#define XDP_USE_NEED_WAKEUP (1 << 3) + +/* Flags for xsk_umem_config flags */ +#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0) struct sockaddr_xdp { __u16 sxdp_family; @@ -25,10 +37,14 @@ struct sockaddr_xdp { __u32 sxdp_shared_umem_fd; }; +/* XDP_RING flags */ +#define XDP_RING_NEED_WAKEUP (1 << 0) + struct xdp_ring_offset { __u64 producer; __u64 consumer; __u64 desc; + __u64 flags; }; struct xdp_mmap_offsets { @@ -46,12 +62,14 @@ struct xdp_mmap_offsets { #define XDP_UMEM_FILL_RING 5 #define XDP_UMEM_COMPLETION_RING 6 #define XDP_STATISTICS 7 +#define XDP_OPTIONS 8 struct xdp_umem_reg { __u64 addr; /* Start of packet data area */ __u64 len; /* Length of packet data area */ __u32 chunk_size; __u32 headroom; + __u32 flags; }; struct xdp_statistics { @@ -60,12 +78,24 @@ struct xdp_statistics { __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ }; +struct xdp_options { + __u32 flags; +}; + +/* Flags for the flags field of struct xdp_options */ +#define XDP_OPTIONS_ZEROCOPY (1 << 0) + /* Pgoff for mmaping the rings */ #define XDP_PGOFF_RX_RING 0 #define XDP_PGOFF_TX_RING 0x80000000 #define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL #define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL +/* Masks for unaligned chunks mode */ +#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48 +#define XSK_UNALIGNED_BUF_ADDR_MASK \ + ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1) + /* Rx/Tx descriptor */ struct xdp_desc { __u64 addr; diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index e8baca85bac6..a1ff345b3f33 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -153,11 +153,20 @@ enum { INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */ INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */ INET_DIAG_MD5SIG, + INET_DIAG_ULP_INFO, __INET_DIAG_MAX, }; #define INET_DIAG_MAX (__INET_DIAG_MAX - 1) +enum { + INET_ULP_INFO_UNSPEC, + INET_ULP_INFO_NAME, + INET_ULP_INFO_TLS, + __INET_ULP_INFO_MAX, +}; +#define INET_ULP_INFO_MAX (__INET_ULP_INFO_MAX - 1) + /* INET_DIAG_MEM */ struct inet_diag_meminfo { diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index a0c460025036..ea57526a5b89 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -27,6 +27,8 @@ struct io_uring_sqe { __u32 fsync_flags; __u16 poll_events; __u32 sync_range_flags; + __u32 msg_flags; + __u32 timeout_flags; }; __u64 user_data; /* data to be passed back at completion time */ union { @@ -40,6 +42,7 @@ struct io_uring_sqe { */ #define IOSQE_FIXED_FILE (1U << 0) /* use fixed fileset */ #define IOSQE_IO_DRAIN (1U << 1) /* issue after inflight IO */ +#define IOSQE_IO_LINK (1U << 2) /* links next sqe */ /* * io_uring_setup() flags @@ -57,6 +60,9 @@ struct io_uring_sqe { #define IORING_OP_POLL_ADD 6 #define IORING_OP_POLL_REMOVE 7 #define IORING_OP_SYNC_FILE_RANGE 8 +#define IORING_OP_SENDMSG 9 +#define IORING_OP_RECVMSG 10 +#define IORING_OP_TIMEOUT 11 /* * sqe->fsync_flags @@ -124,12 +130,18 @@ struct io_uring_params { __u32 flags; __u32 sq_thread_cpu; __u32 sq_thread_idle; - __u32 resv[5]; + __u32 features; + __u32 resv[4]; struct io_sqring_offsets sq_off; struct io_cqring_offsets cq_off; }; /* + * io_uring_params->features flags + */ +#define IORING_FEAT_SINGLE_MMAP (1U << 0) + +/* * io_uring_register(2) opcodes and arguments */ #define IORING_REGISTER_BUFFERS 0 diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h new file mode 100644 index 000000000000..fc00c5d4741b --- /dev/null +++ b/include/uapi/linux/iommu.h @@ 
-0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * IOMMU user API definitions + */ + +#ifndef _UAPI_IOMMU_H +#define _UAPI_IOMMU_H + +#include <linux/types.h> + +#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */ +#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */ +#define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */ +#define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */ + +/* Generic fault types, can be expanded IRQ remapping fault */ +enum iommu_fault_type { + IOMMU_FAULT_DMA_UNRECOV = 1, /* unrecoverable fault */ + IOMMU_FAULT_PAGE_REQ, /* page request fault */ +}; + +enum iommu_fault_reason { + IOMMU_FAULT_REASON_UNKNOWN = 0, + + /* Could not access the PASID table (fetch caused external abort) */ + IOMMU_FAULT_REASON_PASID_FETCH, + + /* PASID entry is invalid or has configuration errors */ + IOMMU_FAULT_REASON_BAD_PASID_ENTRY, + + /* + * PASID is out of range (e.g. exceeds the maximum PASID + * supported by the IOMMU) or disabled. + */ + IOMMU_FAULT_REASON_PASID_INVALID, + + /* + * An external abort occurred fetching (or updating) a translation + * table descriptor + */ + IOMMU_FAULT_REASON_WALK_EABT, + + /* + * Could not access the page table entry (Bad address), + * actual translation fault + */ + IOMMU_FAULT_REASON_PTE_FETCH, + + /* Protection flag check failed */ + IOMMU_FAULT_REASON_PERMISSION, + + /* access flag check failed */ + IOMMU_FAULT_REASON_ACCESS, + + /* Output address of a translation stage caused Address Size fault */ + IOMMU_FAULT_REASON_OOR_ADDRESS, +}; + +/** + * struct iommu_fault_unrecoverable - Unrecoverable fault data + * @reason: reason of the fault, from &enum iommu_fault_reason + * @flags: parameters of this fault (IOMMU_FAULT_UNRECOV_* values) + * @pasid: Process Address Space ID + * @perm: requested permission access using by the incoming transaction + * (IOMMU_FAULT_PERM_* values) + * @addr: offending page address + * @fetch_addr: address that caused a fetch abort, if any + */ +struct iommu_fault_unrecoverable { + __u32 reason; +#define IOMMU_FAULT_UNRECOV_PASID_VALID (1 << 0) +#define IOMMU_FAULT_UNRECOV_ADDR_VALID (1 << 1) +#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID (1 << 2) + __u32 flags; + __u32 pasid; + __u32 perm; + __u64 addr; + __u64 fetch_addr; +}; + +/** + * struct iommu_fault_page_request - Page Request data + * @flags: encodes whether the corresponding fields are valid and whether this + * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values) + * @pasid: Process Address Space ID + * @grpid: Page Request Group Index + * @perm: requested page permissions (IOMMU_FAULT_PERM_* values) + * @addr: page address + * @private_data: device-specific private information + */ +struct iommu_fault_page_request { +#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0) +#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1) +#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2) + __u32 flags; + __u32 pasid; + __u32 grpid; + __u32 perm; + __u64 addr; + __u64 private_data[2]; +}; + +/** + * struct iommu_fault - Generic fault data + * @type: fault type from &enum iommu_fault_type + * @padding: reserved for future use (should be zero) + * @event: fault event, when @type is %IOMMU_FAULT_DMA_UNRECOV + * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ + * @padding2: sets the fault size to allow for future extensions + */ +struct iommu_fault { + __u32 type; + __u32 padding; + union { + struct iommu_fault_unrecoverable event; + struct iommu_fault_page_request prm; + __u8 padding2[56]; + }; +}; + +/** + * enum 
iommu_page_response_code - Return status of fault handlers + * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables + * populated, retry the access. This is "Success" in PCI PRI. + * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from + * this device if possible. This is "Response Failure" in PCI PRI. + * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the + * access. This is "Invalid Request" in PCI PRI. + */ +enum iommu_page_response_code { + IOMMU_PAGE_RESP_SUCCESS = 0, + IOMMU_PAGE_RESP_INVALID, + IOMMU_PAGE_RESP_FAILURE, +}; + +/** + * struct iommu_page_response - Generic page response information + * @version: API version of this structure + * @flags: encodes whether the corresponding fields are valid + * (IOMMU_FAULT_PAGE_RESPONSE_* values) + * @pasid: Process Address Space ID + * @grpid: Page Request Group Index + * @code: response code from &enum iommu_page_response_code + */ +struct iommu_page_response { +#define IOMMU_PAGE_RESP_VERSION_1 1 + __u32 version; +#define IOMMU_PAGE_RESP_PASID_VALID (1 << 0) + __u32 flags; + __u32 pasid; + __u32 grpid; + __u32 code; +}; + +#endif /* _UAPI_IOMMU_H */ diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h index e34f436fc79d..4102ddcb4e14 100644 --- a/include/uapi/linux/ip_vs.h +++ b/include/uapi/linux/ip_vs.h @@ -128,9 +128,15 @@ enum { IP_VS_CONN_F_TUNNEL_TYPE_IPIP = 0, /* IPIP */ IP_VS_CONN_F_TUNNEL_TYPE_GUE, /* GUE */ + IP_VS_CONN_F_TUNNEL_TYPE_GRE, /* GRE */ IP_VS_CONN_F_TUNNEL_TYPE_MAX, }; +/* Tunnel encapsulation flags */ +#define IP_VS_TUNNEL_ENCAP_FLAG_NOCSUM (0) +#define IP_VS_TUNNEL_ENCAP_FLAG_CSUM (1 << 0) +#define IP_VS_TUNNEL_ENCAP_FLAG_REMCSUM (1 << 1) + /* * The struct ip_vs_service_user and struct ip_vs_dest_user are * used to set IPVS rules through setsockopt. @@ -403,6 +409,8 @@ enum { IPVS_DEST_ATTR_TUN_PORT, /* tunnel port */ + IPVS_DEST_ATTR_TUN_FLAGS, /* tunnel flags */ + __IPVS_DEST_ATTR_MAX, }; diff --git a/include/uapi/linux/ipmi_bmc.h b/include/uapi/linux/ipmi_bmc.h index 1670f0944227..782a03eb1086 100644 --- a/include/uapi/linux/ipmi_bmc.h +++ b/include/uapi/linux/ipmi_bmc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (c) 2015-2018, Intel Corporation. */ diff --git a/include/uapi/linux/isdn.h b/include/uapi/linux/isdn.h deleted file mode 100644 index f371fd52ed75..000000000000 --- a/include/uapi/linux/isdn.h +++ /dev/null @@ -1,144 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* $Id: isdn.h,v 1.125.2.3 2004/02/10 01:07:14 keil Exp $ - * - * Main header for the Linux ISDN subsystem (linklevel). - * - * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) - * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg - * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. 
- * - */ - -#ifndef _UAPI__ISDN_H__ -#define _UAPI__ISDN_H__ - -#include <linux/ioctl.h> -#include <linux/tty.h> - -#define ISDN_MAX_DRIVERS 32 -#define ISDN_MAX_CHANNELS 64 - -/* New ioctl-codes */ -#define IIOCNETAIF _IO('I',1) -#define IIOCNETDIF _IO('I',2) -#define IIOCNETSCF _IO('I',3) -#define IIOCNETGCF _IO('I',4) -#define IIOCNETANM _IO('I',5) -#define IIOCNETDNM _IO('I',6) -#define IIOCNETGNM _IO('I',7) -#define IIOCGETSET _IO('I',8) /* no longer supported */ -#define IIOCSETSET _IO('I',9) /* no longer supported */ -#define IIOCSETVER _IO('I',10) -#define IIOCNETHUP _IO('I',11) -#define IIOCSETGST _IO('I',12) -#define IIOCSETBRJ _IO('I',13) -#define IIOCSIGPRF _IO('I',14) -#define IIOCGETPRF _IO('I',15) -#define IIOCSETPRF _IO('I',16) -#define IIOCGETMAP _IO('I',17) -#define IIOCSETMAP _IO('I',18) -#define IIOCNETASL _IO('I',19) -#define IIOCNETDIL _IO('I',20) -#define IIOCGETCPS _IO('I',21) -#define IIOCGETDVR _IO('I',22) -#define IIOCNETLCR _IO('I',23) /* dwabc ioctl for LCR from isdnlog */ -#define IIOCNETDWRSET _IO('I',24) /* dwabc ioctl to reset abc-values to default on a net-interface */ - -#define IIOCNETALN _IO('I',32) -#define IIOCNETDLN _IO('I',33) - -#define IIOCNETGPN _IO('I',34) - -#define IIOCDBGVAR _IO('I',127) - -#define IIOCDRVCTL _IO('I',128) - -/* cisco hdlck device private ioctls */ -#define SIOCGKEEPPERIOD (SIOCDEVPRIVATE + 0) -#define SIOCSKEEPPERIOD (SIOCDEVPRIVATE + 1) -#define SIOCGDEBSERINT (SIOCDEVPRIVATE + 2) -#define SIOCSDEBSERINT (SIOCDEVPRIVATE + 3) - -/* Packet encapsulations for net-interfaces */ -#define ISDN_NET_ENCAP_ETHER 0 -#define ISDN_NET_ENCAP_RAWIP 1 -#define ISDN_NET_ENCAP_IPTYP 2 -#define ISDN_NET_ENCAP_CISCOHDLC 3 /* Without SLARP and keepalive */ -#define ISDN_NET_ENCAP_SYNCPPP 4 -#define ISDN_NET_ENCAP_UIHDLC 5 -#define ISDN_NET_ENCAP_CISCOHDLCK 6 /* With SLARP and keepalive */ -#define ISDN_NET_ENCAP_X25IFACE 7 /* Documentation/networking/x25-iface.txt */ -#define ISDN_NET_ENCAP_MAX_ENCAP ISDN_NET_ENCAP_X25IFACE - -/* Facility which currently uses an ISDN-channel */ -#define ISDN_USAGE_NONE 0 -#define ISDN_USAGE_RAW 1 -#define ISDN_USAGE_MODEM 2 -#define ISDN_USAGE_NET 3 -#define ISDN_USAGE_VOICE 4 -#define ISDN_USAGE_FAX 5 -#define ISDN_USAGE_MASK 7 /* Mask to get plain usage */ -#define ISDN_USAGE_DISABLED 32 /* This bit is set, if channel is disabled */ -#define ISDN_USAGE_EXCLUSIVE 64 /* This bit is set, if channel is exclusive */ -#define ISDN_USAGE_OUTGOING 128 /* This bit is set, if channel is outgoing */ - -#define ISDN_MODEM_NUMREG 24 /* Number of Modem-Registers */ -#define ISDN_LMSNLEN 255 /* Length of tty's Listen-MSN string */ -#define ISDN_CMSGLEN 50 /* Length of CONNECT-Message to add for Modem */ - -#define ISDN_MSNLEN 32 -#define NET_DV 0x06 /* Data version for isdn_net_ioctl_cfg */ -#define TTY_DV 0x06 /* Data version for iprofd etc. 
*/ - -#define INF_DV 0x01 /* Data version for /dev/isdninfo */ - -typedef struct { - char drvid[25]; - unsigned long arg; -} isdn_ioctl_struct; - -typedef struct { - char name[10]; - char phone[ISDN_MSNLEN]; - int outgoing; -} isdn_net_ioctl_phone; - -typedef struct { - char name[10]; /* Name of interface */ - char master[10]; /* Name of Master for Bundling */ - char slave[10]; /* Name of Slave for Bundling */ - char eaz[256]; /* EAZ/MSN */ - char drvid[25]; /* DriverId for Bindings */ - int onhtime; /* Hangup-Timeout */ - int charge; /* Charge-Units */ - int l2_proto; /* Layer-2 protocol */ - int l3_proto; /* Layer-3 protocol */ - int p_encap; /* Encapsulation */ - int exclusive; /* Channel, if bound exclusive */ - int dialmax; /* Dial Retry-Counter */ - int slavedelay; /* Delay until slave starts up */ - int cbdelay; /* Delay before Callback */ - int chargehup; /* Flag: Charge-Hangup */ - int ihup; /* Flag: Hangup-Timeout on incoming line */ - int secure; /* Flag: Secure */ - int callback; /* Flag: Callback */ - int cbhup; /* Flag: Reject Call before Callback */ - int pppbind; /* ippp device for bindings */ - int chargeint; /* Use fixed charge interval length */ - int triggercps; /* BogoCPS needed for triggering slave */ - int dialtimeout; /* Dial-Timeout */ - int dialwait; /* Time to wait after failed dial */ - int dialmode; /* Flag: off / on / auto */ -} isdn_net_ioctl_cfg; - -#define ISDN_NET_DIALMODE_MASK 0xC0 /* bits for status */ -#define ISDN_NET_DM_OFF 0x00 /* this interface is stopped */ -#define ISDN_NET_DM_MANUAL 0x40 /* this interface is on (manual) */ -#define ISDN_NET_DM_AUTO 0x80 /* this interface is autodial */ -#define ISDN_NET_DIALMODE(x) ((&(x))->flags & ISDN_NET_DIALMODE_MASK) - - -#endif /* _UAPI__ISDN_H__ */ diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h index 4941628a4fb9..5ec88e7548a9 100644 --- a/include/uapi/linux/isdn/capicmd.h +++ b/include/uapi/linux/isdn/capicmd.h @@ -16,6 +16,7 @@ #define CAPI_MSG_BASELEN 8 #define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2) #define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2) +#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4) /*----- CAPI commands -----*/ #define CAPI_ALERT 0x01 diff --git a/include/uapi/linux/isdn_divertif.h b/include/uapi/linux/isdn_divertif.h deleted file mode 100644 index 0a17bb1bcb1b..000000000000 --- a/include/uapi/linux/isdn_divertif.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* $Id: isdn_divertif.h,v 1.4.6.1 2001/09/23 22:25:05 kai Exp $ - * - * Header for the diversion supplementary interface for i4l. - * - * Author Werner Cornelius (werner@titro.de) - * Copyright by Werner Cornelius (werner@titro.de) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. 
- * - */ - -#ifndef _UAPI_LINUX_ISDN_DIVERTIF_H -#define _UAPI_LINUX_ISDN_DIVERTIF_H - -/***********************************************************/ -/* magic value is also used to control version information */ -/***********************************************************/ -#define DIVERT_IF_MAGIC 0x25873401 -#define DIVERT_CMD_REG 0x00 /* register command */ -#define DIVERT_CMD_REL 0x01 /* release command */ -#define DIVERT_NO_ERR 0x00 /* return value no error */ -#define DIVERT_CMD_ERR 0x01 /* invalid cmd */ -#define DIVERT_VER_ERR 0x02 /* magic/version invalid */ -#define DIVERT_REG_ERR 0x03 /* module already registered */ -#define DIVERT_REL_ERR 0x04 /* module not registered */ -#define DIVERT_REG_NAME isdn_register_divert - - -#endif /* _UAPI_LINUX_ISDN_DIVERTIF_H */ diff --git a/include/uapi/linux/isdn_ppp.h b/include/uapi/linux/isdn_ppp.h deleted file mode 100644 index 0bdc4efaacb2..000000000000 --- a/include/uapi/linux/isdn_ppp.h +++ /dev/null @@ -1,68 +0,0 @@ -/* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */ -/* Linux ISDN subsystem, sync PPP, interface to ipppd - * - * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de) - * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg - * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) - * Copyright 2000-2002 by Kai Germaschewski (kai@germaschewski.name) - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ - -#ifndef _UAPI_LINUX_ISDN_PPP_H -#define _UAPI_LINUX_ISDN_PPP_H - -#define CALLTYPE_INCOMING 0x1 -#define CALLTYPE_OUTGOING 0x2 -#define CALLTYPE_CALLBACK 0x4 - -#define IPPP_VERSION "2.2.0" - -struct pppcallinfo -{ - int calltype; - unsigned char local_num[64]; - unsigned char remote_num[64]; - int charge_units; -}; - -#define PPPIOCGCALLINFO _IOWR('t',128,struct pppcallinfo) -#define PPPIOCBUNDLE _IOW('t',129,int) -#define PPPIOCGMPFLAGS _IOR('t',130,int) -#define PPPIOCSMPFLAGS _IOW('t',131,int) -#define PPPIOCSMPMTU _IOW('t',132,int) -#define PPPIOCSMPMRU _IOW('t',133,int) -#define PPPIOCGCOMPRESSORS _IOR('t',134,unsigned long [8]) -#define PPPIOCSCOMPRESSOR _IOW('t',135,int) -#define PPPIOCGIFNAME _IOR('t',136, char [IFNAMSIZ] ) - - -#define SC_MP_PROT 0x00000200 -#define SC_REJ_MP_PROT 0x00000400 -#define SC_OUT_SHORT_SEQ 0x00000800 -#define SC_IN_SHORT_SEQ 0x00004000 - -#define SC_DECOMP_ON 0x01 -#define SC_COMP_ON 0x02 -#define SC_DECOMP_DISCARD 0x04 -#define SC_COMP_DISCARD 0x08 -#define SC_LINK_DECOMP_ON 0x10 -#define SC_LINK_COMP_ON 0x20 -#define SC_LINK_DECOMP_DISCARD 0x40 -#define SC_LINK_COMP_DISCARD 0x80 - -#define ISDN_PPP_COMP_MAX_OPTIONS 16 - -#define IPPP_COMP_FLAG_XMIT 0x1 -#define IPPP_COMP_FLAG_LINK 0x2 - -struct isdn_ppp_comp_data { - int num; - unsigned char options[ISDN_PPP_COMP_MAX_OPTIONS]; - int optlen; - int flags; -}; - -#endif /* _UAPI_LINUX_ISDN_PPP_H */ diff --git a/include/uapi/linux/isdnif.h b/include/uapi/linux/isdnif.h deleted file mode 100644 index 611a69196738..000000000000 --- a/include/uapi/linux/isdnif.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-1.0+ WITH Linux-syscall-note */ -/* $Id: isdnif.h,v 1.43.2.2 2004/01/12 23:08:35 keil Exp $ - * - * Linux ISDN subsystem - * Definition of the interface between the subsystem and its low-level drivers. 
- * - * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de) - * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - */ - -#ifndef _UAPI__ISDNIF_H__ -#define _UAPI__ISDNIF_H__ - - -/* - * Values for general protocol-selection - */ -#define ISDN_PTYPE_UNKNOWN 0 /* Protocol undefined */ -#define ISDN_PTYPE_1TR6 1 /* german 1TR6-protocol */ -#define ISDN_PTYPE_EURO 2 /* EDSS1-protocol */ -#define ISDN_PTYPE_LEASED 3 /* for leased lines */ -#define ISDN_PTYPE_NI1 4 /* US NI-1 protocol */ -#define ISDN_PTYPE_MAX 7 /* Max. 8 Protocols */ - -/* - * Values for Layer-2-protocol-selection - */ -#define ISDN_PROTO_L2_X75I 0 /* X75/LAPB with I-Frames */ -#define ISDN_PROTO_L2_X75UI 1 /* X75/LAPB with UI-Frames */ -#define ISDN_PROTO_L2_X75BUI 2 /* X75/LAPB with UI-Frames */ -#define ISDN_PROTO_L2_HDLC 3 /* HDLC */ -#define ISDN_PROTO_L2_TRANS 4 /* Transparent (Voice) */ -#define ISDN_PROTO_L2_X25DTE 5 /* X25/LAPB DTE mode */ -#define ISDN_PROTO_L2_X25DCE 6 /* X25/LAPB DCE mode */ -#define ISDN_PROTO_L2_V11096 7 /* V.110 bitrate adaption 9600 Baud */ -#define ISDN_PROTO_L2_V11019 8 /* V.110 bitrate adaption 19200 Baud */ -#define ISDN_PROTO_L2_V11038 9 /* V.110 bitrate adaption 38400 Baud */ -#define ISDN_PROTO_L2_MODEM 10 /* Analog Modem on Board */ -#define ISDN_PROTO_L2_FAX 11 /* Fax Group 2/3 */ -#define ISDN_PROTO_L2_HDLC_56K 12 /* HDLC 56k */ -#define ISDN_PROTO_L2_MAX 15 /* Max. 16 Protocols */ - -/* - * Values for Layer-3-protocol-selection - */ -#define ISDN_PROTO_L3_TRANS 0 /* Transparent */ -#define ISDN_PROTO_L3_TRANSDSP 1 /* Transparent with DSP */ -#define ISDN_PROTO_L3_FCLASS2 2 /* Fax Group 2/3 CLASS 2 */ -#define ISDN_PROTO_L3_FCLASS1 3 /* Fax Group 2/3 CLASS 1 */ -#define ISDN_PROTO_L3_MAX 7 /* Max. 8 Protocols */ - - -#endif /* _UAPI__ISDNIF_H__ */ diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h new file mode 100644 index 000000000000..0a52b7b093d3 --- /dev/null +++ b/include/uapi/linux/isst_if.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Intel Speed Select Interface: OS to hardware Interface + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. + * + * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> + */ + +#ifndef __ISST_IF_H +#define __ISST_IF_H + +#include <linux/types.h> + +/** + * struct isst_if_platform_info - Define platform information + * @api_version: Version of the firmware document, which this driver + * can communicate + * @driver_version: Driver version, which will help user to send right + * commands. Even if the firmware is capable, driver may + * not be ready + * @max_cmds_per_ioctl: Returns the maximum number of commands driver will + * accept in a single ioctl + * @mbox_supported: Support of mail box interface + * @mmio_supported: Support of mmio interface for core-power feature + * + * Used to return output of IOCTL ISST_IF_GET_PLATFORM_INFO. This + * information can be used by the user space, to get the driver, firmware + * support and also number of commands to send in a single IOCTL request. 
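To make the flow concrete, a sketch of querying this structure from user space. The character-device path /dev/isst_interface is the one used by the intel-speed-select tool and is an assumption here, as is the minimal error handling:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/isst_if.h>

int main(void)
{
	struct isst_if_platform_info info;
	int fd = open("/dev/isst_interface", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, ISST_IF_GET_PLATFORM_INFO, &info) < 0) {
		close(fd);
		return 1;
	}
	printf("api %u driver %u max_cmds/ioctl %u mbox %u mmio %u\n",
	       info.api_version, info.driver_version,
	       info.max_cmds_per_ioctl, info.mbox_supported,
	       info.mmio_supported);
	close(fd);
	return 0;
}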
+ */ +struct isst_if_platform_info { + __u16 api_version; + __u16 driver_version; + __u16 max_cmds_per_ioctl; + __u8 mbox_supported; + __u8 mmio_supported; +}; + +/** + * struct isst_if_cpu_map - CPU mapping between logical and physical CPU + * @logical_cpu: Linux logical CPU number + * @physical_cpu: PUNIT CPU number + * + * Used to convert from Linux logical CPU to PUNIT CPU numbering scheme. + * The PUNIT CPU number is different than APIC ID based CPU numbering. + */ +struct isst_if_cpu_map { + __u32 logical_cpu; + __u32 physical_cpu; +}; + +/** + * struct isst_if_cpu_maps - structure for CPU map IOCTL + * @cmd_count: Number of CPU mapping command in cpu_map[] + * @cpu_map[]: Holds one or more CPU map data structure + * + * This structure used with ioctl ISST_IF_GET_PHY_ID to send + * one or more CPU mapping commands. Here IOCTL return value indicates + * number of commands sent or error number if no commands have been sent. + */ +struct isst_if_cpu_maps { + __u32 cmd_count; + struct isst_if_cpu_map cpu_map[1]; +}; + +/** + * struct isst_if_io_reg - Read write PUNIT IO register + * @read_write: Value 0: Read, 1: Write + * @logical_cpu: Logical CPU number to get target PCI device. + * @reg: PUNIT register offset + * @value: For write operation value to write and for + * for read placeholder read value + * + * Structure to specify read/write data to PUNIT registers. + */ +struct isst_if_io_reg { + __u32 read_write; /* Read:0, Write:1 */ + __u32 logical_cpu; + __u32 reg; + __u32 value; +}; + +/** + * struct isst_if_io_regs - structure for IO register commands + * @cmd_count: Number of io reg commands in io_reg[] + * @io_reg[]: Holds one or more io_reg command structure + * + * This structure used with ioctl ISST_IF_IO_CMD to send + * one or more read/write commands to PUNIT. Here IOCTL return value + * indicates number of requests sent or error number if no requests have + * been sent. + */ +struct isst_if_io_regs { + __u32 req_count; + struct isst_if_io_reg io_reg[1]; +}; + +/** + * struct isst_if_mbox_cmd - Structure to define mail box command + * @logical_cpu: Logical CPU number to get target PCI device + * @parameter: Mailbox parameter value + * @req_data: Request data for the mailbox + * @resp_data: Response data for mailbox command response + * @command: Mailbox command value + * @sub_command: Mailbox sub command value + * @reserved: Unused, set to 0 + * + * Structure to specify mailbox command to be sent to PUNIT. + */ +struct isst_if_mbox_cmd { + __u32 logical_cpu; + __u32 parameter; + __u32 req_data; + __u32 resp_data; + __u16 command; + __u16 sub_command; + __u32 reserved; +}; + +/** + * struct isst_if_mbox_cmds - structure for mailbox commands + * @cmd_count: Number of mailbox commands in mbox_cmd[] + * @mbox_cmd[]: Holds one or more mbox commands + * + * This structure used with ioctl ISST_IF_MBOX_COMMAND to send + * one or more mailbox commands to PUNIT. Here IOCTL return value + * indicates number of commands sent or error number if no commands have + * been sent. + */ +struct isst_if_mbox_cmds { + __u32 cmd_count; + struct isst_if_mbox_cmd mbox_cmd[1]; +}; + +/** + * struct isst_if_msr_cmd - Structure to define msr command + * @read_write: Value 0: Read, 1: Write + * @logical_cpu: Logical CPU number + * @msr: MSR number + * @data: For write operation, data to write, for read + * place holder + * + * Structure to specify MSR command related to PUNIT. 
+ */ +struct isst_if_msr_cmd { + __u32 read_write; /* Read:0, Write:1 */ + __u32 logical_cpu; + __u64 msr; + __u64 data; +}; + +/** + * struct isst_if_msr_cmds - structure for msr commands + * @cmd_count: Number of mailbox commands in msr_cmd[] + * @msr_cmd[]: Holds one or more msr commands + * + * This structure used with ioctl ISST_IF_MSR_COMMAND to send + * one or more MSR commands. IOCTL return value indicates number of + * commands sent or error number if no commands have been sent. + */ +struct isst_if_msr_cmds { + __u32 cmd_count; + struct isst_if_msr_cmd msr_cmd[1]; +}; + +#define ISST_IF_MAGIC 0xFE +#define ISST_IF_GET_PLATFORM_INFO _IOR(ISST_IF_MAGIC, 0, struct isst_if_platform_info *) +#define ISST_IF_GET_PHY_ID _IOWR(ISST_IF_MAGIC, 1, struct isst_if_cpu_map *) +#define ISST_IF_IO_CMD _IOW(ISST_IF_MAGIC, 2, struct isst_if_io_regs *) +#define ISST_IF_MBOX_COMMAND _IOWR(ISST_IF_MAGIC, 3, struct isst_if_mbox_cmds *) +#define ISST_IF_MSR_COMMAND _IOWR(ISST_IF_MAGIC, 4, struct isst_if_msr_cmds *) +#endif diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h index a18b719f49d4..784ba0b9690a 100644 --- a/include/uapi/linux/jffs2.h +++ b/include/uapi/linux/jffs2.h @@ -77,11 +77,6 @@ #define JFFS2_ACL_VERSION 0x0001 -// Maybe later... -//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3) -//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4) - - #define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at mount time, don't wait for it to happen later */ diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 6d112868272d..05669c87a0af 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -31,6 +31,7 @@ #define KEXEC_ARCH_DEFAULT ( 0 << 16) #define KEXEC_ARCH_386 ( 3 << 16) #define KEXEC_ARCH_68K ( 4 << 16) +#define KEXEC_ARCH_PARISC (15 << 16) #define KEXEC_ARCH_X86_64 (62 << 16) #define KEXEC_ARCH_PPC (20 << 16) #define KEXEC_ARCH_PPC64 (21 << 16) diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index f45ee0f69c0c..ed3d5893830d 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h @@ -67,6 +67,8 @@ #define KEYCTL_PKEY_SIGN 27 /* Create a public key signature */ #define KEYCTL_PKEY_VERIFY 28 /* Verify a public key signature */ #define KEYCTL_RESTRICT_KEYRING 29 /* Restrict keys allowed to link to a keyring */ +#define KEYCTL_MOVE 30 /* Move keys between keyrings */ +#define KEYCTL_CAPABILITIES 31 /* Find capabilities of keyrings subsystem */ /* keyctl structures */ struct keyctl_dh_params { @@ -112,4 +114,21 @@ struct keyctl_pkey_params { __u32 __spare[7]; }; +#define KEYCTL_MOVE_EXCL 0x00000001 /* Do not displace from the to-keyring */ + +/* + * Capabilities flags. The capabilities list is an array of 8-bit integers; + * each integer can carry up to 8 flags. 
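A short probe of this capabilities list, using the raw syscall so that no libkeyutils dependency is assumed. The return value is the total number of capability bytes the kernel implements, which may exceed the buffer supplied:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/keyctl.h>

int main(void)
{
	unsigned char caps[2] = { 0, 0 };
	long n = syscall(SYS_keyctl, KEYCTL_CAPABILITIES, caps, sizeof(caps));

	if (n < 0)
		return 1;
	if (caps[0] & KEYCTL_CAPS0_MOVE)
		puts("KEYCTL_MOVE supported");
	if (n > 1 && (caps[1] & KEYCTL_CAPS1_NS_KEY_TAG))
		puts("keys can carry a namespace tag");
	return 0;
}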
+ */ +#define KEYCTL_CAPS0_CAPABILITIES 0x01 /* KEYCTL_CAPABILITIES supported */ +#define KEYCTL_CAPS0_PERSISTENT_KEYRINGS 0x02 /* Persistent keyrings enabled */ +#define KEYCTL_CAPS0_DIFFIE_HELLMAN 0x04 /* Diffie-Hellman computation enabled */ +#define KEYCTL_CAPS0_PUBLIC_KEY 0x08 /* Public key ops enabled */ +#define KEYCTL_CAPS0_BIG_KEY 0x10 /* big_key-type enabled */ +#define KEYCTL_CAPS0_INVALIDATE 0x20 /* KEYCTL_INVALIDATE supported */ +#define KEYCTL_CAPS0_RESTRICT_KEYRING 0x40 /* KEYCTL_RESTRICT_KEYRING supported */ +#define KEYCTL_CAPS0_MOVE 0x80 /* KEYCTL_MOVE supported */ +#define KEYCTL_CAPS1_NS_KEYRING_NAME 0x01 /* Keyring names are per-user_namespace */ +#define KEYCTL_CAPS1_NS_KEY_TAG 0x02 /* Key indexing can include a namespace tag */ + #endif /* _LINUX_KEYCTL_H */ diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index dc067ed0b72d..20917c59f39c 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -35,9 +35,10 @@ struct kfd_ioctl_get_version_args { }; /* For kfd_ioctl_create_queue_args.queue_type. */ -#define KFD_IOC_QUEUE_TYPE_COMPUTE 0 -#define KFD_IOC_QUEUE_TYPE_SDMA 1 -#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 2 +#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0 +#define KFD_IOC_QUEUE_TYPE_SDMA 0x1 +#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2 +#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3 #define KFD_MAX_QUEUE_PERCENTAGE 100 #define KFD_MAX_QUEUE_PRIORITY 15 @@ -338,6 +339,7 @@ struct kfd_ioctl_acquire_vm_args { #define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1) #define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2) #define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3) +#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4) /* Allocation flags: attributes/access options */ #define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31) #define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30) @@ -426,6 +428,13 @@ struct kfd_ioctl_import_dmabuf_args { __u32 dmabuf_fd; /* to KFD */ }; +/* Register offset inside the remapped mmio page + */ +enum kfd_mmio_remap { + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0, + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4, +}; + #define AMDKFD_IOCTL_BASE 'K' #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2fe12b40d503..52641d8ca9e8 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -116,7 +116,7 @@ struct kvm_irq_level { * ACPI gsi notion of irq. * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. - * For ARM: See Documentation/virtual/kvm/api.txt + * For ARM: See Documentation/virt/kvm/api.txt */ union { __u32 irq; @@ -243,6 +243,8 @@ struct kvm_hyperv_exit { #define KVM_INTERNAL_ERROR_SIMUL_EX 2 /* Encounter unexpected vm-exit due to delivery event. 
*/ #define KVM_INTERNAL_ERROR_DELIVERY_EV 3 +/* Encounter unexpected vm-exit reason */ +#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { @@ -696,9 +698,11 @@ struct kvm_ioeventfd { #define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0) #define KVM_X86_DISABLE_EXITS_HLT (1 << 1) #define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2) +#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3) #define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \ KVM_X86_DISABLE_EXITS_HLT | \ - KVM_X86_DISABLE_EXITS_PAUSE) + KVM_X86_DISABLE_EXITS_PAUSE | \ + KVM_X86_DISABLE_EXITS_CSTATE) /* for KVM_ENABLE_CAP */ struct kvm_enable_cap { @@ -993,6 +997,9 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_SVE 170 #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172 +#define KVM_CAP_PMU_EVENT_FILTER 173 +#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174 +#define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175 #ifdef KVM_CAP_IRQ_ROUTING @@ -1083,7 +1090,7 @@ struct kvm_xen_hvm_config { * * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies * the irqfd to operate in resampling mode for level triggered interrupt - * emulation. See Documentation/virtual/kvm/api.txt. + * emulation. See Documentation/virt/kvm/api.txt. */ #define KVM_IRQFD_FLAG_RESAMPLE (1 << 1) @@ -1139,6 +1146,7 @@ struct kvm_dirty_tlb { #define KVM_REG_S390 0x5000000000000000ULL #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL +#define KVM_REG_RISCV 0x8000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL @@ -1327,6 +1335,8 @@ struct kvm_s390_ucas_mapping { #define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info) /* Available with KVM_CAP_PPC_GET_CPU_CHAR */ #define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char) +/* Available with KVM_CAP_PMU_EVENT_FILTER */ +#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 6c0ce49931e5..8b86609849b9 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -28,6 +28,7 @@ #define KVM_HC_MIPS_CONSOLE_OUTPUT 8 #define KVM_HC_CLOCK_PAIRING 9 #define KVM_HC_SEND_IPI 10 +#define KVM_HC_SCHED_YIELD 11 /* * hypercalls use architecture specific diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index f8c00045d537..903cc2d2750b 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -19,6 +19,7 @@ #define SQUASHFS_MAGIC 0x73717368 #define ECRYPTFS_SUPER_MAGIC 0xf15f #define EFS_SUPER_MAGIC 0x414A53 +#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 #define EXT2_SUPER_MAGIC 0xEF53 #define EXT3_SUPER_MAGIC 0xEF53 #define XENFS_SUPER_MAGIC 0xabba1974 @@ -91,5 +92,7 @@ #define UDF_SUPER_MAGIC 0x15013346 #define BALLOON_KVM_MAGIC 0x13661366 #define ZSMALLOC_MAGIC 0x58295829 +#define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */ +#define Z3FOLD_MAGIC 0x33 #endif /* __LINUX_MAGIC_H__ */ diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index 0a552061ff1c..4bcb41c71b8c 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -45,11 +45,14 @@ #define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */ #define MDIO_AN_LPA 19 /* AN LP abilities (base page) */ #define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */ +#define MDIO_PCS_EEE_ABLE2 21 /* EEE Capability 
register 2 */ #define MDIO_PMA_NG_EXTABLE 21 /* 2.5G/5G PMA/PMD extended ability */ #define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */ #define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */ #define MDIO_AN_EEE_ADV 60 /* EEE advertisement */ #define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */ +#define MDIO_AN_EEE_ADV2 62 /* EEE advertisement 2 */ +#define MDIO_AN_EEE_LPABLE2 63 /* EEE link partner ability 2 */ /* Media-dependent registers. */ #define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */ @@ -276,6 +279,13 @@ #define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ #define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ #define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#define MDIO_EEE_40GR_FW 0x0100 /* 40G R fast wake */ +#define MDIO_EEE_40GR_DS 0x0200 /* 40G R deep sleep */ +#define MDIO_EEE_100GR_FW 0x1000 /* 100G R fast wake */ +#define MDIO_EEE_100GR_DS 0x2000 /* 100G R deep sleep */ + +#define MDIO_EEE_2_5GT 0x0001 /* 2.5GT EEE cap */ +#define MDIO_EEE_5GT 0x0002 /* 5GT EEE cap */ /* 2.5G/5G Extended abilities register. */ #define MDIO_PMA_NG_EXTABLE_2_5GBT 0x0001 /* 2.5GBASET ability */ diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h index 2a6b253cfb05..16c1fa2d89a4 100644 --- a/include/uapi/linux/media-bus-format.h +++ b/include/uapi/linux/media-bus-format.h @@ -34,7 +34,7 @@ #define MEDIA_BUS_FMT_FIXED 0x0001 -/* RGB - next is 0x101c */ +/* RGB - next is 0x101d */ #define MEDIA_BUS_FMT_RGB444_1X12 0x1016 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002 @@ -55,6 +55,7 @@ #define MEDIA_BUS_FMT_RGB888_1X24 0x100a #define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b #define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c +#define MEDIA_BUS_FMT_RGB888_3X8 0x101c #define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011 #define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012 #define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h index 9aedb187bc48..383ac7b7d8f0 100644 --- a/include/uapi/linux/media.h +++ b/include/uapi/linux/media.h @@ -146,7 +146,7 @@ struct media_device_info { #define MEDIA_ENT_FL_CONNECTOR (1 << 1) /* OR with the entity id value to find the next entity */ -#define MEDIA_ENT_ID_FLAG_NEXT (1 << 31) +#define MEDIA_ENT_ID_FLAG_NEXT (1U << 31) struct media_entity_desc { __u32 id; diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h index a506216591d6..51b48e4be1f2 100644 --- a/include/uapi/linux/mii.h +++ b/include/uapi/linux/mii.h @@ -121,6 +121,8 @@ #define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */ #define EXPANSION_RESV 0xffe0 /* Unused... 
*/ +#define ESTATUS_1000_XFULL 0x8000 /* Can do 1000BaseX Full */ +#define ESTATUS_1000_XHALF 0x4000 /* Can do 1000BaseX Half */ #define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ #define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 5edbd0a675fd..8bf79a9eb234 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -53,6 +53,11 @@ enum { NET_DM_CMD_CONFIG, NET_DM_CMD_START, NET_DM_CMD_STOP, + NET_DM_CMD_PACKET_ALERT, + NET_DM_CMD_CONFIG_GET, + NET_DM_CMD_CONFIG_NEW, + NET_DM_CMD_STATS_GET, + NET_DM_CMD_STATS_NEW, _NET_DM_CMD_MAX, }; @@ -62,4 +67,65 @@ enum { * Our group identifiers */ #define NET_DM_GRP_ALERT 1 + +enum net_dm_attr { + NET_DM_ATTR_UNSPEC, + + NET_DM_ATTR_ALERT_MODE, /* u8 */ + NET_DM_ATTR_PC, /* u64 */ + NET_DM_ATTR_SYMBOL, /* string */ + NET_DM_ATTR_IN_PORT, /* nested */ + NET_DM_ATTR_TIMESTAMP, /* u64 */ + NET_DM_ATTR_PROTO, /* u16 */ + NET_DM_ATTR_PAYLOAD, /* binary */ + NET_DM_ATTR_PAD, + NET_DM_ATTR_TRUNC_LEN, /* u32 */ + NET_DM_ATTR_ORIG_LEN, /* u32 */ + NET_DM_ATTR_QUEUE_LEN, /* u32 */ + NET_DM_ATTR_STATS, /* nested */ + NET_DM_ATTR_HW_STATS, /* nested */ + NET_DM_ATTR_ORIGIN, /* u16 */ + NET_DM_ATTR_HW_TRAP_GROUP_NAME, /* string */ + NET_DM_ATTR_HW_TRAP_NAME, /* string */ + NET_DM_ATTR_HW_ENTRIES, /* nested */ + NET_DM_ATTR_HW_ENTRY, /* nested */ + NET_DM_ATTR_HW_TRAP_COUNT, /* u32 */ + NET_DM_ATTR_SW_DROPS, /* flag */ + NET_DM_ATTR_HW_DROPS, /* flag */ + + __NET_DM_ATTR_MAX, + NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1 +}; + +/** + * enum net_dm_alert_mode - Alert mode. + * @NET_DM_ALERT_MODE_SUMMARY: A summary of recent drops is sent to user space. + * @NET_DM_ALERT_MODE_PACKET: Each dropped packet is sent to user space along + * with metadata. 
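For the configuration side, a sketch that switches drop_monitor to per-packet reporting. It assumes libnl-genl-3 and that the generic netlink family is registered under the name "NET_DM"; the command and attribute values come from this header.

#include <netlink/genl/ctrl.h>
#include <netlink/genl/genl.h>
#include <linux/net_dropmon.h>

/* Send NET_DM_CMD_CONFIG with NET_DM_ATTR_ALERT_MODE = packet mode. */
static int net_dm_set_packet_mode(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, err = -1;

	if (!sk || !msg)
		goto out;
	if (genl_connect(sk) < 0)
		goto out;
	family = genl_ctrl_resolve(sk, "NET_DM");
	if (family < 0)
		goto out;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NET_DM_CMD_CONFIG, 0);
	nla_put_u8(msg, NET_DM_ATTR_ALERT_MODE, NET_DM_ALERT_MODE_PACKET);
	err = nl_send_auto(sk, msg) < 0 ? -1 : 0;
out:
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err;
}

Starting and stopping monitoring (NET_DM_CMD_START / NET_DM_CMD_STOP) and joining NET_DM_GRP_ALERT to actually receive the per-packet alerts are separate steps.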
+ */ +enum net_dm_alert_mode { + NET_DM_ALERT_MODE_SUMMARY, + NET_DM_ALERT_MODE_PACKET, +}; + +enum { + NET_DM_ATTR_PORT_NETDEV_IFINDEX, /* u32 */ + NET_DM_ATTR_PORT_NETDEV_NAME, /* string */ + + __NET_DM_ATTR_PORT_MAX, + NET_DM_ATTR_PORT_MAX = __NET_DM_ATTR_PORT_MAX - 1 +}; + +enum { + NET_DM_ATTR_STATS_DROPPED, /* u64 */ + + __NET_DM_ATTR_STATS_MAX, + NET_DM_ATTR_STATS_MAX = __NET_DM_ATTR_STATS_MAX - 1 +}; + +enum net_dm_origin { + NET_DM_ORIGIN_SW, + NET_DM_ORIGIN_HW, +}; + #endif diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h index ea69ca21ff23..eea166c52c36 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -2,7 +2,7 @@ /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> * Patrick Schaaf <bof@bof.de> * Martin Josefsson <gandalf@wlug.westbo.se> - * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> + * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/include/uapi/linux/netfilter/nf_synproxy.h b/include/uapi/linux/netfilter/nf_synproxy.h new file mode 100644 index 000000000000..00d787f0260e --- /dev/null +++ b/include/uapi/linux/netfilter/nf_synproxy.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _NF_SYNPROXY_H +#define _NF_SYNPROXY_H + +#include <linux/types.h> + +#define NF_SYNPROXY_OPT_MSS 0x01 +#define NF_SYNPROXY_OPT_WSCALE 0x02 +#define NF_SYNPROXY_OPT_SACK_PERM 0x04 +#define NF_SYNPROXY_OPT_TIMESTAMP 0x08 +#define NF_SYNPROXY_OPT_ECN 0x10 +#define NF_SYNPROXY_OPT_MASK (NF_SYNPROXY_OPT_MSS | \ + NF_SYNPROXY_OPT_WSCALE | \ + NF_SYNPROXY_OPT_SACK_PERM | \ + NF_SYNPROXY_OPT_TIMESTAMP) + +struct nf_synproxy_info { + __u8 options; + __u8 wscale; + __u16 mss; +}; + +#endif /* _NF_SYNPROXY_H */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 505393c6e959..ed8881ad18ed 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -192,6 +192,7 @@ enum nft_table_attributes { * @NFTA_CHAIN_USE: number of references to this chain (NLA_U32) * @NFTA_CHAIN_TYPE: type name of the string (NLA_NUL_STRING) * @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes) + * @NFTA_CHAIN_FLAGS: chain flags */ enum nft_chain_attributes { NFTA_CHAIN_UNSPEC, @@ -204,6 +205,7 @@ enum nft_chain_attributes { NFTA_CHAIN_TYPE, NFTA_CHAIN_COUNTERS, NFTA_CHAIN_PAD, + NFTA_CHAIN_FLAGS, __NFTA_CHAIN_MAX }; #define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1) @@ -634,6 +636,7 @@ enum nft_lookup_attributes { enum nft_dynset_ops { NFT_DYNSET_OP_ADD, NFT_DYNSET_OP_UPDATE, + NFT_DYNSET_OP_DELETE, }; enum nft_dynset_flags { @@ -730,10 +733,12 @@ enum nft_exthdr_flags { * * @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers * @NFT_EXTHDR_OP_TCP: match against tcp options + * @NFT_EXTHDR_OP_IPV4: match against ipv4 options */ enum nft_exthdr_op { NFT_EXTHDR_OP_IPV6, NFT_EXTHDR_OP_TCPOPT, + NFT_EXTHDR_OP_IPV4, __NFT_EXTHDR_OP_MAX }; #define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1) @@ -793,6 +798,11 @@ enum nft_exthdr_attributes { * @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp) * @NFT_META_IIFKIND: packet input interface kind name (dev->rtnl_link_ops->kind) * @NFT_META_OIFKIND: packet output interface kind name (dev->rtnl_link_ops->kind) 
+ * @NFT_META_BRI_IIFPVID: packet input bridge port pvid + * @NFT_META_BRI_IIFVPROTO: packet input bridge vlan proto + * @NFT_META_TIME_NS: time since epoch (in nanoseconds) + * @NFT_META_TIME_DAY: day of week (from 0 = Sunday to 6 = Saturday) + * @NFT_META_TIME_HOUR: hour of day (in seconds) */ enum nft_meta_keys { NFT_META_LEN, @@ -823,6 +833,11 @@ enum nft_meta_keys { NFT_META_SECPATH, NFT_META_IIFKIND, NFT_META_OIFKIND, + NFT_META_BRI_IIFPVID, + NFT_META_BRI_IIFVPROTO, + NFT_META_TIME_NS, + NFT_META_TIME_DAY, + NFT_META_TIME_HOUR, }; /** @@ -1445,6 +1460,17 @@ enum nft_ct_timeout_timeout_attributes { }; #define NFTA_CT_TIMEOUT_MAX (__NFTA_CT_TIMEOUT_MAX - 1) +enum nft_ct_expectation_attributes { + NFTA_CT_EXPECT_UNSPEC, + NFTA_CT_EXPECT_L3PROTO, + NFTA_CT_EXPECT_L4PROTO, + NFTA_CT_EXPECT_DPORT, + NFTA_CT_EXPECT_TIMEOUT, + NFTA_CT_EXPECT_SIZE, + __NFTA_CT_EXPECT_MAX, +}; +#define NFTA_CT_EXPECT_MAX (__NFTA_CT_EXPECT_MAX - 1) + #define NFT_OBJECT_UNSPEC 0 #define NFT_OBJECT_COUNTER 1 #define NFT_OBJECT_QUOTA 2 @@ -1454,7 +1480,9 @@ enum nft_ct_timeout_timeout_attributes { #define NFT_OBJECT_TUNNEL 6 #define NFT_OBJECT_CT_TIMEOUT 7 #define NFT_OBJECT_SECMARK 8 -#define __NFT_OBJECT_MAX 9 +#define NFT_OBJECT_CT_EXPECT 9 +#define NFT_OBJECT_SYNPROXY 10 +#define __NFT_OBJECT_MAX 11 #define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1) /** @@ -1538,6 +1566,22 @@ enum nft_osf_flags { }; /** + * enum nft_synproxy_attributes - nf_tables synproxy expression netlink attributes + * + * @NFTA_SYNPROXY_MSS: mss value sent to the backend (NLA_U16) + * @NFTA_SYNPROXY_WSCALE: wscale value sent to the backend (NLA_U8) + * @NFTA_SYNPROXY_FLAGS: flags (NLA_U32) + */ +enum nft_synproxy_attributes { + NFTA_SYNPROXY_UNSPEC, + NFTA_SYNPROXY_MSS, + NFTA_SYNPROXY_WSCALE, + NFTA_SYNPROXY_FLAGS, + __NFTA_SYNPROXY_MAX, +}; +#define NFTA_SYNPROXY_MAX (__NFTA_SYNPROXY_MAX - 1) + +/** * enum nft_device_attributes - nf_tables device netlink attributes * * @NFTA_DEVICE_NAME: name of this device (NLA_STRING) diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h index 20983cb195a0..45c8d3b027e0 100644 --- a/include/uapi/linux/netfilter/nfnetlink_log.h +++ b/include/uapi/linux/netfilter/nfnetlink_log.h @@ -33,6 +33,15 @@ struct nfulnl_msg_packet_timestamp { __aligned_be64 usec; }; +enum nfulnl_vlan_attr { + NFULA_VLAN_UNSPEC, + NFULA_VLAN_PROTO, /* __be16 skb vlan_proto */ + NFULA_VLAN_TCI, /* __be16 skb htons(vlan_tci) */ + __NFULA_VLAN_MAX, +}; + +#define NFULA_VLAN_MAX (__NFULA_VLAN_MAX + 1) + enum nfulnl_attr_type { NFULA_UNSPEC, NFULA_PACKET_HDR, @@ -54,6 +63,8 @@ enum nfulnl_attr_type { NFULA_HWLEN, /* hardware header length */ NFULA_CT, /* nf_conntrack_netlink.h */ NFULA_CT_INFO, /* enum ip_conntrack_info */ + NFULA_VLAN, /* nested attribute: packet vlan info */ + NFULA_L2HDR, /* full L2 header */ __NFULA_MAX }; diff --git a/include/uapi/linux/netfilter/xt_SYNPROXY.h b/include/uapi/linux/netfilter/xt_SYNPROXY.h index ea5eba15d4c1..19c04ed86172 100644 --- a/include/uapi/linux/netfilter/xt_SYNPROXY.h +++ b/include/uapi/linux/netfilter/xt_SYNPROXY.h @@ -2,18 +2,14 @@ #ifndef _XT_SYNPROXY_H #define _XT_SYNPROXY_H -#include <linux/types.h> +#include <linux/netfilter/nf_synproxy.h> -#define XT_SYNPROXY_OPT_MSS 0x01 -#define XT_SYNPROXY_OPT_WSCALE 0x02 -#define XT_SYNPROXY_OPT_SACK_PERM 0x04 -#define XT_SYNPROXY_OPT_TIMESTAMP 0x08 -#define XT_SYNPROXY_OPT_ECN 0x10 +#define XT_SYNPROXY_OPT_MSS NF_SYNPROXY_OPT_MSS +#define XT_SYNPROXY_OPT_WSCALE NF_SYNPROXY_OPT_WSCALE +#define 
XT_SYNPROXY_OPT_SACK_PERM NF_SYNPROXY_OPT_SACK_PERM +#define XT_SYNPROXY_OPT_TIMESTAMP NF_SYNPROXY_OPT_TIMESTAMP +#define XT_SYNPROXY_OPT_ECN NF_SYNPROXY_OPT_ECN -struct xt_synproxy_info { - __u8 options; - __u8 wscale; - __u16 mss; -}; +#define xt_synproxy_info nf_synproxy_info #endif /* _XT_SYNPROXY_H */ diff --git a/include/uapi/linux/netfilter/xt_connlabel.h b/include/uapi/linux/netfilter/xt_connlabel.h index 2312f0ec07b2..323f0dfc2a4e 100644 --- a/include/uapi/linux/netfilter/xt_connlabel.h +++ b/include/uapi/linux/netfilter/xt_connlabel.h @@ -1,4 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _UAPI_XT_CONNLABEL_H +#define _UAPI_XT_CONNLABEL_H + #include <linux/types.h> #define XT_CONNLABEL_MAXBIT 127 @@ -11,3 +15,5 @@ struct xt_connlabel_mtinfo { __u16 bit; __u16 options; }; + +#endif /* _UAPI_XT_CONNLABEL_H */ diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h index 5c8a4d760ee3..b5123ab8d54a 100644 --- a/include/uapi/linux/netfilter/xt_nfacct.h +++ b/include/uapi/linux/netfilter/xt_nfacct.h @@ -11,4 +11,9 @@ struct xt_nfacct_match_info { struct nf_acct *nfacct; }; +struct xt_nfacct_match_info_v1 { + char name[NFACCT_NAME_MAX]; + struct nf_acct *nfacct __attribute__((aligned(8))); +}; + #endif /* _XT_NFACCT_MATCH_H */ diff --git a/include/uapi/linux/netfilter/xt_owner.h b/include/uapi/linux/netfilter/xt_owner.h index fa3ad84957d5..5108df4d0313 100644 --- a/include/uapi/linux/netfilter/xt_owner.h +++ b/include/uapi/linux/netfilter/xt_owner.h @@ -5,11 +5,17 @@ #include <linux/types.h> enum { - XT_OWNER_UID = 1 << 0, - XT_OWNER_GID = 1 << 1, - XT_OWNER_SOCKET = 1 << 2, + XT_OWNER_UID = 1 << 0, + XT_OWNER_GID = 1 << 1, + XT_OWNER_SOCKET = 1 << 2, + XT_OWNER_SUPPL_GROUPS = 1 << 3, }; +#define XT_OWNER_MASK (XT_OWNER_UID | \ + XT_OWNER_GID | \ + XT_OWNER_SOCKET | \ + XT_OWNER_SUPPL_GROUPS) + struct xt_owner_match_info { __u32 uid_min, uid_max; __u32 gid_min, gid_max; diff --git a/include/uapi/linux/netfilter/xt_policy.h b/include/uapi/linux/netfilter/xt_policy.h index 323bfa3074c5..4cf2ce2a8a44 100644 --- a/include/uapi/linux/netfilter/xt_policy.h +++ b/include/uapi/linux/netfilter/xt_policy.h @@ -2,6 +2,7 @@ #ifndef _XT_POLICY_H #define _XT_POLICY_H +#include <linux/netfilter.h> #include <linux/types.h> #include <linux/in.h> #include <linux/in6.h> diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h index 3b86c14ea49d..8076c940ffeb 100644 --- a/include/uapi/linux/netfilter_bridge/ebtables.h +++ b/include/uapi/linux/netfilter_bridge/ebtables.h @@ -123,7 +123,7 @@ struct ebt_entry_match { union { struct { char name[EBT_EXTENSION_MAXNAMELEN]; - uint8_t revision; + __u8 revision; }; struct xt_match *match; } u; @@ -136,7 +136,7 @@ struct ebt_entry_watcher { union { struct { char name[EBT_EXTENSION_MAXNAMELEN]; - uint8_t revision; + __u8 revision; }; struct xt_target *watcher; } u; @@ -149,7 +149,7 @@ struct ebt_entry_target { union { struct { char name[EBT_EXTENSION_MAXNAMELEN]; - uint8_t revision; + __u8 revision; }; struct xt_target *target; } u; diff --git a/include/uapi/linux/netfilter_ipv4/ipt_LOG.h b/include/uapi/linux/netfilter_ipv4/ipt_LOG.h index 6dec14ba851b..b7cf2c669f40 100644 --- a/include/uapi/linux/netfilter_ipv4/ipt_LOG.h +++ b/include/uapi/linux/netfilter_ipv4/ipt_LOG.h @@ -2,8 +2,6 @@ #ifndef _IPT_LOG_H #define _IPT_LOG_H -#warning "Please update iptables, this file will be removed soon!" 
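[Editor's note -- illustration only, not part of the patch: the xt_SYNPROXY.h change above keeps the old XT_SYNPROXY_OPT_* names and struct xt_synproxy_info as aliases for the shared nf_synproxy definitions, so existing userspace keeps compiling unchanged. A minimal sketch using only the fields and NF_SYNPROXY_OPT_* flags introduced in this diff; the numeric values are made up for illustration:

    #include <linux/netfilter/nf_synproxy.h>

    struct nf_synproxy_info info = {
            .options = NF_SYNPROXY_OPT_MSS |
                       NF_SYNPROXY_OPT_WSCALE |
                       NF_SYNPROXY_OPT_SACK_PERM,
            .wscale  = 7,    /* window scale announced while proxying */
            .mss     = 1460, /* MSS announced in the proxied SYN/ACK */
    };

How the structure reaches the kernel (the iptables/nftables synproxy configuration path) is outside the scope of these headers.]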
- /* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */ #define IPT_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ #define IPT_LOG_TCPOPT 0x02 /* Log TCP options */ diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h index 7553a434e4da..23e91a9c2583 100644 --- a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h +++ b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h @@ -2,8 +2,6 @@ #ifndef _IP6T_LOG_H #define _IP6T_LOG_H -#warning "Please update iptables, this file will be removed soon!" - /* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */ #define IP6T_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ #define IP6T_LOG_TCPOPT 0x02 /* Log TCP options */ diff --git a/include/uapi/linux/nexthop.h b/include/uapi/linux/nexthop.h new file mode 100644 index 000000000000..7b61867e9848 --- /dev/null +++ b/include/uapi/linux/nexthop.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_NEXTHOP_H +#define _UAPI_LINUX_NEXTHOP_H + +#include <linux/types.h> + +struct nhmsg { + unsigned char nh_family; + unsigned char nh_scope; /* return only */ + unsigned char nh_protocol; /* Routing protocol that installed nh */ + unsigned char resvd; + unsigned int nh_flags; /* RTNH_F flags */ +}; + +/* entry in a nexthop group */ +struct nexthop_grp { + __u32 id; /* nexthop id - must exist */ + __u8 weight; /* weight of this nexthop */ + __u8 resvd1; + __u16 resvd2; +}; + +enum { + NEXTHOP_GRP_TYPE_MPATH, /* default type if not specified */ + __NEXTHOP_GRP_TYPE_MAX, +}; + +#define NEXTHOP_GRP_TYPE_MAX (__NEXTHOP_GRP_TYPE_MAX - 1) + +enum { + NHA_UNSPEC, + NHA_ID, /* u32; id for nexthop. id == 0 means auto-assign */ + + NHA_GROUP, /* array of nexthop_grp */ + NHA_GROUP_TYPE, /* u16 one of NEXTHOP_GRP_TYPE */ + /* if NHA_GROUP attribute is added, no other attributes can be set */ + + NHA_BLACKHOLE, /* flag; nexthop used to blackhole packets */ + /* if NHA_BLACKHOLE is added, OIF, GATEWAY, ENCAP can not be set */ + + NHA_OIF, /* u32; nexthop device */ + NHA_GATEWAY, /* be32 (IPv4) or in6_addr (IPv6) gw address */ + NHA_ENCAP_TYPE, /* u16; lwt encap type */ + NHA_ENCAP, /* lwt encap data */ + + /* NHA_OIF can be appended to dump request to return only + * nexthops using given device + */ + NHA_GROUPS, /* flag; only return nexthop groups in dump */ + NHA_MASTER, /* u32; only return nexthops with given master dev */ + + __NHA_MAX, +}; + +#define NHA_MAX (__NHA_MAX - 1) +#endif diff --git a/include/uapi/linux/nfsd/cld.h b/include/uapi/linux/nfsd/cld.h index b1e9de4f07d5..a519313af953 100644 --- a/include/uapi/linux/nfsd/cld.h +++ b/include/uapi/linux/nfsd/cld.h @@ -26,17 +26,22 @@ #include <linux/types.h> /* latest upcall version available */ -#define CLD_UPCALL_VERSION 1 +#define CLD_UPCALL_VERSION 2 /* defined by RFC3530 */ #define NFS4_OPAQUE_LIMIT 1024 +#ifndef SHA256_DIGEST_SIZE +#define SHA256_DIGEST_SIZE 32 +#endif + enum cld_command { Cld_Create, /* create a record for this cm_id */ Cld_Remove, /* remove record of this cm_id */ Cld_Check, /* is this cm_id allowed? 
*/ Cld_GraceDone, /* grace period is complete */ - Cld_GraceStart, + Cld_GraceStart, /* grace start (upload client records) */ + Cld_GetVersion, /* query max supported upcall version */ }; /* representation of long-form NFSv4 client ID */ @@ -45,6 +50,17 @@ struct cld_name { unsigned char cn_id[NFS4_OPAQUE_LIMIT]; /* client-provided */ } __attribute__((packed)); +/* sha256 hash of the kerberos principal */ +struct cld_princhash { + __u8 cp_len; /* length of cp_data */ + unsigned char cp_data[SHA256_DIGEST_SIZE]; /* hash of principal */ +} __attribute__((packed)); + +struct cld_clntinfo { + struct cld_name cc_name; + struct cld_princhash cc_princhash; +} __attribute__((packed)); + /* message struct for communication with userspace */ struct cld_msg { __u8 cm_vers; /* upcall version */ @@ -54,7 +70,28 @@ struct cld_msg { union { __s64 cm_gracetime; /* grace period start time */ struct cld_name cm_name; + __u8 cm_version; /* for getting max version */ + } __attribute__((packed)) cm_u; +} __attribute__((packed)); + +/* version 2 message can include hash of kerberos principal */ +struct cld_msg_v2 { + __u8 cm_vers; /* upcall version */ + __u8 cm_cmd; /* upcall command */ + __s16 cm_status; /* return code */ + __u32 cm_xid; /* transaction id */ + union { + struct cld_name cm_name; + __u8 cm_version; /* for getting max version */ + struct cld_clntinfo cm_clntinfo; /* name & princ hash */ } __attribute__((packed)) cm_u; } __attribute__((packed)); +struct cld_msg_hdr { + __u8 cm_vers; /* upcall version */ + __u8 cm_cmd; /* upcall command */ + __s16 cm_status; /* return code */ + __u32 cm_xid; /* transaction id */ +} __attribute__((packed)); + #endif /* !_NFSD_CLD_H */ diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h index a7e66ab11d1d..c23f91ae5fe8 100644 --- a/include/uapi/linux/nilfs2_ondisk.h +++ b/include/uapi/linux/nilfs2_ondisk.h @@ -29,7 +29,7 @@ #include <linux/types.h> #include <linux/magic.h> - +#include <asm/byteorder.h> #define NILFS_INODE_BMAP_SIZE 7 @@ -533,19 +533,19 @@ enum { static inline void \ nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \ { \ - cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \ - (1UL << NILFS_CHECKPOINT_##flag)); \ + cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) | \ + (1UL << NILFS_CHECKPOINT_##flag)); \ } \ static inline void \ nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \ { \ - cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \ + cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) & \ ~(1UL << NILFS_CHECKPOINT_##flag)); \ } \ static inline int \ nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \ { \ - return !!(le32_to_cpu(cp->cp_flags) & \ + return !!(__le32_to_cpu(cp->cp_flags) & \ (1UL << NILFS_CHECKPOINT_##flag)); \ } @@ -595,20 +595,20 @@ enum { static inline void \ nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \ { \ - su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \ + su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) | \ (1UL << NILFS_SEGMENT_USAGE_##flag));\ } \ static inline void \ nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \ { \ su->su_flags = \ - cpu_to_le32(le32_to_cpu(su->su_flags) & \ + __cpu_to_le32(__le32_to_cpu(su->su_flags) & \ ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \ } \ static inline int \ nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \ { \ - return !!(le32_to_cpu(su->su_flags) & \ + return !!(__le32_to_cpu(su->su_flags) & \ (1UL << NILFS_SEGMENT_USAGE_##flag)); \ } @@ 
-619,15 +619,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error) static inline void nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su) { - su->su_lastmod = cpu_to_le64(0); - su->su_nblocks = cpu_to_le32(0); - su->su_flags = cpu_to_le32(0); + su->su_lastmod = __cpu_to_le64(0); + su->su_nblocks = __cpu_to_le32(0); + su->su_flags = __cpu_to_le32(0); } static inline int nilfs_segment_usage_clean(const struct nilfs_segment_usage *su) { - return !le32_to_cpu(su->su_flags); + return !__le32_to_cpu(su->su_flags); } /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 6f09d1500960..beee59c831a7 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -52,6 +52,11 @@ #define NL80211_MULTICAST_GROUP_NAN "nan" #define NL80211_MULTICAST_GROUP_TESTMODE "testmode" +#define NL80211_EDMG_BW_CONFIG_MIN 4 +#define NL80211_EDMG_BW_CONFIG_MAX 15 +#define NL80211_EDMG_CHANNELS_MIN 1 +#define NL80211_EDMG_CHANNELS_MAX 0x3c /* 0b00111100 */ + /** * DOC: Station handling * @@ -235,6 +240,15 @@ */ /** + * DOC: SAE authentication offload + * + * By setting @NL80211_EXT_FEATURE_SAE_OFFLOAD flag drivers can indicate they + * support offloading SAE authentication for WPA3-Personal networks. In + * %NL80211_CMD_CONNECT the password for SAE should be specified using + * %NL80211_ATTR_SAE_PASSWORD. + */ + +/** * enum nl80211_commands - supported nl80211 commands * * @NL80211_CMD_UNSPEC: unspecified command to catch errors @@ -648,7 +662,9 @@ * is used during CSA period. * @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this * command may be used with the corresponding cookie to cancel the wait - * time if it is known that it is no longer necessary. + * time if it is known that it is no longer necessary. This command is + * also sent as an event whenever the driver has completed the off-channel + * wait time. * @NL80211_CMD_ACTION: Alias for @NL80211_CMD_FRAME for backward compatibility. * @NL80211_CMD_FRAME_TX_STATUS: Report TX status of a management frame * transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies @@ -2341,6 +2357,22 @@ enum nl80211_commands { * should be picking up the lowest tx power, either tx power per-interface * or per-station. * + * @NL80211_ATTR_SAE_PASSWORD: attribute for passing SAE password material. It + * is used with %NL80211_CMD_CONNECT to provide password for offloading + * SAE authentication for WPA3-Personal networks. + * + * @NL80211_ATTR_TWT_RESPONDER: Enable target wait time responder support. + * + * @NL80211_ATTR_HE_OBSS_PD: nested attribute for OBSS Packet Detection + * functionality. + * + * @NL80211_ATTR_WIPHY_EDMG_CHANNELS: bitmap that indicates the 2.16 GHz + * channel(s) that are allowed to be used for EDMG transmissions. + * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251. (u8 attribute) + * @NL80211_ATTR_WIPHY_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes + * the allowed channel bandwidth configurations. (u8 attribute) + * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13. 
+ * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2794,6 +2826,15 @@ enum nl80211_attrs { NL80211_ATTR_STA_TX_POWER_SETTING, NL80211_ATTR_STA_TX_POWER, + NL80211_ATTR_SAE_PASSWORD, + + NL80211_ATTR_TWT_RESPONDER, + + NL80211_ATTR_HE_OBSS_PD, + + NL80211_ATTR_WIPHY_EDMG_CHANNELS, + NL80211_ATTR_WIPHY_EDMG_BW_CONFIG, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -2844,7 +2885,7 @@ enum nl80211_attrs { #define NL80211_HT_CAPABILITY_LEN 26 #define NL80211_VHT_CAPABILITY_LEN 12 #define NL80211_HE_MIN_CAPABILITY_LEN 16 -#define NL80211_HE_MAX_CAPABILITY_LEN 51 +#define NL80211_HE_MAX_CAPABILITY_LEN 54 #define NL80211_MAX_NR_CIPHER_SUITES 5 #define NL80211_MAX_NR_AKM_SUITES 2 @@ -3175,6 +3216,8 @@ enum nl80211_sta_bss_param { * sent to the station (u64, usec) * @NL80211_STA_INFO_AIRTIME_WEIGHT: current airtime weight for station (u16) * @NL80211_STA_INFO_AIRTIME_LINK_METRIC: airtime link metric for mesh station + * @NL80211_STA_INFO_ASSOC_AT_BOOTTIME: Timestamp (CLOCK_BOOTTIME, nanoseconds) + * of STA's association * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -3221,6 +3264,7 @@ enum nl80211_sta_info { NL80211_STA_INFO_TX_DURATION, NL80211_STA_INFO_AIRTIME_WEIGHT, NL80211_STA_INFO_AIRTIME_LINK_METRIC, + NL80211_STA_INFO_ASSOC_AT_BOOTTIME, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, @@ -3402,6 +3446,12 @@ enum nl80211_band_iftype_attr { * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using * attributes from &enum nl80211_band_iftype_attr + * @NL80211_BAND_ATTR_EDMG_CHANNELS: bitmap that indicates the 2.16 GHz + * channel(s) that are allowed to be used for EDMG transmissions. + * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251. + * @NL80211_BAND_ATTR_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes + * the allowed channel bandwidth configurations. + * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13. 
* @NL80211_BAND_ATTR_MAX: highest band attribute currently defined * @__NL80211_BAND_ATTR_AFTER_LAST: internal use */ @@ -3419,6 +3469,9 @@ enum nl80211_band_attr { NL80211_BAND_ATTR_VHT_CAPA, NL80211_BAND_ATTR_IFTYPE_DATA, + NL80211_BAND_ATTR_EDMG_CHANNELS, + NL80211_BAND_ATTR_EDMG_BW_CONFIG, + /* keep last */ __NL80211_BAND_ATTR_AFTER_LAST, NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1 @@ -3817,6 +3870,8 @@ enum nl80211_user_reg_hint_type { * @NL80211_SURVEY_INFO_TIME_SCAN: time the radio spent for scan * (on this channel or globally) * @NL80211_SURVEY_INFO_PAD: attribute used for padding for 64-bit alignment + * @NL80211_SURVEY_INFO_TIME_BSS_RX: amount of time the radio spent + * receiving frames destined to the local BSS * @NL80211_SURVEY_INFO_MAX: highest survey info attribute number * currently defined * @__NL80211_SURVEY_INFO_AFTER_LAST: internal use @@ -3833,6 +3888,7 @@ enum nl80211_survey_info { NL80211_SURVEY_INFO_TIME_TX, NL80211_SURVEY_INFO_TIME_SCAN, NL80211_SURVEY_INFO_PAD, + NL80211_SURVEY_INFO_TIME_BSS_RX, /* keep last */ __NL80211_SURVEY_INFO_AFTER_LAST, @@ -4406,6 +4462,7 @@ enum nl80211_mfp { enum nl80211_wpa_versions { NL80211_WPA_VERSION_1 = 1 << 0, NL80211_WPA_VERSION_2 = 1 << 1, + NL80211_WPA_VERSION_3 = 1 << 2, }; /** @@ -4516,6 +4573,7 @@ enum nl80211_txrate_gi { * @NL80211_BAND_2GHZ: 2.4 GHz ISM band * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz) + * @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz) * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ @@ -4523,6 +4581,7 @@ enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ, NL80211_BAND_60GHZ, + NL80211_BAND_6GHZ, NUM_NL80211_BANDS, }; @@ -5314,7 +5373,7 @@ enum nl80211_feature_flags { NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28, NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29, NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30, - NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31, + NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1U << 31, }; /** @@ -5422,6 +5481,9 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_STA_TX_PWR: This driver supports controlling tx power * to a station. * + * @NL80211_EXT_FEATURE_SAE_OFFLOAD: Device wants to do SAE authentication in + * station mode (SAE password is passed as part of the connect command). + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ @@ -5466,6 +5528,7 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD, NL80211_EXT_FEATURE_EXT_KEY_ID, NL80211_EXT_FEATURE_STA_TX_PWR, + NL80211_EXT_FEATURE_SAE_OFFLOAD, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -6464,4 +6527,26 @@ enum nl80211_peer_measurement_ftm_resp { NL80211_PMSR_FTM_RESP_ATTR_MAX = NUM_NL80211_PMSR_FTM_RESP_ATTR - 1 }; +/** + * enum nl80211_obss_pd_attributes - OBSS packet detection attributes + * @__NL80211_HE_OBSS_PD_ATTR_INVALID: Invalid + * + * @NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET: the OBSS PD minimum tx power offset. + * @NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET: the OBSS PD maximum tx power offset. + * + * @__NL80211_HE_OBSS_PD_ATTR_LAST: Internal + * @NL80211_HE_OBSS_PD_ATTR_MAX: highest OBSS PD attribute. 
+ */ +enum nl80211_obss_pd_attributes { + __NL80211_HE_OBSS_PD_ATTR_INVALID, + + NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET, + NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET, + + /* keep last */ + __NL80211_HE_OBSS_PD_ATTR_LAST, + NL80211_HE_OBSS_PD_ATTR_MAX = __NL80211_HE_OBSS_PD_ATTR_LAST - 1, +}; + + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h index 1c215ea1798e..e168dc59e9a0 100644 --- a/include/uapi/linux/nvme_ioctl.h +++ b/include/uapi/linux/nvme_ioctl.h @@ -45,6 +45,27 @@ struct nvme_passthru_cmd { __u32 result; }; +struct nvme_passthru_cmd64 { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u64 result; +}; + #define nvme_admin_cmd nvme_passthru_cmd #define NVME_IOCTL_ID _IO('N', 0x40) @@ -54,5 +75,7 @@ struct nvme_passthru_cmd { #define NVME_IOCTL_RESET _IO('N', 0x44) #define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) #define NVME_IOCTL_RESCAN _IO('N', 0x46) +#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64) +#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64) #endif /* _UAPI_LINUX_NVME_IOCTL_H */ diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index f271f1ec50ae..1887a451c388 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -123,6 +123,9 @@ struct ovs_vport_stats { /* Allow datapath to associate multiple Netlink PIDs to each vport */ #define OVS_DP_F_VPORT_PIDS (1 << 1) +/* Allow tc offload recirc sharing */ +#define OVS_DP_F_TC_RECIRC_SHARING (1 << 2) + /* Fixed logical ports. 
*/ #define OVSP_LOCAL ((__u32)0) diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 27164769d184..29d6e93fd15e 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -528,6 +528,7 @@ #define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ #define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ #define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#define PCI_EXP_LNKCAP_SLS_32_0GB 0x00000005 /* LNKCAP2 SLS Vector bit 4 */ #define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ #define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ #define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */ @@ -556,6 +557,7 @@ #define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */ #define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */ #define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ +#define PCI_EXP_LNKSTA_CLS_32_0GB 0x0005 /* Current Link Speed 32.0GT/s */ #define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ #define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */ #define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */ @@ -589,6 +591,7 @@ #define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ #define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ #define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ +#define PCI_EXP_SLTCTL_ATTN_IND_SHIFT 6 /* Attention Indicator shift */ #define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */ #define PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */ #define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */ @@ -661,6 +664,7 @@ #define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ #define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +#define PCI_EXP_LNKCAP2_SLS_32_0GB 0x00000020 /* Supported Speed 32GT/s */ #define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */ #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ #define PCI_EXP_LNKCTL2_TLS 0x000f @@ -668,6 +672,7 @@ #define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */ #define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */ #define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */ +#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */ #define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */ #define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */ @@ -709,7 +714,9 @@ #define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */ #define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */ #define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */ -#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM +#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */ +#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */ +#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT #define PCI_EXT_CAP_DSN_SIZEOF 12 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40 @@ -1049,4 +1056,14 @@ #define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ #define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ +/* Data Link Feature */ +#define PCI_DLF_CAP 0x04 /* Capabilities Register */ +#define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data Link Feature Exchange Enable */ + +/* 
Physical Layer 16.0 GT/s */ +#define PCI_PL_16GT_LE_CTRL 0x20 /* Lane Equalization Control Register */ +#define PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK 0x0000000F +#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0 +#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4 + #endif /* LINUX_PCI_REGS_H */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 7198ddd0c6b1..bb7b271397a6 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -374,7 +374,8 @@ struct perf_event_attr { namespaces : 1, /* include namespaces data */ ksymbol : 1, /* include ksymbol events */ bpf_event : 1, /* include bpf events */ - __reserved_1 : 33; + aux_output : 1, /* generate AUX records instead of events */ + __reserved_1 : 32; union { __u32 wakeup_events; /* wakeup every n events */ diff --git a/include/uapi/linux/pg.h b/include/uapi/linux/pg.h index 364c350e85cd..62b6f69bd9fb 100644 --- a/include/uapi/linux/pg.h +++ b/include/uapi/linux/pg.h @@ -35,6 +35,9 @@ */ +#ifndef _UAPI_LINUX_PG_H +#define _UAPI_LINUX_PG_H + #define PG_MAGIC 'P' #define PG_RESET 'Z' #define PG_COMMAND 'C' @@ -61,4 +64,4 @@ struct pg_read_hdr { }; -/* end of pg.h */ +#endif /* _UAPI_LINUX_PG_H */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 51a0496f78ea..a6aa466fac9e 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -104,6 +104,9 @@ enum tca_id { TCA_ID_SIMP = TCA_ACT_SIMP, TCA_ID_IFE = TCA_ACT_IFE, TCA_ID_SAMPLE = TCA_ACT_SAMPLE, + TCA_ID_CTINFO, + TCA_ID_MPLS, + TCA_ID_CT, /* other actions go here */ __TCA_ID_MAX = 255 }; @@ -157,6 +160,8 @@ enum { TCA_POLICE_RESULT, TCA_POLICE_TM, TCA_POLICE_PAD, + TCA_POLICE_RATE64, + TCA_POLICE_PEAKRATE64, __TCA_POLICE_MAX #define TCA_POLICE_RESULT TCA_POLICE_RESULT }; @@ -294,7 +299,7 @@ enum { TCA_FW_UNSPEC, TCA_FW_CLASSID, TCA_FW_POLICE, - TCA_FW_INDEV, /* used by CONFIG_NET_CLS_IND */ + TCA_FW_INDEV, TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */ TCA_FW_MASK, __TCA_FW_MAX @@ -534,12 +539,28 @@ enum { TCA_FLOWER_KEY_PORT_DST_MIN, /* be16 */ TCA_FLOWER_KEY_PORT_DST_MAX, /* be16 */ + TCA_FLOWER_KEY_CT_STATE, /* u16 */ + TCA_FLOWER_KEY_CT_STATE_MASK, /* u16 */ + TCA_FLOWER_KEY_CT_ZONE, /* u16 */ + TCA_FLOWER_KEY_CT_ZONE_MASK, /* u16 */ + TCA_FLOWER_KEY_CT_MARK, /* u32 */ + TCA_FLOWER_KEY_CT_MARK_MASK, /* u32 */ + TCA_FLOWER_KEY_CT_LABELS, /* u128 */ + TCA_FLOWER_KEY_CT_LABELS_MASK, /* u128 */ + __TCA_FLOWER_MAX, }; #define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1) enum { + TCA_FLOWER_KEY_CT_FLAGS_NEW = 1 << 0, /* Beginning of a new connection. */ + TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1, /* Part of an existing connection. */ + TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2, /* Related to an established connection. */ + TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3, /* Conntrack has occurred. */ +}; + +enum { TCA_FLOWER_KEY_ENC_OPTS_UNSPEC, TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested * TCA_FLOWER_KEY_ENC_OPT_GENEVE_ diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 8b2f993cbb77..5011259b8f67 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -2,6 +2,7 @@ #ifndef __LINUX_PKT_SCHED_H #define __LINUX_PKT_SCHED_H +#include <linux/const.h> #include <linux/types.h> /* Logical priority bands not depending on specific packet scheduler. 
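[Editor's note -- illustration only, not part of the patch: the TCA_FLOWER_KEY_CT_* attributes added to pkt_cls.h above let the flower classifier match conntrack state. A hedged sketch of the value/mask pair that would be carried in the TCA_FLOWER_KEY_CT_STATE and TCA_FLOWER_KEY_CT_STATE_MASK attributes (both u16) to match tracked, established connections; only the flag and attribute names come from this diff, the surrounding netlink plumbing is omitted:

    __u16 ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
                     TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
    __u16 ct_state_mask = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
                          TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
                          TCA_FLOWER_KEY_CT_FLAGS_NEW;

Including NEW in the mask but not in the value makes the filter reject packets that start a new connection, roughly what an iproute2 rule such as "flower ct_state +trk+est-new" is expected to generate.]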
@@ -988,8 +989,9 @@ struct tc_etf_qopt { __s32 delta; __s32 clockid; __u32 flags; -#define TC_ETF_DEADLINE_MODE_ON BIT(0) -#define TC_ETF_OFFLOAD_ON BIT(1) +#define TC_ETF_DEADLINE_MODE_ON _BITUL(0) +#define TC_ETF_OFFLOAD_ON _BITUL(1) +#define TC_ETF_SKIP_SOCK_CHECK _BITUL(2) }; enum { @@ -1158,6 +1160,9 @@ enum { * [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL] */ +#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST BIT(0) +#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD BIT(1) + enum { TCA_TAPRIO_ATTR_UNSPEC, TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */ @@ -1169,6 +1174,8 @@ enum { TCA_TAPRIO_ATTR_ADMIN_SCHED, /* The admin sched, only used in dump */ TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */ TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */ + TCA_TAPRIO_ATTR_FLAGS, /* u32 */ + TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */ __TCA_TAPRIO_ATTR_MAX, }; diff --git a/include/uapi/linux/ppdev.h b/include/uapi/linux/ppdev.h index 8fe3c64d149e..eb895b83f2bd 100644 --- a/include/uapi/linux/ppdev.h +++ b/include/uapi/linux/ppdev.h @@ -15,6 +15,9 @@ * Added PPGETMODES/PPGETMODE/PPGETPHASE, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001 */ +#ifndef _UAPI_LINUX_PPDEV_H +#define _UAPI_LINUX_PPDEV_H + #define PP_IOCTL 'p' /* Set mode for read/write (e.g. IEEE1284_MODE_EPP) */ @@ -97,4 +100,4 @@ struct ppdev_frob_struct { /* only masks user-visible flags */ #define PP_FLAGMASK (PP_FASTWRITE | PP_FASTREAD | PP_W91284PIC) - +#endif /* _UAPI_LINUX_PPDEV_H */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 094bb03b9cc2..7da1b37b27aa 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -181,7 +181,7 @@ struct prctl_mm_map { #define PR_GET_THP_DISABLE 42 /* - * Tell the kernel to start/stop helping userspace manage bounds tables. + * No longer implemented, but left here to ensure the numbers stay reserved: */ #define PR_MPX_ENABLE_MANAGEMENT 43 #define PR_MPX_DISABLE_MANAGEMENT 44 @@ -229,4 +229,9 @@ struct prctl_mm_map { # define PR_PAC_APDBKEY (1UL << 3) # define PR_PAC_APGAKEY (1UL << 4) +/* Tagged user address controls for arm64 */ +#define PR_SET_TAGGED_ADDR_CTRL 55 +#define PR_GET_TAGGED_ADDR_CTRL 56 +# define PR_TAGGED_ADDR_ENABLE (1UL << 0) + #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 8654b2442f6a..592a0c1b77c9 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Userspace interface for AMD Secure Encrypted Virtualization (SEV) * platform management commands. diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index 1bc794ad957a..59e89a1bc3bb 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -25,12 +25,44 @@ #include <linux/ioctl.h> #include <linux/types.h> -/* PTP_xxx bits, for the flags field within the request structures. */ +/* + * Bits of the ptp_extts_request.flags field: + */ #define PTP_ENABLE_FEATURE (1<<0) #define PTP_RISING_EDGE (1<<1) #define PTP_FALLING_EDGE (1<<2) /* + * flag fields valid for the new PTP_EXTTS_REQUEST2 ioctl. + */ +#define PTP_EXTTS_VALID_FLAGS (PTP_ENABLE_FEATURE | \ + PTP_RISING_EDGE | \ + PTP_FALLING_EDGE) + +/* + * flag fields valid for the original PTP_EXTTS_REQUEST ioctl. + * DO NOT ADD NEW FLAGS HERE. 
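 * [Editor's note -- illustration only, not part of the patch: the point of
 *  keeping a frozen PTP_EXTTS_V1_VALID_FLAGS next to PTP_EXTTS_VALID_FLAGS
 *  appears to be that the new *2 ioctls can reject flag bits which the
 *  original ioctls used to ignore. Assuming a file descriptor on a
 *  /dev/ptpN device, a strict request for rising-edge external timestamps
 *  on channel 0 would look roughly like:
 *      struct ptp_extts_request req = {
 *              .index = 0,
 *              .flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
 *      };
 *      ioctl(fd, PTP_EXTTS_REQUEST2, &req);
 *  PTP_EXTTS_REQUEST2 and the other *2 ioctls appear later in this file's
 *  diff; struct ptp_extts_request is assumed from the existing header.]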
+ */ +#define PTP_EXTTS_V1_VALID_FLAGS (PTP_ENABLE_FEATURE | \ + PTP_RISING_EDGE | \ + PTP_FALLING_EDGE) + +/* + * Bits of the ptp_perout_request.flags field: + */ +#define PTP_PEROUT_ONE_SHOT (1<<0) + +/* + * flag fields valid for the new PTP_PEROUT_REQUEST2 ioctl. + */ +#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT) + +/* + * No flags are valid for the original PTP_PEROUT_REQUEST ioctl + */ +#define PTP_PEROUT_V1_VALID_FLAGS (0) + +/* * struct ptp_clock_time - represents a time value * * The sign of the seconds field applies to the whole value. The @@ -67,7 +99,7 @@ struct ptp_perout_request { struct ptp_clock_time start; /* Absolute start time. */ struct ptp_clock_time period; /* Desired period, zero means disable. */ unsigned int index; /* Which channel to configure. */ - unsigned int flags; /* Reserved for future use. */ + unsigned int flags; unsigned int rsv[4]; /* Reserved for future use. */ }; @@ -149,6 +181,18 @@ struct ptp_pin_desc { #define PTP_SYS_OFFSET_EXTENDED \ _IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) +#define PTP_CLOCK_GETCAPS2 _IOR(PTP_CLK_MAGIC, 10, struct ptp_clock_caps) +#define PTP_EXTTS_REQUEST2 _IOW(PTP_CLK_MAGIC, 11, struct ptp_extts_request) +#define PTP_PEROUT_REQUEST2 _IOW(PTP_CLK_MAGIC, 12, struct ptp_perout_request) +#define PTP_ENABLE_PPS2 _IOW(PTP_CLK_MAGIC, 13, int) +#define PTP_SYS_OFFSET2 _IOW(PTP_CLK_MAGIC, 14, struct ptp_sys_offset) +#define PTP_PIN_GETFUNC2 _IOWR(PTP_CLK_MAGIC, 15, struct ptp_pin_desc) +#define PTP_PIN_SETFUNC2 _IOW(PTP_CLK_MAGIC, 16, struct ptp_pin_desc) +#define PTP_SYS_OFFSET_PRECISE2 \ + _IOWR(PTP_CLK_MAGIC, 17, struct ptp_sys_offset_precise) +#define PTP_SYS_OFFSET_EXTENDED2 \ + _IOWR(PTP_CLK_MAGIC, 18, struct ptp_sys_offset_extended) + struct ptp_extts_event { struct ptp_clock_time t; /* Time event occured. */ unsigned int index; /* Which channel produced the event. */ diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index d5a1b8a492b9..a71b6e3b03eb 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h @@ -73,6 +73,41 @@ struct seccomp_metadata { __u64 flags; /* Output: filter's flags */ }; +#define PTRACE_GET_SYSCALL_INFO 0x420e +#define PTRACE_SYSCALL_INFO_NONE 0 +#define PTRACE_SYSCALL_INFO_ENTRY 1 +#define PTRACE_SYSCALL_INFO_EXIT 2 +#define PTRACE_SYSCALL_INFO_SECCOMP 3 + +struct ptrace_syscall_info { + __u8 op; /* PTRACE_SYSCALL_INFO_* */ + __u32 arch __attribute__((__aligned__(sizeof(__u32)))); + __u64 instruction_pointer; + __u64 stack_pointer; + union { + struct { + __u64 nr; + __u64 args[6]; + } entry; + struct { + __s64 rval; + __u8 is_error; + } exit; + struct { + __u64 nr; + __u64 args[6]; + __u32 ret_data; + } seccomp; + }; +}; + +/* + * These values are stored in task->ptrace_message + * by tracehook_report_syscall_* to describe the current syscall-stop. 
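 * [Editor's note -- illustration only, not part of the patch: together with
 *  the PTRACE_GET_SYSCALL_INFO request and struct ptrace_syscall_info added
 *  above, a tracer sitting at a syscall-stop can query what kind of stop it
 *  is; a rough sketch, where handle_entry() is a hypothetical helper:
 *      struct ptrace_syscall_info info;
 *      long n = ptrace(PTRACE_GET_SYSCALL_INFO, pid,
 *                      (void *)sizeof(info), &info);
 *      if (n > 0 && info.op == PTRACE_SYSCALL_INFO_ENTRY)
 *              handle_entry(info.entry.nr, info.entry.args);
 *  The call is understood to return how many bytes of the structure the
 *  kernel can fill, so callers can cope with older or newer layouts.]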
+ */ +#define PTRACE_EVENTMSG_SYSCALL_ENTRY 1 +#define PTRACE_EVENTMSG_SYSCALL_EXIT 2 + /* Read signals from a shared (process wide) queue */ #define PTRACE_PEEKSIGINFO_SHARED (1 << 0) diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index b0d15c73f6d7..1f2d8c81f0e0 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -329,6 +329,7 @@ struct mdp_superblock_1 { #define MD_FEATURE_JOURNAL 512 /* support write cache */ #define MD_FEATURE_PPL 1024 /* support PPL */ #define MD_FEATURE_MULTIPLE_PPLS 2048 /* support for multiple PPLs */ +#define MD_FEATURE_RAID0_LAYOUT 4096 /* layout is meaningful for RAID0 */ #define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ |MD_FEATURE_RECOVERY_OFFSET \ |MD_FEATURE_RESHAPE_ACTIVE \ @@ -341,6 +342,7 @@ struct mdp_superblock_1 { |MD_FEATURE_JOURNAL \ |MD_FEATURE_PPL \ |MD_FEATURE_MULTIPLE_PPLS \ + |MD_FEATURE_RAID0_LAYOUT \ ) struct r5l_payload_header { diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 5d0f76c780e5..cba368e55863 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -250,6 +250,8 @@ struct rds_info_rdma_connection { __u32 rdma_mr_max; __u32 rdma_mr_size; __u8 tos; + __u8 sl; + __u32 cache_allocs; }; struct rds6_info_rdma_connection { @@ -264,6 +266,8 @@ struct rds6_info_rdma_connection { __u32 rdma_mr_max; __u32 rdma_mr_size; __u8 tos; + __u8 sl; + __u32 cache_allocs; }; /* RDS message Receive Path Latency points */ diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 46399367627f..ce2a623abb75 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -157,6 +157,13 @@ enum { RTM_GETCHAIN, #define RTM_GETCHAIN RTM_GETCHAIN + RTM_NEWNEXTHOP = 104, +#define RTM_NEWNEXTHOP RTM_NEWNEXTHOP + RTM_DELNEXTHOP, +#define RTM_DELNEXTHOP RTM_DELNEXTHOP + RTM_GETNEXTHOP, +#define RTM_GETNEXTHOP RTM_GETNEXTHOP + __RTM_MAX, #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; @@ -342,6 +349,7 @@ enum rtattr_type_t { RTA_IP_PROTO, RTA_SPORT, RTA_DPORT, + RTA_NH_ID, __RTA_MAX }; @@ -704,6 +712,8 @@ enum rtnetlink_groups { #define RTNLGRP_IPV4_MROUTE_R RTNLGRP_IPV4_MROUTE_R RTNLGRP_IPV6_MROUTE_R, #define RTNLGRP_IPV6_MROUTE_R RTNLGRP_IPV6_MROUTE_R + RTNLGRP_NEXTHOP, +#define RTNLGRP_NEXTHOP RTNLGRP_NEXTHOP __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h index 782069dcf607..4accfa7e266d 100644 --- a/include/uapi/linux/rxrpc.h +++ b/include/uapi/linux/rxrpc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ /* Types and definitions for AF_RXRPC. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h index ed4ee170bee2..25b4fa00bad1 100644 --- a/include/uapi/linux/sched.h +++ b/include/uapi/linux/sched.h @@ -2,6 +2,8 @@ #ifndef _UAPI_LINUX_SCHED_H #define _UAPI_LINUX_SCHED_H +#include <linux/types.h> + /* * cloning flags: */ @@ -31,6 +33,50 @@ #define CLONE_NEWNET 0x40000000 /* New network namespace */ #define CLONE_IO 0x80000000 /* Clone io context */ +#ifndef __ASSEMBLY__ +/** + * struct clone_args - arguments for the clone3 syscall + * @flags: Flags for the new process as listed above. + * All flags are valid except for CSIGNAL and + * CLONE_DETACHED. + * @pidfd: If CLONE_PIDFD is set, a pidfd will be + * returned in this argument. 
+ * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the + * child process will be returned in the child's + * memory. + * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of + * the child process will be returned in the + * parent's memory. + * @exit_signal: The exit_signal the parent process will be + * sent when the child exits. + * @stack: Specify the location of the stack for the + * child process. + * Note, @stack is expected to point to the + * lowest address. The stack direction will be + * determined by the kernel and set up + * appropriately based on @stack_size. + * @stack_size: The size of the stack for the child process. + * @tls: If CLONE_SETTLS is set, the tls descriptor + * is set to tls. + * + * The structure is versioned by size and thus extensible. + * New struct members must go at the end of the struct and + * must be properly 64bit aligned. + */ +struct clone_args { + __aligned_u64 flags; + __aligned_u64 pidfd; + __aligned_u64 child_tid; + __aligned_u64 parent_tid; + __aligned_u64 exit_signal; + __aligned_u64 stack; + __aligned_u64 stack_size; + __aligned_u64 tls; +}; +#endif + +#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */ + /* * Scheduling policies */ @@ -51,9 +97,21 @@ #define SCHED_FLAG_RESET_ON_FORK 0x01 #define SCHED_FLAG_RECLAIM 0x02 #define SCHED_FLAG_DL_OVERRUN 0x04 +#define SCHED_FLAG_KEEP_POLICY 0x08 +#define SCHED_FLAG_KEEP_PARAMS 0x10 +#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20 +#define SCHED_FLAG_UTIL_CLAMP_MAX 0x40 + +#define SCHED_FLAG_KEEP_ALL (SCHED_FLAG_KEEP_POLICY | \ + SCHED_FLAG_KEEP_PARAMS) + +#define SCHED_FLAG_UTIL_CLAMP (SCHED_FLAG_UTIL_CLAMP_MIN | \ + SCHED_FLAG_UTIL_CLAMP_MAX) #define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \ SCHED_FLAG_RECLAIM | \ - SCHED_FLAG_DL_OVERRUN) + SCHED_FLAG_DL_OVERRUN | \ + SCHED_FLAG_KEEP_ALL | \ + SCHED_FLAG_UTIL_CLAMP) #endif /* _UAPI_LINUX_SCHED_H */ diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h index 10fbb8031930..c852153ddb0d 100644 --- a/include/uapi/linux/sched/types.h +++ b/include/uapi/linux/sched/types.h @@ -9,6 +9,7 @@ struct sched_param { }; #define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ +#define SCHED_ATTR_SIZE_VER1 56 /* add: util_{min,max} */ /* * Extended scheduling parameters data structure. @@ -21,8 +22,33 @@ struct sched_param { * the tasks may be useful for a wide variety of application fields, e.g., * multimedia, streaming, automation and control, and many others. * - * This variant (sched_attr) is meant at describing a so-called - * sporadic time-constrained task. In such model a task is specified by: + * This variant (sched_attr) allows to define additional attributes to + * improve the scheduler knowledge about task requirements. + * + * Scheduling Class Attributes + * =========================== + * + * A subset of sched_attr attributes specifies the + * scheduling policy and relative POSIX attributes: + * + * @size size of the structure, for fwd/bwd compat. + * + * @sched_policy task's scheduling policy + * @sched_nice task's nice value (SCHED_NORMAL/BATCH) + * @sched_priority task's static priority (SCHED_FIFO/RR) + * + * Certain more advanced scheduling features can be controlled by a + * predefined set of flags via the attribute: + * + * @sched_flags for customizing the scheduler behaviour + * + * Sporadic Time-Constrained Task Attributes + * ========================================= + * + * A subset of sched_attr attributes allows to describe a so-called + * sporadic time-constrained task. 
+ * + * In such a model a task is specified by: * - the activation period or minimum instance inter-arrival time; * - the maximum (or average, depending on the actual scheduling * discipline) computation time of all instances, a.k.a. runtime; @@ -34,14 +60,8 @@ struct sched_param { * than the runtime and must be completed by time instant t equal to * the instance activation time + the deadline. * - * This is reflected by the actual fields of the sched_attr structure: + * This is reflected by the following fields of the sched_attr structure: * - * @size size of the structure, for fwd/bwd compat. - * - * @sched_policy task's scheduling policy - * @sched_flags for customizing the scheduler behaviour - * @sched_nice task's nice value (SCHED_NORMAL/BATCH) - * @sched_priority task's static priority (SCHED_FIFO/RR) * @sched_deadline representative of the task's deadline * @sched_runtime representative of the task's runtime * @sched_period representative of the task's period @@ -53,6 +73,29 @@ struct sched_param { * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the * only user of this new interface. More information about the algorithm * available in the scheduling class file or in Documentation/. + * + * Task Utilization Attributes + * =========================== + * + * A subset of sched_attr attributes allows to specify the utilization + * expected for a task. These attributes allow to inform the scheduler about + * the utilization boundaries within which it should schedule the task. These + * boundaries are valuable hints to support scheduler decisions on both task + * placement and frequency selection. + * + * @sched_util_min represents the minimum utilization + * @sched_util_max represents the maximum utilization + * + * Utilization is a value in the range [0..SCHED_CAPACITY_SCALE]. It + * represents the percentage of CPU time used by a task when running at the + * maximum frequency on the highest capacity CPU of the system. For example, a + * 20% utilization task is a task running for 2ms every 10ms at maximum + * frequency. + * + * A task with a min utilization value bigger than 0 is more likely scheduled + * on a CPU with a capacity big enough to fit the specified value. + * A task with a max utilization value smaller than 1024 is more likely + * scheduled on a CPU with no more capacity than the specified value. 
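 * [Editor's note -- illustration only, not part of the patch: with the two
 *  utilization fields added to the structure below, a task that should not
 *  be treated as needing more than half of SCHED_CAPACITY_SCALE (1024)
 *  could be set up roughly as
 *      struct sched_attr attr = {
 *              .size           = sizeof(attr),
 *              .sched_policy   = SCHED_NORMAL,
 *              .sched_flags    = SCHED_FLAG_UTIL_CLAMP_MAX,
 *              .sched_util_max = 512,
 *      };
 *  and handed to the sched_setattr() system call. sizeof(attr) is then the
 *  new SCHED_ATTR_SIZE_VER1 (56); a kernel without these fields would fail
 *  such a call with E2BIG, since the unknown tail of the structure is
 *  non-zero.]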
*/ struct sched_attr { __u32 size; @@ -70,6 +113,11 @@ struct sched_attr { __u64 sched_runtime; __u64 sched_deadline; __u64 sched_period; + + /* Utilization hints */ + __u32 sched_util_min; + __u32 sched_util_max; + }; #endif /* _UAPI_LINUX_SCHED_TYPES_H */ diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index b8f2c4d56532..6d5b164af55c 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -134,6 +134,9 @@ typedef __s32 sctp_assoc_t; #define SCTP_INTERLEAVING_SUPPORTED 125 #define SCTP_SENDMSG_CONNECT 126 #define SCTP_EVENT 127 +#define SCTP_ASCONF_SUPPORTED 128 +#define SCTP_AUTH_SUPPORTED 129 +#define SCTP_ECN_SUPPORTED 130 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h index 33e53b80cd1f..c6d035fa1b6c 100644 --- a/include/uapi/linux/sed-opal.h +++ b/include/uapi/linux/sed-opal.h @@ -20,6 +20,11 @@ enum opal_mbr { OPAL_MBR_DISABLE = 0x01, }; +enum opal_mbr_done_flag { + OPAL_MBR_NOT_DONE = 0x0, + OPAL_MBR_DONE = 0x01 +}; + enum opal_user { OPAL_ADMIN1 = 0x0, OPAL_USER1 = 0x01, @@ -95,6 +100,19 @@ struct opal_mbr_data { __u8 __align[7]; }; +struct opal_mbr_done { + struct opal_key key; + __u8 done_flag; + __u8 __align[7]; +}; + +struct opal_shadow_mbr { + struct opal_key key; + const __u64 data; + __u64 offset; + __u64 size; +}; + #define IOC_OPAL_SAVE _IOW('p', 220, struct opal_lock_unlock) #define IOC_OPAL_LOCK_UNLOCK _IOW('p', 221, struct opal_lock_unlock) #define IOC_OPAL_TAKE_OWNERSHIP _IOW('p', 222, struct opal_key) @@ -107,5 +125,8 @@ struct opal_mbr_data { #define IOC_OPAL_ENABLE_DISABLE_MBR _IOW('p', 229, struct opal_mbr_data) #define IOC_OPAL_ERASE_LR _IOW('p', 230, struct opal_session_info) #define IOC_OPAL_SECURE_ERASE_LR _IOW('p', 231, struct opal_session_info) +#define IOC_OPAL_PSID_REVERT_TPR _IOW('p', 232, struct opal_key) +#define IOC_OPAL_MBR_DONE _IOW('p', 233, struct opal_mbr_done) +#define IOC_OPAL_WRITE_SHADOW_MBR _IOW('p', 234, struct opal_shadow_mbr) #endif /* _UAPI_SED_OPAL_H */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 67c4aaaa2308..e7fe550b6038 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -129,7 +129,7 @@ /* Motorola i.MX SoC */ #define PORT_IMX 62 -/* Marvell MPSC */ +/* Marvell MPSC (obsolete unused) */ #define PORT_MPSC 63 /* TXX9 type number */ @@ -150,9 +150,6 @@ #define PORT_PNX8XXX 70 -/* Hilscher netx */ -#define PORT_NETX 71 - /* SUN4V Hypervisor Console */ #define PORT_SUNHV 72 @@ -164,9 +161,6 @@ /* Blackfin bf5xx */ #define PORT_BFIN 75 -/* Micrel KS8695 */ -#define PORT_KS8695 76 - /* Broadcom SB1250, etc. 
SOC */ #define PORT_SB1250_DUART 77 @@ -293,4 +287,10 @@ /* SiFive UART */ #define PORT_SIFIVE_V0 120 +/* Sunix UART */ +#define PORT_SUNIX 121 + +/* Freescale Linflex UART */ +#define PORT_LINFLEXUART 122 + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h index a0cac1d8670d..50e991952c97 100644 --- a/include/uapi/linux/serio.h +++ b/include/uapi/linux/serio.h @@ -82,5 +82,6 @@ #define SERIO_EGALAX 0x3f #define SERIO_PULSE8_CEC 0x40 #define SERIO_RAINSHADOW_CEC 0x41 +#define SERIO_FSIA6B 0x42 #endif /* _UAPI_SERIO_H */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index fd42c1316d3d..549a31c29f7d 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -284,6 +284,7 @@ enum LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */ LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */ LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */ + LINUX_MIB_TCPFASTOPENPASSIVEALTKEY, /* TCPFastOpenPassiveAltKey */ __LINUX_MIB_MAX }; diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h index 8eb96021709c..c3409c8ec0dd 100644 --- a/include/uapi/linux/socket.h +++ b/include/uapi/linux/socket.h @@ -6,17 +6,24 @@ * Desired design of maximum size and alignment (see RFC2553) */ #define _K_SS_MAXSIZE 128 /* Implementation specific max size */ -#define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *)) - /* Implementation specific desired alignment */ typedef unsigned short __kernel_sa_family_t; +/* + * The definition uses anonymous union and struct in order to control the + * default alignment. + */ struct __kernel_sockaddr_storage { - __kernel_sa_family_t ss_family; /* address family */ - /* Following field(s) are implementation specific */ - char __data[_K_SS_MAXSIZE - sizeof(unsigned short)]; + union { + struct { + __kernel_sa_family_t ss_family; /* address family */ + /* Following field(s) are implementation specific */ + char __data[_K_SS_MAXSIZE - sizeof(unsigned short)]; /* space to achieve desired size, */ /* _SS_MAXSIZE value minus size of ss_family */ -} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ + }; + void *__align; /* implementation specific desired alignment */ + }; +}; #endif /* _UAPI_LINUX_SOCKET_H */ diff --git a/include/uapi/linux/tc_act/tc_ct.h b/include/uapi/linux/tc_act/tc_ct.h new file mode 100644 index 000000000000..5fb1d7ac1027 --- /dev/null +++ b/include/uapi/linux/tc_act/tc_ct.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __UAPI_TC_CT_H +#define __UAPI_TC_CT_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +enum { + TCA_CT_UNSPEC, + TCA_CT_PARMS, + TCA_CT_TM, + TCA_CT_ACTION, /* u16 */ + TCA_CT_ZONE, /* u16 */ + TCA_CT_MARK, /* u32 */ + TCA_CT_MARK_MASK, /* u32 */ + TCA_CT_LABELS, /* u128 */ + TCA_CT_LABELS_MASK, /* u128 */ + TCA_CT_NAT_IPV4_MIN, /* be32 */ + TCA_CT_NAT_IPV4_MAX, /* be32 */ + TCA_CT_NAT_IPV6_MIN, /* struct in6_addr */ + TCA_CT_NAT_IPV6_MAX, /* struct in6_addr */ + TCA_CT_NAT_PORT_MIN, /* be16 */ + TCA_CT_NAT_PORT_MAX, /* be16 */ + TCA_CT_PAD, + __TCA_CT_MAX +}; + +#define TCA_CT_MAX (__TCA_CT_MAX - 1) + +#define TCA_CT_ACT_COMMIT (1 << 0) +#define TCA_CT_ACT_FORCE (1 << 1) +#define TCA_CT_ACT_CLEAR (1 << 2) +#define TCA_CT_ACT_NAT (1 << 3) +#define TCA_CT_ACT_NAT_SRC (1 << 4) +#define TCA_CT_ACT_NAT_DST (1 << 5) + +struct tc_ct { + tc_gen; +}; + +#endif /* __UAPI_TC_CT_H */ diff --git a/include/uapi/linux/tc_act/tc_ctinfo.h b/include/uapi/linux/tc_act/tc_ctinfo.h new file mode 100644 index 
000000000000..f5f26d95d0e7 --- /dev/null +++ b/include/uapi/linux/tc_act/tc_ctinfo.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __UAPI_TC_CTINFO_H +#define __UAPI_TC_CTINFO_H + +#include <linux/types.h> +#include <linux/pkt_cls.h> + +struct tc_ctinfo { + tc_gen; +}; + +enum { + TCA_CTINFO_UNSPEC, + TCA_CTINFO_PAD, + TCA_CTINFO_TM, + TCA_CTINFO_ACT, + TCA_CTINFO_ZONE, + TCA_CTINFO_PARMS_DSCP_MASK, + TCA_CTINFO_PARMS_DSCP_STATEMASK, + TCA_CTINFO_PARMS_CPMARK_MASK, + TCA_CTINFO_STATS_DSCP_SET, + TCA_CTINFO_STATS_DSCP_ERROR, + TCA_CTINFO_STATS_CPMARK_SET, + __TCA_CTINFO_MAX +}; + +#define TCA_CTINFO_MAX (__TCA_CTINFO_MAX - 1) + +#endif diff --git a/include/uapi/linux/tc_act/tc_mpls.h b/include/uapi/linux/tc_act/tc_mpls.h new file mode 100644 index 000000000000..9360e95273c7 --- /dev/null +++ b/include/uapi/linux/tc_act/tc_mpls.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (C) 2019 Netronome Systems, Inc. */ + +#ifndef __LINUX_TC_MPLS_H +#define __LINUX_TC_MPLS_H + +#include <linux/pkt_cls.h> + +#define TCA_MPLS_ACT_POP 1 +#define TCA_MPLS_ACT_PUSH 2 +#define TCA_MPLS_ACT_MODIFY 3 +#define TCA_MPLS_ACT_DEC_TTL 4 + +struct tc_mpls { + tc_gen; /* generic TC action fields. */ + int m_action; /* action of type TCA_MPLS_ACT_*. */ +}; + +enum { + TCA_MPLS_UNSPEC, + TCA_MPLS_TM, /* struct tcf_t; time values associated with action. */ + TCA_MPLS_PARMS, /* struct tc_mpls; action type and general TC fields. */ + TCA_MPLS_PAD, + TCA_MPLS_PROTO, /* be16; eth_type of pushed or next (for pop) header. */ + TCA_MPLS_LABEL, /* u32; MPLS label. Lower 20 bits are used. */ + TCA_MPLS_TC, /* u8; MPLS TC field. Lower 3 bits are used. */ + TCA_MPLS_TTL, /* u8; MPLS TTL field. Must not be 0. */ + TCA_MPLS_BOS, /* u8; MPLS BOS field. Either 1 or 0. 
*/ + __TCA_MPLS_MAX, +}; +#define TCA_MPLS_MAX (__TCA_MPLS_MAX - 1) + +#endif diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index b521464ea962..81e697978e8b 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -127,6 +127,9 @@ enum { #define TCP_CM_INQ TCP_INQ +#define TCP_TX_DELAY 37 /* delay outgoing packets by XX usec */ + + #define TCP_REPAIR_ON 1 #define TCP_REPAIR_OFF 0 #define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */ @@ -267,6 +270,12 @@ struct tcp_info { __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */ __u32 tcpi_dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups */ __u32 tcpi_reord_seen; /* reordering events seen */ + + __u32 tcpi_rcv_ooopack; /* Out-of-order packets received */ + + __u32 tcpi_snd_wnd; /* peer's advertised receive window after + * scaling (bytes) + */ }; /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index 5b9c26753e46..bcd2869ed472 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -109,4 +109,19 @@ struct tls12_crypto_info_aes_ccm_128 { unsigned char rec_seq[TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE]; }; +enum { + TLS_INFO_UNSPEC, + TLS_INFO_VERSION, + TLS_INFO_CIPHER, + TLS_INFO_TXCONF, + TLS_INFO_RXCONF, + __TLS_INFO_MAX, +}; +#define TLS_INFO_MAX (__TLS_INFO_MAX - 1) + +#define TLS_CONF_BASE 1 +#define TLS_CONF_SW 2 +#define TLS_CONF_HW 3 +#define TLS_CONF_HW_RECORD 4 + #endif /* _UAPI_LINUX_TLS_H */ diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h index 5c502fdf7a42..a1988576fa8a 100644 --- a/include/uapi/linux/unix_diag.h +++ b/include/uapi/linux/unix_diag.h @@ -20,6 +20,7 @@ struct unix_diag_req { #define UDIAG_SHOW_ICONS 0x00000008 /* show pending connections */ #define UDIAG_SHOW_RQLEN 0x00000010 /* show skb receive queue len */ #define UDIAG_SHOW_MEMINFO 0x00000020 /* show memory info of a socket */ +#define UDIAG_SHOW_UID 0x00000040 /* show socket's UID */ struct unix_diag_msg { __u8 udiag_family; @@ -40,6 +41,7 @@ enum { UNIX_DIAG_RQLEN, UNIX_DIAG_MEMINFO, UNIX_DIAG_SHUTDOWN, + UNIX_DIAG_UID, __UNIX_DIAG_MAX, }; diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h index ddc5396800aa..76b7c3f6cd0d 100644 --- a/include/uapi/linux/usb/audio.h +++ b/include/uapi/linux/usb/audio.h @@ -450,6 +450,43 @@ static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_desc } } +/* + * Extension Unit (XU) has almost compatible layout with Processing Unit, but + * on UAC2, it has a different bmControls size (bControlSize); it's 1 byte for + * XU while 2 bytes for PU. The last iExtension field is a one-byte index as + * well as iProcessing field of PU. 
+ */ +static inline __u8 uac_extension_unit_bControlSize(struct uac_processing_unit_descriptor *desc, + int protocol) +{ + switch (protocol) { + case UAC_VERSION_1: + return desc->baSourceID[desc->bNrInPins + 4]; + case UAC_VERSION_2: + return 1; /* in UAC2, this value is constant */ + case UAC_VERSION_3: + return 4; /* in UAC3, this value is constant */ + default: + return 1; + } +} + +static inline __u8 uac_extension_unit_iExtension(struct uac_processing_unit_descriptor *desc, + int protocol) +{ + __u8 control_size = uac_extension_unit_bControlSize(desc, protocol); + + switch (protocol) { + case UAC_VERSION_1: + case UAC_VERSION_2: + default: + return *(uac_processing_unit_bmControls(desc, protocol) + + control_size); + case UAC_VERSION_3: + return 0; /* UAC3 does not have this field */ + } +} + /* 4.5.2 Class-Specific AS Interface Descriptor */ struct uac1_as_header_descriptor { __u8 bLength; /* in bytes: 7 */ diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index d5a5caec8fbc..2b623f36af6b 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -894,6 +894,8 @@ struct usb_ext_cap_descriptor { /* Link Power Management */ #define USB_BESL_SUPPORT (1 << 2) /* supports BESL */ #define USB_BESL_BASELINE_VALID (1 << 3) /* Baseline BESL valid*/ #define USB_BESL_DEEP_VALID (1 << 4) /* Deep BESL valid */ +#define USB_SET_BESL_BASELINE(p) (((p) & 0xf) << 8) +#define USB_SET_BESL_DEEP(p) (((p) & 0xf) << 12) #define USB_GET_BESL_BASELINE(p) (((p) & (0xf << 8)) >> 8) #define USB_GET_BESL_DEEP(p) (((p) & (0xf << 12)) >> 12) } __attribute__((packed)); diff --git a/include/uapi/linux/usb/g_uvc.h b/include/uapi/linux/usb/g_uvc.h index 3c9ee3020cbb..652f169a019e 100644 --- a/include/uapi/linux/usb/g_uvc.h +++ b/include/uapi/linux/usb/g_uvc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* * g_uvc.h -- USB Video Class Gadget driver API * diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h index 964e87217be4..cf525cddeb94 100644 --- a/include/uapi/linux/usbdevice_fs.h +++ b/include/uapi/linux/usbdevice_fs.h @@ -76,6 +76,26 @@ struct usbdevfs_connectinfo { unsigned char slow; }; +struct usbdevfs_conninfo_ex { + __u32 size; /* Size of the structure from the kernel's */ + /* point of view. Can be used by userspace */ + /* to determine how much data can be */ + /* used/trusted. */ + __u32 busnum; /* USB bus number, as enumerated by the */ + /* kernel, the device is connected to. */ + __u32 devnum; /* Device address on the bus. */ + __u32 speed; /* USB_SPEED_* constants from ch9.h */ + __u8 num_ports; /* Number of ports the device is connected */ + /* to on the way to the root hub. It may */ + /* be bigger than size of 'ports' array so */ + /* userspace can detect overflows. */ + __u8 ports[7]; /* List of ports on the way from the root */ + /* hub to the device. Current limit in */ + /* USB specification is 7 tiers (root hub, */ + /* 5 intermediate hubs, device), which */ + /* gives at most 6 port entries. 
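The USB_SET_BESL_BASELINE()/USB_SET_BESL_DEEP() macros added to ch9.h above are the encode-side counterparts of the existing GET macros. A small sketch of composing the bmAttributes value of a USB 2.0 Extension descriptor from the defines shown in that hunk (the caller passes the raw 4-bit BESL codes):

#include <linux/usb/ch9.h>

static __u32 build_besl_attributes(unsigned int baseline, unsigned int deep)
{
	return USB_BESL_SUPPORT |
	       USB_BESL_BASELINE_VALID | USB_SET_BESL_BASELINE(baseline) |
	       USB_BESL_DEEP_VALID     | USB_SET_BESL_DEEP(deep);
}
/* USB_GET_BESL_BASELINE() and USB_GET_BESL_DEEP() recover the two fields. */
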
*/ +}; + #define USBDEVFS_URB_SHORT_NOT_OK 0x01 #define USBDEVFS_URB_ISO_ASAP 0x02 #define USBDEVFS_URB_BULK_CONTINUATION 0x04 @@ -137,6 +157,8 @@ struct usbdevfs_hub_portinfo { #define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10 #define USBDEVFS_CAP_MMAP 0x20 #define USBDEVFS_CAP_DROP_PRIVILEGES 0x40 +#define USBDEVFS_CAP_CONNINFO_EX 0x80 +#define USBDEVFS_CAP_SUSPEND 0x100 /* USBDEVFS_DISCONNECT_CLAIM flags & struct */ @@ -197,5 +219,13 @@ struct usbdevfs_streams { #define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams) #define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32) #define USBDEVFS_GET_SPEED _IO('U', 31) +/* + * Returns struct usbdevfs_conninfo_ex; length is variable to allow + * extending size of the data returned. + */ +#define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len) +#define USBDEVFS_FORBID_SUSPEND _IO('U', 33) +#define USBDEVFS_ALLOW_SUSPEND _IO('U', 34) +#define USBDEVFS_WAIT_FOR_RESUME _IO('U', 35) #endif /* _UAPI_LINUX_USBDEVICE_FS_H */ diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 37807f23231e..a2669b79b294 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -392,8 +392,13 @@ enum v4l2_mpeg_video_header_mode { #define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE (V4L2_CID_MPEG_BASE+221) enum v4l2_mpeg_video_multi_slice_mode { V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE = 0, + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB = 1, + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES = 2, +#ifndef __KERNEL__ + /* Kept for backwards compatibility reasons. Stupid typo... */ V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB = 1, V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES = 2, +#endif }; #define V4L2_CID_MPEG_VIDEO_VBV_SIZE (V4L2_CID_MPEG_BASE+222) #define V4L2_CID_MPEG_VIDEO_DEC_PTS (V4L2_CID_MPEG_BASE+223) @@ -404,6 +409,24 @@ enum v4l2_mpeg_video_multi_slice_mode { #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228) #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229) +/* CIDs for the MPEG-2 Part 2 (H.262) codec */ +#define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_MPEG_BASE+270) +enum v4l2_mpeg_video_mpeg2_level { + V4L2_MPEG_VIDEO_MPEG2_LEVEL_LOW = 0, + V4L2_MPEG_VIDEO_MPEG2_LEVEL_MAIN = 1, + V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH_1440 = 2, + V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH = 3, +}; +#define V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE (V4L2_CID_MPEG_BASE+271) +enum v4l2_mpeg_video_mpeg2_profile { + V4L2_MPEG_VIDEO_MPEG2_PROFILE_SIMPLE = 0, + V4L2_MPEG_VIDEO_MPEG2_PROFILE_MAIN = 1, + V4L2_MPEG_VIDEO_MPEG2_PROFILE_SNR_SCALABLE = 2, + V4L2_MPEG_VIDEO_MPEG2_PROFILE_SPATIALLY_SCALABLE = 3, + V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH = 4, + V4L2_MPEG_VIDEO_MPEG2_PROFILE_MULTIVIEW = 5, +}; + /* CIDs for the FWHT codec as used by the vicodec driver. 
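The USBDEVFS_CONNINFO_EX(len) ioctl above returns as much of struct usbdevfs_conninfo_ex as fits in the length passed in. A hedged usage sketch; the device path is only an example, and USBDEVFS_GET_CAPABILITIES comes from the same header but outside this hunk:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	struct usbdevfs_conninfo_ex ci = { 0 };
	__u32 caps = 0;
	int fd = open("/dev/bus/usb/001/004", O_RDWR);	/* example node */

	if (fd < 0)
		return 1;
	ioctl(fd, USBDEVFS_GET_CAPABILITIES, &caps);
	if ((caps & USBDEVFS_CAP_CONNINFO_EX) &&
	    ioctl(fd, USBDEVFS_CONNINFO_EX(sizeof(ci)), &ci) == 0)
		printf("bus %u dev %u, %u hop(s) from the root hub\n",
		       ci.busnum, ci.devnum, (unsigned)ci.num_ports);
	return 0;
}
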
*/ #define V4L2_CID_FWHT_I_FRAME_QP (V4L2_CID_MPEG_BASE + 290) #define V4L2_CID_FWHT_P_FRAME_QP (V4L2_CID_MPEG_BASE + 291) diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h index 26f39816af14..c27289fd619a 100644 --- a/include/uapi/linux/vbox_vmmdev_types.h +++ b/include/uapi/linux/vbox_vmmdev_types.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR CDDL-1.0) */ /* * Virtual Device for Guest <-> VMM/Host communication, type definitions * which are also used for the vboxguest ioctl interface / by vboxsf diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h index 612f0c7d3558..9cec58a6a5ea 100644 --- a/include/uapi/linux/vboxguest.h +++ b/include/uapi/linux/vboxguest.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR CDDL-1.0) */ /* * VBoxGuest - VirtualBox Guest Additions Driver Interface. * diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 8f10748dac79..9e843a147ead 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -295,15 +295,38 @@ struct vfio_region_info_cap_type { __u32 subtype; /* type specific */ }; +/* + * List of region types, global per bus driver. + * If you introduce a new type, please add it here. + */ + +/* PCI region type containing a PCI vendor part */ #define VFIO_REGION_TYPE_PCI_VENDOR_TYPE (1 << 31) #define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff) +#define VFIO_REGION_TYPE_GFX (1) +#define VFIO_REGION_TYPE_CCW (2) + +/* sub-types for VFIO_REGION_TYPE_PCI_* */ -/* 8086 Vendor sub-types */ +/* 8086 vendor PCI sub-types */ #define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION (1) #define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2) #define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3) -#define VFIO_REGION_TYPE_GFX (1) +/* 10de vendor PCI sub-types */ +/* + * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space. + */ +#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1) + +/* 1014 vendor PCI sub-types */ +/* + * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU + * to do TLB invalidation on a GPU. + */ +#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1) + +/* sub-types for VFIO_REGION_TYPE_GFX */ #define VFIO_REGION_SUBTYPE_GFX_EDID (1) /** @@ -353,26 +376,10 @@ struct vfio_region_gfx_edid { #define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2 }; -#define VFIO_REGION_TYPE_CCW (2) -/* ccw sub-types */ +/* sub-types for VFIO_REGION_TYPE_CCW */ #define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1) /* - * 10de vendor sub-type - * - * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space. - */ -#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1) - -/* - * 1014 vendor sub-type - * - * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU - * to do TLB invalidation on a GPU. - */ -#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1) - -/* * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped * which allows direct access to non-MSIX registers which happened to be within * the same system page. 
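With the region types and sub-types regrouped as in the vfio.h hunk above, a userspace VFIO driver matches a region by the (type, subtype) pair carried in the VFIO_REGION_INFO_CAP_TYPE capability. A brief sketch; walking the capability chain returned by VFIO_DEVICE_GET_REGION_INFO to obtain 'cap' is not shown:

#include <linux/vfio.h>

static int is_gfx_edid_region(const struct vfio_region_info_cap_type *cap)
{
	return cap->type == VFIO_REGION_TYPE_GFX &&
	       cap->subtype == VFIO_REGION_SUBTYPE_GFX_EDID;
}
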
@@ -714,7 +721,31 @@ struct vfio_iommu_type1_info { __u32 argsz; __u32 flags; #define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */ - __u64 iova_pgsizes; /* Bitmap of supported page sizes */ +#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */ + __u64 iova_pgsizes; /* Bitmap of supported page sizes */ + __u32 cap_offset; /* Offset within info struct of first cap */ +}; + +/* + * The IOVA capability allows to report the valid IOVA range(s) + * excluding any non-relaxable reserved regions exposed by + * devices attached to the container. Any DMA map attempt + * outside the valid iova range will return error. + * + * The structures below define version 1 of this capability. + */ +#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE 1 + +struct vfio_iova_range { + __u64 start; + __u64 end; +}; + +struct vfio_iommu_type1_info_cap_iova_range { + struct vfio_info_cap_header header; + __u32 nr_iovas; + __u32 reserved; + struct vfio_iova_range iova_ranges[]; }; #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12) diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 1050a75fb7ef..530638dffd93 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -80,7 +80,7 @@ /* Four-character-code (FOURCC) */ #define v4l2_fourcc(a, b, c, d)\ ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24)) -#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1 << 31)) +#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1U << 31)) /* * E N U M S @@ -518,7 +518,13 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */ #define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */ #define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */ -#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('B', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */ + +/* + * Originally this had 'BA12' as fourcc, but this clashed with the older + * V4L2_PIX_FMT_SGRBG12 which inexplicably used that same fourcc. + * So use 'GA12' instead for V4L2_PIX_FMT_BGRA444. 
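The new VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE capability above hangs off the cap_offset field of struct vfio_iommu_type1_info. A sketch of the usual two-call pattern for finding it; struct vfio_info_cap_header comes from the same header outside this hunk, setting up the container fd is not shown, and the returned pointer lives inside the allocated info buffer:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static struct vfio_iommu_type1_info_cap_iova_range *
find_iova_ranges(int container)
{
	struct vfio_iommu_type1_info hdr = { .argsz = sizeof(hdr) };
	struct vfio_iommu_type1_info *info;
	struct vfio_info_cap_header *cap;
	__u32 off;

	/* First call only learns the required argsz (header plus caps). */
	if (ioctl(container, VFIO_IOMMU_GET_INFO, &hdr))
		return NULL;
	info = calloc(1, hdr.argsz);
	if (!info)
		return NULL;
	info->argsz = hdr.argsz;
	if (ioctl(container, VFIO_IOMMU_GET_INFO, info) ||
	    !(info->flags & VFIO_IOMMU_INFO_CAPS))
		return NULL;
	for (off = info->cap_offset; off; off = cap->next) {
		cap = (struct vfio_info_cap_header *)((char *)info + off);
		if (cap->id == VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE)
			return (struct vfio_iommu_type1_info_cap_iova_range *)cap;
	}
	return NULL;
}
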
+ */ +#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */ #define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */ #define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */ #define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */ @@ -768,8 +774,10 @@ struct v4l2_fmtdesc { __u32 reserved[4]; }; -#define V4L2_FMT_FLAG_COMPRESSED 0x0001 -#define V4L2_FMT_FLAG_EMULATED 0x0002 +#define V4L2_FMT_FLAG_COMPRESSED 0x0001 +#define V4L2_FMT_FLAG_EMULATED 0x0002 +#define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004 +#define V4L2_FMT_FLAG_DYN_RESOLUTION 0x0008 /* Frame Size and frame rate enumeration */ /* diff --git a/include/uapi/linux/virtio_fs.h b/include/uapi/linux/virtio_fs.h new file mode 100644 index 000000000000..b02eb2ac3d99 --- /dev/null +++ b/include/uapi/linux/virtio_fs.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ + +#ifndef _UAPI_LINUX_VIRTIO_FS_H +#define _UAPI_LINUX_VIRTIO_FS_H + +#include <linux/types.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> +#include <linux/virtio_types.h> + +struct virtio_fs_config { + /* Filesystem name (UTF-8, not NUL-terminated, padded with NULs) */ + __u8 tag[36]; + + /* Number of request queues */ + __u32 num_request_queues; +} __attribute__((packed)); + +#endif /* _UAPI_LINUX_VIRTIO_FS_H */ diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index 6d5c3b2d4f4d..585e07b27333 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h @@ -43,5 +43,8 @@ #define VIRTIO_ID_INPUT 18 /* virtio input */ #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ #define VIRTIO_ID_CRYPTO 20 /* virtio crypto */ +#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */ +#define VIRTIO_ID_FS 26 /* virtio filesystem */ +#define VIRTIO_ID_PMEM 27 /* virtio pmem */ #endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h new file mode 100644 index 000000000000..237e36a280cb --- /dev/null +++ b/include/uapi/linux/virtio_iommu.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * Virtio-iommu definition v0.12 + * + * Copyright (C) 2019 Arm Ltd. 
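The two new v4l2_fmtdesc flags above let a codec advertise that it consumes a continuous bytestream and that it signals mid-stream resolution changes. A short sketch of checking V4L2_FMT_FLAG_DYN_RESOLUTION while enumerating capture formats:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void list_capture_formats(int fd)
{
	struct v4l2_fmtdesc f;

	memset(&f, 0, sizeof(f));
	f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	while (ioctl(fd, VIDIOC_ENUM_FMT, &f) == 0) {
		printf("%s%s\n", (const char *)f.description,
		       (f.flags & V4L2_FMT_FLAG_DYN_RESOLUTION) ?
		       " (signals source changes)" : "");
		f.index++;
	}
}
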
+ */ +#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H +#define _UAPI_LINUX_VIRTIO_IOMMU_H + +#include <linux/types.h> + +/* Feature bits */ +#define VIRTIO_IOMMU_F_INPUT_RANGE 0 +#define VIRTIO_IOMMU_F_DOMAIN_RANGE 1 +#define VIRTIO_IOMMU_F_MAP_UNMAP 2 +#define VIRTIO_IOMMU_F_BYPASS 3 +#define VIRTIO_IOMMU_F_PROBE 4 +#define VIRTIO_IOMMU_F_MMIO 5 + +struct virtio_iommu_range_64 { + __le64 start; + __le64 end; +}; + +struct virtio_iommu_range_32 { + __le32 start; + __le32 end; +}; + +struct virtio_iommu_config { + /* Supported page sizes */ + __le64 page_size_mask; + /* Supported IOVA range */ + struct virtio_iommu_range_64 input_range; + /* Max domain ID size */ + struct virtio_iommu_range_32 domain_range; + /* Probe buffer size */ + __le32 probe_size; +}; + +/* Request types */ +#define VIRTIO_IOMMU_T_ATTACH 0x01 +#define VIRTIO_IOMMU_T_DETACH 0x02 +#define VIRTIO_IOMMU_T_MAP 0x03 +#define VIRTIO_IOMMU_T_UNMAP 0x04 +#define VIRTIO_IOMMU_T_PROBE 0x05 + +/* Status types */ +#define VIRTIO_IOMMU_S_OK 0x00 +#define VIRTIO_IOMMU_S_IOERR 0x01 +#define VIRTIO_IOMMU_S_UNSUPP 0x02 +#define VIRTIO_IOMMU_S_DEVERR 0x03 +#define VIRTIO_IOMMU_S_INVAL 0x04 +#define VIRTIO_IOMMU_S_RANGE 0x05 +#define VIRTIO_IOMMU_S_NOENT 0x06 +#define VIRTIO_IOMMU_S_FAULT 0x07 +#define VIRTIO_IOMMU_S_NOMEM 0x08 + +struct virtio_iommu_req_head { + __u8 type; + __u8 reserved[3]; +}; + +struct virtio_iommu_req_tail { + __u8 status; + __u8 reserved[3]; +}; + +struct virtio_iommu_req_attach { + struct virtio_iommu_req_head head; + __le32 domain; + __le32 endpoint; + __u8 reserved[8]; + struct virtio_iommu_req_tail tail; +}; + +struct virtio_iommu_req_detach { + struct virtio_iommu_req_head head; + __le32 domain; + __le32 endpoint; + __u8 reserved[8]; + struct virtio_iommu_req_tail tail; +}; + +#define VIRTIO_IOMMU_MAP_F_READ (1 << 0) +#define VIRTIO_IOMMU_MAP_F_WRITE (1 << 1) +#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 2) + +#define VIRTIO_IOMMU_MAP_F_MASK (VIRTIO_IOMMU_MAP_F_READ | \ + VIRTIO_IOMMU_MAP_F_WRITE | \ + VIRTIO_IOMMU_MAP_F_MMIO) + +struct virtio_iommu_req_map { + struct virtio_iommu_req_head head; + __le32 domain; + __le64 virt_start; + __le64 virt_end; + __le64 phys_start; + __le32 flags; + struct virtio_iommu_req_tail tail; +}; + +struct virtio_iommu_req_unmap { + struct virtio_iommu_req_head head; + __le32 domain; + __le64 virt_start; + __le64 virt_end; + __u8 reserved[4]; + struct virtio_iommu_req_tail tail; +}; + +#define VIRTIO_IOMMU_PROBE_T_NONE 0 +#define VIRTIO_IOMMU_PROBE_T_RESV_MEM 1 + +#define VIRTIO_IOMMU_PROBE_T_MASK 0xfff + +struct virtio_iommu_probe_property { + __le16 type; + __le16 length; +}; + +#define VIRTIO_IOMMU_RESV_MEM_T_RESERVED 0 +#define VIRTIO_IOMMU_RESV_MEM_T_MSI 1 + +struct virtio_iommu_probe_resv_mem { + struct virtio_iommu_probe_property head; + __u8 subtype; + __u8 reserved[3]; + __le64 start; + __le64 end; +}; + +struct virtio_iommu_req_probe { + struct virtio_iommu_req_head head; + __le32 endpoint; + __u8 reserved[64]; + + __u8 properties[]; + + /* + * Tail follows the variable-length properties array. No padding, + * property lengths are all aligned on 8 bytes. 
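The request layout above is what a guest driver places on the virtio-iommu request queue. A hedged sketch of building a MAP request; virtqueue submission is not shown, and virt_end is treated as the last byte of the range (inclusive), matching how the spec describes the interval:

#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <linux/virtio_iommu.h>

static void build_map_req(struct virtio_iommu_req_map *req, uint32_t domain,
			  uint64_t iova, uint64_t size, uint64_t paddr)
{
	memset(req, 0, sizeof(*req));
	req->head.type  = VIRTIO_IOMMU_T_MAP;
	req->domain     = htole32(domain);
	req->virt_start = htole64(iova);
	req->virt_end   = htole64(iova + size - 1);
	req->phys_start = htole64(paddr);
	req->flags      = htole32(VIRTIO_IOMMU_MAP_F_READ |
				  VIRTIO_IOMMU_MAP_F_WRITE);
	/* req->tail.status is written back by the device. */
}
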
+ */ +}; + +/* Fault types */ +#define VIRTIO_IOMMU_FAULT_R_UNKNOWN 0 +#define VIRTIO_IOMMU_FAULT_R_DOMAIN 1 +#define VIRTIO_IOMMU_FAULT_R_MAPPING 2 + +#define VIRTIO_IOMMU_FAULT_F_READ (1 << 0) +#define VIRTIO_IOMMU_FAULT_F_WRITE (1 << 1) +#define VIRTIO_IOMMU_FAULT_F_EXEC (1 << 2) +#define VIRTIO_IOMMU_FAULT_F_ADDRESS (1 << 8) + +struct virtio_iommu_fault { + __u8 reason; + __u8 reserved[3]; + __le32 flags; + __le32 endpoint; + __u8 reserved2[4]; + __le64 address; +}; + +#endif diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h new file mode 100644 index 000000000000..b022787ffb94 --- /dev/null +++ b/include/uapi/linux/virtio_pmem.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */ +/* + * Definitions for virtio-pmem devices. + * + * Copyright (C) 2019 Red Hat, Inc. + * + * Author(s): Pankaj Gupta <pagupta@redhat.com> + */ + +#ifndef _UAPI_LINUX_VIRTIO_PMEM_H +#define _UAPI_LINUX_VIRTIO_PMEM_H + +#include <linux/types.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> + +struct virtio_pmem_config { + __u64 start; + __u64 size; +}; + +#define VIRTIO_PMEM_REQ_TYPE_FLUSH 0 + +struct virtio_pmem_resp { + /* Host return status corresponding to flush request */ + __le32 ret; +}; + +struct virtio_pmem_req { + /* command type */ + __le32 type; +}; + +#endif diff --git a/include/uapi/linux/vmcore.h b/include/uapi/linux/vmcore.h index 022619668e0e..3e9da91866ff 100644 --- a/include/uapi/linux/vmcore.h +++ b/include/uapi/linux/vmcore.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_VMCORE_H #define _UAPI_VMCORE_H diff --git a/include/uapi/linux/wait.h b/include/uapi/linux/wait.h index ac49a220cf2a..85b809fc9f11 100644 --- a/include/uapi/linux/wait.h +++ b/include/uapi/linux/wait.h @@ -17,6 +17,7 @@ #define P_ALL 0 #define P_PID 1 #define P_PGID 2 +#define P_PIDFD 3 #endif /* _UAPI_LINUX_WAIT_H */ diff --git a/include/uapi/linux/wanrouter.h b/include/uapi/linux/wanrouter.h deleted file mode 100644 index 2f1216d00caa..000000000000 --- a/include/uapi/linux/wanrouter.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * wanrouter.h Legacy declarations kept around until X25 is removed - */ - -#ifndef _UAPI_ROUTER_H -#define _UAPI_ROUTER_H - -/* 'state' defines */ -enum wan_states -{ - WAN_UNCONFIGURED, /* link/channel is not configured */ - WAN_DISCONNECTED, /* link/channel is disconnected */ - WAN_CONNECTING, /* connection is in progress */ - WAN_CONNECTED /* link/channel is operational */ -}; - -#endif /* _UAPI_ROUTER_H */ diff --git a/include/uapi/linux/wmi.h b/include/uapi/linux/wmi.h index c36f2d7675a4..7085c5dca9fa 100644 --- a/include/uapi/linux/wmi.h +++ b/include/uapi/linux/wmi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * User API methods for ACPI-WMI mapping driver * diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h index 6d701af9fc42..fb792e882cef 100644 --- a/include/uapi/misc/fastrpc.h +++ b/include/uapi/misc/fastrpc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef __QCOM_FASTRPC_H__ #define __QCOM_FASTRPC_H__ diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 204ab9b4ae67..39c4ea51a719 100644 --- a/include/uapi/misc/habanalabs.h +++ 
b/include/uapi/misc/habanalabs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note * - * Copyright 2016-2018 HabanaLabs, Ltd. + * Copyright 2016-2019 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -28,35 +28,81 @@ enum goya_queue_id { GOYA_QUEUE_ID_DMA_0 = 0, - GOYA_QUEUE_ID_DMA_1, - GOYA_QUEUE_ID_DMA_2, - GOYA_QUEUE_ID_DMA_3, - GOYA_QUEUE_ID_DMA_4, - GOYA_QUEUE_ID_CPU_PQ, - GOYA_QUEUE_ID_MME, /* Internal queues start here */ - GOYA_QUEUE_ID_TPC0, - GOYA_QUEUE_ID_TPC1, - GOYA_QUEUE_ID_TPC2, - GOYA_QUEUE_ID_TPC3, - GOYA_QUEUE_ID_TPC4, - GOYA_QUEUE_ID_TPC5, - GOYA_QUEUE_ID_TPC6, - GOYA_QUEUE_ID_TPC7, + GOYA_QUEUE_ID_DMA_1 = 1, + GOYA_QUEUE_ID_DMA_2 = 2, + GOYA_QUEUE_ID_DMA_3 = 3, + GOYA_QUEUE_ID_DMA_4 = 4, + GOYA_QUEUE_ID_CPU_PQ = 5, + GOYA_QUEUE_ID_MME = 6, /* Internal queues start here */ + GOYA_QUEUE_ID_TPC0 = 7, + GOYA_QUEUE_ID_TPC1 = 8, + GOYA_QUEUE_ID_TPC2 = 9, + GOYA_QUEUE_ID_TPC3 = 10, + GOYA_QUEUE_ID_TPC4 = 11, + GOYA_QUEUE_ID_TPC5 = 12, + GOYA_QUEUE_ID_TPC6 = 13, + GOYA_QUEUE_ID_TPC7 = 14, GOYA_QUEUE_ID_SIZE }; +/* + * Engine Numbering + * + * Used in the "busy_engines_mask" field in `struct hl_info_hw_idle' + */ + +enum goya_engine_id { + GOYA_ENGINE_ID_DMA_0 = 0, + GOYA_ENGINE_ID_DMA_1, + GOYA_ENGINE_ID_DMA_2, + GOYA_ENGINE_ID_DMA_3, + GOYA_ENGINE_ID_DMA_4, + GOYA_ENGINE_ID_MME_0, + GOYA_ENGINE_ID_TPC_0, + GOYA_ENGINE_ID_TPC_1, + GOYA_ENGINE_ID_TPC_2, + GOYA_ENGINE_ID_TPC_3, + GOYA_ENGINE_ID_TPC_4, + GOYA_ENGINE_ID_TPC_5, + GOYA_ENGINE_ID_TPC_6, + GOYA_ENGINE_ID_TPC_7, + GOYA_ENGINE_ID_SIZE +}; + enum hl_device_status { HL_DEVICE_STATUS_OPERATIONAL, HL_DEVICE_STATUS_IN_RESET, HL_DEVICE_STATUS_MALFUNCTION }; -/* Opcode for management ioctl */ -#define HL_INFO_HW_IP_INFO 0 -#define HL_INFO_HW_EVENTS 1 -#define HL_INFO_DRAM_USAGE 2 -#define HL_INFO_HW_IDLE 3 -#define HL_INFO_DEVICE_STATUS 4 +/* Opcode for management ioctl + * + * HW_IP_INFO - Receive information about different IP blocks in the + * device. + * HL_INFO_HW_EVENTS - Receive an array describing how many times each event + * occurred since the last hard reset. + * HL_INFO_DRAM_USAGE - Retrieve the dram usage inside the device and of the + * specific context. This is relevant only for devices + * where the dram is managed by the kernel driver + * HL_INFO_HW_IDLE - Retrieve information about the idle status of each + * internal engine. + * HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't + * require an open context. + * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device + * over the last period specified by the user. + * The period can be between 100ms to 1s, in + * resolution of 100ms. The return value is a + * percentage of the utilization rate. + * HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times each + * event occurred since the driver was loaded. + */ +#define HL_INFO_HW_IP_INFO 0 +#define HL_INFO_HW_EVENTS 1 +#define HL_INFO_DRAM_USAGE 2 +#define HL_INFO_HW_IDLE 3 +#define HL_INFO_DEVICE_STATUS 4 +#define HL_INFO_DEVICE_UTILIZATION 6 +#define HL_INFO_HW_EVENTS_AGGREGATE 7 #define HL_INFO_VERSION_MAX_LEN 128 @@ -86,7 +132,11 @@ struct hl_info_dram_usage { struct hl_info_hw_idle { __u32 is_idle; - __u32 pad; + /* + * Bitmask of busy engines. + * Bits definition is according to `enum <chip>_enging_id'. 
+ */ + __u32 busy_engines_mask; }; struct hl_info_device_status { @@ -94,6 +144,11 @@ struct hl_info_device_status { __u32 pad; }; +struct hl_info_device_utilization { + __u32 utilization; + __u32 pad; +}; + struct hl_info_args { /* Location of relevant struct in userspace */ __u64 return_pointer; @@ -109,8 +164,15 @@ struct hl_info_args { /* HL_INFO_* */ __u32 op; - /* Context ID - Currently not in use */ - __u32 ctx_id; + union { + /* Context ID - Currently not in use */ + __u32 ctx_id; + /* Period value for utilization rate (100ms - 1000ms, in 100ms + * resolution. + */ + __u32 period_ms; + }; + __u32 pad; }; @@ -267,12 +329,12 @@ struct hl_mem_in { struct { /* * Requested virtual address of mapped memory. - * KMD will try to map the requested region to this - * hint address, as long as the address is valid and - * not already mapped. The user should check the + * The driver will try to map the requested region to + * this hint address, as long as the address is valid + * and not already mapped. The user should check the * returned address of the IOCTL to make sure he got - * the hint address. Passing 0 here means that KMD - * will choose the address itself. + * the hint address. Passing 0 here means that the + * driver will choose the address itself. */ __u64 hint_addr; /* Handle returned from HL_MEM_OP_ALLOC */ @@ -285,12 +347,12 @@ struct hl_mem_in { __u64 host_virt_addr; /* * Requested virtual address of mapped memory. - * KMD will try to map the requested region to this - * hint address, as long as the address is valid and - * not already mapped. The user should check the + * The driver will try to map the requested region to + * this hint address, as long as the address is valid + * and not already mapped. The user should check the * returned address of the IOCTL to make sure he got - * the hint address. Passing 0 here means that KMD - * will choose the address itself. + * the hint address. Passing 0 here means that the + * driver will choose the address itself. */ __u64 hint_addr; /* Size of allocated host memory */ @@ -411,7 +473,7 @@ struct hl_debug_params_spmu { #define HL_DEBUG_OP_BMON 4 /* Opcode for SPMU component */ #define HL_DEBUG_OP_SPMU 5 -/* Opcode for timestamp */ +/* Opcode for timestamp (deprecated) */ #define HL_DEBUG_OP_TIMESTAMP 6 /* Opcode for setting the device into or out of debug mode. 
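The busy_engines_mask field above is indexed by the new enum goya_engine_id. A hedged sketch of an HL_INFO_HW_IDLE query; the HL_IOCTL_INFO ioctl and the return_size field of struct hl_info_args live elsewhere in this header and are assumed here:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int goya_mme_busy(int fd)
{
	struct hl_info_hw_idle idle;
	struct hl_info_args args;

	memset(&idle, 0, sizeof(idle));
	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_HW_IDLE;
	args.return_pointer = (uint64_t)(uintptr_t)&idle;
	args.return_size = sizeof(idle);
	if (ioctl(fd, HL_IOCTL_INFO, &args))
		return -1;
	return !!(idle.busy_engines_mask & (1u << GOYA_ENGINE_ID_MME_0));
}
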
The enable * variable should be 1 for enabling debug mode and 0 for disabling it diff --git a/include/uapi/misc/ocxl.h b/include/uapi/misc/ocxl.h index 97937cfa3baa..6d29a60a896a 100644 --- a/include/uapi/misc/ocxl.h +++ b/include/uapi/misc/ocxl.h @@ -33,23 +33,23 @@ struct ocxl_ioctl_attach { }; struct ocxl_ioctl_metadata { - __u16 version; // struct version, always backwards compatible + __u16 version; /* struct version, always backwards compatible */ - // Version 0 fields + /* Version 0 fields */ __u8 afu_version_major; __u8 afu_version_minor; - __u32 pasid; // PASID assigned to the current context + __u32 pasid; /* PASID assigned to the current context */ - __u64 pp_mmio_size; // Per PASID MMIO size + __u64 pp_mmio_size; /* Per PASID MMIO size */ __u64 global_mmio_size; - // End version 0 fields + /* End version 0 fields */ - __u64 reserved[13]; // Total of 16*u64 + __u64 reserved[13]; /* Total of 16*u64 */ }; struct ocxl_ioctl_p9_wait { - __u16 thread_id; // The thread ID required to wake this thread + __u16 thread_id; /* The thread ID required to wake this thread */ __u16 reserved1; __u32 reserved2; __u64 reserved3[3]; diff --git a/include/uapi/misc/xilinx_sdfec.h b/include/uapi/misc/xilinx_sdfec.h new file mode 100644 index 000000000000..ee1a42ae6f46 --- /dev/null +++ b/include/uapi/misc/xilinx_sdfec.h @@ -0,0 +1,448 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Xilinx SD-FEC + * + * Copyright (C) 2019 Xilinx, Inc. + * + * Description: + * This driver is developed for SDFEC16 IP. It provides a char device + * in sysfs and supports file operations like open(), close() and ioctl(). + */ +#ifndef __XILINX_SDFEC_H__ +#define __XILINX_SDFEC_H__ + +#include <linux/types.h> + +/* Shared LDPC Tables */ +#define XSDFEC_LDPC_SC_TABLE_ADDR_BASE (0x10000) +#define XSDFEC_LDPC_SC_TABLE_ADDR_HIGH (0x10400) +#define XSDFEC_LDPC_LA_TABLE_ADDR_BASE (0x18000) +#define XSDFEC_LDPC_LA_TABLE_ADDR_HIGH (0x19000) +#define XSDFEC_LDPC_QC_TABLE_ADDR_BASE (0x20000) +#define XSDFEC_LDPC_QC_TABLE_ADDR_HIGH (0x28000) + +/* LDPC tables depth */ +#define XSDFEC_SC_TABLE_DEPTH \ + (XSDFEC_LDPC_SC_TABLE_ADDR_HIGH - XSDFEC_LDPC_SC_TABLE_ADDR_BASE) +#define XSDFEC_LA_TABLE_DEPTH \ + (XSDFEC_LDPC_LA_TABLE_ADDR_HIGH - XSDFEC_LDPC_LA_TABLE_ADDR_BASE) +#define XSDFEC_QC_TABLE_DEPTH \ + (XSDFEC_LDPC_QC_TABLE_ADDR_HIGH - XSDFEC_LDPC_QC_TABLE_ADDR_BASE) + +/** + * enum xsdfec_code - Code Type. + * @XSDFEC_TURBO_CODE: Driver is configured for Turbo mode. + * @XSDFEC_LDPC_CODE: Driver is configured for LDPC mode. + * + * This enum is used to indicate the mode of the driver. The mode is determined + * by checking which codes are set in the driver. Note that the mode cannot be + * changed by the driver. + */ +enum xsdfec_code { + XSDFEC_TURBO_CODE = 0, + XSDFEC_LDPC_CODE, +}; + +/** + * enum xsdfec_order - Order + * @XSDFEC_MAINTAIN_ORDER: Maintain order execution of blocks. + * @XSDFEC_OUT_OF_ORDER: Out-of-order execution of blocks. + * + * This enum is used to indicate whether the order of blocks can change from + * input to output. + */ +enum xsdfec_order { + XSDFEC_MAINTAIN_ORDER = 0, + XSDFEC_OUT_OF_ORDER, +}; + +/** + * enum xsdfec_turbo_alg - Turbo Algorithm Type. + * @XSDFEC_MAX_SCALE: Max Log-Map algorithm with extrinsic scaling. When + * scaling is set to this is equivalent to the Max Log-Map + * algorithm. + * @XSDFEC_MAX_STAR: Log-Map algorithm. + * @XSDFEC_TURBO_ALG_MAX: Used to indicate out of bound Turbo algorithms. + * + * This enum specifies which Turbo Decode algorithm is in use. 
+ */ +enum xsdfec_turbo_alg { + XSDFEC_MAX_SCALE = 0, + XSDFEC_MAX_STAR, + XSDFEC_TURBO_ALG_MAX, +}; + +/** + * enum xsdfec_state - State. + * @XSDFEC_INIT: Driver is initialized. + * @XSDFEC_STARTED: Driver is started. + * @XSDFEC_STOPPED: Driver is stopped. + * @XSDFEC_NEEDS_RESET: Driver needs to be reset. + * @XSDFEC_PL_RECONFIGURE: Programmable Logic needs to be recofigured. + * + * This enum is used to indicate the state of the driver. + */ +enum xsdfec_state { + XSDFEC_INIT = 0, + XSDFEC_STARTED, + XSDFEC_STOPPED, + XSDFEC_NEEDS_RESET, + XSDFEC_PL_RECONFIGURE, +}; + +/** + * enum xsdfec_axis_width - AXIS_WIDTH.DIN Setting for 128-bit width. + * @XSDFEC_1x128b: DIN data input stream consists of a 128-bit lane + * @XSDFEC_2x128b: DIN data input stream consists of two 128-bit lanes + * @XSDFEC_4x128b: DIN data input stream consists of four 128-bit lanes + * + * This enum is used to indicate the AXIS_WIDTH.DIN setting for 128-bit width. + * The number of lanes of the DIN data input stream depends upon the + * AXIS_WIDTH.DIN parameter. + */ +enum xsdfec_axis_width { + XSDFEC_1x128b = 1, + XSDFEC_2x128b = 2, + XSDFEC_4x128b = 4, +}; + +/** + * enum xsdfec_axis_word_include - Words Configuration. + * @XSDFEC_FIXED_VALUE: Fixed, the DIN_WORDS AXI4-Stream interface is removed + * from the IP instance and is driven with the specified + * number of words. + * @XSDFEC_IN_BLOCK: In Block, configures the IP instance to expect a single + * DIN_WORDS value per input code block. The DIN_WORDS + * interface is present. + * @XSDFEC_PER_AXI_TRANSACTION: Per Transaction, configures the IP instance to + * expect one DIN_WORDS value per input transaction on the DIN interface. The + * DIN_WORDS interface is present. + * @XSDFEC_AXIS_WORDS_INCLUDE_MAX: Used to indicate out of bound Words + * Configurations. + * + * This enum is used to specify the DIN_WORDS configuration. + */ +enum xsdfec_axis_word_include { + XSDFEC_FIXED_VALUE = 0, + XSDFEC_IN_BLOCK, + XSDFEC_PER_AXI_TRANSACTION, + XSDFEC_AXIS_WORDS_INCLUDE_MAX, +}; + +/** + * struct xsdfec_turbo - User data for Turbo codes. + * @alg: Specifies which Turbo decode algorithm to use + * @scale: Specifies the extrinsic scaling to apply when the Max Scale algorithm + * has been selected + * + * Turbo code structure to communicate parameters to XSDFEC driver. + */ +struct xsdfec_turbo { + __u32 alg; + __u8 scale; +}; + +/** + * struct xsdfec_ldpc_params - User data for LDPC codes. + * @n: Number of code word bits + * @k: Number of information bits + * @psize: Size of sub-matrix + * @nlayers: Number of layers in code + * @nqc: Quasi Cyclic Number + * @nmqc: Number of M-sized QC operations in parity check matrix + * @nm: Number of M-size vectors in N + * @norm_type: Normalization required or not + * @no_packing: Determines if multiple QC ops should be performed + * @special_qc: Sub-Matrix property for Circulant weight > 0 + * @no_final_parity: Decide if final parity check needs to be performed + * @max_schedule: Experimental code word scheduling limit + * @sc_off: SC offset + * @la_off: LA offset + * @qc_off: QC offset + * @sc_table: Pointer to SC Table which must be page aligned + * @la_table: Pointer to LA Table which must be page aligned + * @qc_table: Pointer to QC Table which must be page aligned + * @code_id: LDPC Code + * + * This structure describes the LDPC code that is passed to the driver by the + * application. 
+ */ +struct xsdfec_ldpc_params { + __u32 n; + __u32 k; + __u32 psize; + __u32 nlayers; + __u32 nqc; + __u32 nmqc; + __u32 nm; + __u32 norm_type; + __u32 no_packing; + __u32 special_qc; + __u32 no_final_parity; + __u32 max_schedule; + __u32 sc_off; + __u32 la_off; + __u32 qc_off; + __u32 *sc_table; + __u32 *la_table; + __u32 *qc_table; + __u16 code_id; +}; + +/** + * struct xsdfec_status - Status of SD-FEC core. + * @state: State of the SD-FEC core + * @activity: Describes if the SD-FEC instance is Active + */ +struct xsdfec_status { + __u32 state; + __s8 activity; +}; + +/** + * struct xsdfec_irq - Enabling or Disabling Interrupts. + * @enable_isr: If true enables the ISR + * @enable_ecc_isr: If true enables the ECC ISR + */ +struct xsdfec_irq { + __s8 enable_isr; + __s8 enable_ecc_isr; +}; + +/** + * struct xsdfec_config - Configuration of SD-FEC core. + * @code: The codes being used by the SD-FEC instance + * @order: Order of Operation + * @din_width: Width of the DIN AXI4-Stream + * @din_word_include: How DIN_WORDS are inputted + * @dout_width: Width of the DOUT AXI4-Stream + * @dout_word_include: HOW DOUT_WORDS are outputted + * @irq: Enabling or disabling interrupts + * @bypass: Is the core being bypassed + * @code_wr_protect: Is write protection of LDPC codes enabled + */ +struct xsdfec_config { + __u32 code; + __u32 order; + __u32 din_width; + __u32 din_word_include; + __u32 dout_width; + __u32 dout_word_include; + struct xsdfec_irq irq; + __s8 bypass; + __s8 code_wr_protect; +}; + +/** + * struct xsdfec_stats - Stats retrived by ioctl XSDFEC_GET_STATS. Used + * to buffer atomic_t variables from struct + * xsdfec_dev. Counts are accumulated until + * the user clears them. + * @isr_err_count: Count of ISR errors + * @cecc_count: Count of Correctable ECC errors (SBE) + * @uecc_count: Count of Uncorrectable ECC errors (MBE) + */ +struct xsdfec_stats { + __u32 isr_err_count; + __u32 cecc_count; + __u32 uecc_count; +}; + +/** + * struct xsdfec_ldpc_param_table_sizes - Used to store sizes of SD-FEC table + * entries for an individual LPDC code + * parameter. 
+ * @sc_size: Size of SC table used + * @la_size: Size of LA table used + * @qc_size: Size of QC table used + */ +struct xsdfec_ldpc_param_table_sizes { + __u32 sc_size; + __u32 la_size; + __u32 qc_size; +}; + +/* + * XSDFEC IOCTL List + */ +#define XSDFEC_MAGIC 'f' +/** + * DOC: XSDFEC_START_DEV + * + * @Description + * + * ioctl to start SD-FEC core + * + * This fails if the XSDFEC_SET_ORDER ioctl has not been previously called + */ +#define XSDFEC_START_DEV _IO(XSDFEC_MAGIC, 0) +/** + * DOC: XSDFEC_STOP_DEV + * + * @Description + * + * ioctl to stop the SD-FEC core + */ +#define XSDFEC_STOP_DEV _IO(XSDFEC_MAGIC, 1) +/** + * DOC: XSDFEC_GET_STATUS + * + * @Description + * + * ioctl that returns status of SD-FEC core + */ +#define XSDFEC_GET_STATUS _IOR(XSDFEC_MAGIC, 2, struct xsdfec_status) +/** + * DOC: XSDFEC_SET_IRQ + * @Parameters + * + * @struct xsdfec_irq * + * Pointer to the &struct xsdfec_irq that contains the interrupt settings + * for the SD-FEC core + * + * @Description + * + * ioctl to enable or disable irq + */ +#define XSDFEC_SET_IRQ _IOW(XSDFEC_MAGIC, 3, struct xsdfec_irq) +/** + * DOC: XSDFEC_SET_TURBO + * @Parameters + * + * @struct xsdfec_turbo * + * Pointer to the &struct xsdfec_turbo that contains the Turbo decode + * settings for the SD-FEC core + * + * @Description + * + * ioctl that sets the SD-FEC Turbo parameter values + * + * This can only be used when the driver is in the XSDFEC_STOPPED state + */ +#define XSDFEC_SET_TURBO _IOW(XSDFEC_MAGIC, 4, struct xsdfec_turbo) +/** + * DOC: XSDFEC_ADD_LDPC_CODE_PARAMS + * @Parameters + * + * @struct xsdfec_ldpc_params * + * Pointer to the &struct xsdfec_ldpc_params that contains the LDPC code + * parameters to be added to the SD-FEC Block + * + * @Description + * ioctl to add an LDPC code to the SD-FEC LDPC codes + * + * This can only be used when: + * + * - Driver is in the XSDFEC_STOPPED state + * + * - SD-FEC core is configured as LPDC + * + * - SD-FEC Code Write Protection is disabled + */ +#define XSDFEC_ADD_LDPC_CODE_PARAMS \ + _IOW(XSDFEC_MAGIC, 5, struct xsdfec_ldpc_params) +/** + * DOC: XSDFEC_GET_CONFIG + * @Parameters + * + * @struct xsdfec_config * + * Pointer to the &struct xsdfec_config that contains the current + * configuration settings of the SD-FEC Block + * + * @Description + * + * ioctl that returns SD-FEC core configuration + */ +#define XSDFEC_GET_CONFIG _IOR(XSDFEC_MAGIC, 6, struct xsdfec_config) +/** + * DOC: XSDFEC_GET_TURBO + * @Parameters + * + * @struct xsdfec_turbo * + * Pointer to the &struct xsdfec_turbo that contains the current Turbo + * decode settings of the SD-FEC Block + * + * @Description + * + * ioctl that returns SD-FEC turbo param values + */ +#define XSDFEC_GET_TURBO _IOR(XSDFEC_MAGIC, 7, struct xsdfec_turbo) +/** + * DOC: XSDFEC_SET_ORDER + * @Parameters + * + * @struct unsigned long * + * Pointer to the unsigned long that contains a value from the + * @enum xsdfec_order + * + * @Description + * + * ioctl that sets order, if order of blocks can change from input to output + * + * This can only be used when the driver is in the XSDFEC_STOPPED state + */ +#define XSDFEC_SET_ORDER _IOW(XSDFEC_MAGIC, 8, unsigned long) +/** + * DOC: XSDFEC_SET_BYPASS + * @Parameters + * + * @struct bool * + * Pointer to bool that sets the bypass value, where false results in + * normal operation and false results in the SD-FEC performing the + * configured operations (same number of cycles) but output data matches + * the input data + * + * @Description + * + * ioctl that sets bypass. 
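The kernel-doc blocks above spell out an ordering constraint: XSDFEC_START_DEV fails unless XSDFEC_SET_ORDER was issued first, and most configuration ioctls only work in the XSDFEC_STOPPED state. A minimal start-up sketch; the device node name is made up for the example:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <misc/xilinx_sdfec.h>

static int sdfec_start(void)
{
	unsigned long order = XSDFEC_MAINTAIN_ORDER;
	int fd = open("/dev/xsdfec0", O_RDWR);	/* hypothetical node name */

	if (fd < 0)
		return -1;
	if (ioctl(fd, XSDFEC_SET_ORDER, &order))	/* must precede START_DEV */
		return -1;
	return ioctl(fd, XSDFEC_START_DEV);
}
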
+ * + * This can only be used when the driver is in the XSDFEC_STOPPED state + */ +#define XSDFEC_SET_BYPASS _IOW(XSDFEC_MAGIC, 9, bool) +/** + * DOC: XSDFEC_IS_ACTIVE + * @Parameters + * + * @struct bool * + * Pointer to bool that returns true if the SD-FEC is processing data + * + * @Description + * + * ioctl that determines if SD-FEC is processing data + */ +#define XSDFEC_IS_ACTIVE _IOR(XSDFEC_MAGIC, 10, bool) +/** + * DOC: XSDFEC_CLEAR_STATS + * + * @Description + * + * ioctl that clears error stats collected during interrupts + */ +#define XSDFEC_CLEAR_STATS _IO(XSDFEC_MAGIC, 11) +/** + * DOC: XSDFEC_GET_STATS + * @Parameters + * + * @struct xsdfec_stats * + * Pointer to the &struct xsdfec_stats that will contain the updated stats + * values + * + * @Description + * + * ioctl that returns SD-FEC core stats + * + * This can only be used when the driver is in the XSDFEC_STOPPED state + */ +#define XSDFEC_GET_STATS _IOR(XSDFEC_MAGIC, 12, struct xsdfec_stats) +/** + * DOC: XSDFEC_SET_DEFAULT_CONFIG + * + * @Description + * + * ioctl that returns SD-FEC core to default config, use after a reset + * + * This can only be used when the driver is in the XSDFEC_STOPPED state + */ +#define XSDFEC_SET_DEFAULT_CONFIG _IO(XSDFEC_MAGIC, 13) + +#endif /* __XILINX_SDFEC_H__ */ diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h index aff5b5e59845..47ffe3208c27 100644 --- a/include/uapi/mtd/mtd-abi.h +++ b/include/uapi/mtd/mtd-abi.h @@ -113,11 +113,11 @@ struct mtd_write_req { #define MTD_CAP_NVRAM (MTD_WRITEABLE | MTD_BIT_WRITEABLE | MTD_NO_ERASE) /* Obsolete ECC byte placement modes (used with obsolete MEMGETOOBSEL) */ -#define MTD_NANDECC_OFF 0 // Switch off ECC (Not recommended) -#define MTD_NANDECC_PLACE 1 // Use the given placement in the structure (YAFFS1 legacy mode) -#define MTD_NANDECC_AUTOPLACE 2 // Use the default placement scheme -#define MTD_NANDECC_PLACEONLY 3 // Use the given placement in the structure (Do not store ecc result on read) -#define MTD_NANDECC_AUTOPL_USR 4 // Use the given autoplacement scheme rather than using the default +#define MTD_NANDECC_OFF 0 /* Switch off ECC (Not recommended) */ +#define MTD_NANDECC_PLACE 1 /* Use the given placement in the structure (YAFFS1 legacy mode) */ +#define MTD_NANDECC_AUTOPLACE 2 /* Use the default placement scheme */ +#define MTD_NANDECC_PLACEONLY 3 /* Use the given placement in the structure (Do not store ecc result on read) */ +#define MTD_NANDECC_AUTOPL_USR 4 /* Use the given autoplacement scheme rather than using the default */ /* OTP mode selection */ #define MTD_OTP_OFF 0 diff --git a/include/uapi/rdma/ib_user_cm.h b/include/uapi/rdma/ib_user_cm.h deleted file mode 100644 index e2709bb8cb18..000000000000 --- a/include/uapi/rdma/ib_user_cm.h +++ /dev/null @@ -1,326 +0,0 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ -/* - * Copyright (c) 2005 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Intel Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef IB_USER_CM_H -#define IB_USER_CM_H - -#include <linux/types.h> -#include <rdma/ib_user_sa.h> - -#define IB_USER_CM_ABI_VERSION 5 - -enum { - IB_USER_CM_CMD_CREATE_ID, - IB_USER_CM_CMD_DESTROY_ID, - IB_USER_CM_CMD_ATTR_ID, - - IB_USER_CM_CMD_LISTEN, - IB_USER_CM_CMD_NOTIFY, - - IB_USER_CM_CMD_SEND_REQ, - IB_USER_CM_CMD_SEND_REP, - IB_USER_CM_CMD_SEND_RTU, - IB_USER_CM_CMD_SEND_DREQ, - IB_USER_CM_CMD_SEND_DREP, - IB_USER_CM_CMD_SEND_REJ, - IB_USER_CM_CMD_SEND_MRA, - IB_USER_CM_CMD_SEND_LAP, - IB_USER_CM_CMD_SEND_APR, - IB_USER_CM_CMD_SEND_SIDR_REQ, - IB_USER_CM_CMD_SEND_SIDR_REP, - - IB_USER_CM_CMD_EVENT, - IB_USER_CM_CMD_INIT_QP_ATTR, -}; -/* - * command ABI structures. 
- */ -struct ib_ucm_cmd_hdr { - __u32 cmd; - __u16 in; - __u16 out; -}; - -struct ib_ucm_create_id { - __aligned_u64 uid; - __aligned_u64 response; -}; - -struct ib_ucm_create_id_resp { - __u32 id; -}; - -struct ib_ucm_destroy_id { - __aligned_u64 response; - __u32 id; - __u32 reserved; -}; - -struct ib_ucm_destroy_id_resp { - __u32 events_reported; -}; - -struct ib_ucm_attr_id { - __aligned_u64 response; - __u32 id; - __u32 reserved; -}; - -struct ib_ucm_attr_id_resp { - __be64 service_id; - __be64 service_mask; - __be32 local_id; - __be32 remote_id; -}; - -struct ib_ucm_init_qp_attr { - __aligned_u64 response; - __u32 id; - __u32 qp_state; -}; - -struct ib_ucm_listen { - __be64 service_id; - __be64 service_mask; - __u32 id; - __u32 reserved; -}; - -struct ib_ucm_notify { - __u32 id; - __u32 event; -}; - -struct ib_ucm_private_data { - __aligned_u64 data; - __u32 id; - __u8 len; - __u8 reserved[3]; -}; - -struct ib_ucm_req { - __u32 id; - __u32 qpn; - __u32 qp_type; - __u32 psn; - __be64 sid; - __aligned_u64 data; - __aligned_u64 primary_path; - __aligned_u64 alternate_path; - __u8 len; - __u8 peer_to_peer; - __u8 responder_resources; - __u8 initiator_depth; - __u8 remote_cm_response_timeout; - __u8 flow_control; - __u8 local_cm_response_timeout; - __u8 retry_count; - __u8 rnr_retry_count; - __u8 max_cm_retries; - __u8 srq; - __u8 reserved[5]; -}; - -struct ib_ucm_rep { - __aligned_u64 uid; - __aligned_u64 data; - __u32 id; - __u32 qpn; - __u32 psn; - __u8 len; - __u8 responder_resources; - __u8 initiator_depth; - __u8 target_ack_delay; - __u8 failover_accepted; - __u8 flow_control; - __u8 rnr_retry_count; - __u8 srq; - __u8 reserved[4]; -}; - -struct ib_ucm_info { - __u32 id; - __u32 status; - __aligned_u64 info; - __aligned_u64 data; - __u8 info_len; - __u8 data_len; - __u8 reserved[6]; -}; - -struct ib_ucm_mra { - __aligned_u64 data; - __u32 id; - __u8 len; - __u8 timeout; - __u8 reserved[2]; -}; - -struct ib_ucm_lap { - __aligned_u64 path; - __aligned_u64 data; - __u32 id; - __u8 len; - __u8 reserved[3]; -}; - -struct ib_ucm_sidr_req { - __u32 id; - __u32 timeout; - __be64 sid; - __aligned_u64 data; - __aligned_u64 path; - __u16 reserved_pkey; - __u8 len; - __u8 max_cm_retries; - __u8 reserved[4]; -}; - -struct ib_ucm_sidr_rep { - __u32 id; - __u32 qpn; - __u32 qkey; - __u32 status; - __aligned_u64 info; - __aligned_u64 data; - __u8 info_len; - __u8 data_len; - __u8 reserved[6]; -}; -/* - * event notification ABI structures. - */ -struct ib_ucm_event_get { - __aligned_u64 response; - __aligned_u64 data; - __aligned_u64 info; - __u8 data_len; - __u8 info_len; - __u8 reserved[6]; -}; - -struct ib_ucm_req_event_resp { - struct ib_user_path_rec primary_path; - struct ib_user_path_rec alternate_path; - __be64 remote_ca_guid; - __u32 remote_qkey; - __u32 remote_qpn; - __u32 qp_type; - __u32 starting_psn; - __u8 responder_resources; - __u8 initiator_depth; - __u8 local_cm_response_timeout; - __u8 flow_control; - __u8 remote_cm_response_timeout; - __u8 retry_count; - __u8 rnr_retry_count; - __u8 srq; - __u8 port; - __u8 reserved[7]; -}; - -struct ib_ucm_rep_event_resp { - __be64 remote_ca_guid; - __u32 remote_qkey; - __u32 remote_qpn; - __u32 starting_psn; - __u8 responder_resources; - __u8 initiator_depth; - __u8 target_ack_delay; - __u8 failover_accepted; - __u8 flow_control; - __u8 rnr_retry_count; - __u8 srq; - __u8 reserved[5]; -}; - -struct ib_ucm_rej_event_resp { - __u32 reason; - /* ari in ib_ucm_event_get info field. 
*/ -}; - -struct ib_ucm_mra_event_resp { - __u8 timeout; - __u8 reserved[3]; -}; - -struct ib_ucm_lap_event_resp { - struct ib_user_path_rec path; -}; - -struct ib_ucm_apr_event_resp { - __u32 status; - /* apr info in ib_ucm_event_get info field. */ -}; - -struct ib_ucm_sidr_req_event_resp { - __u16 pkey; - __u8 port; - __u8 reserved; -}; - -struct ib_ucm_sidr_rep_event_resp { - __u32 status; - __u32 qkey; - __u32 qpn; - /* info in ib_ucm_event_get info field. */ -}; - -#define IB_UCM_PRES_DATA 0x01 -#define IB_UCM_PRES_INFO 0x02 -#define IB_UCM_PRES_PRIMARY 0x04 -#define IB_UCM_PRES_ALTERNATE 0x08 - -struct ib_ucm_event_resp { - __aligned_u64 uid; - __u32 id; - __u32 event; - __u32 present; - __u32 reserved; - union { - struct ib_ucm_req_event_resp req_resp; - struct ib_ucm_rep_event_resp rep_resp; - struct ib_ucm_rej_event_resp rej_resp; - struct ib_ucm_mra_event_resp mra_resp; - struct ib_ucm_lap_event_resp lap_resp; - struct ib_ucm_apr_event_resp apr_resp; - - struct ib_ucm_sidr_req_event_resp sidr_req_resp; - struct ib_ucm_sidr_rep_event_resp sidr_rep_resp; - - __u32 send_status; - } u; -}; - -#endif /* IB_USER_CM_H */ diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index d404c951954c..d0da070cf0ab 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -51,6 +51,7 @@ enum mlx5_ib_devx_methods { MLX5_IB_METHOD_DEVX_OTHER = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_METHOD_DEVX_QUERY_UAR, MLX5_IB_METHOD_DEVX_QUERY_EQN, + MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT, }; enum mlx5_ib_devx_other_attrs { @@ -93,6 +94,14 @@ enum mlx5_ib_devx_obj_query_async_attrs { MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN, }; +enum mlx5_ib_devx_subscribe_event_attrs { + MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE, + MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST, + MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM, + MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE, +}; + enum mlx5_ib_devx_query_eqn_attrs { MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN, @@ -127,16 +136,26 @@ enum mlx5_ib_devx_async_cmd_fd_alloc_attrs { MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT), }; +enum mlx5_ib_devx_async_event_fd_alloc_attrs { + MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS, +}; + enum mlx5_ib_devx_async_cmd_fd_methods { MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC = (1U << UVERBS_ID_NS_SHIFT), }; +enum mlx5_ib_devx_async_event_fd_methods { + MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC = (1U << UVERBS_ID_NS_SHIFT), +}; + enum mlx5_ib_objects { MLX5_IB_OBJECT_DEVX = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_OBJECT_DEVX_OBJ, MLX5_IB_OBJECT_DEVX_UMEM, MLX5_IB_OBJECT_FLOW_MATCHER, MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD, + MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD, }; enum mlx5_ib_flow_matcher_create_attrs { diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index a8f34c237458..88b6ca70c2fe 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -43,6 +43,7 @@ enum mlx5_ib_uapi_flow_table_type { MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0, MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1, MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2, + MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3, }; enum mlx5_ib_uapi_flow_action_packet_reformat_type { @@ -63,5 +64,14 @@ enum 
mlx5_ib_uapi_dm_type { MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM, }; +enum mlx5_ib_uapi_devx_create_event_channel_flags { + MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA = 1 << 0, +}; + +struct mlx5_ib_uapi_devx_async_event_hdr { + __aligned_u64 cookie; + __u8 out_data[]; +}; + #endif diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index 41db51367efa..8e277783fa96 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -147,6 +147,18 @@ enum { IWPM_NLA_HELLO_MAX }; +/* For RDMA_NLDEV_ATTR_DEV_NODE_TYPE */ +enum { + /* IB values map to NodeInfo:NodeType. */ + RDMA_NODE_IB_CA = 1, + RDMA_NODE_IB_SWITCH, + RDMA_NODE_IB_ROUTER, + RDMA_NODE_RNIC, + RDMA_NODE_USNIC, + RDMA_NODE_USNIC_UDP, + RDMA_NODE_UNSPECIFIED, +}; + /* * Local service operations: * RESOLVE - The client requests the local service to resolve a path. @@ -267,11 +279,15 @@ enum rdma_nldev_command { RDMA_NLDEV_CMD_RES_PD_GET, /* can dump */ - RDMA_NLDEV_NUM_OPS -}; + RDMA_NLDEV_CMD_GET_CHARDEV, -enum { - RDMA_NLDEV_ATTR_ENTRY_STRLEN = 16, + RDMA_NLDEV_CMD_STAT_SET, + + RDMA_NLDEV_CMD_STAT_GET, /* can dump */ + + RDMA_NLDEV_CMD_STAT_DEL, + + RDMA_NLDEV_NUM_OPS }; enum rdma_nldev_print_type { @@ -478,10 +494,72 @@ enum rdma_nldev_attr { * File descriptor handle of the net namespace object */ RDMA_NLDEV_NET_NS_FD, /* u32 */ + /* + * Information about a chardev. + * CHARDEV_TYPE is the name of the chardev ABI (ie uverbs, umad, etc) + * CHARDEV_ABI signals the ABI revision (historical) + * CHARDEV_NAME is the kernel name for the /dev/ file (no directory) + * CHARDEV is the 64 bit dev_t for the inode + */ + RDMA_NLDEV_ATTR_CHARDEV_TYPE, /* string */ + RDMA_NLDEV_ATTR_CHARDEV_NAME, /* string */ + RDMA_NLDEV_ATTR_CHARDEV_ABI, /* u64 */ + RDMA_NLDEV_ATTR_CHARDEV, /* u64 */ + RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID, /* u64 */ + /* + * Counter-specific attributes. + */ + RDMA_NLDEV_ATTR_STAT_MODE, /* u32 */ + RDMA_NLDEV_ATTR_STAT_RES, /* u32 */ + RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, /* u32 */ + RDMA_NLDEV_ATTR_STAT_COUNTER, /* nested table */ + RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY, /* nested table */ + RDMA_NLDEV_ATTR_STAT_COUNTER_ID, /* u32 */ + RDMA_NLDEV_ATTR_STAT_HWCOUNTERS, /* nested table */ + RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY, /* nested table */ + RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, /* string */ + RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, /* u64 */ + + /* + * CQ adaptive moderatio (DIM) + */ + RDMA_NLDEV_ATTR_DEV_DIM, /* u8 */ /* * Always the end */ RDMA_NLDEV_ATTR_MAX }; + +/* + * Supported counter bind modes. All modes are mutual-exclusive. + */ +enum rdma_nl_counter_mode { + RDMA_COUNTER_MODE_NONE, + + /* + * A qp is bound with a counter automatically during initialization + * based on the auto mode (e.g., qp type, ...) + */ + RDMA_COUNTER_MODE_AUTO, + + /* + * Which qp are bound with which counter is explicitly specified + * by the user + */ + RDMA_COUNTER_MODE_MANUAL, + + /* + * Always the end + */ + RDMA_COUNTER_MODE_MAX, +}; + +/* + * Supported criteria in counter auto mode. 
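Events delivered through the new DEVX async event channel are framed by struct mlx5_ib_uapi_devx_async_event_hdr from the mlx5_user_ioctl_verbs.h hunk above. A hedged sketch of consuming one event from the channel fd; allocating the channel and subscribing (normally done through rdma-core) is not shown:

#include <stdint.h>
#include <unistd.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

static uint64_t read_one_event(int ev_fd)
{
	char buf[512] __attribute__((aligned(8)));
	struct mlx5_ib_uapi_devx_async_event_hdr *hdr = (void *)buf;

	if (read(ev_fd, buf, sizeof(buf)) < (ssize_t)sizeof(*hdr))
		return 0;
	/* hdr->out_data carries the raw event, unless the channel was
	 * created with MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA. */
	return hdr->cookie;	/* cookie passed at subscription time */
}
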
+ * Currently only "qp type" is supported + */ +enum rdma_nl_counter_mask { + RDMA_COUNTER_MASK_QP_TYPE = 1, +}; #endif /* _UAPI_RDMA_NETLINK_H */ diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h index 26213f49f5c8..b8bb285f6b2a 100644 --- a/include/uapi/rdma/rdma_user_ioctl_cmds.h +++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h @@ -36,7 +36,7 @@ #include <linux/types.h> #include <linux/ioctl.h> -/* Documentation/ioctl/ioctl-number.txt */ +/* Documentation/ioctl/ioctl-number.rst */ #define RDMA_IOCTL_MAGIC 0x1b #define RDMA_VERBS_IOCTL \ _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr) @@ -103,6 +103,7 @@ enum rdma_driver_id { RDMA_DRIVER_HFI1, RDMA_DRIVER_QIB, RDMA_DRIVER_EFA, + RDMA_DRIVER_SIW, }; #endif diff --git a/include/uapi/rdma/rvt-abi.h b/include/uapi/rdma/rvt-abi.h new file mode 100644 index 000000000000..7c05a02d2be5 --- /dev/null +++ b/include/uapi/rdma/rvt-abi.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ + +/* + * This file contains defines, structures, etc. that are used + * to communicate between kernel and user code. + */ + +#ifndef RVT_ABI_USER_H +#define RVT_ABI_USER_H + +#include <linux/types.h> +#include <rdma/ib_user_verbs.h> +#ifndef RDMA_ATOMIC_UAPI +#define RDMA_ATOMIC_UAPI(_type, _name) struct{ _type val; } _name +#endif + +struct rvt_wqe_sge { + __aligned_u64 addr; + __u32 length; + __u32 lkey; +}; + +/* + * This structure is used to contain the head pointer, tail pointer, + * and completion queue entries as a single memory allocation so + * it can be mmap'ed into user space. + */ +struct rvt_cq_wc { + /* index of next entry to fill */ + RDMA_ATOMIC_UAPI(__u32, head); + /* index of next ib_poll_cq() entry */ + RDMA_ATOMIC_UAPI(__u32, tail); + + /* these are actually size ibcq.cqe + 1 */ + struct ib_uverbs_wc uqueue[]; +}; + +/* + * Receive work request queue entry. + * The size of the sg_list is determined when the QP (or SRQ) is created + * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). + */ +struct rvt_rwqe { + __u64 wr_id; + __u8 num_sge; + __u8 padding[7]; + struct rvt_wqe_sge sg_list[]; +}; + +/* + * This structure is used to contain the head pointer, tail pointer, + * and receive work queue entries as a single memory allocation so + * it can be mmap'ed into user space. + * Note that the wq array elements are variable size so you can't + * just index into the array to get the N'th element; + * use get_rwqe_ptr() for user space and rvt_get_rwqe_ptr() + * for kernel space. + */ +struct rvt_rwq { + /* new work requests posted to the head */ + RDMA_ATOMIC_UAPI(__u32, head); + /* receives pull requests from here. 
*/ + RDMA_ATOMIC_UAPI(__u32, tail); + struct rvt_rwqe wq[]; +}; +#endif /* RVT_ABI_USER_H */ diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h new file mode 100644 index 000000000000..af735f55b291 --- /dev/null +++ b/include/uapi/rdma/siw-abi.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) or BSD-3-Clause */ + +/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ +/* Copyright (c) 2008-2019, IBM Corporation */ + +#ifndef _SIW_USER_H +#define _SIW_USER_H + +#include <linux/types.h> + +#define SIW_NODE_DESC_COMMON "Software iWARP stack" +#define SIW_ABI_VERSION 1 +#define SIW_MAX_SGE 6 +#define SIW_UOBJ_MAX_KEY 0x08FFFF +#define SIW_INVAL_UOBJ_KEY (SIW_UOBJ_MAX_KEY + 1) + +struct siw_uresp_create_cq { + __u32 cq_id; + __u32 num_cqe; + __aligned_u64 cq_key; +}; + +struct siw_uresp_create_qp { + __u32 qp_id; + __u32 num_sqe; + __u32 num_rqe; + __u32 pad; + __aligned_u64 sq_key; + __aligned_u64 rq_key; +}; + +struct siw_ureq_reg_mr { + __u8 stag_key; + __u8 reserved[3]; + __u32 pad; +}; + +struct siw_uresp_reg_mr { + __u32 stag; + __u32 pad; +}; + +struct siw_uresp_create_srq { + __u32 num_rqe; + __u32 pad; + __aligned_u64 srq_key; +}; + +struct siw_uresp_alloc_ctx { + __u32 dev_id; + __u32 pad; +}; + +enum siw_opcode { + SIW_OP_WRITE, + SIW_OP_READ, + SIW_OP_READ_LOCAL_INV, + SIW_OP_SEND, + SIW_OP_SEND_WITH_IMM, + SIW_OP_SEND_REMOTE_INV, + + /* Unsupported */ + SIW_OP_FETCH_AND_ADD, + SIW_OP_COMP_AND_SWAP, + + SIW_OP_RECEIVE, + /* provider internal SQE */ + SIW_OP_READ_RESPONSE, + /* + * below opcodes valid for + * in-kernel clients only + */ + SIW_OP_INVAL_STAG, + SIW_OP_REG_MR, + SIW_NUM_OPCODES +}; + +/* Keep it same as ibv_sge to allow for memcpy */ +struct siw_sge { + __aligned_u64 laddr; + __u32 length; + __u32 lkey; +}; + +/* + * Inline data are kept within the work request itself occupying + * the space of sge[1] .. sge[n]. Therefore, inline data cannot be + * supported if SIW_MAX_SGE is below 2 elements. 
+ */ +#define SIW_MAX_INLINE (sizeof(struct siw_sge) * (SIW_MAX_SGE - 1)) + +#if SIW_MAX_SGE < 2 +#error "SIW_MAX_SGE must be at least 2" +#endif + +enum siw_wqe_flags { + SIW_WQE_VALID = 1, + SIW_WQE_INLINE = (1 << 1), + SIW_WQE_SIGNALLED = (1 << 2), + SIW_WQE_SOLICITED = (1 << 3), + SIW_WQE_READ_FENCE = (1 << 4), + SIW_WQE_REM_INVAL = (1 << 5), + SIW_WQE_COMPLETED = (1 << 6) +}; + +/* Send Queue Element */ +struct siw_sqe { + __aligned_u64 id; + __u16 flags; + __u8 num_sge; + /* Contains enum siw_opcode values */ + __u8 opcode; + __u32 rkey; + union { + __aligned_u64 raddr; + __aligned_u64 base_mr; + }; + union { + struct siw_sge sge[SIW_MAX_SGE]; + __aligned_u64 access; + }; +}; + +/* Receive Queue Element */ +struct siw_rqe { + __aligned_u64 id; + __u16 flags; + __u8 num_sge; + /* + * only used by kernel driver, + * ignored if set by user + */ + __u8 opcode; + __u32 unused; + struct siw_sge sge[SIW_MAX_SGE]; +}; + +enum siw_notify_flags { + SIW_NOTIFY_NOT = (0), + SIW_NOTIFY_SOLICITED = (1 << 0), + SIW_NOTIFY_NEXT_COMPLETION = (1 << 1), + SIW_NOTIFY_MISSED_EVENTS = (1 << 2), + SIW_NOTIFY_ALL = SIW_NOTIFY_SOLICITED | SIW_NOTIFY_NEXT_COMPLETION | + SIW_NOTIFY_MISSED_EVENTS +}; + +enum siw_wc_status { + SIW_WC_SUCCESS, + SIW_WC_LOC_LEN_ERR, + SIW_WC_LOC_PROT_ERR, + SIW_WC_LOC_QP_OP_ERR, + SIW_WC_WR_FLUSH_ERR, + SIW_WC_BAD_RESP_ERR, + SIW_WC_LOC_ACCESS_ERR, + SIW_WC_REM_ACCESS_ERR, + SIW_WC_REM_INV_REQ_ERR, + SIW_WC_GENERAL_ERR, + SIW_NUM_WC_STATUS +}; + +struct siw_cqe { + __aligned_u64 id; + __u8 flags; + __u8 opcode; + __u16 status; + __u32 bytes; + union { + __aligned_u64 imm_data; + __u32 inval_stag; + }; + /* QP number or QP pointer */ + union { + struct ib_qp *base_qp; + __aligned_u64 qp_id; + }; +}; + +/* + * Shared structure between user and kernel + * to control CQ arming. + */ +struct siw_cq_ctrl { + __u32 flags; + __u32 pad; +}; +#endif diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h index a81c53508cc6..76f627f0d13b 100644 --- a/include/uapi/scsi/fc/fc_els.h +++ b/include/uapi/scsi/fc/fc_els.h @@ -2,19 +2,6 @@ /* * Copyright(c) 2007 Intel Corporation. All rights reserved. * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * * Maintained at www.Open-FCoE.org */ diff --git a/include/uapi/scsi/fc/fc_fs.h b/include/uapi/scsi/fc/fc_fs.h index 8c0a292a61ed..0dab49dbb2f7 100644 --- a/include/uapi/scsi/fc/fc_fs.h +++ b/include/uapi/scsi/fc/fc_fs.h @@ -2,19 +2,6 @@ /* * Copyright(c) 2007 Intel Corporation. All rights reserved. * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * * Maintained at www.Open-FCoE.org */ diff --git a/include/uapi/scsi/fc/fc_gs.h b/include/uapi/scsi/fc/fc_gs.h index 2153f3524555..effb4c662fe5 100644 --- a/include/uapi/scsi/fc/fc_gs.h +++ b/include/uapi/scsi/fc/fc_gs.h @@ -2,19 +2,6 @@ /* * Copyright(c) 2007 Intel Corporation. All rights reserved. * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * * Maintained at www.Open-FCoE.org */ diff --git a/include/uapi/scsi/fc/fc_ns.h b/include/uapi/scsi/fc/fc_ns.h index 015e5e1ce8f1..4cf0a40a099a 100644 --- a/include/uapi/scsi/fc/fc_ns.h +++ b/include/uapi/scsi/fc/fc_ns.h @@ -2,19 +2,6 @@ /* * Copyright(c) 2007 Intel Corporation. All rights reserved. * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * * Maintained at www.Open-FCoE.org */ diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h index 62597d86beed..3ae65e93235c 100644 --- a/include/uapi/scsi/scsi_bsg_fc.h +++ b/include/uapi/scsi/scsi_bsg_fc.h @@ -3,26 +3,13 @@ * FC Transport BSG Interface * * Copyright (C) 2008 James Smart, Emulex Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * */ #ifndef SCSI_BSG_FC_H #define SCSI_BSG_FC_H +#include <linux/types.h> + /* * This file intended to be included by both kernel and user space */ @@ -81,10 +68,10 @@ * with the transport upon completion of the login. 
*/ struct fc_bsg_host_add_rport { - uint8_t reserved; + __u8 reserved; /* FC Address Identier of the remote port to login to */ - uint8_t port_id[3]; + __u8 port_id[3]; }; /* Response: @@ -102,10 +89,10 @@ struct fc_bsg_host_add_rport { * remain logged in with the remote port. */ struct fc_bsg_host_del_rport { - uint8_t reserved; + __u8 reserved; /* FC Address Identier of the remote port to logout of */ - uint8_t port_id[3]; + __u8 port_id[3]; }; /* Response: @@ -126,10 +113,10 @@ struct fc_bsg_host_els { * ELS Command Code being sent (must be the same as byte 0 * of the payload) */ - uint8_t command_code; + __u8 command_code; /* FC Address Identier of the remote port to send the ELS to */ - uint8_t port_id[3]; + __u8 port_id[3]; }; /* Response: @@ -166,14 +153,14 @@ struct fc_bsg_ctels_reply { * Note: x_RJT/BSY status will indicae that the rjt_data field * is valid and contains the reason/explanation values. */ - uint32_t status; /* See FC_CTELS_STATUS_xxx */ + __u32 status; /* See FC_CTELS_STATUS_xxx */ /* valid if status is not FC_CTELS_STATUS_OK */ struct { - uint8_t action; /* fragment_id for CT REJECT */ - uint8_t reason_code; - uint8_t reason_explanation; - uint8_t vendor_unique; + __u8 action; /* fragment_id for CT REJECT */ + __u8 reason_code; + __u8 reason_explanation; + __u8 vendor_unique; } rjt_data; }; @@ -189,17 +176,17 @@ struct fc_bsg_ctels_reply { * and whether to tear it down after the request. */ struct fc_bsg_host_ct { - uint8_t reserved; + __u8 reserved; /* FC Address Identier of the remote port to send the ELS to */ - uint8_t port_id[3]; + __u8 port_id[3]; /* * We need words 0-2 of the generic preamble for the LLD's */ - uint32_t preamble_word0; /* revision & IN_ID */ - uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ - uint32_t preamble_word2; /* Cmd Code, Max Size */ + __u32 preamble_word0; /* revision & IN_ID */ + __u32 preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ + __u32 preamble_word2; /* Cmd Code, Max Size */ }; /* Response: @@ -219,17 +206,17 @@ struct fc_bsg_host_vendor { * Identifies the vendor that the message is formatted for. This * should be the recipient of the message. */ - uint64_t vendor_id; + __u64 vendor_id; /* start of vendor command area */ - uint32_t vendor_cmd[0]; + __u32 vendor_cmd[0]; }; /* Response: */ struct fc_bsg_host_vendor_reply { /* start of vendor response area */ - uint32_t vendor_rsp[0]; + __u32 vendor_rsp[0]; }; @@ -248,7 +235,7 @@ struct fc_bsg_rport_els { * ELS Command Code being sent (must be the same as * byte 0 of the payload) */ - uint8_t els_code; + __u8 els_code; }; /* Response: @@ -266,9 +253,9 @@ struct fc_bsg_rport_ct { /* * We need words 0-2 of the generic preamble for the LLD's */ - uint32_t preamble_word0; /* revision & IN_ID */ - uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ - uint32_t preamble_word2; /* Cmd Code, Max Size */ + __u32 preamble_word0; /* revision & IN_ID */ + __u32 preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ + __u32 preamble_word2; /* Cmd Code, Max Size */ }; /* Response: * @@ -280,7 +267,7 @@ struct fc_bsg_rport_ct { /* request (CDB) structure of the sg_io_v4 */ struct fc_bsg_request { - uint32_t msgcode; + __u32 msgcode; union { struct fc_bsg_host_add_rport h_addrport; struct fc_bsg_host_del_rport h_delrport; @@ -304,10 +291,10 @@ struct fc_bsg_reply { * msg and status fields. The per-msgcode reply structure * will contain valid data. */ - uint32_t result; + __u32 result; /* If there was reply_payload, how much was recevied ? 
*/ - uint32_t reply_payload_rcv_len; + __u32 reply_payload_rcv_len; union { struct fc_bsg_host_vendor_reply vendor_reply; diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h index 17c7abd0803a..9988db6ad244 100644 --- a/include/uapi/scsi/scsi_bsg_ufs.h +++ b/include/uapi/scsi/scsi_bsg_ufs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * UFS Transport SGIO v4 BSG Message Support * diff --git a/include/uapi/scsi/scsi_netlink.h b/include/uapi/scsi/scsi_netlink.h index 5ccc2333acab..1b1737c3c9d8 100644 --- a/include/uapi/scsi/scsi_netlink.h +++ b/include/uapi/scsi/scsi_netlink.h @@ -4,21 +4,6 @@ * Used for the posting of outbound SCSI transport events * * Copyright (C) 2006 James Smart, Emulex Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * */ #ifndef SCSI_NETLINK_H #define SCSI_NETLINK_H @@ -41,12 +26,12 @@ /* SCSI_TRANSPORT_MSG event message header */ struct scsi_nl_hdr { - uint8_t version; - uint8_t transport; - uint16_t magic; - uint16_t msgtype; - uint16_t msglen; -} __attribute__((aligned(sizeof(uint64_t)))); + __u8 version; + __u8 transport; + __u16 magic; + __u16 msgtype; + __u16 msglen; +} __attribute__((aligned(sizeof(__u64)))); /* scsi_nl_hdr->version value */ #define SCSI_NL_VERSION 1 @@ -90,10 +75,10 @@ struct scsi_nl_hdr { */ struct scsi_nl_host_vendor_msg { struct scsi_nl_hdr snlh; /* must be 1st element ! */ - uint64_t vendor_id; - uint16_t host_no; - uint16_t vmsg_datalen; -} __attribute__((aligned(sizeof(uint64_t)))); + __u64 vendor_id; + __u16 host_no; + __u16 vmsg_datalen; +} __attribute__((aligned(sizeof(__u64)))); /* diff --git a/include/uapi/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h index 060f563c38a2..7535253f1a96 100644 --- a/include/uapi/scsi/scsi_netlink_fc.h +++ b/include/uapi/scsi/scsi_netlink_fc.h @@ -3,25 +3,11 @@ * FC Transport Netlink Interface * * Copyright (C) 2006 James Smart, Emulex Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * */ #ifndef SCSI_NETLINK_FC_H #define SCSI_NETLINK_FC_H +#include <linux/types.h> #include <scsi/scsi_netlink.h> /* @@ -58,14 +44,14 @@ */ struct fc_nl_event { struct scsi_nl_hdr snlh; /* must be 1st element ! */ - uint64_t seconds; - uint64_t vendor_id; - uint16_t host_no; - uint16_t event_datalen; - uint32_t event_num; - uint32_t event_code; - uint32_t event_data; -} __attribute__((aligned(sizeof(uint64_t)))); + __u64 seconds; + __u64 vendor_id; + __u16 host_no; + __u16 event_datalen; + __u32 event_num; + __u32 event_code; + __u32 event_data; +} __attribute__((aligned(sizeof(__u64)))); #endif /* SCSI_NETLINK_FC_H */ diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h index f39352cef382..9eee32f5e407 100644 --- a/include/uapi/sound/skl-tplg-interface.h +++ b/include/uapi/sound/skl-tplg-interface.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * skl-tplg-interface.h - Intel DSP FW private data interface * diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h index 0868eb47acf7..a0fe0d4c4b66 100644 --- a/include/uapi/sound/sof/abi.h +++ b/include/uapi/sound/sof/abi.h @@ -26,7 +26,7 @@ /* SOF ABI version major, minor and patch numbers */ #define SOF_ABI_MAJOR 3 -#define SOF_ABI_MINOR 6 +#define SOF_ABI_MINOR 10 #define SOF_ABI_PATCH 0 /* SOF ABI version number. Format within 32bit word is MMmmmppp */ diff --git a/include/uapi/sound/sof/eq.h b/include/uapi/sound/sof/eq.h deleted file mode 100644 index 666c2b6a3229..000000000000 --- a/include/uapi/sound/sof/eq.h +++ /dev/null @@ -1,172 +0,0 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - */ - -#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_EQ_H__ -#define __INCLUDE_UAPI_SOUND_SOF_USER_EQ_H__ - -/* FIR EQ type */ - -#define SOF_EQ_FIR_IDX_SWITCH 0 - -#define SOF_EQ_FIR_MAX_SIZE 4096 /* Max size allowed for coef data in bytes */ - -#define SOF_EQ_FIR_MAX_LENGTH 192 /* Max length for individual filter */ - -#define SOF_EQ_FIR_MAX_RESPONSES 8 /* A blob can define max 8 FIR EQs */ - -/* - * eq_fir_configuration data structure contains this information - * uint32_t size - * This is the number of bytes need to store the received EQ - * configuration. - * uint16_t channels_in_config - * This describes the number of channels in this EQ config data. It - * can be different from PLATFORM_MAX_CHANNELS. - * uint16_t number_of_responses - * 0=no responses, 1=one response defined, 2=two responses defined, etc. - * int16_t data[] - * assign_response[channels_in_config] - * 0 = use first response, 1 = use 2nd response, etc. - * E.g. {0, 0, 0, 0, 1, 1, 1, 1} would apply to channels 0-3 the - * same first defined response and for to channels 4-7 the second. - * coef_data[] - * Repeated data - * { filter_length, output_shift, h[] } - * for every EQ response defined where vector h has filter_length - * number of coefficients. Coefficients in h[] are in Q1.15 format. - * E.g. 16384 (Q1.15) = 0.5. The shifts are number of right shifts. 
- * - * NOTE: The channels_in_config must be even to have coef_data aligned to - * 32 bit word in RAM. Therefore a mono EQ assign must be duplicated to 2ch - * even if it would never used. Similarly a 5ch EQ assign must be increased - * to 6ch. EQ init will return an error if this is not met. - * - * NOTE: The filter_length must be multiple of four. Therefore the filter must - * be padded from the end with zeros have this condition met. - */ - -struct sof_eq_fir_config { - uint32_t size; - uint16_t channels_in_config; - uint16_t number_of_responses; - - /* reserved */ - uint32_t reserved[4]; - - int16_t data[]; -} __packed; - -struct sof_eq_fir_coef_data { - int16_t length; /* Number of FIR taps */ - int16_t out_shift; /* Amount of right shifts at output */ - - /* reserved */ - uint32_t reserved[4]; - - int16_t coef[]; /* FIR coefficients */ -} __packed; - -/* In the struct above there's two 16 bit words (length, shift) and four - * reserved 32 bit words before the actual FIR coefficients. This information - * is used in parsing of the configuration blob. - */ -#define SOF_EQ_FIR_COEF_NHEADER \ - (sizeof(struct sof_eq_fir_coef_data) / sizeof(int16_t)) - -/* IIR EQ type */ - -#define SOF_EQ_IIR_IDX_SWITCH 0 - -#define SOF_EQ_IIR_MAX_SIZE 1024 /* Max size allowed for coef data in bytes */ - -#define SOF_EQ_IIR_MAX_RESPONSES 8 /* A blob can define max 8 IIR EQs */ - -/* eq_iir_configuration - * uint32_t channels_in_config - * This describes the number of channels in this EQ config data. It - * can be different from PLATFORM_MAX_CHANNELS. - * uint32_t number_of_responses_defined - * 0=no responses, 1=one response defined, 2=two responses defined, etc. - * int32_t data[] - * Data consist of two parts. First is the response assign vector that - * has length of channels_in_config. The latter part is coefficient - * data. - * uint32_t assign_response[channels_in_config] - * -1 = not defined, 0 = use first response, 1 = use 2nd, etc. - * E.g. {0, 0, 0, 0, -1, -1, -1, -1} would apply to channels 0-3 the - * same first defined response and leave channels 4-7 unequalized. - * coefficient_data[] - * <1st EQ> - * uint32_t num_biquads - * uint32_t num_biquads_in_series - * <1st biquad> - * int32_t coef_a2 Q2.30 format - * int32_t coef_a1 Q2.30 format - * int32_t coef_b2 Q2.30 format - * int32_t coef_b1 Q2.30 format - * int32_t coef_b0 Q2.30 format - * int32_t output_shift number of shifts right, shift left is negative - * int32_t output_gain Q2.14 format - * <2nd biquad> - * ... - * <2nd EQ> - * - * Note: A flat response biquad can be made with a section set to - * b0 = 1.0, gain = 1.0, and other parameters set to 0 - * {0, 0, 0, 0, 1073741824, 0, 16484} - */ - -struct sof_eq_iir_config { - uint32_t size; - uint32_t channels_in_config; - uint32_t number_of_responses; - - /* reserved */ - uint32_t reserved[4]; - - int32_t data[]; /* eq_assign[channels], eq 0, eq 1, ... */ -} __packed; - -struct sof_eq_iir_header_df2t { - uint32_t num_sections; - uint32_t num_sections_in_series; - - /* reserved */ - uint32_t reserved[4]; - - int32_t biquads[]; /* Repeated biquad coefficients */ -} __packed; - -struct sof_eq_iir_biquad_df2t { - int32_t a2; /* Q2.30 */ - int32_t a1; /* Q2.30 */ - int32_t b2; /* Q2.30 */ - int32_t b1; /* Q2.30 */ - int32_t b0; /* Q2.30 */ - int32_t output_shift; /* Number of right shifts */ - int32_t output_gain; /* Q2.14 */ -} __packed; - -/* A full 22th order equalizer with 11 biquads cover octave bands 1-11 in - * in the 0 - 20 kHz bandwidth. 
- */ -#define SOF_EQ_IIR_DF2T_BIQUADS_MAX 11 - -/* The number of int32_t words in sof_eq_iir_header_df2t: - * num_sections, num_sections_in_series, reserved[4] - */ -#define SOF_EQ_IIR_NHEADER_DF2T \ - (sizeof(struct sof_eq_iir_header_df2t) / sizeof(int32_t)) - -/* The number of int32_t words in sof_eq_iir_biquad_df2t: - * a2, a1, b2, b1, b0, output_shift, output_gain - */ -#define SOF_EQ_IIR_NBIQUAD_DF2T \ - (sizeof(struct sof_eq_iir_biquad_df2t) / sizeof(int32_t)) - -#endif diff --git a/include/uapi/sound/sof/fw.h b/include/uapi/sound/sof/fw.h index 1afca973eb09..e9f697467a86 100644 --- a/include/uapi/sound/sof/fw.h +++ b/include/uapi/sound/sof/fw.h @@ -13,6 +13,8 @@ #ifndef __INCLUDE_UAPI_SOF_FW_H__ #define __INCLUDE_UAPI_SOF_FW_H__ +#include <linux/types.h> + #define SND_SOF_FW_SIG_SIZE 4 #define SND_SOF_FW_ABI 1 #define SND_SOF_FW_SIG "Reef" @@ -46,8 +48,8 @@ enum snd_sof_fw_blk_type { struct snd_sof_blk_hdr { enum snd_sof_fw_blk_type type; - uint32_t size; /* bytes minus this header */ - uint32_t offset; /* offset from base */ + __u32 size; /* bytes minus this header */ + __u32 offset; /* offset from base */ } __packed; /* @@ -61,8 +63,8 @@ enum snd_sof_fw_mod_type { struct snd_sof_mod_hdr { enum snd_sof_fw_mod_type type; - uint32_t size; /* bytes minus this header */ - uint32_t num_blocks; /* number of blocks */ + __u32 size; /* bytes minus this header */ + __u32 num_blocks; /* number of blocks */ } __packed; /* @@ -70,9 +72,9 @@ struct snd_sof_mod_hdr { */ struct snd_sof_fw_header { unsigned char sig[SND_SOF_FW_SIG_SIZE]; /* "Reef" */ - uint32_t file_size; /* size of file minus this header */ - uint32_t num_modules; /* number of modules */ - uint32_t abi; /* version of header format */ + __u32 file_size; /* size of file minus this header */ + __u32 num_modules; /* number of modules */ + __u32 abi; /* version of header format */ } __packed; #endif diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h index 7868990b0d6f..5f4518e7a972 100644 --- a/include/uapi/sound/sof/header.h +++ b/include/uapi/sound/sof/header.h @@ -9,6 +9,8 @@ #ifndef __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__ #define __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__ +#include <linux/types.h> + /* * Header for all non IPC ABI data. * @@ -16,12 +18,12 @@ * Used by any bespoke component data structures or binary blobs. */ struct sof_abi_hdr { - uint32_t magic; /**< 'S', 'O', 'F', '\0' */ - uint32_t type; /**< component specific type */ - uint32_t size; /**< size in bytes of data excl. this struct */ - uint32_t abi; /**< SOF ABI version */ - uint32_t reserved[4]; /**< reserved for future use */ - uint32_t data[0]; /**< Component data - opaque to core */ + __u32 magic; /**< 'S', 'O', 'F', '\0' */ + __u32 type; /**< component specific type */ + __u32 size; /**< size in bytes of data excl. this struct */ + __u32 abi; /**< SOF ABI version */ + __u32 reserved[4]; /**< reserved for future use */ + __u32 data[0]; /**< Component data - opaque to core */ } __packed; #endif diff --git a/include/uapi/sound/sof/manifest.h b/include/uapi/sound/sof/manifest.h deleted file mode 100644 index 2009ee30fad0..000000000000 --- a/include/uapi/sound/sof/manifest.h +++ /dev/null @@ -1,188 +0,0 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. 
- */ - -#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_MANIFEST_H__ -#define __INCLUDE_UAPI_SOUND_SOF_USER_MANIFEST_H__ - -/* start offset for base FW module */ -#define SOF_MAN_ELF_TEXT_OFFSET 0x2000 - -/* FW Extended Manifest Header id = $AE1 */ -#define SOF_MAN_EXT_HEADER_MAGIC 0x31454124 - -/* module type load type */ -#define SOF_MAN_MOD_TYPE_BUILTIN 0 -#define SOF_MAN_MOD_TYPE_MODULE 1 - -struct sof_man_module_type { - uint32_t load_type:4; /* SOF_MAN_MOD_TYPE_ */ - uint32_t auto_start:1; - uint32_t domain_ll:1; - uint32_t domain_dp:1; - uint32_t rsvd_:25; -}; - -/* segment flags.type */ -#define SOF_MAN_SEGMENT_TEXT 0 -#define SOF_MAN_SEGMENT_RODATA 1 -#define SOF_MAN_SEGMENT_DATA 1 -#define SOF_MAN_SEGMENT_BSS 2 -#define SOF_MAN_SEGMENT_EMPTY 15 - -union sof_man_segment_flags { - uint32_t ul; - struct { - uint32_t contents:1; - uint32_t alloc:1; - uint32_t load:1; - uint32_t readonly:1; - uint32_t code:1; - uint32_t data:1; - uint32_t _rsvd0:2; - uint32_t type:4; /* MAN_SEGMENT_ */ - uint32_t _rsvd1:4; - uint32_t length:16; /* of segment in pages */ - } r; -} __packed; - -/* - * Module segment descriptor. Used by ROM - Immutable. - */ -struct sof_man_segment_desc { - union sof_man_segment_flags flags; - uint32_t v_base_addr; - uint32_t file_offset; -} __packed; - -/* - * The firmware binary can be split into several modules. - */ - -#define SOF_MAN_MOD_ID_LEN 4 -#define SOF_MAN_MOD_NAME_LEN 8 -#define SOF_MAN_MOD_SHA256_LEN 32 -#define SOF_MAN_MOD_ID {'$', 'A', 'M', 'E'} - -/* - * Each module has an entry in the FW header. Used by ROM - Immutable. - */ -struct sof_man_module { - uint8_t struct_id[SOF_MAN_MOD_ID_LEN]; /* SOF_MAN_MOD_ID */ - uint8_t name[SOF_MAN_MOD_NAME_LEN]; - uint8_t uuid[16]; - struct sof_man_module_type type; - uint8_t hash[SOF_MAN_MOD_SHA256_LEN]; - uint32_t entry_point; - uint16_t cfg_offset; - uint16_t cfg_count; - uint32_t affinity_mask; - uint16_t instance_max_count; /* max number of instances */ - uint16_t instance_bss_size; /* instance (pages) */ - struct sof_man_segment_desc segment[3]; -} __packed; - -/* - * Each module has a configuration in the FW header. Used by ROM - Immutable. - */ -struct sof_man_mod_config { - uint32_t par[4]; /* module parameters */ - uint32_t is_pages; /* actual size of instance .bss (pages) */ - uint32_t cps; /* cycles per second */ - uint32_t ibs; /* input buffer size (bytes) */ - uint32_t obs; /* output buffer size (bytes) */ - uint32_t module_flags; /* flags, reserved for future use */ - uint32_t cpc; /* cycles per single run */ - uint32_t obls; /* output block size, reserved for future use */ -} __packed; - -/* - * FW Manifest Header - */ - -#define SOF_MAN_FW_HDR_FW_NAME_LEN 8 -#define SOF_MAN_FW_HDR_ID {'$', 'A', 'M', '1'} -#define SOF_MAN_FW_HDR_NAME "ADSPFW" -#define SOF_MAN_FW_HDR_FLAGS 0x0 -#define SOF_MAN_FW_HDR_FEATURES 0xff - -/* - * The firmware has a standard header that is checked by the ROM on firmware - * loading. preload_page_count is used by DMA code loader and is entire - * image size on CNL. i.e. CNL: total size of the binary’s .text and .rodata - * Used by ROM - Immutable. 
- */ -struct sof_man_fw_header { - uint8_t header_id[4]; - uint32_t header_len; - uint8_t name[SOF_MAN_FW_HDR_FW_NAME_LEN]; - /* number of pages of preloaded image loaded by driver */ - uint32_t preload_page_count; - uint32_t fw_image_flags; - uint32_t feature_mask; - uint16_t major_version; - uint16_t minor_version; - uint16_t hotfix_version; - uint16_t build_version; - uint32_t num_module_entries; - uint32_t hw_buf_base_addr; - uint32_t hw_buf_length; - /* target address for binary loading as offset in IMR - must be == base offset */ - uint32_t load_offset; -} __packed; - -/* - * Firmware manifest descriptor. This can contain N modules and N module - * configs. Used by ROM - Immutable. - */ -struct sof_man_fw_desc { - struct sof_man_fw_header header; - - /* Warning - hack for module arrays. For some unknown reason the we - * have a variable size array of struct man_module followed by a - * variable size array of struct mod_config. These should have been - * merged into a variable array of a parent structure. We have to hack - * around this in many places.... - * - * struct sof_man_module man_module[]; - * struct sof_man_mod_config mod_config[]; - */ - -} __packed; - -/* - * Component Descriptor. Used by ROM - Immutable. - */ -struct sof_man_component_desc { - uint32_t reserved[2]; /* all 0 */ - uint32_t version; - uint8_t hash[SOF_MAN_MOD_SHA256_LEN]; - uint32_t base_offset; - uint32_t limit_offset; - uint32_t attributes[4]; -} __packed; - -/* - * Audio DSP extended metadata. Used by ROM - Immutable. - */ -struct sof_man_adsp_meta_file_ext { - uint32_t ext_type; /* always 17 for ADSP extension */ - uint32_t ext_len; - uint32_t imr_type; - uint8_t reserved[16]; /* all 0 */ - struct sof_man_component_desc comp_desc[1]; -} __packed; - -/* - * Module Manifest for rimage module metadata. Not used by ROM. - */ -struct sof_man_module_manifest { - struct sof_man_module module; - uint32_t text_size; -} __packed; - -#endif diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h index 53ea94bf1c08..8f996857fb24 100644 --- a/include/uapi/sound/sof/tokens.h +++ b/include/uapi/sound/sof/tokens.h @@ -75,6 +75,7 @@ #define SOF_TKN_INTEL_SSP_FRAME_PULSE_WIDTH 503 #define SOF_TKN_INTEL_SSP_QUIRKS 504 #define SOF_TKN_INTEL_SSP_TDM_PADDING_PER_SLOT 505 +#define SOF_TKN_INTEL_SSP_BCLK_DELAY 506 /* DMIC */ #define SOF_TKN_INTEL_DMIC_DRIVER_VERSION 600 @@ -85,6 +86,7 @@ #define SOF_TKN_INTEL_DMIC_NUM_PDM_ACTIVE 605 #define SOF_TKN_INTEL_DMIC_SAMPLE_RATE 608 #define SOF_TKN_INTEL_DMIC_FIFO_WORD_LENGTH 609 +#define SOF_TKN_INTEL_DMIC_UNMUTE_RAMP_TIME_MS 610 /* DMIC PDM */ #define SOF_TKN_INTEL_DMIC_PDM_CTRL_ID 700 @@ -104,4 +106,12 @@ /* for backward compatibility */ #define SOF_TKN_EFFECT_TYPE SOF_TKN_PROCESS_TYPE +/* SAI */ +#define SOF_TKN_IMX_SAI_FIRST_TOKEN 1000 +/* TODO: Add SAI tokens */ + +/* ESAI */ +#define SOF_TKN_IMX_ESAI_FIRST_TOKEN 1100 +/* TODO: Add ESAI tokens */ + #endif diff --git a/include/uapi/sound/sof/tone.h b/include/uapi/sound/sof/tone.h deleted file mode 100644 index d7c6e5d8317e..000000000000 --- a/include/uapi/sound/sof/tone.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ -/* -* This file is provided under a dual BSD/GPLv2 license. When using or -* redistributing this file, you may do so under either license. -* -* Copyright(c) 2018 Intel Corporation. All rights reserved. 
-*/ - -#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_TONE_H__ -#define __INCLUDE_UAPI_SOUND_SOF_USER_TONE_H__ - -#define SOF_TONE_IDX_FREQUENCY 0 -#define SOF_TONE_IDX_AMPLITUDE 1 -#define SOF_TONE_IDX_FREQ_MULT 2 -#define SOF_TONE_IDX_AMPL_MULT 3 -#define SOF_TONE_IDX_LENGTH 4 -#define SOF_TONE_IDX_PERIOD 5 -#define SOF_TONE_IDX_REPEATS 6 -#define SOF_TONE_IDX_LIN_RAMP_STEP 7 - -#endif diff --git a/include/uapi/sound/sof/trace.h b/include/uapi/sound/sof/trace.h deleted file mode 100644 index ffa7288a0f16..000000000000 --- a/include/uapi/sound/sof/trace.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - */ - -#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_TRACE_H__ -#define __INCLUDE_UAPI_SOUND_SOF_USER_TRACE_H__ - -/* - * Host system time. - * - * This property is used by the driver to pass down information about - * current system time. It is expressed in us. - * FW translates timestamps (in log entries, probe pockets) to this time - * domain. - * - * (cavs: SystemTime). - */ -struct system_time { - uint32_t val_l; /* Lower dword of current host time value */ - uint32_t val_u; /* Upper dword of current host time value */ -} __packed; - -#define LOG_ENABLE 1 /* Enable logging */ -#define LOG_DISABLE 0 /* Disable logging */ - -#define LOG_LEVEL_CRITICAL 1 /* (FDK fatal) */ -#define LOG_LEVEL_VERBOSE 2 - -/* - * Layout of a log fifo. - */ -struct log_buffer_layout { - uint32_t read_ptr; /*read pointer */ - uint32_t write_ptr; /* write pointer */ - uint32_t buffer[0]; /* buffer */ -} __packed; - -/* - * Log buffer status reported by FW. - */ -struct log_buffer_status { - uint32_t core_id; /* ID of core that logged to other half */ -} __packed; - -#define TRACE_ID_LENGTH 12 - -/* - * Log entry header. - * - * The header is followed by an array of arguments (uint32_t[]). - * Number of arguments is specified by the params_num field of log_entry - */ -struct log_entry_header { - uint32_t id_0 : TRACE_ID_LENGTH; /* e.g. Pipeline ID */ - uint32_t id_1 : TRACE_ID_LENGTH; /* e.g. Component ID */ - uint32_t core_id : 8; /* Reporting core's id */ - - uint64_t timestamp; /* Timestamp (in dsp ticks) */ - uint32_t log_entry_address; /* Address of log entry in ELF */ -} __packed; - -#endif diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h new file mode 100644 index 000000000000..2e302c0f41f7 --- /dev/null +++ b/include/vdso/datapage.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_DATAPAGE_H +#define __VDSO_DATAPAGE_H + +#ifndef __ASSEMBLY__ + +#include <linux/bits.h> +#include <linux/time.h> +#include <linux/types.h> + +#define VDSO_BASES (CLOCK_TAI + 1) +#define VDSO_HRES (BIT(CLOCK_REALTIME) | \ + BIT(CLOCK_MONOTONIC) | \ + BIT(CLOCK_BOOTTIME) | \ + BIT(CLOCK_TAI)) +#define VDSO_COARSE (BIT(CLOCK_REALTIME_COARSE) | \ + BIT(CLOCK_MONOTONIC_COARSE)) +#define VDSO_RAW (BIT(CLOCK_MONOTONIC_RAW)) + +#define CS_HRES_COARSE 0 +#define CS_RAW 1 +#define CS_BASES (CS_RAW + 1) + +/** + * struct vdso_timestamp - basetime per clock_id + * @sec: seconds + * @nsec: nanoseconds + * + * There is one vdso_timestamp object in vvar for each vDSO-accelerated + * clock_id. For high-resolution clocks, this encodes the time + * corresponding to vdso_data.cycle_last. For coarse clocks this encodes + * the actual time. 
+ * + * To be noticed that for highres clocks nsec is left-shifted by + * vdso_data.cs[x].shift. + */ +struct vdso_timestamp { + u64 sec; + u64 nsec; +}; + +/** + * struct vdso_data - vdso datapage representation + * @seq: timebase sequence counter + * @clock_mode: clock mode + * @cycle_last: timebase at clocksource init + * @mask: clocksource mask + * @mult: clocksource multiplier + * @shift: clocksource shift + * @basetime[clock_id]: basetime per clock_id + * @tz_minuteswest: minutes west of Greenwich + * @tz_dsttime: type of DST correction + * @hrtimer_res: hrtimer resolution + * @__unused: unused + * + * vdso_data will be accessed by 64 bit and compat code at the same time + * so we should be careful before modifying this structure. + */ +struct vdso_data { + u32 seq; + + s32 clock_mode; + u64 cycle_last; + u64 mask; + u32 mult; + u32 shift; + + struct vdso_timestamp basetime[VDSO_BASES]; + + s32 tz_minuteswest; + s32 tz_dsttime; + u32 hrtimer_res; + u32 __unused; +}; + +/* + * We use the hidden visibility to prevent the compiler from generating a GOT + * relocation. Not only is going through a GOT useless (the entry couldn't and + * must not be overridden by another library), it does not even work: the linker + * cannot generate an absolute address to the data page. + * + * With the hidden visibility, the compiler simply generates a PC-relative + * relocation, and this is what we need. + */ +extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden"))); + +#endif /* !__ASSEMBLY__ */ + +#endif /* __VDSO_DATAPAGE_H */ diff --git a/include/vdso/helpers.h b/include/vdso/helpers.h new file mode 100644 index 000000000000..01641dbb68ef --- /dev/null +++ b/include/vdso/helpers.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_HELPERS_H +#define __VDSO_HELPERS_H + +#ifndef __ASSEMBLY__ + +#include <vdso/datapage.h> + +static __always_inline u32 vdso_read_begin(const struct vdso_data *vd) +{ + u32 seq; + + while ((seq = READ_ONCE(vd->seq)) & 1) + cpu_relax(); + + smp_rmb(); + return seq; +} + +static __always_inline u32 vdso_read_retry(const struct vdso_data *vd, + u32 start) +{ + u32 seq; + + smp_rmb(); + seq = READ_ONCE(vd->seq); + return seq != start; +} + +static __always_inline void vdso_write_begin(struct vdso_data *vd) +{ + /* + * WRITE_ONCE it is required otherwise the compiler can validly tear + * updates to vd[x].seq and it is possible that the value seen by the + * reader it is inconsistent. + */ + WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1); + WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1); + smp_wmb(); +} + +static __always_inline void vdso_write_end(struct vdso_data *vd) +{ + smp_wmb(); + /* + * WRITE_ONCE it is required otherwise the compiler can validly tear + * updates to vd[x].seq and it is possible that the value seen by the + * reader it is inconsistent. 
+ */ + WRITE_ONCE(vd[CS_HRES_COARSE].seq, vd[CS_HRES_COARSE].seq + 1); + WRITE_ONCE(vd[CS_RAW].seq, vd[CS_RAW].seq + 1); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __VDSO_HELPERS_H */ diff --git a/include/vdso/vsyscall.h b/include/vdso/vsyscall.h new file mode 100644 index 000000000000..2c6134e0c23d --- /dev/null +++ b/include/vdso/vsyscall.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_VSYSCALL_H +#define __VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include <asm/vdso/vsyscall.h> + +#endif /* !__ASSEMBLY__ */ + +#endif /* __VDSO_VSYSCALL_H */ diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h index efed3c3383d6..1d19ae62b844 100644 --- a/include/video/da8xx-fb.h +++ b/include/video/da8xx-fb.h @@ -32,7 +32,6 @@ struct da8xx_lcdc_platform_data { const char manu_name[10]; void *controller_data; const char type[25]; - void (*panel_power_ctrl)(int); }; struct lcd_ctrl_config { diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index b03fafa1ff58..06b0b57e996c 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -387,20 +387,64 @@ enum ipu_ic_task { IC_NUM_TASKS, }; +/* + * The parameters that describe a colorspace according to the + * Image Converter: + * - Y'CbCr encoding + * - quantization + * - "colorspace" (RGB or YUV). + */ +struct ipu_ic_colorspace { + enum v4l2_ycbcr_encoding enc; + enum v4l2_quantization quant; + enum ipu_color_space cs; +}; + +static inline void +ipu_ic_fill_colorspace(struct ipu_ic_colorspace *ic_cs, + enum v4l2_ycbcr_encoding enc, + enum v4l2_quantization quant, + enum ipu_color_space cs) +{ + ic_cs->enc = enc; + ic_cs->quant = quant; + ic_cs->cs = cs; +} + +struct ipu_ic_csc_params { + s16 coeff[3][3]; /* signed 9-bit integer coefficients */ + s16 offset[3]; /* signed 11+2-bit fixed point offset */ + u8 scale:2; /* scale coefficients * 2^(scale-1) */ + bool sat:1; /* saturate to (16, 235(Y) / 240(U, V)) */ +}; + +struct ipu_ic_csc { + struct ipu_ic_colorspace in_cs; + struct ipu_ic_colorspace out_cs; + struct ipu_ic_csc_params params; +}; + struct ipu_ic; + +int __ipu_ic_calc_csc(struct ipu_ic_csc *csc); +int ipu_ic_calc_csc(struct ipu_ic_csc *csc, + enum v4l2_ycbcr_encoding in_enc, + enum v4l2_quantization in_quant, + enum ipu_color_space in_cs, + enum v4l2_ycbcr_encoding out_enc, + enum v4l2_quantization out_quant, + enum ipu_color_space out_cs); int ipu_ic_task_init(struct ipu_ic *ic, + const struct ipu_ic_csc *csc, int in_width, int in_height, - int out_width, int out_height, - enum ipu_color_space in_cs, - enum ipu_color_space out_cs); + int out_width, int out_height); int ipu_ic_task_init_rsc(struct ipu_ic *ic, + const struct ipu_ic_csc *csc, int in_width, int in_height, int out_width, int out_height, - enum ipu_color_space in_cs, - enum ipu_color_space out_cs, u32 rsc); int ipu_ic_task_graphics_init(struct ipu_ic *ic, - enum ipu_color_space in_g_cs, + const struct ipu_ic_colorspace *g_in_cs, bool galpha_en, u32 galpha, bool colorkey_en, u32 colorkey); void ipu_ic_task_enable(struct ipu_ic *ic); diff --git a/include/video/omapfb_dss.h b/include/video/omapfb_dss.h index a167b839eccb..e8eaac2cb7b8 100644 --- a/include/video/omapfb_dss.h +++ b/include/video/omapfb_dss.h @@ -114,11 +114,6 @@ enum omap_dss_trans_key_type { OMAP_DSS_COLOR_KEY_VID_SRC = 1, }; -enum omap_rfbi_te_mode { - OMAP_DSS_RFBI_TE_MODE_1 = 1, - OMAP_DSS_RFBI_TE_MODE_2 = 2, -}; - enum omap_dss_signal_level { OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, @@ -189,27 +184,6 @@ enum omap_dss_output_id { 
OMAP_DSS_OUTPUT_HDMI = 1 << 6, }; -/* RFBI */ - -struct rfbi_timings { - int cs_on_time; - int cs_off_time; - int we_on_time; - int we_off_time; - int re_on_time; - int re_off_time; - int we_cycle_time; - int re_cycle_time; - int cs_pulse_width; - int access_time; - - int clk_div; - - u32 tim[5]; /* set by rfbi_convert_timings() */ - - int converted; -}; - /* DSI */ enum omap_dss_dsi_trans_mode { @@ -641,11 +615,6 @@ struct omap_dss_device { } dpi; struct { - u8 channel; - u8 data_lines; - } rfbi; - - struct { u8 datapairs; } sdi; @@ -668,7 +637,6 @@ struct omap_dss_device { struct { u8 pixel_size; - struct rfbi_timings rfbi_timings; } ctrl; const char *name; diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h index 2982571f7cc1..43ef24dd030e 100644 --- a/include/xen/arm/hypervisor.h +++ b/include/xen/arm/hypervisor.h @@ -19,8 +19,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return PARAVIRT_LAZY_NONE; } -extern const struct dma_map_ops *xen_dma_ops; - #ifdef CONFIG_XEN void __init xen_early_init(void); #else diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h index 2ca9164a79bf..b9cc11e887ed 100644 --- a/include/xen/arm/page-coherent.h +++ b/include/xen/arm/page-coherent.h @@ -2,15 +2,19 @@ #ifndef _XEN_ARM_PAGE_COHERENT_H #define _XEN_ARM_PAGE_COHERENT_H -void __xen_dma_map_page(struct device *hwdev, struct page *page, - dma_addr_t dev_addr, unsigned long offset, size_t size, - enum dma_data_direction dir, unsigned long attrs); -void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - unsigned long attrs); -void __xen_dma_sync_single_for_cpu(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir); -void __xen_dma_sync_single_for_device(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir); +#include <linux/dma-mapping.h> +#include <asm/page.h> + +static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) +{ + return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs); +} + +static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) +{ + dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs); +} #endif /* _XEN_ARM_PAGE_COHERENT_H */ diff --git a/include/xen/balloon.h b/include/xen/balloon.h index 4914b93a23f2..6fb95aa19405 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -27,16 +27,6 @@ void balloon_set_new_target(unsigned long target); int alloc_xenballooned_pages(int nr_pages, struct page **pages); void free_xenballooned_pages(int nr_pages, struct page **pages); -struct device; -#ifdef CONFIG_XEN_SELFBALLOONING -extern int register_xen_selfballooning(struct device *dev); -#else -static inline int register_xen_selfballooning(struct device *dev) -{ - return -ENOSYS; -} -#endif - #ifdef CONFIG_XEN_BALLOON void xen_balloon_init(void); #else diff --git a/include/xen/events.h b/include/xen/events.h index a48897199975..c0e6a0598397 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -3,6 +3,7 @@ #define _XEN_EVENTS_H #include <linux/interrupt.h> +#include <linux/irq.h> #ifdef CONFIG_PCI_MSI #include <linux/msi.h> #endif @@ -59,7 +60,7 @@ void evtchn_put(unsigned int evtchn); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); void rebind_evtchn_irq(int evtchn, int irq); -int 
xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu); +int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu); static inline void notify_remote_via_evtchn(int port) { diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index 5e4b83f83dbc..d71380f6ed0b 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -4,6 +4,11 @@ #include <linux/swiotlb.h> +void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, + phys_addr_t paddr, size_t size, enum dma_data_direction dir); +void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle, + phys_addr_t paddr, size_t size, enum dma_data_direction dir); + extern int xen_swiotlb_init(int verbose, bool early); extern const struct dma_map_ops xen_swiotlb_dma_ops; diff --git a/include/xen/tmem.h b/include/xen/tmem.h deleted file mode 100644 index c80bafe31f14..000000000000 --- a/include/xen/tmem.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _XEN_TMEM_H -#define _XEN_TMEM_H - -#include <linux/types.h> - -#ifdef CONFIG_XEN_TMEM_MODULE -#define tmem_enabled true -#else -/* defined in drivers/xen/tmem.c */ -extern bool tmem_enabled; -#endif - -#ifdef CONFIG_XEN_SELFBALLOONING -extern int xen_selfballoon_init(bool, bool); -#endif - -#endif /* _XEN_TMEM_H */ diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 4969817124a8..d89969aa9942 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -109,6 +109,9 @@ static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, } #endif +int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, + unsigned long len); + /* * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn * @vma: VMA to map the pages into @@ -209,30 +212,7 @@ int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr, bool xen_running_on_version_or_later(unsigned int major, unsigned int minor); -efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc); -efi_status_t xen_efi_set_time(efi_time_t *tm); -efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending, - efi_time_t *tm); -efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm); -efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor, - u32 *attr, unsigned long *data_size, - void *data); -efi_status_t xen_efi_get_next_variable(unsigned long *name_size, - efi_char16_t *name, efi_guid_t *vendor); -efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor, - u32 attr, unsigned long data_size, - void *data); -efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space, - u64 *remaining_space, - u64 *max_variable_size); -efi_status_t xen_efi_get_next_high_mono_count(u32 *count); -efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules, - unsigned long count, unsigned long sg_list); -efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules, - unsigned long count, u64 *max_size, - int *reset_type); -void xen_efi_reset_system(int reset_type, efi_status_t status, - unsigned long data_size, efi_char16_t *data); +void xen_efi_runtime_setup(void); #ifdef CONFIG_PREEMPT |
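
Note on the new vDSO helpers added above: vdso_read_begin() and vdso_read_retry() in <vdso/helpers.h> are the reader side of a seqcount over the vdso_data pages, while vdso_write_begin()/vdso_write_end() bump vd->seq to an odd value around an update (which is what the "& 1" spin in vdso_read_begin() waits out). A minimal sketch of the reader loop they are written for follows; read_basetime_sketch() is a hypothetical name and the cycle-counter delta is deliberately omitted, so only the helpers and the struct vdso_data / struct vdso_timestamp fields come from the headers introduced by this patch.

#include <vdso/datapage.h>
#include <vdso/helpers.h>

/*
 * Illustrative sketch only (not part of this patch): retry until a
 * consistent snapshot of one clock's basetime has been read.  A real
 * gettime fast path would also sample the cycle counter and fold in
 * the (cycles - vd->cycle_last) * vd->mult delta before shifting.
 */
static inline void read_basetime_sketch(const struct vdso_data *vd,
					int clk, u64 *sec, u64 *nsec)
{
	const struct vdso_timestamp *ts = &vd->basetime[clk];
	u32 seq;

	do {
		seq = vdso_read_begin(vd);	/* spins while an update is in progress */
		*sec  = ts->sec;
		*nsec = ts->nsec >> vd->shift;	/* hres nsec is stored left-shifted by vd->shift */
	} while (vdso_read_retry(vd, seq));	/* nonzero if vd->seq changed underneath us */
}

Because the writer increments both vd[CS_HRES_COARSE].seq and vd[CS_RAW].seq with WRITE_ONCE() and orders the updates with smp_wmb(), a reader that sees the same even sequence value before and after its loads is guaranteed a torn-free snapshot of the timestamp fields.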