Diffstat (limited to 'include')
-rw-r--r--include/acpi/acoutput.h6
-rw-r--r--include/acpi/acpi_drivers.h7
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl.h1
-rw-r--r--include/acpi/actbl3.h23
-rw-r--r--include/acpi/actypes.h6
-rw-r--r--include/acpi/cppc_acpi.h3
-rw-r--r--include/acpi/platform/aclinux.h4
-rw-r--r--include/asm-generic/4level-fixup.h2
-rw-r--r--include/asm-generic/5level-fixup.h3
-rw-r--r--include/asm-generic/bug.h8
-rw-r--r--include/asm-generic/dma-mapping.h2
-rw-r--r--include/asm-generic/fixmap.h1
-rw-r--r--include/asm-generic/pgtable-nop4d-hack.h3
-rw-r--r--include/asm-generic/pgtable-nop4d.h3
-rw-r--r--include/asm-generic/pgtable-nopmd.h2
-rw-r--r--include/asm-generic/pgtable-nopud.h3
-rw-r--r--include/asm-generic/pgtable.h72
-rw-r--r--include/crypto/acompress.h38
-rw-r--r--include/crypto/aead.h41
-rw-r--r--include/crypto/akcipher.h74
-rw-r--r--include/crypto/chacha.h54
-rw-r--r--include/crypto/chacha20.h27
-rw-r--r--include/crypto/hash.h32
-rw-r--r--include/crypto/hash_info.h1
-rw-r--r--include/crypto/internal/cryptouser.h9
-rw-r--r--include/crypto/internal/skcipher.h2
-rw-r--r--include/crypto/kpp.h48
-rw-r--r--include/crypto/nhpoly1305.h74
-rw-r--r--include/crypto/poly1305.h28
-rw-r--r--include/crypto/rng.h27
-rw-r--r--include/crypto/skcipher.h49
-rw-r--r--include/crypto/streebog.h34
-rw-r--r--include/drm/bridge/dw_hdmi.h1
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h14
-rw-r--r--include/drm/drmP.h7
-rw-r--r--include/drm/drm_atomic.h10
-rw-r--r--include/drm/drm_atomic_helper.h45
-rw-r--r--include/drm/drm_atomic_state_helper.h73
-rw-r--r--include/drm/drm_connector.h60
-rw-r--r--include/drm/drm_crtc.h9
-rw-r--r--include/drm/drm_crtc_helper.h6
-rw-r--r--include/drm/drm_damage_helper.h99
-rw-r--r--include/drm/drm_dp_helper.h98
-rw-r--r--include/drm/drm_dp_mst_helper.h6
-rw-r--r--include/drm/drm_drv.h14
-rw-r--r--include/drm/drm_dsc.h485
-rw-r--r--include/drm/drm_fb_cma_helper.h2
-rw-r--r--include/drm/drm_file.h14
-rw-r--r--include/drm/drm_fourcc.h89
-rw-r--r--include/drm/drm_framebuffer.h24
-rw-r--r--include/drm/drm_gem.h181
-rw-r--r--include/drm/drm_gem_cma_helper.h24
-rw-r--r--include/drm/drm_global.h53
-rw-r--r--include/drm/drm_hdcp.h212
-rw-r--r--include/drm/drm_mipi_dsi.h8
-rw-r--r--include/drm/drm_mode_config.h27
-rw-r--r--include/drm/drm_modeset_lock.h59
-rw-r--r--include/drm/drm_plane.h44
-rw-r--r--include/drm/drm_plane_helper.h35
-rw-r--r--include/drm/drm_prime.h4
-rw-r--r--include/drm/drm_property.h3
-rw-r--r--include/drm/drm_syncobj.h4
-rw-r--r--include/drm/drm_vblank.h8
-rw-r--r--include/drm/gpu_scheduler.h9
-rw-r--r--include/drm/i915_pciids.h21
-rw-r--r--include/drm/tinydrm/tinydrm.h35
-rw-r--r--include/drm/ttm/ttm_bo_driver.h23
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h4
-rw-r--r--include/drm/ttm/ttm_memory.h4
-rw-r--r--include/dt-bindings/clock/bcm2835-aux.h10
-rw-r--r--include/dt-bindings/clock/bcm2835.h10
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h18
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h4
-rw-r--r--include/dt-bindings/clock/imx7ulp-clock.h116
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h395
-rw-r--r--include/dt-bindings/clock/imx8qxp-clock.h289
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h4
-rw-r--r--include/dt-bindings/clock/mt7629-clk.h203
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h94
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h2
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sdm845.h24
-rw-r--r--include/dt-bindings/clock/qcom,lpass-sdm845.h15
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h4
-rw-r--r--include/dt-bindings/clock/r8a7795-cpg-mssr.h2
-rw-r--r--include/dt-bindings/clock/r8a7796-cpg-mssr.h2
-rw-r--r--include/dt-bindings/clock/r8a77995-cpg-mssr.h5
-rw-r--r--include/dt-bindings/clock/rk3328-cru.h3
-rw-r--r--include/dt-bindings/clock/sun8i-de2.h3
-rw-r--r--include/dt-bindings/clock/suniv-ccu-f1c100s.h70
-rw-r--r--include/dt-bindings/firmware/imx/rsrc.h559
-rw-r--r--include/dt-bindings/media/xilinx-vip.h5
-rw-r--r--include/dt-bindings/regulator/active-semi,8945a-regulator.h30
-rw-r--r--include/dt-bindings/reset/sun8i-de2.h1
-rw-r--r--include/dt-bindings/reset/suniv-ccu-f1c100s.h38
-rw-r--r--include/dt-bindings/sound/qcom,q6afe.h1
-rw-r--r--include/kvm/arm_arch_timer.h4
-rw-r--r--include/linux/acpi.h30
-rw-r--r--include/linux/adxl.h5
-rw-r--r--include/linux/audit.h8
-rw-r--r--include/linux/avf/virtchnl.h10
-rw-r--r--include/linux/bio.h29
-rw-r--r--include/linux/blk-cgroup.h227
-rw-r--r--include/linux/blk-mq-pci.h4
-rw-r--r--include/linux/blk-mq-rdma.h2
-rw-r--r--include/linux/blk-mq-virtio.h4
-rw-r--r--include/linux/blk-mq.h83
-rw-r--r--include/linux/blk_types.h24
-rw-r--r--include/linux/blkdev.h250
-rw-r--r--include/linux/bpf.h42
-rw-r--r--include/linux/bpf_verifier.h5
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/bsg-lib.h6
-rw-r--r--include/linux/btf.h20
-rw-r--r--include/linux/can/dev.h1
-rw-r--r--include/linux/can/rx-offload.h7
-rw-r--r--include/linux/ceph/ceph_features.h8
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/clk-provider.h15
-rw-r--r--include/linux/clk/clk-conf.h5
-rw-r--r--include/linux/compat.h26
-rw-r--r--include/linux/compiler-gcc.h12
-rw-r--r--include/linux/compiler.h58
-rw-r--r--include/linux/compiler_attributes.h23
-rw-r--r--include/linux/compiler_types.h110
-rw-r--r--include/linux/cordic.h9
-rw-r--r--include/linux/cpufreq.h8
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cpuidle.h2
-rw-r--r--include/linux/crypto.h331
-rw-r--r--include/linux/dax.h14
-rw-r--r--include/linux/dell-led.h7
-rw-r--r--include/linux/devfreq.h13
-rw-r--r--include/linux/dma-debug.h34
-rw-r--r--include/linux/dma-direct.h19
-rw-r--r--include/linux/dma-fence.h1
-rw-r--r--include/linux/dma-iommu.h1
-rw-r--r--include/linux/dma-mapping.h352
-rw-r--r--include/linux/dma-noncoherent.h7
-rw-r--r--include/linux/edac.h6
-rw-r--r--include/linux/efi.h26
-rw-r--r--include/linux/elevator.h94
-rw-r--r--include/linux/energy_model.h187
-rw-r--r--include/linux/etherdevice.h1
-rw-r--r--include/linux/fanotify.h5
-rw-r--r--include/linux/filter.h36
-rw-r--r--include/linux/firmware/imx/sci.h1
-rw-r--r--include/linux/firmware/imx/svc/pm.h85
-rw-r--r--include/linux/firmware/imx/types.h552
-rw-r--r--include/linux/fs.h15
-rw-r--r--include/linux/fscache-cache.h3
-rw-r--r--include/linux/fsnotify.h61
-rw-r--r--include/linux/fsnotify_backend.h11
-rw-r--r--include/linux/ftrace.h7
-rw-r--r--include/linux/futex.h8
-rw-r--r--include/linux/genhd.h57
-rw-r--r--include/linux/gfp.h12
-rw-r--r--include/linux/gpio/consumer.h23
-rw-r--r--include/linux/hdmi.h24
-rw-r--r--include/linux/hid-sensor-hub.h4
-rw-r--r--include/linux/hid.h32
-rw-r--r--include/linux/hrtimer.h5
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/i3c/ccc.h385
-rw-r--r--include/linux/i3c/device.h331
-rw-r--r--include/linux/i3c/master.h648
-rw-r--r--include/linux/i8253.h1
-rw-r--r--include/linux/ide.h14
-rw-r--r--include/linux/ieee80211.h32
-rw-r--r--include/linux/if_bridge.h12
-rw-r--r--include/linux/if_vlan.h53
-rw-r--r--include/linux/indirect_call_wrapper.h51
-rw-r--r--include/linux/init.h1
-rw-r--r--include/linux/interrupt.h19
-rw-r--r--include/linux/ioprio.h13
-rw-r--r--include/linux/irq.h6
-rw-r--r--include/linux/irq_sim.h2
-rw-r--r--include/linux/irqchip.h4
-rw-r--r--include/linux/irqchip/irq-madera.h132
-rw-r--r--include/linux/irqdomain.h6
-rw-r--r--include/linux/jbd2.h7
-rw-r--r--include/linux/kexec.h12
-rw-r--r--include/linux/kprobes.h6
-rw-r--r--include/linux/kvm_host.h12
-rw-r--r--include/linux/leds.h21
-rw-r--r--include/linux/lightnvm.h3
-rw-r--r--include/linux/linkage.h6
-rw-r--r--include/linux/linkmode.h9
-rw-r--r--include/linux/lockdep.h2
-rw-r--r--include/linux/mempolicy.h2
-rw-r--r--include/linux/mfd/axp20x.h4
-rw-r--r--include/linux/mfd/wm8994/pdata.h3
-rw-r--r--include/linux/mii.h121
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mlx5/cq.h12
-rw-r--r--include/linux/mlx5/device.h24
-rw-r--r--include/linux/mlx5/driver.h254
-rw-r--r--include/linux/mlx5/eq.h72
-rw-r--r--include/linux/mlx5/fs.h8
-rw-r--r--include/linux/mlx5/mlx5_ifc.h183
-rw-r--r--include/linux/mlx5/port.h3
-rw-r--r--include/linux/mlx5/qp.h5
-rw-r--r--include/linux/mlx5/srq.h72
-rw-r--r--include/linux/mlx5/transobj.h11
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/mm_types.h5
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmzone.h8
-rw-r--r--include/linux/mod_devicetable.h19
-rw-r--r--include/linux/module.h11
-rw-r--r--include/linux/msi.h6
-rw-r--r--include/linux/mtd/cfi.h1
-rw-r--r--include/linux/mtd/mtd.h3
-rw-r--r--include/linux/mtd/nand.h7
-rw-r--r--include/linux/mtd/rawnand.h158
-rw-r--r--include/linux/mtd/sh_flctl.h16
-rw-r--r--include/linux/mtd/spi-nor.h11
-rw-r--r--include/linux/mtd/spinand.h2
-rw-r--r--include/linux/net_dim.h2
-rw-r--r--include/linux/netdevice.h96
-rw-r--r--include/linux/netfilter/ipset/ip_set.h4
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h15
-rw-r--r--include/linux/netfilter/nfnetlink.h12
-rw-r--r--include/linux/netfilter_bridge.h33
-rw-r--r--include/linux/netlink.h57
-rw-r--r--include/linux/nmi.h2
-rw-r--r--include/linux/nvme-fc-driver.h17
-rw-r--r--include/linux/nvme-tcp.h189
-rw-r--r--include/linux/nvme.h73
-rw-r--r--include/linux/objagg.h46
-rw-r--r--include/linux/of.h1
-rw-r--r--include/linux/of_fdt.h1
-rw-r--r--include/linux/of_net.h6
-rw-r--r--include/linux/pci.h4
-rw-r--r--include/linux/pci_ids.h5
-rw-r--r--include/linux/pe.h2
-rw-r--r--include/linux/percpu-rwsem.h2
-rw-r--r--include/linux/perf/arm_pmu.h4
-rw-r--r--include/linux/perf_event.h4
-rw-r--r--include/linux/phy.h114
-rw-r--r--include/linux/phy_fixed.h5
-rw-r--r--include/linux/phy_led_triggers.h2
-rw-r--r--include/linux/platform_data/davinci_asp.h1
-rw-r--r--include/linux/platform_data/gpio-davinci.h2
-rw-r--r--include/linux/platform_data/mdio-gpio.h14
-rw-r--r--include/linux/pm.h5
-rw-r--r--include/linux/pm_domain.h14
-rw-r--r--include/linux/pm_opp.h23
-rw-r--r--include/linux/pm_runtime.h6
-rw-r--r--include/linux/power/smartreflex.h10
-rw-r--r--include/linux/preempt.h3
-rw-r--r--include/linux/printk.h5
-rw-r--r--include/linux/property.h12
-rw-r--r--include/linux/psi.h3
-rw-r--r--include/linux/pstore.h39
-rw-r--r--include/linux/pstore_ram.h50
-rw-r--r--include/linux/ptp_clock_kernel.h33
-rw-r--r--include/linux/ptrace.h18
-rw-r--r--include/linux/pwm.h42
-rw-r--r--include/linux/qed/qed_if.h41
-rw-r--r--include/linux/rcupdate_wait.h17
-rw-r--r--include/linux/regmap.h41
-rw-r--r--include/linux/regulator/consumer.h2
-rw-r--r--include/linux/regulator/driver.h9
-rw-r--r--include/linux/regulator/machine.h3
-rw-r--r--include/linux/regulator/pfuze100.h3
-rw-r--r--include/linux/reservation.h12
-rw-r--r--include/linux/rhashtable.h34
-rw-r--r--include/linux/sbitmap.h89
-rw-r--r--include/linux/scatterlist.h6
-rw-r--r--include/linux/sched.h20
-rw-r--r--include/linux/sched/cpufreq.h6
-rw-r--r--include/linux/sched/isolation.h4
-rw-r--r--include/linux/sched/mm.h2
-rw-r--r--include/linux/sched/smt.h20
-rw-r--r--include/linux/sched/stat.h2
-rw-r--r--include/linux/sched/topology.h17
-rw-r--r--include/linux/sfp.h2
-rw-r--r--include/linux/signal.h4
-rw-r--r--include/linux/skbuff.h196
-rw-r--r--include/linux/skmsg.h9
-rw-r--r--include/linux/socket.h10
-rw-r--r--include/linux/spi/pxa2xx_spi.h1
-rw-r--r--include/linux/spi/spi-mem.h84
-rw-r--r--include/linux/spi/spi.h5
-rw-r--r--include/linux/srcu.h79
-rw-r--r--include/linux/srcutiny.h24
-rw-r--r--include/linux/srcutree.h8
-rw-r--r--include/linux/sunrpc/xdr.h1
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/swiotlb.h77
-rw-r--r--include/linux/syscalls.h29
-rw-r--r--include/linux/sysfs.h8
-rw-r--r--include/linux/t10-pi.h9
-rw-r--r--include/linux/tcp.h1
-rw-r--r--include/linux/thinkpad_acpi.h16
-rw-r--r--include/linux/time32.h25
-rw-r--r--include/linux/timekeeping.h14
-rw-r--r--include/linux/timekeeping32.h15
-rw-r--r--include/linux/trace_events.h8
-rw-r--r--include/linux/tracehook.h4
-rw-r--r--include/linux/tracepoint.h8
-rw-r--r--include/linux/tty.h1
-rw-r--r--include/linux/types.h4
-rw-r--r--include/linux/udp.h26
-rw-r--r--include/linux/uio.h5
-rw-r--r--include/linux/usb.h4
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/linux/xarray.h321
-rw-r--r--include/media/cec.h1
-rw-r--r--include/media/davinci/vpbe.h4
-rw-r--r--include/media/media-request.h2
-rw-r--r--include/media/mpeg2-ctrls.h86
-rw-r--r--include/media/rc-map.h1
-rw-r--r--include/media/v4l2-common.h5
-rw-r--r--include/media/v4l2-ctrls.h6
-rw-r--r--include/media/v4l2-dev.h13
-rw-r--r--include/media/v4l2-ioctl.h33
-rw-r--r--include/media/v4l2-mem2mem.h2
-rw-r--r--include/media/v4l2-subdev.h6
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/net/act_api.h30
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/af_rxrpc.h3
-rw-r--r--include/net/cfg80211.h282
-rw-r--r--include/net/devlink.h4
-rw-r--r--include/net/dsa.h3
-rw-r--r--include/net/flow.h2
-rw-r--r--include/net/flow_dissector.h6
-rw-r--r--include/net/gen_stats.h2
-rw-r--r--include/net/geneve.h6
-rw-r--r--include/net/gre.h13
-rw-r--r--include/net/icmp.h2
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/inet6_hashtables.h5
-rw-r--r--include/net/inet_common.h9
-rw-r--r--include/net/inet_hashtables.h25
-rw-r--r--include/net/inet_sock.h21
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_tunnels.h20
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/l3mdev.h22
-rw-r--r--include/net/mac80211.h25
-rw-r--r--include/net/neighbour.h56
-rw-r--r--include/net/netfilter/br_netfilter.h14
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h5
-rw-r--r--include/net/netfilter/nf_conntrack_acct.h6
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h7
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h3
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h39
-rw-r--r--include/net/netfilter/nf_conntrack_timestamp.h13
-rw-r--r--include/net/netfilter/nf_flow_table.h4
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h7
-rw-r--r--include/net/netfilter/nf_nat_l4proto.h78
-rw-r--r--include/net/netns/conntrack.h6
-rw-r--r--include/net/netns/ipv4.h3
-rw-r--r--include/net/netns/xfrm.h2
-rw-r--r--include/net/pkt_cls.h108
-rw-r--r--include/net/protocol.h9
-rw-r--r--include/net/raw.h14
-rw-r--r--include/net/rtnetlink.h3
-rw-r--r--include/net/sch_generic.h27
-rw-r--r--include/net/sctp/constants.h2
-rw-r--r--include/net/sctp/sctp.h21
-rw-r--r--include/net/sctp/sm.h4
-rw-r--r--include/net/sctp/structs.h12
-rw-r--r--include/net/sctp/ulpevent.h39
-rw-r--r--include/net/seg6.h1
-rw-r--r--include/net/sock.h45
-rw-r--r--include/net/switchdev.h106
-rw-r--r--include/net/tcp.h51
-rw-r--r--include/net/tls.h15
-rw-r--r--include/net/udp.h58
-rw-r--r--include/net/udp_tunnel.h10
-rw-r--r--include/net/vxlan.h17
-rw-r--r--include/net/xfrm.h47
-rw-r--r--include/scsi/scsi_cmnd.h6
-rw-r--r--include/scsi/scsi_dh.h2
-rw-r--r--include/scsi/scsi_driver.h3
-rw-r--r--include/scsi/scsi_host.h18
-rw-r--r--include/scsi/scsi_tcq.h14
-rw-r--r--include/soc/fsl/dpaa2-io.h4
-rw-r--r--include/soc/fsl/qman.h8
-rw-r--r--include/soc/tegra/pmc.h2
-rw-r--r--include/sound/compress_driver.h19
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/sound/hda_component.h11
-rw-r--r--include/sound/hdaudio.h14
-rw-r--r--include/sound/pcm_params.h4
-rw-r--r--include/sound/simple_card_utils.h6
-rw-r--r--include/sound/soc-acpi-intel-match.h1
-rw-r--r--include/sound/soc-acpi.h15
-rw-r--r--include/sound/soc.h16
-rw-r--r--include/trace/events/bcache.h27
-rw-r--r--include/trace/events/btrfs.h4
-rw-r--r--include/trace/events/ext4.h20
-rw-r--r--include/trace/events/filelock.h16
-rw-r--r--include/trace/events/kyber.h8
-rw-r--r--include/trace/events/net.h59
-rw-r--r--include/trace/events/objagg.h228
-rw-r--r--include/trace/events/rxrpc.h2
-rw-r--r--include/trace/events/sched.h12
-rw-r--r--include/uapi/asm-generic/Kbuild.asm1
-rw-r--r--include/uapi/asm-generic/unistd.h8
-rw-r--r--include/uapi/drm/amdgpu_drm.h6
-rw-r--r--include/uapi/drm/drm_fourcc.h15
-rw-r--r--include/uapi/drm/drm_mode.h19
-rw-r--r--include/uapi/drm/i915_drm.h8
-rw-r--r--include/uapi/drm/msm_drm.h25
-rw-r--r--include/uapi/drm/v3d_drm.h39
-rw-r--r--include/uapi/drm/virtgpu_drm.h13
-rw-r--r--include/uapi/linux/aio_abi.h2
-rw-r--r--include/uapi/linux/blkzoned.h4
-rw-r--r--include/uapi/linux/bpf.h236
-rw-r--r--include/uapi/linux/btf.h38
-rw-r--r--include/uapi/linux/btrfs.h1
-rw-r--r--include/uapi/linux/btrfs_tree.h1
-rw-r--r--include/uapi/linux/cryptouser.h102
-rw-r--r--include/uapi/linux/devlink.h5
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/ethtool.h4
-rw-r--r--include/uapi/linux/fanotify.h2
-rw-r--r--include/uapi/linux/hash_info.h2
-rw-r--r--include/uapi/linux/if_bridge.h21
-rw-r--r--include/uapi/linux/if_link.h19
-rw-r--r--include/uapi/linux/if_tun.h1
-rw-r--r--include/uapi/linux/if_tunnel.h20
-rw-r--r--include/uapi/linux/in.h10
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/kfd_ioctl.h44
-rw-r--r--include/uapi/linux/kvm.h19
-rw-r--r--include/uapi/linux/ncsi.h15
-rw-r--r--include/uapi/linux/neighbour.h1
-rw-r--r--include/uapi/linux/net_namespace.h2
-rw-r--r--include/uapi/linux/net_tstamp.h4
-rw-r--r--include/uapi/linux/netfilter.h4
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h19
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h4
-rw-r--r--include/uapi/linux/netfilter_bridge.h4
-rw-r--r--include/uapi/linux/netfilter_decnet.h10
-rw-r--r--include/uapi/linux/netfilter_ipv4.h28
-rw-r--r--include/uapi/linux/netfilter_ipv6.h29
-rw-r--r--include/uapi/linux/netlink.h2
-rw-r--r--include/uapi/linux/nl80211.h458
-rw-r--r--include/uapi/linux/pkt_cls.h7
-rw-r--r--include/uapi/linux/pkt_sched.h30
-rw-r--r--include/uapi/linux/prctl.h9
-rw-r--r--include/uapi/linux/ptp_clock.h12
-rw-r--r--include/uapi/linux/sctp.h16
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/tcp.h1
-rw-r--r--include/uapi/linux/udp.h1
-rw-r--r--include/uapi/linux/v4l2-common.h28
-rw-r--r--include/uapi/linux/v4l2-controls.h67
-rw-r--r--include/uapi/linux/vfio.h42
-rw-r--r--include/uapi/linux/videodev2.h8
-rw-r--r--include/uapi/linux/virtio_config.h3
-rw-r--r--include/uapi/linux/virtio_gpu.h18
-rw-r--r--include/uapi/linux/virtio_ring.h52
-rw-r--r--include/uapi/sound/firewire.h20
-rw-r--r--include/video/imx-ipu-v3.h9
-rw-r--r--include/xen/balloon.h5
-rw-r--r--include/xen/interface/hvm/start_info.h63
-rw-r--r--include/xen/xen-front-pgdir-shbuf.h89
-rw-r--r--include/xen/xen-ops.h12
-rw-r--r--include/xen/xen.h3
471 files changed, 12007 insertions, 4379 deletions
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 3a26aa7ead23..6db9a6d40c85 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -73,7 +73,8 @@
#define ACPI_LV_RESOURCES 0x00010000
#define ACPI_LV_USER_REQUESTS 0x00020000
#define ACPI_LV_PACKAGE 0x00040000
-#define ACPI_LV_VERBOSITY1 0x0007FF40 | ACPI_LV_ALL_EXCEPTIONS
+#define ACPI_LV_EVALUATION 0x00080000
+#define ACPI_LV_VERBOSITY1 0x000FFF40 | ACPI_LV_ALL_EXCEPTIONS
/* Trace verbosity level 2 [Function tracing and memory allocation] */
@@ -141,6 +142,7 @@
#define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS)
#define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS)
#define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE)
+#define ACPI_DB_EVALUATION ACPI_DEBUG_LEVEL (ACPI_LV_EVALUATION)
#define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX)
#define ACPI_DB_EVENTS ACPI_DEBUG_LEVEL (ACPI_LV_EVENTS)
@@ -148,7 +150,7 @@
/* Defaults for debug_level, debug and normal */
-#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR)
+#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 14499757338f..de1804aeaf69 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -88,7 +88,14 @@ int acpi_pci_link_free_irq(acpi_handle handle);
struct pci_bus;
+#ifdef CONFIG_PCI
struct pci_dev *acpi_get_pci_dev(acpi_handle);
+#else
+static inline struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
+{
+ return NULL;
+}
+#endif
/* Arch-defined function to add a bus to the system */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 0c19b68bf060..7aa38b648564 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20181003
+#define ACPI_CA_VERSION 0x20181213
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 517addd6b11d..0a977eca0a74 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -38,6 +38,7 @@
#define ACPI_SIG_XSDT "XSDT" /* Extended System Description Table */
#define ACPI_SIG_SSDT "SSDT" /* Secondary System Description Table */
#define ACPI_RSDP_NAME "RSDP" /* Short name for RSDP, not signature */
+#define ACPI_OEM_NAME "OEM" /* Short name for OEM, not signature */
/*
* All tables and structures must be byte-packed to match the ACPI
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 501f341d1d92..ea1ca49c9c1b 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -365,6 +365,29 @@ struct acpi_table_tcpa_server {
*
******************************************************************************/
+/* Revision 3 */
+
+struct acpi_table_tpm23 {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved;
+ u64 control_address;
+ u32 start_method;
+};
+
+/* Value for start_method above */
+
+#define ACPI_TPM23_ACPI_START_METHOD 2
+
+/*
+ * Optional trailer for revision 3. If start method is 2, there is a 4 byte
+ * reserved area of all zeros.
+ */
+struct acpi_tmp23_trailer {
+ u32 reserved;
+};
+
+/* Revision 4 */
+
struct acpi_table_tpm2 {
struct acpi_table_header header; /* Common ACPI table header */
u16 platform_class;
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 66ceb12ebc63..2590627dbfcc 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -527,6 +527,10 @@ typedef u64 acpi_integer;
#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+/* Support for OEMx signature (x can be any character) */
+#define ACPI_IS_OEM_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_OEM_NAME, 3) &&\
+ strnlen (a, ACPI_NAME_SIZE) == ACPI_NAME_SIZE)
+
/*
* Algorithm to obtain access bit width.
* Can be used with access_width of struct acpi_generic_address and access_size of
@@ -1273,6 +1277,8 @@ typedef enum {
#define ACPI_OSI_WIN_10_RS1 0x0E
#define ACPI_OSI_WIN_10_RS2 0x0F
#define ACPI_OSI_WIN_10_RS3 0x10
+#define ACPI_OSI_WIN_10_RS4 0x11
+#define ACPI_OSI_WIN_10_RS5 0x12
/* Definitions of getopt */
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index cf59e6210d27..4f34734e7f36 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -142,5 +142,8 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern int acpi_get_psd_map(struct cppc_cpudata **);
extern unsigned int cppc_get_transition_latency(int cpu);
+extern bool cpc_ffh_supported(void);
+extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
+extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
#endif /* _CPPC_ACPI_H*/
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 7451b3bca83a..e3d21d014fcc 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -33,6 +33,10 @@
/* Kernel specific ACPICA configuration */
+#ifdef CONFIG_PCI
+#define ACPI_PCI_CONFIGURED
+#endif
+
#ifdef CONFIG_ACPI_REDUCED_HARDWARE_ONLY
#define ACPI_REDUCED_HARDWARE 1
#endif
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 89f3b03b1445..e3667c9a33a5 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -3,7 +3,7 @@
#define _4LEVEL_FIXUP_H
#define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
#define PUD_SHIFT PGDIR_SHIFT
#define PUD_SIZE PGDIR_SIZE
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 9c2e0708eb82..bb6cb347018c 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -3,7 +3,7 @@
#define _5LEVEL_FIXUP_H
#define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
#define P4D_SHIFT PGDIR_SHIFT
#define P4D_SIZE PGDIR_SIZE
@@ -26,6 +26,7 @@
#define p4d_clear(p4d) pgd_clear(p4d)
#define p4d_val(p4d) pgd_val(p4d)
#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
+#define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud)
#define p4d_page(p4d) pgd_page(p4d)
#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index cdafa5edea49..20561a60db9c 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -17,8 +17,10 @@
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
-struct bug_entry {
+#ifdef CONFIG_BUG
+
#ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
unsigned long bug_addr;
#else
@@ -33,10 +35,8 @@ struct bug_entry {
unsigned short line;
#endif
unsigned short flags;
-#endif /* CONFIG_GENERIC_BUG */
};
-
-#ifdef CONFIG_BUG
+#endif /* CONFIG_GENERIC_BUG */
/*
* Don't use BUG() or BUG_ON() unless there's really no way out; one
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 880a292d792f..c13f46109e88 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -4,7 +4,7 @@
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return &dma_direct_ops;
+ return NULL;
}
#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 827e4d3bbc7a..8cc7b09c1bc7 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -16,6 +16,7 @@
#define __ASM_GENERIC_FIXMAP_H
#include <linux/bug.h>
+#include <linux/mm_types.h>
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 0c34215263b8..829bdb0d6327 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -5,7 +5,7 @@
#ifndef __ASSEMBLY__
#include <asm-generic/5level-fixup.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a pgd gets the size right, and allows
@@ -31,6 +31,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
#define pgd_populate(mm, pgd, pud) do { } while (0)
+#define pgd_populate_safe(mm, pgd, pud) do { } while (0)
/*
* (puds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 1a29b2a0282b..aebab905e6cd 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -4,7 +4,7 @@
#ifndef __ASSEMBLY__
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
typedef struct { pgd_t pgd; } p4d_t;
@@ -26,6 +26,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd))
#define pgd_populate(mm, pgd, p4d) do { } while (0)
+#define pgd_populate_safe(mm, pgd, p4d) do { } while (0)
/*
* (p4ds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index f35f6e8149e4..b85b8271a73d 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -8,7 +8,7 @@
struct mm_struct;
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
/*
* Having the pmd type consist of a pud gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index e950b9c50f34..c77a1d301155 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -9,7 +9,7 @@
#else
#include <asm-generic/pgtable-nop4d.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a p4d gets the size right, and allows
@@ -35,6 +35,7 @@ static inline void p4d_clear(p4d_t *p4d) { }
#define pud_ERROR(pud) (p4d_ERROR((pud).p4d))
#define p4d_populate(mm, p4d, pud) do { } while (0)
+#define p4d_populate_safe(mm, p4d, pud) do { } while (0)
/*
* (puds are folded into p4ds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 5657a20e0c59..a9cac82e9a7a 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -375,7 +375,6 @@ static inline int pte_unused(pte_t pte)
#endif
#ifndef __HAVE_ARCH_PMD_SAME
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return pmd_val(pmd_a) == pmd_val(pmd_b);
@@ -385,21 +384,60 @@ static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
return pud_val(pud_a) == pud_val(pud_b);
}
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+#endif
+
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
- BUILD_BUG();
- return 0;
+ return p4d_val(p4d_a) == p4d_val(p4d_b);
}
+#endif
-static inline int pud_same(pud_t pud_a, pud_t pud_b)
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
- BUILD_BUG();
- return 0;
+ return pgd_val(pgd_a) == pgd_val(pgd_b);
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+ WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+ set_pte(ptep, pte); \
+})
+
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+ WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+ set_pmd(pmdp, pmd); \
+})
+
+#define set_pud_safe(pudp, pud) \
+({ \
+ WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+ set_pud(pudp, pud); \
+})
+
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
+
#ifndef __HAVE_ARCH_DO_SWAP_PAGE
/*
* Some architectures support metadata associated with a page. When a
@@ -1127,4 +1165,20 @@ static inline bool arch_has_pfn_modify_check(void)
#endif
#endif
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
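Example (not part of the commit): a minimal kernel-context sketch of the set_p*_safe() helpers added above, for the case the new comment describes — the entry being set is known to be either non-present or identical to the new value, so no TLB flush follows. The helper name repopulate_pte and its use are hypothetical.

/*
 * Illustrative only; assumes kernel context. set_pte_safe() WARNs (and
 * still performs the set) if a different, present entry was already
 * installed -- exactly the situation the comment above says to avoid.
 */
static void repopulate_pte(pte_t *ptep, unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	set_pte_safe(ptep, pte);	/* no flush_tlb_*() needed afterwards */
}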
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 22e6f412c595..a3e766dff917 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -234,34 +234,6 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
-static inline void crypto_stat_compress(struct acomp_req *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->compress_cnt);
- atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->decompress_cnt);
- atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen);
- }
-#endif
-}
-
/**
* crypto_acomp_compress() -- Invoke asynchronous compress operation
*
@@ -274,10 +246,13 @@ static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
static inline int crypto_acomp_compress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int slen = req->slen;
int ret;
+ crypto_stats_get(alg);
ret = tfm->compress(req);
- crypto_stat_compress(req, ret);
+ crypto_stats_compress(slen, ret, alg);
return ret;
}
@@ -293,10 +268,13 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int slen = req->slen;
int ret;
+ crypto_stats_get(alg);
ret = tfm->decompress(req);
- crypto_stat_decompress(req, ret);
+ crypto_stats_decompress(slen, ret, alg);
return ret;
}
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 0d765d7bfb82..9ad595f97c65 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -115,7 +115,6 @@ struct aead_request {
* @setkey: see struct skcipher_alg
* @encrypt: see struct skcipher_alg
* @decrypt: see struct skcipher_alg
- * @geniv: see struct skcipher_alg
* @ivsize: see struct skcipher_alg
* @chunksize: see struct skcipher_alg
* @init: Initialize the cryptographic transformation object. This function
@@ -142,8 +141,6 @@ struct aead_alg {
int (*init)(struct crypto_aead *tfm);
void (*exit)(struct crypto_aead *tfm);
- const char *geniv;
-
unsigned int ivsize;
unsigned int maxauthsize;
unsigned int chunksize;
@@ -306,34 +303,6 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
return __crypto_aead_cast(req->base.tfm);
}
-static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
- atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
- atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen);
- }
-#endif
-}
-
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
@@ -356,13 +325,16 @@ static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
static inline int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_alg *alg = aead->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+ crypto_stats_get(alg);
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = crypto_aead_alg(aead)->encrypt(req);
- crypto_stat_aead_encrypt(req, ret);
+ crypto_stats_aead_encrypt(cryptlen, alg, ret);
return ret;
}
@@ -391,15 +363,18 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_alg *alg = aead->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+ crypto_stats_get(alg);
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else if (req->cryptlen < crypto_aead_authsize(aead))
ret = -EINVAL;
else
ret = crypto_aead_alg(aead)->decrypt(req);
- crypto_stat_aead_decrypt(req, ret);
+ crypto_stats_aead_decrypt(cryptlen, alg, ret);
return ret;
}
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index afac71119396..2d690494568c 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -271,62 +271,6 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
return alg->max_size(tfm);
}
-static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
- atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
- atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_akcipher_sign(struct akcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->sign_cnt);
-#endif
-}
-
-static inline void crypto_stat_akcipher_verify(struct akcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->verify_cnt);
-#endif
-}
-
/**
* crypto_akcipher_encrypt() - Invoke public key encrypt operation
*
@@ -341,10 +285,13 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ unsigned int src_len = req->src_len;
int ret;
+ crypto_stats_get(calg);
ret = alg->encrypt(req);
- crypto_stat_akcipher_encrypt(req, ret);
+ crypto_stats_akcipher_encrypt(src_len, ret, calg);
return ret;
}
@@ -362,10 +309,13 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ unsigned int src_len = req->src_len;
int ret;
+ crypto_stats_get(calg);
ret = alg->decrypt(req);
- crypto_stat_akcipher_decrypt(req, ret);
+ crypto_stats_akcipher_decrypt(src_len, ret, calg);
return ret;
}
@@ -383,10 +333,12 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(calg);
ret = alg->sign(req);
- crypto_stat_akcipher_sign(req, ret);
+ crypto_stats_akcipher_sign(ret, calg);
return ret;
}
@@ -404,10 +356,12 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(calg);
ret = alg->verify(req);
- crypto_stat_akcipher_verify(req, ret);
+ crypto_stats_akcipher_verify(ret, calg);
return ret;
}
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
new file mode 100644
index 000000000000..1fc70a69d550
--- /dev/null
+++ b/include/crypto/chacha.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values and helper functions for the ChaCha and XChaCha stream ciphers.
+ *
+ * XChaCha extends ChaCha's nonce to 192 bits, while provably retaining ChaCha's
+ * security. Here they share the same key size, tfm context, and setkey
+ * function; only their IV size and encrypt/decrypt function differ.
+ *
+ * The ChaCha paper specifies 20, 12, and 8-round variants. In general, it is
+ * recommended to use the 20-round variant ChaCha20. However, the other
+ * variants can be needed in some performance-sensitive scenarios. The generic
+ * ChaCha code currently allows only the 20 and 12-round variants.
+ */
+
+#ifndef _CRYPTO_CHACHA_H
+#define _CRYPTO_CHACHA_H
+
+#include <crypto/skcipher.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
+#define CHACHA_IV_SIZE 16
+
+#define CHACHA_KEY_SIZE 32
+#define CHACHA_BLOCK_SIZE 64
+#define CHACHAPOLY_IV_SIZE 12
+
+/* 192-bit nonce, then 64-bit stream position */
+#define XCHACHA_IV_SIZE 32
+
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+void chacha_block(u32 *state, u8 *stream, int nrounds);
+static inline void chacha20_block(u32 *state, u8 *stream)
+{
+ chacha_block(state, stream, 20);
+}
+void hchacha_block(const u32 *in, u32 *out, int nrounds);
+
+void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv);
+
+int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize);
+int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize);
+
+int crypto_chacha_crypt(struct skcipher_request *req);
+int crypto_xchacha_crypt(struct skcipher_request *req);
+
+#endif /* _CRYPTO_CHACHA_H */
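Example (not part of the commit): a sketch of driving the generalized ChaCha interface above to produce a single 64-byte keystream block. It assumes a kernel context and that ctx->key[] has already been filled with the eight 32-bit key words; the function name is hypothetical, and only the declarations from the new header are used.

static void chacha20_one_block(struct chacha_ctx *ctx,
			       u8 *iv /* CHACHA_IV_SIZE bytes */,
			       u8 stream[CHACHA_BLOCK_SIZE])
{
	u32 state[16];			/* standard 16-word ChaCha state */

	ctx->nrounds = 20;		/* generic code supports 20 or 12 rounds */
	crypto_chacha_init(state, ctx, iv);	/* iv = 4-byte counter + 12-byte nonce */
	chacha20_block(state, stream);	/* inline wrapper around chacha_block(..., 20) */
}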
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
deleted file mode 100644
index f76302d99e2b..000000000000
--- a/include/crypto/chacha20.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Common values for the ChaCha20 algorithm
- */
-
-#ifndef _CRYPTO_CHACHA20_H
-#define _CRYPTO_CHACHA20_H
-
-#include <crypto/skcipher.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-
-#define CHACHA20_IV_SIZE 16
-#define CHACHA20_KEY_SIZE 32
-#define CHACHA20_BLOCK_SIZE 64
-
-struct chacha20_ctx {
- u32 key[8];
-};
-
-void chacha20_block(u32 *state, u8 *stream);
-void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
-int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize);
-int crypto_chacha20_crypt(struct skcipher_request *req);
-
-#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index bc7796600338..3b31c1b349ae 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -412,32 +412,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
-static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
- else
- atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
-#endif
-}
-
-static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->hash_cnt);
- atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
- }
-#endif
-}
-
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
@@ -552,10 +526,14 @@ static inline int crypto_ahash_init(struct ahash_request *req)
*/
static inline int crypto_ahash_update(struct ahash_request *req)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+ crypto_stats_get(alg);
ret = crypto_ahash_reqtfm(req)->update(req);
- crypto_stat_ahash_update(req, ret);
+ crypto_stats_ahash_update(nbytes, ret, alg);
return ret;
}
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
index 56f217d41f12..91786b68dbdb 100644
--- a/include/crypto/hash_info.h
+++ b/include/crypto/hash_info.h
@@ -15,6 +15,7 @@
#include <crypto/sha.h>
#include <crypto/md5.h>
+#include <crypto/streebog.h>
#include <uapi/linux/hash_info.h>
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
index 8db299c25566..40623f4457df 100644
--- a/include/crypto/internal/cryptouser.h
+++ b/include/crypto/internal/cryptouser.h
@@ -3,6 +3,11 @@
struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
-int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb);
+#ifdef CONFIG_CRYPTO_STATS
int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
-int crypto_dump_reportstat_done(struct netlink_callback *cb);
+#else
+static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs)
+{
+ return -ENOTSUPP;
+}
+#endif
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index e42f7063f245..453e867b4bd9 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -70,8 +70,6 @@ struct skcipher_walk {
unsigned int alignmask;
};
-extern const struct crypto_type crypto_givcipher_type;
-
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index f517ba6d3a27..1a97e1601422 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -268,42 +268,6 @@ struct kpp_secret {
unsigned short len;
};
-static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- if (ret)
- atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->setsecret_cnt);
-#endif
-}
-
-static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-
- if (ret)
- atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->generate_public_key_cnt);
-#endif
-}
-
-static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-
- if (ret)
- atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt);
-#endif
-}
-
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*
@@ -323,10 +287,12 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(calg);
ret = alg->set_secret(tfm, buffer, len);
- crypto_stat_kpp_set_secret(tfm, ret);
+ crypto_stats_kpp_set_secret(calg, ret);
return ret;
}
@@ -347,10 +313,12 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(calg);
ret = alg->generate_public_key(req);
- crypto_stat_kpp_generate_public_key(req, ret);
+ crypto_stats_kpp_generate_public_key(calg, ret);
return ret;
}
@@ -368,10 +336,12 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(calg);
ret = alg->compute_shared_secret(req);
- crypto_stat_kpp_compute_shared_secret(req, ret);
+ crypto_stats_kpp_compute_shared_secret(calg, ret);
return ret;
}
diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h
new file mode 100644
index 000000000000..53c04423c582
--- /dev/null
+++ b/include/crypto/nhpoly1305.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values and helper functions for the NHPoly1305 hash function.
+ */
+
+#ifndef _NHPOLY1305_H
+#define _NHPOLY1305_H
+
+#include <crypto/hash.h>
+#include <crypto/poly1305.h>
+
+/* NH parameterization: */
+
+/* Endianness: little */
+/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */
+
+/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */
+#define NH_PAIR_STRIDE 2
+#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32))
+
+/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */
+#define NH_NUM_PASSES 4
+#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64))
+
+/* Max message size: 1024 bytes (32x compression factor) */
+#define NH_NUM_STRIDES 64
+#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES)
+#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32))
+#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \
+ NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1))
+#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32))
+
+#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
+
+struct nhpoly1305_key {
+ struct poly1305_key poly_key;
+ u32 nh_key[NH_KEY_WORDS];
+};
+
+struct nhpoly1305_state {
+
+ /* Running total of polynomial evaluation */
+ struct poly1305_state poly_state;
+
+ /* Partial block buffer */
+ u8 buffer[NH_MESSAGE_UNIT];
+ unsigned int buflen;
+
+ /*
+ * Number of bytes remaining until the current NH message reaches
+ * NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash.
+ */
+ unsigned int nh_remaining;
+
+ __le64 nh_hash[NH_NUM_PASSES];
+};
+
+typedef void (*nh_t)(const u32 *key, const u8 *message, size_t message_len,
+ __le64 hash[NH_NUM_PASSES]);
+
+int crypto_nhpoly1305_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen);
+
+int crypto_nhpoly1305_init(struct shash_desc *desc);
+int crypto_nhpoly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen);
+int crypto_nhpoly1305_update_helper(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen,
+ nh_t nh_fn);
+int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst);
+int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst,
+ nh_t nh_fn);
+
+#endif /* _NHPOLY1305_H */
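Example (not part of the commit): a sketch of how an architecture-specific NHPoly1305 implementation might plug a vectorized NH routine into the helpers declared above. nh_simd and the two shash callbacks are hypothetical names; only the nh_t typedef and the *_helper() entry points come from the header.

/* Hypothetical accelerated NH routine with the nh_t signature from the header. */
static void nh_simd(const u32 *key, const u8 *message, size_t message_len,
		    __le64 hash[NH_NUM_PASSES]);

static int nhpoly1305_simd_update(struct shash_desc *desc,
				  const u8 *src, unsigned int srclen)
{
	return crypto_nhpoly1305_update_helper(desc, src, srclen, nh_simd);
}

static int nhpoly1305_simd_final(struct shash_desc *desc, u8 *dst)
{
	return crypto_nhpoly1305_final_helper(desc, dst, nh_simd);
}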
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index f718a19da82f..34317ed2071e 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -13,13 +13,21 @@
#define POLY1305_KEY_SIZE 32
#define POLY1305_DIGEST_SIZE 16
+struct poly1305_key {
+ u32 r[5]; /* key, base 2^26 */
+};
+
+struct poly1305_state {
+ u32 h[5]; /* accumulator, base 2^26 */
+};
+
struct poly1305_desc_ctx {
/* key */
- u32 r[5];
+ struct poly1305_key r;
/* finalize key */
u32 s[4];
/* accumulator */
- u32 h[5];
+ struct poly1305_state h;
/* partial buffer */
u8 buf[POLY1305_BLOCK_SIZE];
/* bytes used in partial buffer */
@@ -30,6 +38,22 @@ struct poly1305_desc_ctx {
bool sset;
};
+/*
+ * Poly1305 core functions. These implement the ε-almost-∆-universal hash
+ * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce
+ * ("s key") at the end. They also only support block-aligned inputs.
+ */
+void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key);
+static inline void poly1305_core_init(struct poly1305_state *state)
+{
+ memset(state->h, 0, sizeof(state->h));
+}
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_key *key,
+ const void *src, unsigned int nblocks);
+void poly1305_core_emit(const struct poly1305_state *state, void *dst);
+
+/* Crypto API helper functions for the Poly1305 MAC */
int crypto_poly1305_init(struct shash_desc *desc);
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen);
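Example (not part of the commit): a sketch of the block-aligned "core" flow the new comment describes — the ε-almost-∆-universal hash on its own, without the final encrypted nonce ("s key"). Kernel context assumed; the wrapper name poly1305_core_digest is hypothetical, and only the functions declared above are called.

static void poly1305_core_digest(const u8 raw_r[POLY1305_BLOCK_SIZE],
				 const void *src, unsigned int nblocks,
				 u8 out[POLY1305_DIGEST_SIZE])
{
	struct poly1305_key key;
	struct poly1305_state state;

	poly1305_core_setkey(&key, raw_r);	/* r part of the key, base 2^26 */
	poly1305_core_init(&state);
	poly1305_core_blocks(&state, &key, src, nblocks);	/* block-aligned input only */
	poly1305_core_emit(&state, out);	/* no "s key" added at the end */
}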
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 6d258f5b68f1..022a1b896b47 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -122,29 +122,6 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
-static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->seed_cnt);
-#endif
-}
-
-static inline void crypto_stat_rng_generate(struct crypto_rng *tfm,
- unsigned int dlen, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->generate_cnt);
- atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen);
- }
-#endif
-}
-
/**
* crypto_rng_generate() - get random number
* @tfm: cipher handle
@@ -163,10 +140,12 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
+ struct crypto_alg *alg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(alg);
ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
- crypto_stat_rng_generate(tfm, dlen, ret);
+ crypto_stats_rng_generate(alg, dlen, ret);
return ret;
}
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 925f547cdcfa..e555294ed77f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -39,19 +39,6 @@ struct skcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-/**
- * struct skcipher_givcrypt_request - Crypto request with IV generation
- * @seq: Sequence number for IV generation
- * @giv: Space for generated IV
- * @creq: The crypto request itself
- */
-struct skcipher_givcrypt_request {
- u64 seq;
- u8 *giv;
-
- struct ablkcipher_request creq;
-};
-
struct crypto_skcipher {
int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
@@ -486,32 +473,6 @@ static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
return container_of(tfm, struct crypto_sync_skcipher, base);
}
-static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req,
- int ret, struct crypto_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&alg->cipher_err_cnt);
- } else {
- atomic_inc(&alg->encrypt_cnt);
- atomic64_add(req->cryptlen, &alg->encrypt_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
- int ret, struct crypto_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&alg->cipher_err_cnt);
- } else {
- atomic_inc(&alg->decrypt_cnt);
- atomic64_add(req->cryptlen, &alg->decrypt_tlen);
- }
-#endif
-}
-
/**
* crypto_skcipher_encrypt() - encrypt plaintext
* @req: reference to the skcipher_request handle that holds all information
@@ -526,13 +487,16 @@ static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+ crypto_stats_get(alg);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = tfm->encrypt(req);
- crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg);
+ crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
return ret;
}
@@ -550,13 +514,16 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+ crypto_stats_get(alg);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = tfm->decrypt(req);
- crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg);
+ crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
return ret;
}
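
A hedged synchronous-caller sketch (not part of the patch) of the encrypt path that now records statistics around the request; algorithm name, key length and wait handling are illustrative.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Hedged sketch: one-shot CBC-AES encryption of a scatterlist. */
static int example_skcipher_encrypt(const u8 *key, u8 *iv,
				    struct scatterlist *sg, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	ret = crypto_skcipher_setkey(tfm, key, 16);
	if (!ret) {
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, sg, sg, len, iv);
		/* crypto_skcipher_encrypt() also updates the new stats counters */
		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	}

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return ret;
}
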
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
new file mode 100644
index 000000000000..4af119f7e07b
--- /dev/null
+++ b/include/crypto/streebog.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause */
+/*
+ * Copyright (c) 2013 Alexey Degtyarev <alexey@renatasystems.org>
+ * Copyright (c) 2018 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYPTO_STREEBOG_H_
+#define _CRYPTO_STREEBOG_H_
+
+#include <linux/types.h>
+
+#define STREEBOG256_DIGEST_SIZE 32
+#define STREEBOG512_DIGEST_SIZE 64
+#define STREEBOG_BLOCK_SIZE 64
+
+struct streebog_uint512 {
+ u64 qword[8];
+};
+
+struct streebog_state {
+ u8 buffer[STREEBOG_BLOCK_SIZE];
+ struct streebog_uint512 hash;
+ struct streebog_uint512 h;
+ struct streebog_uint512 N;
+ struct streebog_uint512 Sigma;
+ size_t fillsize;
+};
+
+#endif /* !_CRYPTO_STREEBOG_H_ */
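
A hedged sketch of how the new digest sizes would typically be consumed through the generic shash API; the "streebog256" algorithm name assumes the generic implementation added alongside this header is enabled.

#include <crypto/hash.h>
#include <crypto/streebog.h>

/* Hedged sketch: one-shot Streebog-256 digest of a buffer. */
static int example_streebog256(const u8 *data, unsigned int len,
			       u8 digest[STREEBOG256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("streebog256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		ret = crypto_shash_digest(desc, data, len, digest);
	}

	crypto_free_shash(tfm);
	return ret;
}
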
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index ccb5aa8468e0..9c56412bb2cf 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -133,6 +133,7 @@ struct dw_hdmi_plat_data {
const struct dw_hdmi_phy_ops *phy_ops;
const char *phy_name;
void *phy_data;
+ unsigned int phy_force_vendor;
/* Synopsys PHY support */
const struct dw_hdmi_mpll_config *mpll_cfg;
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index d9c6d549f971..48a671e782ca 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -19,6 +19,13 @@ struct dw_mipi_dsi_phy_ops {
unsigned int *lane_mbps);
};
+struct dw_mipi_dsi_host_ops {
+ int (*attach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+ int (*detach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+};
+
struct dw_mipi_dsi_plat_data {
void __iomem *base;
unsigned int max_data_lanes;
@@ -27,6 +34,7 @@ struct dw_mipi_dsi_plat_data {
const struct drm_display_mode *mode);
const struct dw_mipi_dsi_phy_ops *phy_ops;
+ const struct dw_mipi_dsi_host_ops *host_ops;
void *priv_data;
};
@@ -35,10 +43,8 @@ struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev,
const struct dw_mipi_dsi_plat_data
*plat_data);
void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
-struct dw_mipi_dsi *dw_mipi_dsi_bind(struct platform_device *pdev,
- struct drm_encoder *encoder,
- const struct dw_mipi_dsi_plat_data
- *plat_data);
+int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder);
void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
+void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave);
#endif /* __DW_MIPI_DSI__ */
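
A hedged glue-driver sketch of the reworked bind flow: dw_mipi_dsi_probe() runs at platform probe time and the component bind callback now only attaches the already-probed bridge to an encoder. All example_* names are illustrative stand-ins for driver code.

/* Hedged sketch: component bind callback in a platform glue driver. */
static int example_dsi_bind(struct device *dev, struct device *master,
			    void *data)
{
	struct drm_device *drm = data;
	struct example_dsi *dsi = dev_get_drvdata(dev);	/* illustrative type */
	int ret;

	ret = example_encoder_init(drm, &dsi->encoder);	/* driver specific */
	if (ret)
		return ret;

	/* dsi->dmd was returned by dw_mipi_dsi_probe() in .probe() */
	return dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder);
}
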
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 05350424a4d3..bdb0d5548f39 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -68,7 +68,6 @@
#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_global.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_mm.h>
#include <drm/drm_os_linux.h>
@@ -110,4 +109,10 @@ static inline bool drm_can_sleep(void)
return true;
}
+#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
+#else
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
+#endif
+
#endif
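
A short hedged sketch of the intended use: the symbol is exported only when the DRM selftests are built as a module, otherwise the macro expands to nothing. The helper name is illustrative.

/* Hedged sketch: exporting an internal helper only for the selftests. */
int drm_example_internal_helper(struct drm_device *dev)
{
	/* ... normal in-kernel implementation ... */
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_example_internal_helper);
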
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 1e810e0b7664..f9b35834c45d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -265,7 +265,6 @@ struct __drm_private_objs_state {
* struct drm_atomic_state - the global state object for atomic updates
* @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
- * @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @async_update: hint for asynchronous plane update
* @planes: pointer to array of structures with per-plane data
@@ -284,6 +283,15 @@ struct drm_atomic_state {
struct kref ref;
struct drm_device *dev;
+
+ /**
+ * @allow_modeset:
+ *
+ * Allow full modeset. This is used by the ATOMIC IOCTL handler to
+ * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should
+ * never consult this flag, instead looking at the output of
+ * drm_atomic_crtc_needs_modeset().
+ */
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
bool async_update : 1;
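
A hedged sketch of the recommended pattern described in the new kernel-doc: drivers derive modeset decisions from drm_atomic_crtc_needs_modeset() on the CRTC state rather than reading allow_modeset.

/* Hedged sketch: checking for a full modeset inside an atomic_check hook. */
static int example_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_crtc_state *crtc_state)
{
	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		/* full modeset path: validate clocks, PLL settings, ... */
	}

	return 0;
}
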
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 657af7b39379..58214be3bf3d 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -31,6 +31,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_util.h>
struct drm_atomic_state;
@@ -126,6 +127,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
void drm_atomic_helper_shutdown(struct drm_device *dev);
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx);
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx);
@@ -144,51 +148,10 @@ int drm_atomic_helper_page_flip_target(
uint32_t flags,
uint32_t target,
struct drm_modeset_acquire_ctx *ctx);
-struct drm_encoder *
-drm_atomic_helper_best_encoder(struct drm_connector *connector);
-
-/* default implementations for state handling */
-void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
-struct drm_crtc_state *
-drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
-void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
-
-void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
- struct drm_plane_state *state);
-void drm_atomic_helper_plane_reset(struct drm_plane *plane);
-void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-struct drm_plane_state *
-drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
-void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-
-void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
- struct drm_connector_state *conn_state);
-void drm_atomic_helper_connector_reset(struct drm_connector *connector);
-void
-__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
- struct drm_connector_state *state);
-struct drm_connector_state *
-drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx);
-void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
-void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
- struct drm_connector_state *state);
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t size,
struct drm_modeset_acquire_ctx *ctx);
-void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
- struct drm_private_state *state);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
new file mode 100644
index 000000000000..66c92cbd8e16
--- /dev/null
+++ b/include/drm/drm_atomic_state_helper.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_plane;
+struct drm_plane_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_private_obj;
+struct drm_private_state;
+struct drm_modeset_acquire_ctx;
+struct drm_device;
+
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
+ struct drm_connector_state *conn_state);
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state);
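
The split into drm_atomic_state_helper.h is purely organizational; the helpers are still wired into the same vfunc tables as before. A hedged, minimal CRTC funcs table:

/* Hedged sketch: default state handling plugged into drm_crtc_funcs. */
static const struct drm_crtc_funcs example_crtc_funcs = {
	.reset			= drm_atomic_helper_crtc_reset,
	.destroy		= drm_crtc_cleanup,
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};
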
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 9ccad6b062f2..9be2181b3ed7 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -508,6 +508,18 @@ struct drm_connector_state {
* drm_writeback_signal_completion()
*/
struct drm_writeback_job *writeback_job;
+
+ /**
+ * @max_requested_bpc: Connector property to limit the maximum bit
+ * depth of the pixels.
+ */
+ u8 max_requested_bpc;
+
+ /**
+ * @max_bpc: Connector max_bpc based on the requested max_bpc property
+ * and the connector bpc limitations obtained from edid.
+ */
+ u8 max_bpc;
};
/**
@@ -960,6 +972,17 @@ struct drm_connector {
struct drm_property *scaling_mode_property;
/**
+ * @vrr_capable_property: Optional property to help userspace
+ * query hardware support for variable refresh rate on a connector.
+	 * Drivers can add the property to a connector by
+ * calling drm_connector_attach_vrr_capable_property().
+ *
+ * This should be updated only by calling
+ * drm_connector_set_vrr_capable_property().
+ */
+ struct drm_property *vrr_capable_property;
+
+ /**
* @content_protection_property: DRM ENUM property for content
* protection. See drm_connector_attach_content_protection_property().
*/
@@ -973,6 +996,12 @@ struct drm_connector {
*/
struct drm_property_blob *path_blob_ptr;
+ /**
+ * @max_bpc_property: Default connector property for the max bpc to be
+ * driven out of the connector.
+ */
+ struct drm_property *max_bpc_property;
+
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1133,6 +1162,7 @@ int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
+void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
int drm_connector_attach_encoder(struct drm_connector *connector,
@@ -1192,30 +1222,6 @@ static inline void drm_connector_put(struct drm_connector *connector)
}
/**
- * drm_connector_reference - acquire a connector reference
- * @connector: DRM connector
- *
- * This is a compatibility alias for drm_connector_get() and should not be
- * used by new code.
- */
-static inline void drm_connector_reference(struct drm_connector *connector)
-{
- drm_connector_get(connector);
-}
-
-/**
- * drm_connector_unreference - release a connector reference
- * @connector: DRM connector
- *
- * This is a compatibility alias for drm_connector_put() and should not be
- * used by new code.
- */
-static inline void drm_connector_unreference(struct drm_connector *connector)
-{
- drm_connector_put(connector);
-}
-
-/**
* drm_connector_is_unregistered - has the connector been unregistered from
* userspace?
* @connector: DRM connector
@@ -1250,6 +1256,8 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev);
int drm_connector_attach_content_type_property(struct drm_connector *dev);
int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
+int drm_connector_attach_vrr_capable_property(
+ struct drm_connector *connector);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
@@ -1266,8 +1274,12 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
void drm_connector_set_link_status_property(struct drm_connector *connector,
uint64_t link_status);
+void drm_connector_set_vrr_capable_property(
+ struct drm_connector *connector, bool capable);
int drm_connector_init_panel_orientation_property(
struct drm_connector *connector, int width, int height);
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+ int min, int max);
/**
* struct drm_tile_group - Tile group metadata
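
A hedged sketch of how a driver would attach the new properties during connector setup; the 8..12 bpc range and the init function are illustrative. Once the sink's capability is known (for example after EDID parsing), the driver updates it with drm_connector_set_vrr_capable_property(connector, capable).

/* Hedged sketch: attaching max_bpc and vrr_capable at connector init. */
static int example_connector_init_properties(struct drm_connector *connector)
{
	int ret;

	ret = drm_connector_attach_max_bpc_property(connector, 8, 12);
	if (ret)
		return ret;

	return drm_connector_attach_vrr_capable_property(connector);
}
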
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b21437bc95bf..39c3900aab3c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -291,6 +291,15 @@ struct drm_crtc_state {
u32 pageflip_flags;
/**
+ * @vrr_enabled:
+ *
+ * Indicates if variable refresh rate should be enabled for the CRTC.
+ * Support for the requested vrr state will depend on driver and
+	 * hardware capability - lacking support is not treated as failure.
+ */
+ bool vrr_enabled;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 6914633037a5..d65f034843ce 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -57,12 +57,6 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
void drm_helper_resume_force_mode(struct drm_device *dev);
-int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
- struct drm_framebuffer *old_fb);
-int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb);
-
/* drm_probe_helper.c */
int drm_helper_probe_single_connector_modes(struct drm_connector
*connector, uint32_t maxX,
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
new file mode 100644
index 000000000000..4487660b26b8
--- /dev/null
+++ b/include/drm/drm_damage_helper.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ *
+ **************************************************************************/
+
+#ifndef DRM_DAMAGE_HELPER_H_
+#define DRM_DAMAGE_HELPER_H_
+
+#include <drm/drm_atomic_helper.h>
+
+/**
+ * drm_atomic_for_each_plane_damage - Iterator macro for plane damage.
+ * @iter: The iterator to advance.
+ * @rect: Returns a rectangle in fb coordinates clipped to the plane src.
+ *
+ * Note that if the first call to the iterator macro returns false, no plane
+ * update is needed. The iterator returns the full plane src when no damage
+ * clips were passed by user-space.
+ */
+#define drm_atomic_for_each_plane_damage(iter, rect) \
+ while (drm_atomic_helper_damage_iter_next(iter, rect))
+
+/**
+ * struct drm_atomic_helper_damage_iter - Closure structure for damage iterator.
+ *
+ * This structure tracks state needed to walk the list of plane damage clips.
+ */
+struct drm_atomic_helper_damage_iter {
+	/* private: Plane src rectangle in whole pixels. */
+ struct drm_rect plane_src;
+ /* private: Rectangles in plane damage blob. */
+ const struct drm_rect *clips;
+ /* private: Number of rectangles in plane damage blob. */
+ uint32_t num_clips;
+ /* private: Current clip iterator is advancing on. */
+ uint32_t curr_clip;
+	/* private: Whether a full plane update is needed. */
+ bool full_update;
+};
+
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state);
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips);
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ const struct drm_plane_state *old_state,
+ const struct drm_plane_state *new_state);
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+ struct drm_rect *rect);
+
+/**
+ * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect.
+ * @state: Plane state.
+ *
+ * Returns plane damage rectangles in internal &drm_rect. Currently &drm_rect
+ * can be obtained by simply typecasting &drm_mode_rect. This is because both
+ * are signed 32 and during drm_atomic_check_only() it is verified that damage
+ * are signed 32-bit and during drm_atomic_check_only() it is verified that damage
+ *
+ * Return: Clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_rect *
+drm_helper_get_plane_damage_clips(const struct drm_plane_state *state)
+{
+ return (struct drm_rect *)drm_plane_get_damage_clips(state);
+}
+
+#endif
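
A hedged sketch of the iterator in a plane atomic_update hook; example_flush_rect() is a stand-in for whatever driver-specific upload or flush is required.

/* Hedged sketch: walking per-plane damage clips during atomic_update. */
static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		example_flush_rect(plane, &clip);	/* driver specific */
}
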
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2a3843f248cf..5736c942c85b 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -231,6 +231,8 @@
#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */
#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
# define DP_DSC_RGB (1 << 0)
@@ -279,6 +281,8 @@
# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4)
#define DP_DSC_MAX_SLICE_WIDTH 0x06C
+#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560
+#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320
#define DP_DSC_SLICE_CAP_2 0x06D
# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0)
@@ -477,6 +481,7 @@
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
+# define DP_DECOMPRESSION_EN (1 << 0)
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
@@ -685,6 +690,8 @@
# define DP_EDP_12 0x01
# define DP_EDP_13 0x02
# define DP_EDP_14 0x03
+# define DP_EDP_14a 0x04 /* eDP 1.4a */
+# define DP_EDP_14b 0x05 /* eDP 1.4b */
#define DP_EDP_GENERAL_CAP_1 0x701
# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
@@ -905,6 +912,57 @@
#define DP_AUX_HDCP_KSV_FIFO 0x6802C
#define DP_AUX_HDCP_AINFO 0x6803B
+/* DP HDCP2.2 parameter offsets in DPCD address space */
+#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000
+#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008
+#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B
+#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215
+#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D
+#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220
+#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0
+#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0
+#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0
+#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0
+#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0
+#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8
+#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318
+#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328
+#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330
+#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332
+#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335
+#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345
+#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0
+#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0
+#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3
+#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5
+#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473
+#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493
+#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
+#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
+
+/* DP HDCP message start offsets in DPCD address space */
+#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET
+#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET
+#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \
+ DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET
+#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET
+#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET
+#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET
+#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET
+#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET DP_HDCP_2_2_REG_MPRIME_OFFSET
+
+#define HDCP_2_2_DP_RXSTATUS_LEN 1
+#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0))
+#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1))
+#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2))
+#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
+#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4))
+
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
@@ -963,6 +1021,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_DSC_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
@@ -993,6 +1052,7 @@ struct dp_sdp_header {
#define EDP_SDP_HEADER_REVISION_MASK 0x1F
#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
+#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
struct edp_vsc_psr {
struct dp_sdp_header sdp_header;
@@ -1059,6 +1119,44 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
}
+/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp);
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3]);
+
+static inline bool
+drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_DECOMPRESSION_IS_SUPPORTED;
+}
+
+static inline u16
+drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+ (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
+}
+
+static inline u32
+drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ /* Max Slicewidth = Number of Pixels * 320 */
+ return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
+ DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+
+/* Forward Error Correction Support on DP 1.4 */
+static inline bool
+drm_dp_sink_supports_fec(const u8 fec_capable)
+{
+ return fec_capable & DP_FEC_CAPABLE;
+}
+
/*
* DisplayPort AUX channel
*/
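
A hedged sketch of a source driver probing sink DSC support with the new helpers; the single AUX read of the capability block and the use of the result are illustrative.

/* Hedged sketch: reading the DSC capability block and using the helpers. */
static bool example_probe_dsc(struct drm_dp_aux *aux)
{
	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];

	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
			     sizeof(dsc_dpcd)) != sizeof(dsc_dpcd))
		return false;

	if (!drm_dp_sink_supports_dsc(dsc_dpcd))
		return false;

	/* e.g. bound the slice width during mode validation */
	return drm_dp_dsc_sink_max_slice_width(dsc_dpcd) > 0;
}
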
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7f78d26a0766..59f005b419cf 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -409,7 +409,6 @@ struct drm_dp_payload {
struct drm_dp_mst_topology_state {
struct drm_private_state base;
int avail_slots;
- struct drm_atomic_state *state;
struct drm_dp_mst_topology_mgr *mgr;
};
@@ -498,11 +497,6 @@ struct drm_dp_mst_topology_mgr {
int pbn_div;
/**
- * @state: State information for topology manager
- */
- struct drm_dp_mst_topology_state *state;
-
- /**
* @funcs: Atomic helper callbacks
*/
const struct drm_private_state_funcs *funcs;
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 3199ef70c007..35af23f5fa0d 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -471,6 +471,8 @@ struct drm_driver {
* @gem_prime_export:
*
* export GEM -> dmabuf
+ *
+ * This defaults to drm_gem_prime_export() if not set.
*/
struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
@@ -478,6 +480,8 @@ struct drm_driver {
* @gem_prime_import:
*
* import dmabuf -> GEM
+ *
+ * This defaults to drm_gem_prime_import() if not set.
*/
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
@@ -523,8 +527,10 @@ struct drm_driver {
* @dumb_map_offset:
*
* Allocate an offset in the drm device node's address space to be able to
- * memory map a dumb buffer. GEM-based drivers must use
- * drm_gem_create_mmap_offset() to implement this.
+ * memory map a dumb buffer.
+ *
+ * The default implementation is drm_gem_create_mmap_offset(). GEM based
+ * drivers must not overwrite this.
*
* Called by the user via ioctl.
*
@@ -544,6 +550,9 @@ struct drm_driver {
*
* Called by the user via ioctl.
*
+ * The default implementation is drm_gem_dumb_destroy(). GEM based drivers
+ * must not overwrite this.
+ *
* Returns:
*
* Zero on success, negative errno on failure.
@@ -621,7 +630,6 @@ void drm_dev_unregister(struct drm_device *dev);
void drm_dev_get(struct drm_device *dev);
void drm_dev_put(struct drm_device *dev);
-void drm_dev_unref(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
new file mode 100644
index 000000000000..d03f1b83421a
--- /dev/null
+++ b/include/drm/drm_dsc.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#ifndef DRM_DSC_H_
+#define DRM_DSC_H_
+
+#include <drm/drm_dp_helper.h>
+
+/* VESA Display Stream Compression DSC 1.2 constants */
+#define DSC_NUM_BUF_RANGES 15
+#define DSC_MUX_WORD_SIZE_8_10_BPC 48
+#define DSC_MUX_WORD_SIZE_12_BPC 64
+#define DSC_RC_PIXELS_PER_GROUP 3
+#define DSC_SCALE_DECREMENT_INTERVAL_MAX 4095
+#define DSC_RANGE_BPG_OFFSET_MASK 0x3f
+
+/* DSC Rate Control Constants */
+#define DSC_RC_MODEL_SIZE_CONST 8192
+#define DSC_RC_EDGE_FACTOR_CONST 6
+#define DSC_RC_TGT_OFFSET_HI_CONST 3
+#define DSC_RC_TGT_OFFSET_LO_CONST 3
+
+/* DSC PPS constants and macros */
+#define DSC_PPS_VERSION_MAJOR_SHIFT 4
+#define DSC_PPS_BPC_SHIFT 4
+#define DSC_PPS_MSB_SHIFT 8
+#define DSC_PPS_LSB_MASK (0xFF << 0)
+#define DSC_PPS_BPP_HIGH_MASK (0x3 << 8)
+#define DSC_PPS_VBR_EN_SHIFT 2
+#define DSC_PPS_SIMPLE422_SHIFT 3
+#define DSC_PPS_CONVERT_RGB_SHIFT 4
+#define DSC_PPS_BLOCK_PRED_EN_SHIFT 5
+#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK (0x3 << 8)
+#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK (0xF << 8)
+#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT 4
+#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11
+#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6
+#define DSC_PPS_NATIVE_420_SHIFT 1
+#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16
+#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0
+#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13
+
+/* Configuration for a single Rate Control model range */
+struct drm_dsc_rc_range_parameters {
+ /* Min Quantization Parameters allowed for this range */
+ u8 range_min_qp;
+ /* Max Quantization Parameters allowed for this range */
+ u8 range_max_qp;
+ /* Bits/group offset to apply to target for this group */
+ u8 range_bpg_offset;
+};
+
+struct drm_dsc_config {
+ /* Bits / component for previous reconstructed line buffer */
+ u8 line_buf_depth;
+ /* Bits per component to code (must be 8, 10, or 12) */
+ u8 bits_per_component;
+ /*
+ * Flag indicating to do RGB - YCoCg conversion
+ * and back (should be 1 for RGB input)
+ */
+ bool convert_rgb;
+ u8 slice_count;
+ /* Slice Width */
+ u16 slice_width;
+ /* Slice Height */
+ u16 slice_height;
+ /*
+ * 4:2:2 enable mode (from PPS, 4:2:2 conversion happens
+ * outside of DSC encode/decode algorithm)
+ */
+ bool enable422;
+ /* Picture Width */
+ u16 pic_width;
+ /* Picture Height */
+ u16 pic_height;
+ /* Offset to bits/group used by RC to determine QP adjustment */
+ u8 rc_tgt_offset_high;
+ /* Offset to bits/group used by RC to determine QP adjustment */
+ u8 rc_tgt_offset_low;
+ /* Bits/pixel target << 4 (ie., 4 fractional bits) */
+ u16 bits_per_pixel;
+ /*
+ * Factor to determine if an edge is present based
+ * on the bits produced
+ */
+ u8 rc_edge_factor;
+ /* Slow down incrementing once the range reaches this value */
+ u8 rc_quant_incr_limit1;
+ /* Slow down incrementing once the range reaches this value */
+ u8 rc_quant_incr_limit0;
+ /* Number of pixels to delay the initial transmission */
+ u16 initial_xmit_delay;
+	/* Number of pixels to delay the VLD on the decoder, not including SSM */
+ u16 initial_dec_delay;
+ /* Block prediction enable */
+ bool block_pred_enable;
+ /* Bits/group offset to use for first line of the slice */
+ u8 first_line_bpg_offset;
+ /* Value to use for RC model offset at slice start */
+ u16 initial_offset;
+ /* Thresholds defining each of the buffer ranges */
+ u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+ /* Parameters for each of the RC ranges */
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+ /* Total size of RC model */
+ u16 rc_model_size;
+ /* Minimum QP where flatness information is sent */
+ u8 flatness_min_qp;
+ /* Maximum QP where flatness information is sent */
+ u8 flatness_max_qp;
+ /* Initial value for scale factor */
+ u8 initial_scale_value;
+ /* Decrement scale factor every scale_decrement_interval groups */
+ u16 scale_decrement_interval;
+ /* Increment scale factor every scale_increment_interval groups */
+ u16 scale_increment_interval;
+ /* Non-first line BPG offset to use */
+ u16 nfl_bpg_offset;
+ /* BPG offset used to enforce slice bit */
+ u16 slice_bpg_offset;
+ /* Final RC linear transformation offset value */
+ u16 final_offset;
+ /* Enable on-off VBR (ie., disable stuffing bits) */
+ bool vbr_enable;
+ /* Mux word size (in bits) for SSM mode */
+ u8 mux_word_size;
+ /*
+ * The (max) size in bytes of the "chunks" that are
+ * used in slice multiplexing
+ */
+ u16 slice_chunk_size;
+	/* Rate Control buffer size in bits */
+ u16 rc_bits;
+ /* DSC Minor Version */
+ u8 dsc_version_minor;
+ /* DSC Major version */
+ u8 dsc_version_major;
+ /* Native 4:2:2 support */
+ bool native_422;
+ /* Native 4:2:0 support */
+ bool native_420;
+	/* Additional bits/group for the second line of a slice in native 4:2:0 */
+	u8 second_line_bpg_offset;
+	/* Number of bits deallocated for each group not in the second line of a slice */
+	u16 nsl_bpg_offset;
+	/* Offset adjustment for the second line in native 4:2:0 mode */
+ u16 second_line_offset_adj;
+};
+
+/**
+ * struct drm_dsc_picture_parameter_set - Represents 128 bytes of Picture Parameter Set
+ *
+ * The VESA DSC standard defines picture parameter set (PPS) which display
+ * stream compression encoders must communicate to decoders.
+ * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in
+ * this structure are as per Table 4.1 in Vesa DSC specification v1.1/v1.2.
+ * The PPS fields that span over more than a byte should be stored in Big Endian
+ * format.
+ */
+struct drm_dsc_picture_parameter_set {
+ /**
+ * @dsc_version:
+ * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC
+ * PPS0[7:4] - dsc_version_major: Contains major version of DSC
+ */
+ u8 dsc_version;
+ /**
+ * @pps_identifier:
+ * PPS1[7:0] - Application specific identifier that can be
+ * used to differentiate between different PPS tables.
+ */
+ u8 pps_identifier;
+ /**
+ * @pps_reserved:
+ * PPS2[7:0]- RESERVED Byte
+ */
+ u8 pps_reserved;
+ /**
+ * @pps_3:
+ * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to
+ * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits,
+ * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits,
+	 * 0xE - 14 bits for DSC 1.2, 0xF - 14 bits for DSC 1.2).
+ * PPS3[7:4] - bits_per_component: Bits per component for the original
+ * pixels of the encoded picture.
+ * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2)
+ * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also
+ * allowed only when dsc_minor_version = 0x2)
+ */
+ u8 pps_3;
+ /**
+ * @pps_4:
+ * PPS4[1:0] -These are the most significant 2 bits of
+ * compressed BPP bits_per_pixel[9:0] syntax element.
+ * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled
+ * PPS4[3] - simple_422: Indicates if decoder drops samples to
+ * reconstruct the 4:2:2 picture.
+ * PPS4[4] - Convert_rgb: Indicates if DSC color space conversion is
+ * active.
+	 * PPS4[5] - block_pred_enable: Indicates if BP is used to code any
+	 * groups in the picture
+	 * PPS4[7:6] - Reserved bits
+ */
+ u8 pps_4;
+ /**
+ * @bits_per_pixel_low:
+ * PPS5[7:0] - This indicates the lower significant 8 bits of
+ * the compressed BPP bits_per_pixel[9:0] element.
+ */
+ u8 bits_per_pixel_low;
+ /**
+ * @pic_height:
+ * PPS6[7:0], PPS7[7:0] -pic_height: Specifies the number of pixel rows
+ * within the raster.
+ */
+ __be16 pic_height;
+ /**
+ * @pic_width:
+ * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within
+ * the raster.
+ */
+ __be16 pic_width;
+ /**
+ * @slice_height:
+ * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels.
+ */
+ __be16 slice_height;
+ /**
+ * @slice_width:
+ * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels.
+ */
+ __be16 slice_width;
+ /**
+ * @chunk_size:
+ * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks
+ * that are used for slice multiplexing.
+ */
+ __be16 chunk_size;
+ /**
+ * @initial_xmit_delay_high:
+ * PPS16[1:0] - Most Significant two bits of initial transmission delay.
+ * It specifies the number of pixel times that the encoder waits before
+ * transmitting data from its rate buffer.
+ * PPS16[7:2] - Reserved
+ */
+ u8 initial_xmit_delay_high;
+ /**
+ * @initial_xmit_delay_low:
+ * PPS17[7:0] - Least significant 8 bits of initial transmission delay.
+ */
+ u8 initial_xmit_delay_low;
+ /**
+ * @initial_dec_delay:
+ *
+ * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number
+ * of pixel times that the decoder accumulates data in its rate buffer
+ * before starting to decode and output pixels.
+ */
+ __be16 initial_dec_delay;
+ /**
+ * @pps20_reserved:
+ *
+ * PPS20[7:0] - Reserved
+ */
+ u8 pps20_reserved;
+ /**
+ * @initial_scale_value:
+ * PPS21[5:0] - Initial rcXformScale factor used at beginning
+ * of a slice.
+ * PPS21[7:6] - Reserved
+ */
+ u8 initial_scale_value;
+ /**
+ * @scale_increment_interval:
+ * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing
+ * the rcXformScale factor at end of a slice.
+ */
+ __be16 scale_increment_interval;
+ /**
+ * @scale_decrement_interval_high:
+ * PPS24[3:0] - Higher 4 bits indicating number of group times between
+ * decrementing the rcXformScale factor at beginning of a slice.
+ * PPS24[7:4] - Reserved
+ */
+ u8 scale_decrement_interval_high;
+ /**
+ * @scale_decrement_interval_low:
+ * PPS25[7:0] - Lower 8 bits of scale decrement interval
+ */
+ u8 scale_decrement_interval_low;
+ /**
+ * @pps26_reserved:
+ * PPS26[7:0]
+ */
+ u8 pps26_reserved;
+ /**
+ * @first_line_bpg_offset:
+ * PPS27[4:0] - Number of additional bits that are allocated
+ * for each group on first line of a slice.
+ * PPS27[7:5] - Reserved
+ */
+ u8 first_line_bpg_offset;
+ /**
+ * @nfl_bpg_offset:
+ * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits
+ * deallocated for each group for groups after the first line of slice.
+ */
+ __be16 nfl_bpg_offset;
+ /**
+ * @slice_bpg_offset:
+ * PPS30, PPS31[7:0] - Number of bits that are deallocated for each
+ * group to enforce the slice constraint.
+ */
+ __be16 slice_bpg_offset;
+ /**
+ * @initial_offset:
+ * PPS32,33[7:0] - Initial value for rcXformOffset
+ */
+ __be16 initial_offset;
+ /**
+ * @final_offset:
+ * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset
+ */
+ __be16 final_offset;
+ /**
+ * @flatness_min_qp:
+ * PPS36[4:0] - Minimum QP at which flatness is signaled and
+ * flatness QP adjustment is made.
+ * PPS36[7:5] - Reserved
+ */
+ u8 flatness_min_qp;
+ /**
+ * @flatness_max_qp:
+ * PPS37[4:0] - Max QP at which flatness is signalled and
+ * the flatness adjustment is made.
+ * PPS37[7:5] - Reserved
+ */
+ u8 flatness_max_qp;
+ /**
+ * @rc_model_size:
+ * PPS38,39[7:0] - Number of bits within RC Model.
+ */
+ __be16 rc_model_size;
+ /**
+ * @rc_edge_factor:
+	 * PPS40[3:0] - Ratio of current activity vs. previous
+ * activity to determine presence of edge.
+ * PPS40[7:4] - Reserved
+ */
+ u8 rc_edge_factor;
+ /**
+ * @rc_quant_incr_limit0:
+ * PPS41[4:0] - QP threshold used in short term RC
+ * PPS41[7:5] - Reserved
+ */
+ u8 rc_quant_incr_limit0;
+ /**
+ * @rc_quant_incr_limit1:
+ * PPS42[4:0] - QP threshold used in short term RC
+ * PPS42[7:5] - Reserved
+ */
+ u8 rc_quant_incr_limit1;
+ /**
+ * @rc_tgt_offset:
+ * PPS43[3:0] - Lower end of the variability range around the target
+ * bits per group that is allowed by short term RC.
+	 * PPS43[7:4] - Upper end of the variability range around the target
+	 * bits per group that is allowed by short term RC.
+ */
+ u8 rc_tgt_offset;
+ /**
+ * @rc_buf_thresh:
+ * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for
+ * the 15 ranges defined by 14 thresholds.
+ */
+ u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+ /**
+ * @rc_range_parameters:
+ * PPS58[7:0] - PPS87[7:0]
+ * Parameters that correspond to each of the 15 ranges.
+ */
+ __be16 rc_range_parameters[DSC_NUM_BUF_RANGES];
+ /**
+ * @native_422_420:
+ * PPS88[0] - 0 = Native 4:2:2 not used
+ * 1 = Native 4:2:2 used
+	 * PPS88[1] - 0 = Native 4:2:0 not used
+ * 1 = Native 4:2:0 used
+ * PPS88[7:2] - Reserved 6 bits
+ */
+ u8 native_422_420;
+ /**
+ * @second_line_bpg_offset:
+ * PPS89[4:0] - Additional bits/group budget for the
+ * second line of a slice in Native 4:2:0 mode.
+ * Set to 0 if DSC minor version is 1 or native420 is 0.
+ * PPS89[7:5] - Reserved
+ */
+ u8 second_line_bpg_offset;
+ /**
+ * @nsl_bpg_offset:
+ * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated
+ * for each group that is not in the second line of a slice.
+ */
+ __be16 nsl_bpg_offset;
+ /**
+ * @second_line_offset_adj:
+ * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second
+ * line in Native 4:2:0 mode.
+ */
+ __be16 second_line_offset_adj;
+ /**
+ * @pps_long_94_reserved:
+ * PPS 94, 95, 96, 97 - Reserved
+ */
+ u32 pps_long_94_reserved;
+ /**
+ * @pps_long_98_reserved:
+ * PPS 98, 99, 100, 101 - Reserved
+ */
+ u32 pps_long_98_reserved;
+ /**
+ * @pps_long_102_reserved:
+ * PPS 102, 103, 104, 105 - Reserved
+ */
+ u32 pps_long_102_reserved;
+ /**
+ * @pps_long_106_reserved:
+ * PPS 106, 107, 108, 109 - reserved
+ */
+ u32 pps_long_106_reserved;
+ /**
+ * @pps_long_110_reserved:
+ * PPS 110, 111, 112, 113 - reserved
+ */
+ u32 pps_long_110_reserved;
+ /**
+ * @pps_long_114_reserved:
+ * PPS 114 - 117 - reserved
+ */
+ u32 pps_long_114_reserved;
+ /**
+ * @pps_long_118_reserved:
+ * PPS 118 - 121 - reserved
+ */
+ u32 pps_long_118_reserved;
+ /**
+ * @pps_long_122_reserved:
+ * PPS 122- 125 - reserved
+ */
+ u32 pps_long_122_reserved;
+ /**
+ * @pps_short_126_reserved:
+ * PPS 126, 127 - reserved
+ */
+ __be16 pps_short_126_reserved;
+} __packed;
+
+/**
+ * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter
+ * Set Metadata
+ *
+ * This structure represents the DSC PPS infoframe required to send the Picture
+ * Parameter Set metadata required before enabling VESA Display Stream
+ * Compression. This is based on the DP Secondary Data Packet structure and
+ * comprises of SDP Header as defined in drm_dp_helper.h and PPS payload.
+ *
+ * @pps_header: Header for PPS as per DP SDP header format
+ * @pps_payload: PPS payload fields as per DSC specification Table 4-1
+ */
+struct drm_dsc_pps_infoframe {
+ struct dp_sdp_header pps_header;
+ struct drm_dsc_picture_parameter_set pps_payload;
+} __packed;
+
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp);
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg);
+
+#endif /* DRM_DSC_H_ */
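
A hedged sketch of packing and sending the PPS SDP from a previously filled drm_dsc_config; example_write_dp_sdp() is a stand-in for the hardware-specific SDP writer.

/* Hedged sketch: building the 128-byte PPS payload before enabling DSC. */
static void example_send_pps_sdp(struct example_encoder *enc,
				 const struct drm_dsc_config *dsc_cfg)
{
	struct drm_dsc_pps_infoframe pps_sdp;

	drm_dsc_dp_pps_header_init(&pps_sdp);
	drm_dsc_pps_infoframe_pack(&pps_sdp, dsc_cfg);

	/* hand the packed SDP (header + payload) to the encoder hardware */
	example_write_dp_sdp(enc, &pps_sdp, sizeof(pps_sdp));
}
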
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 4a65f0d155b0..8dbbe1eece1b 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -26,8 +26,6 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
-void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
- bool state);
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 26485acc51d7..84ac79219e4c 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -164,14 +164,14 @@ struct drm_file {
* See also the :ref:`section on primary nodes and authentication
* <drm_primary_node>`.
*/
- unsigned authenticated :1;
+ bool authenticated;
/**
* @stereo_allowed:
*
* True when the client has asked us to expose stereo 3D mode flags.
*/
- unsigned stereo_allowed :1;
+ bool stereo_allowed;
/**
* @universal_planes:
@@ -179,10 +179,10 @@ struct drm_file {
* True if client understands CRTC primary planes and cursor planes
* in the plane list. Automatically set when @atomic is set.
*/
- unsigned universal_planes:1;
+ bool universal_planes;
/** @atomic: True if client understands atomic properties. */
- unsigned atomic:1;
+ bool atomic;
/**
* @aspect_ratio_allowed:
@@ -190,14 +190,14 @@ struct drm_file {
* True, if client can handle picture aspect ratios, and has requested
* to pass this information along with the mode.
*/
- unsigned aspect_ratio_allowed:1;
+ bool aspect_ratio_allowed;
/**
* @writeback_connectors:
*
* True if client understands writeback connectors
*/
- unsigned writeback_connectors:1;
+ bool writeback_connectors;
/**
* @is_master:
@@ -208,7 +208,7 @@ struct drm_file {
* See also the :ref:`section on primary nodes and authentication
* <drm_primary_node>`.
*/
- unsigned is_master:1;
+ bool is_master;
/**
* @master:
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 865ef60c17af..bcb389f04618 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -52,25 +52,86 @@ struct drm_mode_fb_cmd2;
/**
* struct drm_format_info - information about a DRM format
- * @format: 4CC format identifier (DRM_FORMAT_*)
- * @depth: Color depth (number of bits per pixel excluding padding bits),
- * valid for a subset of RGB formats only. This is a legacy field, do not
- * use in new code and set to 0 for new formats.
- * @num_planes: Number of color planes (1 to 3)
- * @cpp: Number of bytes per pixel (per plane)
- * @hsub: Horizontal chroma subsampling factor
- * @vsub: Vertical chroma subsampling factor
- * @has_alpha: Does the format embeds an alpha component?
- * @is_yuv: Is it a YUV format?
*/
struct drm_format_info {
+ /** @format: 4CC format identifier (DRM_FORMAT_*) */
u32 format;
+
+ /**
+ * @depth:
+ *
+ * Color depth (number of bits per pixel excluding padding bits),
+ * valid for a subset of RGB formats only. This is a legacy field, do
+ * not use in new code and set to 0 for new formats.
+ */
u8 depth;
+
+ /** @num_planes: Number of color planes (1 to 3) */
u8 num_planes;
- u8 cpp[3];
+
+ union {
+ /**
+ * @cpp:
+ *
+ * Number of bytes per pixel (per plane), this is aliased with
+ * @char_per_block. It is deprecated in favour of using the
+ * triplet @char_per_block, @block_w, @block_h for better
+ * describing the pixel format.
+ */
+ u8 cpp[3];
+
+ /**
+ * @char_per_block:
+ *
+ * Number of bytes per block (per plane), where blocks are
+ * defined as a rectangle of pixels which are stored next to
+ * each other in a byte aligned memory region. Together with
+ * @block_w and @block_h this is used to properly describe tiles
+ * in tiled formats or to describe groups of pixels in packed
+ * formats for which the memory needed for a single pixel is not
+ * byte aligned.
+ *
+ * @cpp has been kept for historical reasons because there are
+ * a lot of places in drivers where it's used. In drm core for
+ * generic code paths the preferred way is to use
+ * @char_per_block, drm_format_info_block_width() and
+ * drm_format_info_block_height() which allows handling both
+ * block and non-block formats in the same way.
+ *
+ * For formats that are intended to be used only with non-linear
+ * modifiers both @cpp and @char_per_block must be 0 in the
+ * generic format table. Drivers could supply accurate
+ * information from their drm_mode_config.get_format_info hook
+ * if they want the core to be validating the pitch.
+ */
+ u8 char_per_block[3];
+ };
+
+ /**
+ * @block_w:
+ *
+ * Block width in pixels, this is intended to be accessed through
+ * drm_format_info_block_width()
+ */
+ u8 block_w[3];
+
+ /**
+ * @block_h:
+ *
+ * Block height in pixels, this is intended to be accessed through
+ * drm_format_info_block_height()
+ */
+ u8 block_h[3];
+
+ /** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
+ /** @vsub: Vertical chroma subsampling factor */
u8 vsub;
+
+	/** @has_alpha: Does the format embed an alpha component? */
bool has_alpha;
+
+ /** @is_yuv: Is it a YUV format? */
bool is_yuv;
};
@@ -96,6 +157,12 @@ int drm_format_horz_chroma_subsampling(uint32_t format);
int drm_format_vert_chroma_subsampling(uint32_t format);
int drm_format_plane_width(int width, uint32_t format, int plane);
int drm_format_plane_height(int height, uint32_t format, int plane);
+unsigned int drm_format_info_block_width(const struct drm_format_info *info,
+ int plane);
+unsigned int drm_format_info_block_height(const struct drm_format_info *info,
+ int plane);
+uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
+ int plane, unsigned int buffer_width);
const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
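
A hedged sketch of the block-aware pitch check enabled by the new helpers, replacing an open-coded width * cpp computation; the error handling is illustrative.

/* Hedged sketch: validating a framebuffer pitch with the new helpers. */
static int example_check_pitch(const struct drm_format_info *info,
			       unsigned int width, unsigned int pitch)
{
	u64 min_pitch = drm_format_info_min_pitch(info, 0, width);

	if (!min_pitch || (u64)pitch < min_pitch)
		return -EINVAL;

	return 0;
}
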
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index c50502c656e5..c94acedfb08e 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -241,30 +241,6 @@ static inline void drm_framebuffer_put(struct drm_framebuffer *fb)
}
/**
- * drm_framebuffer_reference - acquire a framebuffer reference
- * @fb: DRM framebuffer
- *
- * This is a compatibility alias for drm_framebuffer_get() and should not be
- * used by new code.
- */
-static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
- drm_framebuffer_get(fb);
-}
-
-/**
- * drm_framebuffer_unreference - release a framebuffer reference
- * @fb: DRM framebuffer
- *
- * This is a compatibility alias for drm_framebuffer_put() and should not be
- * used by new code.
- */
-static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
- drm_framebuffer_put(fb);
-}
-
-/**
* drm_framebuffer_read_refcount - read the framebuffer reference count.
* @fb: framebuffer
*
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 3583b98a1718..c95727425284 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -38,6 +38,121 @@
#include <drm/drm_vma_manager.h>
+struct drm_gem_object;
+
+/**
+ * struct drm_gem_object_funcs - GEM object functions
+ */
+struct drm_gem_object_funcs {
+ /**
+ * @free:
+ *
+ * Deconstructor for drm_gem_objects.
+ *
+ * This callback is mandatory.
+ */
+ void (*free)(struct drm_gem_object *obj);
+
+ /**
+ * @open:
+ *
+ * Called upon GEM handle creation.
+ *
+ * This callback is optional.
+ */
+ int (*open)(struct drm_gem_object *obj, struct drm_file *file);
+
+ /**
+ * @close:
+ *
+ * Called upon GEM handle release.
+ *
+ * This callback is optional.
+ */
+ void (*close)(struct drm_gem_object *obj, struct drm_file *file);
+
+ /**
+ * @print_info:
+ *
+ * If driver subclasses struct &drm_gem_object, it can implement this
+ * optional hook for printing additional driver specific info.
+ *
+ * drm_printf_indent() should be used in the callback passing it the
+ * indent argument.
+ *
+ * This callback is called from drm_gem_print_info().
+ *
+ * This callback is optional.
+ */
+ void (*print_info)(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj);
+
+ /**
+ * @export:
+ *
+ * Export backing buffer as a &dma_buf.
+ * If this is not set drm_gem_prime_export() is used.
+ *
+ * This callback is optional.
+ */
+ struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);
+
+ /**
+ * @pin:
+ *
+ * Pin backing buffer in memory.
+ *
+ * This callback is optional.
+ */
+ int (*pin)(struct drm_gem_object *obj);
+
+ /**
+ * @unpin:
+ *
+ * Unpin backing buffer.
+ *
+ * This callback is optional.
+ */
+ void (*unpin)(struct drm_gem_object *obj);
+
+ /**
+ * @get_sg_table:
+ *
+ * Returns a Scatter-Gather table representation of the buffer.
+ * Used when exporting a buffer.
+ *
+ * This callback is mandatory if buffer export is supported.
+ */
+ struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);
+
+ /**
+ * @vmap:
+ *
+ * Returns a virtual address for the buffer.
+ *
+ * This callback is optional.
+ */
+ void *(*vmap)(struct drm_gem_object *obj);
+
+ /**
+ * @vunmap:
+ *
+	 * Releases the address previously returned by @vmap.
+ *
+ * This callback is optional.
+ */
+ void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
+
+ /**
+ * @vm_ops:
+ *
+ * Virtual memory operations used with mmap.
+ *
+ * This is optional but necessary for mmap support.
+ */
+ const struct vm_operations_struct *vm_ops;
+};
+
/**
* struct drm_gem_object - GEM buffer object
*
@@ -146,6 +261,17 @@ struct drm_gem_object {
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
+
+ /**
+ * @funcs:
+ *
+ * Optional GEM object functions. If this is set, it will be used instead of the
+ * corresponding &drm_driver GEM callbacks.
+ *
+ * New drivers should use this.
+ *
+ */
+ const struct drm_gem_object_funcs *funcs;
};
/**
@@ -222,56 +348,6 @@ __drm_gem_object_put(struct drm_gem_object *obj)
void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
void drm_gem_object_put(struct drm_gem_object *obj);
-/**
- * drm_gem_object_reference - acquire a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_get() and should not be
- * used by new code.
- */
-static inline void drm_gem_object_reference(struct drm_gem_object *obj)
-{
- drm_gem_object_get(obj);
-}
-
-/**
- * __drm_gem_object_unreference - raw function to release a GEM buffer object
- * reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for __drm_gem_object_put() and should not be
- * used by new code.
- */
-static inline void __drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- __drm_gem_object_put(obj);
-}
-
-/**
- * drm_gem_object_unreference_unlocked - release a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_put_unlocked() and should
- * not be used by new code.
- */
-static inline void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-{
- drm_gem_object_put_unlocked(obj);
-}
-
-/**
- * drm_gem_object_unreference - release a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_put() and should not be
- * used by new code.
- */
-static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- drm_gem_object_put(obj);
-}
-
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -293,4 +369,9 @@ int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);
+int drm_gem_pin(struct drm_gem_object *obj);
+void drm_gem_unpin(struct drm_gem_object *obj);
+void *drm_gem_vmap(struct drm_gem_object *obj);
+void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr);
+
#endif /* __DRM_GEM_H__ */
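
A hedged sketch of a driver opting into the per-object callbacks; the example_gem_* helpers are illustrative stand-ins for driver code.

/* Hedged sketch: per-object GEM callbacks instead of drm_driver-wide hooks. */
static const struct drm_gem_object_funcs example_gem_funcs = {
	.free		= example_gem_free_object,
	.vmap		= example_gem_vmap,
	.vunmap		= example_gem_vunmap,
	.vm_ops		= &example_gem_vm_ops,
};

static struct drm_gem_object *example_gem_create(struct drm_device *dev,
						 size_t size)
{
	struct drm_gem_object *obj = example_gem_alloc(dev, size); /* driver specific */

	if (!IS_ERR(obj))
		obj->funcs = &example_gem_funcs;

	return obj;
}
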
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 19777145cf8e..07c504940ba1 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -103,4 +103,28 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj);
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+struct drm_gem_object *
+drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size);
+
+/**
+ * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual
+ * address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address on
+ * imported buffers as well.
+ */
+#define DRM_GEM_CMA_VMAP_DRIVER_OPS \
+ .gem_create_object = drm_cma_gem_create_object_default_funcs, \
+ .dumb_create = drm_gem_cma_dumb_create, \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
+ .gem_prime_mmap = drm_gem_prime_mmap
+
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
#endif /* __DRM_GEM_CMA_HELPER_H__ */
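As a sketch, a CMA-based driver could replace its hand-rolled GEM hooks with the new macro; the driver name, description and feature flags below are illustrative only.

static struct drm_driver my_cma_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET |
				  DRIVER_PRIME | DRIVER_ATOMIC,
	/* gem_create_object, dumb_create and the PRIME import/mmap hooks */
	DRM_GEM_CMA_VMAP_DRIVER_OPS,
	.name			= "mydrv",
	.desc			= "example CMA driver",
	.date			= "20181210",
	.major			= 1,
	.minor			= 0,
};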
diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h
deleted file mode 100644
index 3a830602a2e4..000000000000
--- a/include/drm/drm_global.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#ifndef _DRM_GLOBAL_H_
-#define _DRM_GLOBAL_H_
-enum drm_global_types {
- DRM_GLOBAL_TTM_MEM = 0,
- DRM_GLOBAL_TTM_BO,
- DRM_GLOBAL_TTM_OBJECT,
- DRM_GLOBAL_NUM
-};
-
-struct drm_global_reference {
- enum drm_global_types global_type;
- size_t size;
- void *object;
- int (*init) (struct drm_global_reference *);
- void (*release) (struct drm_global_reference *);
-};
-
-void drm_global_init(void);
-void drm_global_release(void);
-int drm_global_item_ref(struct drm_global_reference *ref);
-void drm_global_item_unref(struct drm_global_reference *ref);
-
-#endif
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 98e63d870139..a6de09c5e47f 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -38,4 +38,216 @@
#define DRM_HDCP_DDC_BSTATUS 0x41
#define DRM_HDCP_DDC_KSV_FIFO 0x43
+#define DRM_HDCP_1_4_SRM_ID 0x8
+#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3
+#define DRM_HDCP_1_4_DCP_SIG_SIZE 40
+
+/* Protocol message definition for HDCP2.2 specification */
+/*
+ * Protected content streams are classified into 2 types:
+ * - Type0: Can be transmitted with HDCP 1.4+
+ * - Type1: Can be transmitted with HDCP 2.2+
+ */
+#define HDCP_STREAM_TYPE0 0x00
+#define HDCP_STREAM_TYPE1 0x01
+
+/* HDCP2.2 Msg IDs */
+#define HDCP_2_2_NULL_MSG 1
+#define HDCP_2_2_AKE_INIT 2
+#define HDCP_2_2_AKE_SEND_CERT 3
+#define HDCP_2_2_AKE_NO_STORED_KM 4
+#define HDCP_2_2_AKE_STORED_KM 5
+#define HDCP_2_2_AKE_SEND_HPRIME 7
+#define HDCP_2_2_AKE_SEND_PAIRING_INFO 8
+#define HDCP_2_2_LC_INIT 9
+#define HDCP_2_2_LC_SEND_LPRIME 10
+#define HDCP_2_2_SKE_SEND_EKS 11
+#define HDCP_2_2_REP_SEND_RECVID_LIST 12
+#define HDCP_2_2_REP_SEND_ACK 15
+#define HDCP_2_2_REP_STREAM_MANAGE 16
+#define HDCP_2_2_REP_STREAM_READY 17
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
+
+#define HDCP_2_2_RTX_LEN 8
+#define HDCP_2_2_RRX_LEN 8
+
+#define HDCP_2_2_K_PUB_RX_MOD_N_LEN 128
+#define HDCP_2_2_K_PUB_RX_EXP_E_LEN 3
+#define HDCP_2_2_K_PUB_RX_LEN (HDCP_2_2_K_PUB_RX_MOD_N_LEN + \
+ HDCP_2_2_K_PUB_RX_EXP_E_LEN)
+
+#define HDCP_2_2_DCP_LLC_SIG_LEN 384
+
+#define HDCP_2_2_E_KPUB_KM_LEN 128
+#define HDCP_2_2_E_KH_KM_M_LEN (16 + 16)
+#define HDCP_2_2_H_PRIME_LEN 32
+#define HDCP_2_2_E_KH_KM_LEN 16
+#define HDCP_2_2_RN_LEN 8
+#define HDCP_2_2_L_PRIME_LEN 32
+#define HDCP_2_2_E_DKEY_KS_LEN 16
+#define HDCP_2_2_RIV_LEN 8
+#define HDCP_2_2_SEQ_NUM_LEN 3
+#define HDCP_2_2_V_PRIME_HALF_LEN (HDCP_2_2_L_PRIME_LEN / 2)
+#define HDCP_2_2_RECEIVER_ID_LEN DRM_HDCP_KSV_LEN
+#define HDCP_2_2_MAX_DEVICE_COUNT 31
+#define HDCP_2_2_RECEIVER_IDS_MAX_LEN (HDCP_2_2_RECEIVER_ID_LEN * \
+ HDCP_2_2_MAX_DEVICE_COUNT)
+#define HDCP_2_2_MPRIME_LEN 32
+
+/* The following macros take a byte at a time and mask the bit(s) */
+/*
+ * TODO: This has to be changed for DP MST, as multiple streams on
+ * the same port are possible.
+ * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ */
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 1
+#define HDCP_2_2_TXCAP_MASK_LEN 2
+#define HDCP_2_2_RXCAPS_LEN 3
+#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0))
+#define HDCP_2_2_DP_HDCP_CAPABLE(x) ((x) & BIT(1))
+#define HDCP_2_2_RXINFO_LEN 2
+
+/* HDCP1.x compliant device in downstream */
+#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x) ((x) & BIT(0))
+
+/* HDCP2.0 Compliant repeater in downstream */
+#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x) ((x) & BIT(1))
+#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x) ((x) & BIT(2))
+#define HDCP_2_2_MAX_DEVS_EXCEEDED(x) ((x) & BIT(3))
+#define HDCP_2_2_DEV_COUNT_LO(x) (((x) & (0xF << 4)) >> 4)
+#define HDCP_2_2_DEV_COUNT_HI(x) ((x) & BIT(0))
+#define HDCP_2_2_DEPTH(x) (((x) & (0x7 << 1)) >> 1)
+
+struct hdcp2_cert_rx {
+ u8 receiver_id[HDCP_2_2_RECEIVER_ID_LEN];
+ u8 kpub_rx[HDCP_2_2_K_PUB_RX_LEN];
+ u8 reserved[2];
+ u8 dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN];
+} __packed;
+
+struct hdcp2_streamid_type {
+ u8 stream_id;
+ u8 stream_type;
+} __packed;
+
+/*
+ * The TxCaps field as specified in the HDCP HDMI and DP specs.
+ * This field is big endian as specified in the errata.
+ */
+struct hdcp2_tx_caps {
+ /* Transmitter must set this to 0x2 */
+ u8 version;
+
+ /* Reserved for HDCP and DP Spec. Read as Zero */
+ u8 tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN];
+} __packed;
+
+/* Main structures for HDCP2.2 protocol communication */
+struct hdcp2_ake_init {
+ u8 msg_id;
+ u8 r_tx[HDCP_2_2_RTX_LEN];
+ struct hdcp2_tx_caps tx_caps;
+} __packed;
+
+struct hdcp2_ake_send_cert {
+ u8 msg_id;
+ struct hdcp2_cert_rx cert_rx;
+ u8 r_rx[HDCP_2_2_RRX_LEN];
+ u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct hdcp2_ake_no_stored_km {
+ u8 msg_id;
+ u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+} __packed;
+
+struct hdcp2_ake_stored_km {
+ u8 msg_id;
+ u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+} __packed;
+
+struct hdcp2_ake_send_hprime {
+ u8 msg_id;
+ u8 h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ake_send_pairing_info {
+ u8 msg_id;
+ u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct hdcp2_lc_init {
+ u8 msg_id;
+ u8 r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+struct hdcp2_lc_send_lprime {
+ u8 msg_id;
+ u8 l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ske_send_eks {
+ u8 msg_id;
+ u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+ u8 riv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+struct hdcp2_rep_send_receiverid_list {
+ u8 msg_id;
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+ u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+ u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct hdcp2_rep_send_ack {
+ u8 msg_id;
+ u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+struct hdcp2_rep_stream_manage {
+ u8 msg_id;
+ u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+ __be16 k;
+ struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT];
+} __packed;
+
+struct hdcp2_rep_stream_ready {
+ u8 msg_id;
+ u8 m_prime[HDCP_2_2_MPRIME_LEN];
+} __packed;
+
+struct hdcp2_dp_errata_stream_type {
+ u8 msg_id;
+ u8 stream_type;
+} __packed;
+
+/* HDCP2.2 TIMEOUTs in mSec */
+#define HDCP_2_2_CERT_TIMEOUT_MS 100
+#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000
+#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200
+#define HDCP_2_2_PAIRING_TIMEOUT_MS 200
+#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20
+#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 7
+#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000
+#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100
+
+/* HDMI HDCP2.2 Register Offsets */
+#define HDCP_2_2_HDMI_REG_VER_OFFSET 0x50
+#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET 0x60
+#define HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET 0x70
+#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET 0x80
+#define HDCP_2_2_HDMI_REG_DBG_OFFSET 0xC0
+
+#define HDCP_2_2_HDMI_SUPPORT_MASK BIT(2)
+#define HDCP_2_2_RX_CAPS_VERSION_VAL 0x02
+#define HDCP_2_2_SEQ_NUM_MAX 0xFFFFFF
+#define HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN 200
+
+/* Below macros take a byte at a time and mask the bit(s) */
+#define HDCP_2_2_HDMI_RXSTATUS_LEN 2
+#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3)
+#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2))
+#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
+
#endif
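As an illustration of the RxInfo helpers above, a repeater-topology check might look like the sketch below; the byte ordering (high byte first) is an assumption drawn from the macro layout, and msg stands for a received struct hdcp2_rep_send_receiverid_list.

	u8 *rx_info = msg->rx_info;
	unsigned int dev_count, depth;

	if (HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]))
		return -EINVAL;

	dev_count = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4) |
		    HDCP_2_2_DEV_COUNT_LO(rx_info[1]);
	depth = HDCP_2_2_DEPTH(rx_info[0]);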
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 4fef19064b0f..491528f48cfb 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -168,6 +168,12 @@ struct mipi_dsi_device_info {
* @format: pixel format for video mode
* @lanes: number of active data lanes
* @mode_flags: DSI operation mode related flags
+ * @hs_rate: maximum lane frequency for high speed mode in hertz, this should
+ * be set to the real limits of the hardware, zero is only accepted for
+ * legacy drivers
+ * @lp_rate: maximum lane frequency for low power mode in hertz, this should
+ * be set to the real limits of the hardware, zero is only accepted for
+ * legacy drivers
*/
struct mipi_dsi_device {
struct mipi_dsi_host *host;
@@ -178,6 +184,8 @@ struct mipi_dsi_device {
unsigned int lanes;
enum mipi_dsi_pixel_format format;
unsigned long mode_flags;
+ unsigned long hs_rate;
+ unsigned long lp_rate;
};
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
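A panel driver would typically fill the new hs_rate/lp_rate limits in probe before attaching to the host; the rates below are placeholders rather than limits of any real panel.

	/* hypothetical panel probe, given a struct mipi_dsi_device *dsi */
	dsi->lanes	= 4;
	dsi->format	= MIPI_DSI_FMT_RGB888;
	dsi->mode_flags	= MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
	dsi->hs_rate	= 500000000;	/* 500 MHz max HS lane rate, placeholder */
	dsi->lp_rate	= 10000000;	/* 10 MHz max LP rate, placeholder */

	return mipi_dsi_attach(dsi);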
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 928e4172a0bb..572274ccbec7 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -52,6 +52,12 @@ struct drm_mode_config_funcs {
* requested metadata, but most of that is left to the driver. See
* &struct drm_mode_fb_cmd2 for details.
*
+ * To validate the pixel format and modifier, drivers can use
+ * drm_any_plane_has_format() to make sure at least one plane supports
+ * the requested values. Note that the driver must first determine the
+ * actual modifier used if the request doesn't have it specified,
+ * i.e. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0.
+ *
* If the parameters are deemed valid and the backing storage objects in
* the underlying memory manager all exist, then the driver allocates
* a new &drm_framebuffer structure, subclassed to contain
@@ -628,6 +634,15 @@ struct drm_mode_config {
*/
struct drm_property *prop_crtc_id;
/**
+ * @prop_fb_damage_clips: Optional plane property to mark damaged
+ * regions on the plane in framebuffer coordinates of the framebuffer
+ * attached to the plane.
+ *
+ * The layout of blob data is simply an array of &drm_mode_rect. Unlike
+ * plane src coordinates, damage clips are not in 16.16 fixed point.
+ */
+ struct drm_property *prop_fb_damage_clips;
+ /**
* @prop_active: Default atomic CRTC property to control the active
* state, which is the simplified implementation for DPMS in atomic
* drivers.
@@ -639,6 +654,11 @@ struct drm_mode_config {
* connectors must be of and active must be set to disabled, too.
*/
struct drm_property *prop_mode_id;
+ /**
+ * @prop_vrr_enabled: Default atomic CRTC property to indicate
+ * whether variable refresh rate should be enabled on the CRTC.
+ */
+ struct drm_property *prop_vrr_enabled;
/**
* @dvi_i_subconnector_property: Optional DVI-I property to
@@ -809,6 +829,13 @@ struct drm_mode_config {
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
+
+ /**
+ * @quirk_addfb_prefer_xbgr_30bpp:
+ *
+ * Special hack for legacy ADDFB to keep nouveau userspace happy. Should
+ * only ever be set by the nouveau kernel driver.
+ */
bool quirk_addfb_prefer_xbgr_30bpp;
/**
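A sketch of an fb_create implementation that follows the validation suggested above; drm_gem_fb_create() is the generic GEM framebuffer helper, and error handling is trimmed.

static struct drm_framebuffer *
my_fb_create(struct drm_device *dev, struct drm_file *file,
	     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	/* without DRM_MODE_FB_MODIFIERS the driver has to pick the modifier itself */
	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0]))
		return ERR_PTR(-EINVAL);

	return drm_gem_fb_create(dev, file, mode_cmd);
}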
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index a685d1bb21f2..a308f2d6496f 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,4 +130,63 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
+/**
+ * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks
+ * @dev: drm device
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init()
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * Use these macros to simplify grabbing all modeset locks using a local
+ * context. This has the advantage of reducing boilerplate, but also properly
+ * checking return values where appropriate.
+ *
+ * Any code run between BEGIN and END will be holding the modeset locks.
+ *
+ * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and
+ * forth between the labels on deadlock and error conditions.
+ *
+ * Drivers can acquire additional modeset locks. If any lock acquisition
+ * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with
+ * the @ret parameter containing the return value of drm_modeset_lock().
+ *
+ * Returns:
+ * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN()
+ * is 0, so no error checking is necessary.
+ */
+#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \
+ drm_modeset_acquire_init(&ctx, flags); \
+modeset_lock_retry: \
+ ret = drm_modeset_lock_all_ctx(dev, &ctx); \
+ if (ret) \
+ goto modeset_lock_fail;
+
+/**
+ * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN
+ * if ret is -EDEADLK.
+ *
+ * It's important that you use the same ret variable for begin and end so
+ * deadlock conditions are properly handled.
+ *
+ * Returns:
+ * ret will be untouched unless it is -EDEADLK on entry. That means that if you
+ * successfully acquire the locks, ret will be whatever your code sets it to. If
+ * there is a deadlock or other failure with acquire or backoff, ret will be set
+ * to that failure. In both of these cases the code between BEGIN/END will not
+ * be run, so the failure will reflect the inability to grab the locks.
+ */
+#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \
+modeset_lock_fail: \
+ if (ret == -EDEADLK) { \
+ ret = drm_modeset_backoff(&ctx); \
+ if (!ret) \
+ goto modeset_lock_retry; \
+ } \
+ drm_modeset_drop_locks(&ctx); \
+ drm_modeset_acquire_fini(&ctx);
+
#endif /* DRM_MODESET_LOCK_H_ */
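Putting the two macros together, a driver-side helper might look like the sketch below; do_locked_work() is a stand-in for whatever needs all modeset locks held.

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	/* all modeset locks are held here; -EDEADLK backoff is handled
	 * by the macros themselves */
	ret = do_locked_work(dev);

	DRM_MODESET_LOCK_ALL_END(ctx, ret);

	return ret;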
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 0a0834bef8bd..6078c700d9ba 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -173,6 +173,16 @@ struct drm_plane_state {
*/
enum drm_color_range color_range;
+ /**
+ * @fb_damage_clips:
+ *
+ * Blob representing damage (area in plane framebuffer that changed
+ * since last plane update) as an array of &drm_mode_rect in framebuffer
+ * coordinates of the attached framebuffer. Note that unlike plane src,
+ * damage clips are not in 16.16 fixed point.
+ */
+ struct drm_property_blob *fb_damage_clips;
+
/** @src: clipped source coordinates of the plane (in 16.16) */
/** @dst: clipped destination coordinates of the plane */
struct drm_rect src, dst;
@@ -798,5 +808,39 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
#define drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+bool drm_any_plane_has_format(struct drm_device *dev,
+ u32 format, u64 modifier);
+/**
+ * drm_plane_get_damage_clips_count - Returns damage clips count.
+ * @state: Plane state.
+ *
+ * Simple helper to get the number of &drm_mode_rect clips set by user-space
+ * during plane update.
+ *
+ * Return: Number of clips in plane fb_damage_clips blob property.
+ */
+static inline unsigned int
+drm_plane_get_damage_clips_count(const struct drm_plane_state *state)
+{
+ return (state && state->fb_damage_clips) ?
+ state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0;
+}
+
+/**
+ * drm_plane_get_damage_clips - Returns damage clips.
+ * @state: Plane state.
+ *
+ * Note that this function returns uapi type &drm_mode_rect. Drivers might
+ * instead be interested in internal &drm_rect which can be obtained by calling
+ * drm_helper_get_plane_damage_clips().
+ *
+ * Return: Damage clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_mode_rect *
+drm_plane_get_damage_clips(const struct drm_plane_state *state)
+{
+ return (struct drm_mode_rect *)((state && state->fb_damage_clips) ?
+ state->fb_damage_clips->data : NULL);
+}
#endif
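A plane update hook could walk the damage blob with the two accessors; flush_rect() and new_state are hypothetical, and drivers that want clipped &drm_rect entries would normally go through the damage helper iterator instead.

	struct drm_mode_rect *clips = drm_plane_get_damage_clips(new_state);
	unsigned int i, num_clips = drm_plane_get_damage_clips_count(new_state);

	for (i = 0; i < num_clips; i++) {
		/* framebuffer coordinates, not 16.16 fixed point */
		flush_rect(fb, clips[i].x1, clips[i].y1,
			   clips[i].x2, clips[i].y2);
	}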
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 26cee2934781..331ebd60b3a3 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -38,42 +38,7 @@
*/
#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
-int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dest,
- unsigned int rotation,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible);
-int drm_primary_helper_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx);
-int drm_primary_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
-int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx);
-int drm_plane_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
-
-/* For use by drm_crtc_helper.c */
-int drm_plane_helper_commit(struct drm_plane *plane,
- struct drm_plane_state *plane_state,
- struct drm_framebuffer *old_fb);
#endif
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index d716d653b096..b03731a3f079 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -70,6 +70,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
+int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
@@ -93,9 +94,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
- void *addr);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 5b9efff35d6d..4a0a80d658c7 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -153,7 +153,8 @@ struct drm_property {
* userspace. The kernel is allowed to update the value of these
* properties. This is generally used to expose probe state to
* userspace, e.g. the EDID, or the connector path property on DP
- * MST sinks.
+ * MST sinks. The kernel can update the value of an immutable property
+ * by calling drm_object_property_set_value().
*/
uint32_t flags;
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 425432b85a87..b1fe921f8e8f 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -131,10 +131,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
u32 handle);
-void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point,
+void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence);
int drm_syncobj_find_fence(struct drm_file *file_private,
- u32 handle, u64 point,
+ u32 handle, u64 point, u64 flags,
struct dma_fence **fence);
void drm_syncobj_free(struct kref *kref);
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index d25a9603ab57..6ad9630d4f48 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -95,7 +95,7 @@ struct drm_vblank_crtc {
/**
* @queue: Wait queue for vblank waiters.
*/
- wait_queue_head_t queue; /**< VBLANK wait queue */
+ wait_queue_head_t queue;
/**
* @disable_timer: Disable timer for the delayed vblank disabling
* hysteresis logic. Vblank disabling is controlled through the
@@ -107,7 +107,7 @@ struct drm_vblank_crtc {
/**
* @seqlock: Protect vblank count and time.
*/
- seqlock_t seqlock; /* protects vblank count and time */
+ seqlock_t seqlock;
/**
* @count: Current software vblank counter.
@@ -123,7 +123,7 @@ struct drm_vblank_crtc {
* this refcount reaches 0 can the hardware interrupt be disabled using
* @disable_timer.
*/
- atomic_t refcount; /* number of users of vblank interruptsper crtc */
+ atomic_t refcount;
/**
* @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
*/
@@ -136,7 +136,7 @@ struct drm_vblank_crtc {
* call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
* save and restore the vblank count.
*/
- unsigned int inmodeset; /* Display driver is setting mode */
+ unsigned int inmodeset;
/**
* @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
* structure.
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d87b268f1781..47e19796c450 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -264,6 +264,7 @@ struct drm_sched_backend_ops {
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
* @num_jobs: the number of jobs in queue in the scheduler
+ * @ready: marks if the underlying HW is ready to work
*
* One scheduler is implemented for each hardware ring.
*/
@@ -283,22 +284,26 @@ struct drm_gpu_scheduler {
spinlock_t job_list_lock;
int hang_limit;
atomic_t num_jobs;
+ bool ready;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
uint32_t hw_submission, unsigned hang_limit, long timeout,
const char *name);
+
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
@@ -326,4 +331,8 @@ struct drm_sched_fence *drm_sched_fence_create(
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining);
+
#endif
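The new drm_sched_job_cleanup() is meant to be called once a job is done with the scheduler, typically from the driver's free_job callback (or on the error path after drm_sched_job_init()); struct my_job below is a hypothetical wrapper.

struct my_job {
	struct drm_sched_job base;
	/* driver fences, command stream, ... */
};

static void my_sched_free_job(struct drm_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	drm_sched_job_cleanup(sched_job);	/* drop scheduler-side state */
	kfree(job);
}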
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fd965ffbb92e..192667144693 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -365,16 +365,20 @@
INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
/* AML/KBL Y GT2 */
-#define INTEL_AML_GT2_IDS(info) \
+#define INTEL_AML_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
+/* AML/CFL Y GT2 */
+#define INTEL_AML_CFL_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x87CA, info)
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
INTEL_KBL_GT3_IDS(info), \
INTEL_KBL_GT4_IDS(info), \
- INTEL_AML_GT2_IDS(info)
+ INTEL_AML_KBL_GT2_IDS(info)
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
@@ -407,17 +411,17 @@
/* WHL/CFL U GT1 */
#define INTEL_WHL_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA1, info)
+ INTEL_VGA_DEVICE(0x3EA1, info), \
+ INTEL_VGA_DEVICE(0x3EA4, info)
/* WHL/CFL U GT2 */
#define INTEL_WHL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA0, info)
+ INTEL_VGA_DEVICE(0x3EA0, info), \
+ INTEL_VGA_DEVICE(0x3EA3, info)
/* WHL/CFL U GT3 */
#define INTEL_WHL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA2, info), \
- INTEL_VGA_DEVICE(0x3EA3, info), \
- INTEL_VGA_DEVICE(0x3EA4, info)
+ INTEL_VGA_DEVICE(0x3EA2, info)
#define INTEL_CFL_IDS(info) \
INTEL_CFL_S_GT1_IDS(info), \
@@ -427,7 +431,8 @@
INTEL_CFL_U_GT3_IDS(info), \
INTEL_WHL_U_GT1_IDS(info), \
INTEL_WHL_U_GT2_IDS(info), \
- INTEL_WHL_U_GT3_IDS(info)
+ INTEL_WHL_U_GT3_IDS(info), \
+ INTEL_AML_CFL_GT2_IDS(info)
/* CNL */
#define INTEL_CNL_IDS(info) \
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index fe9827d0ca8a..448aa5ea4722 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -10,10 +10,15 @@
#ifndef __LINUX_TINYDRM_H
#define __LINUX_TINYDRM_H
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <linux/mutex.h>
#include <drm/drm_simple_kms_helper.h>
+struct drm_clip_rect;
+struct drm_driver;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_framebuffer_funcs;
+
/**
* struct tinydrm_device - tinydrm device
*/
@@ -54,27 +59,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
}
/**
- * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations
- *
- * This macro provides a shortcut for setting the tinydrm GEM operations in
- * the &drm_driver structure.
- */
-#define TINYDRM_GEM_DRIVER_OPS \
- .gem_free_object_unlocked = tinydrm_gem_cma_free_object, \
- .gem_print_info = drm_gem_cma_print_info, \
- .gem_vm_ops = &drm_gem_cma_vm_ops, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import = drm_gem_prime_import, \
- .gem_prime_export = drm_gem_prime_export, \
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, \
- .gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \
- .gem_prime_vmap = drm_gem_cma_prime_vmap, \
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \
- .gem_prime_mmap = drm_gem_cma_prime_mmap, \
- .dumb_create = drm_gem_cma_dumb_create
-
-/**
* TINYDRM_MODE - tinydrm display mode
* @hd: Horizontal resolution, width
* @vd: Vertical resolution, height
@@ -97,11 +81,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
.type = DRM_MODE_TYPE_DRIVER, \
.clock = 1 /* pass validation */
-void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-struct drm_gem_object *
-tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
const struct drm_framebuffer_funcs *fb_funcs,
struct drm_driver *driver);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e4fee8e02559..1021106438b2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -31,7 +31,6 @@
#define _TTM_BO_DRIVER_H_
#include <drm/drm_mm.h>
-#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
@@ -385,15 +384,6 @@ struct ttm_bo_driver {
};
/**
- * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
- */
-
-struct ttm_bo_global_ref {
- struct drm_global_reference ref;
- struct ttm_mem_global *mem_glob;
-};
-
-/**
* struct ttm_bo_global - Buffer object driver global data.
*
* @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
@@ -407,7 +397,7 @@ struct ttm_bo_global_ref {
* @swap_lru: Lru list of buffer objects used for swapping.
*/
-struct ttm_bo_global {
+extern struct ttm_bo_global {
/**
* Constant after init.
@@ -416,12 +406,12 @@ struct ttm_bo_global {
struct kobject kobj;
struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
- struct mutex device_list_mutex;
spinlock_t lru_lock;
/**
- * Protected by device_list_mutex.
+ * Protected by ttm_global_mutex.
*/
+ unsigned int use_count;
struct list_head device_list;
/**
@@ -433,7 +423,7 @@ struct ttm_bo_global {
* Internal protection.
*/
atomic_t bo_count;
-};
+} ttm_bo_glob;
#define TTM_NUM_MEM_TYPES 8
@@ -578,9 +568,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-void ttm_bo_global_release(struct drm_global_reference *ref);
-int ttm_bo_global_init(struct drm_global_reference *ref);
-
int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
@@ -598,7 +585,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
* Returns:
* !0: Failure.
*/
-int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset, bool need_dma32);
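With the TTM globals now module-wide singletons, the glob argument simply disappears from the driver's init path; a sketch, where mydev, my_bo_driver and DRM_FILE_PAGE_OFFSET are driver-local placeholders.

	ret = ttm_bo_device_init(&mydev->bdev, &my_bo_driver,
				 mydev->drm.anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET, true);
	if (ret) {
		dev_err(mydev->dev, "failed to init TTM BO device (%d)\n", ret);
		return ret;
	}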
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index b0fdd1980034..621615fa7728 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -40,13 +40,13 @@
*
* @head: list head for thread-private list.
* @bo: refcounted buffer object pointer.
- * @shared: should the fence be added shared?
+ * @num_shared: How many shared fences we want to add.
*/
struct ttm_validate_buffer {
struct list_head head;
struct ttm_buffer_object *bo;
- bool shared;
+ unsigned int num_shared;
};
/**
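A sketch of the updated reservation setup: instead of a shared/exclusive bool, callers now state how many shared fence slots they will need; bo, list and ticket are assumed to exist in the caller.

	struct ttm_validate_buffer tv = {
		.bo		= bo,
		.num_shared	= 1,	/* reserve room for one shared fence */
	};

	list_add(&tv.head, &list);
	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);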
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 737b5fed8003..3ff48a0a2d7b 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -63,7 +63,7 @@
#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
-struct ttm_mem_global {
+extern struct ttm_mem_global {
struct kobject kobj;
struct ttm_bo_global *bo_glob;
struct workqueue_struct *swap_queue;
@@ -78,7 +78,7 @@ struct ttm_mem_global {
#else
struct ttm_mem_zone *zone_dma32;
#endif
-};
+} ttm_mem_glob;
extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
diff --git a/include/dt-bindings/clock/bcm2835-aux.h b/include/dt-bindings/clock/bcm2835-aux.h
index d91156e2658d..bb79de383a3b 100644
--- a/include/dt-bindings/clock/bcm2835-aux.h
+++ b/include/dt-bindings/clock/bcm2835-aux.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define BCM2835_AUX_CLOCK_UART 0
diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h
index a0c812b0fa39..2cec01f96897 100644
--- a/include/dt-bindings/clock/bcm2835.h
+++ b/include/dt-bindings/clock/bcm2835.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define BCM2835_PLLA 0
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 3979d48c025f..db0763e96173 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -128,5 +128,23 @@
#define CLKID_VDEC_1 153
#define CLKID_VDEC_HEVC 156
#define CLKID_GEN_CLK 159
+#define CLKID_VID_PLL 166
+#define CLKID_VCLK 175
+#define CLKID_VCLK2 176
+#define CLKID_VCLK_DIV1 185
+#define CLKID_VCLK_DIV2 186
+#define CLKID_VCLK_DIV4 187
+#define CLKID_VCLK_DIV6 188
+#define CLKID_VCLK_DIV12 189
+#define CLKID_VCLK2_DIV1 190
+#define CLKID_VCLK2_DIV2 191
+#define CLKID_VCLK2_DIV4 192
+#define CLKID_VCLK2_DIV6 193
+#define CLKID_VCLK2_DIV12 194
+#define CLKID_CTS_ENCI 199
+#define CLKID_CTS_ENCP 200
+#define CLKID_CTS_VDAC 201
+#define CLKID_HDMI_TX 202
+#define CLKID_HDMI 205
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 87b068f4a998..b3cef297d5df 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -274,6 +274,8 @@
#define IMX6QDL_CLK_EPIT1 261
#define IMX6QDL_CLK_EPIT2 262
#define IMX6QDL_CLK_MMDC_P0_IPG 263
-#define IMX6QDL_CLK_END 264
+#define IMX6QDL_CLK_DCIC1 264
+#define IMX6QDL_CLK_DCIC2 265
+#define IMX6QDL_CLK_END 266
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h
new file mode 100644
index 000000000000..21d872e69cb1
--- /dev/null
+++ b/include/dt-bindings/clock/imx7ulp-clock.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX7ULP_H
+#define __DT_BINDINGS_CLOCK_IMX7ULP_H
+
+/* SCG1 */
+
+#define IMX7ULP_CLK_DUMMY 0
+#define IMX7ULP_CLK_ROSC 1
+#define IMX7ULP_CLK_SOSC 2
+#define IMX7ULP_CLK_FIRC 3
+#define IMX7ULP_CLK_SPLL_PRE_SEL 4
+#define IMX7ULP_CLK_SPLL_PRE_DIV 5
+#define IMX7ULP_CLK_SPLL 6
+#define IMX7ULP_CLK_SPLL_POST_DIV1 7
+#define IMX7ULP_CLK_SPLL_POST_DIV2 8
+#define IMX7ULP_CLK_SPLL_PFD0 9
+#define IMX7ULP_CLK_SPLL_PFD1 10
+#define IMX7ULP_CLK_SPLL_PFD2 11
+#define IMX7ULP_CLK_SPLL_PFD3 12
+#define IMX7ULP_CLK_SPLL_PFD_SEL 13
+#define IMX7ULP_CLK_SPLL_SEL 14
+#define IMX7ULP_CLK_APLL_PRE_SEL 15
+#define IMX7ULP_CLK_APLL_PRE_DIV 16
+#define IMX7ULP_CLK_APLL 17
+#define IMX7ULP_CLK_APLL_POST_DIV1 18
+#define IMX7ULP_CLK_APLL_POST_DIV2 19
+#define IMX7ULP_CLK_APLL_PFD0 20
+#define IMX7ULP_CLK_APLL_PFD1 21
+#define IMX7ULP_CLK_APLL_PFD2 22
+#define IMX7ULP_CLK_APLL_PFD3 23
+#define IMX7ULP_CLK_APLL_PFD_SEL 24
+#define IMX7ULP_CLK_APLL_SEL 25
+#define IMX7ULP_CLK_UPLL 26
+#define IMX7ULP_CLK_SYS_SEL 27
+#define IMX7ULP_CLK_CORE_DIV 28
+#define IMX7ULP_CLK_BUS_DIV 29
+#define IMX7ULP_CLK_PLAT_DIV 30
+#define IMX7ULP_CLK_DDR_SEL 31
+#define IMX7ULP_CLK_DDR_DIV 32
+#define IMX7ULP_CLK_NIC_SEL 33
+#define IMX7ULP_CLK_NIC0_DIV 34
+#define IMX7ULP_CLK_GPU_DIV 35
+#define IMX7ULP_CLK_NIC1_DIV 36
+#define IMX7ULP_CLK_NIC1_BUS_DIV 37
+#define IMX7ULP_CLK_NIC1_EXT_DIV 38
+#define IMX7ULP_CLK_MIPI_PLL 39
+#define IMX7ULP_CLK_SIRC 40
+#define IMX7ULP_CLK_SOSC_BUS_CLK 41
+#define IMX7ULP_CLK_FIRC_BUS_CLK 42
+#define IMX7ULP_CLK_SPLL_BUS_CLK 43
+#define IMX7ULP_CLK_HSRUN_SYS_SEL 44
+#define IMX7ULP_CLK_HSRUN_CORE_DIV 45
+
+#define IMX7ULP_CLK_SCG1_END 46
+
+/* PCC2 */
+#define IMX7ULP_CLK_DMA1 0
+#define IMX7ULP_CLK_RGPIO2P1 1
+#define IMX7ULP_CLK_FLEXBUS 2
+#define IMX7ULP_CLK_SEMA42_1 3
+#define IMX7ULP_CLK_DMA_MUX1 4
+#define IMX7ULP_CLK_SNVS 5
+#define IMX7ULP_CLK_CAAM 6
+#define IMX7ULP_CLK_LPTPM4 7
+#define IMX7ULP_CLK_LPTPM5 8
+#define IMX7ULP_CLK_LPIT1 9
+#define IMX7ULP_CLK_LPSPI2 10
+#define IMX7ULP_CLK_LPSPI3 11
+#define IMX7ULP_CLK_LPI2C4 12
+#define IMX7ULP_CLK_LPI2C5 13
+#define IMX7ULP_CLK_LPUART4 14
+#define IMX7ULP_CLK_LPUART5 15
+#define IMX7ULP_CLK_FLEXIO1 16
+#define IMX7ULP_CLK_USB0 17
+#define IMX7ULP_CLK_USB1 18
+#define IMX7ULP_CLK_USB_PHY 19
+#define IMX7ULP_CLK_USB_PL301 20
+#define IMX7ULP_CLK_USDHC0 21
+#define IMX7ULP_CLK_USDHC1 22
+#define IMX7ULP_CLK_WDG1 23
+#define IMX7ULP_CLK_WDG2 24
+
+#define IMX7ULP_CLK_PCC2_END 25
+
+/* PCC3 */
+#define IMX7ULP_CLK_LPTPM6 0
+#define IMX7ULP_CLK_LPTPM7 1
+#define IMX7ULP_CLK_LPI2C6 2
+#define IMX7ULP_CLK_LPI2C7 3
+#define IMX7ULP_CLK_LPUART6 4
+#define IMX7ULP_CLK_LPUART7 5
+#define IMX7ULP_CLK_VIU 6
+#define IMX7ULP_CLK_DSI 7
+#define IMX7ULP_CLK_LCDIF 8
+#define IMX7ULP_CLK_MMDC 9
+#define IMX7ULP_CLK_PCTLC 10
+#define IMX7ULP_CLK_PCTLD 11
+#define IMX7ULP_CLK_PCTLE 12
+#define IMX7ULP_CLK_PCTLF 13
+#define IMX7ULP_CLK_GPU3D 14
+#define IMX7ULP_CLK_GPU2D 15
+
+#define IMX7ULP_CLK_PCC3_END 16
+
+/* SMC1 */
+#define IMX7ULP_CLK_ARM 0
+
+#define IMX7ULP_CLK_SMC1_END 1
+
+#endif /* __DT_BINDINGS_CLOCK_IMX7ULP_H */
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
new file mode 100644
index 000000000000..b53be41929be
--- /dev/null
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8MQ_H
+#define __DT_BINDINGS_CLOCK_IMX8MQ_H
+
+#define IMX8MQ_CLK_DUMMY 0
+#define IMX8MQ_CLK_32K 1
+#define IMX8MQ_CLK_25M 2
+#define IMX8MQ_CLK_27M 3
+#define IMX8MQ_CLK_EXT1 4
+#define IMX8MQ_CLK_EXT2 5
+#define IMX8MQ_CLK_EXT3 6
+#define IMX8MQ_CLK_EXT4 7
+
+/* ANAMIX PLL clocks */
+/* FRAC PLLs */
+/* ARM PLL */
+#define IMX8MQ_ARM_PLL_REF_SEL 8
+#define IMX8MQ_ARM_PLL_REF_DIV 9
+#define IMX8MQ_ARM_PLL 10
+#define IMX8MQ_ARM_PLL_BYPASS 11
+#define IMX8MQ_ARM_PLL_OUT 12
+
+/* GPU PLL */
+#define IMX8MQ_GPU_PLL_REF_SEL 13
+#define IMX8MQ_GPU_PLL_REF_DIV 14
+#define IMX8MQ_GPU_PLL 15
+#define IMX8MQ_GPU_PLL_BYPASS 16
+#define IMX8MQ_GPU_PLL_OUT 17
+
+/* VPU PLL */
+#define IMX8MQ_VPU_PLL_REF_SEL 18
+#define IMX8MQ_VPU_PLL_REF_DIV 19
+#define IMX8MQ_VPU_PLL 20
+#define IMX8MQ_VPU_PLL_BYPASS 21
+#define IMX8MQ_VPU_PLL_OUT 22
+
+/* AUDIO PLL1 */
+#define IMX8MQ_AUDIO_PLL1_REF_SEL 23
+#define IMX8MQ_AUDIO_PLL1_REF_DIV 24
+#define IMX8MQ_AUDIO_PLL1 25
+#define IMX8MQ_AUDIO_PLL1_BYPASS 26
+#define IMX8MQ_AUDIO_PLL1_OUT 27
+
+/* AUDIO PLL2 */
+#define IMX8MQ_AUDIO_PLL2_REF_SEL 28
+#define IMX8MQ_AUDIO_PLL2_REF_DIV 29
+#define IMX8MQ_AUDIO_PLL2 30
+#define IMX8MQ_AUDIO_PLL2_BYPASS 31
+#define IMX8MQ_AUDIO_PLL2_OUT 32
+
+/* VIDEO PLL1 */
+#define IMX8MQ_VIDEO_PLL1_REF_SEL 33
+#define IMX8MQ_VIDEO_PLL1_REF_DIV 34
+#define IMX8MQ_VIDEO_PLL1 35
+#define IMX8MQ_VIDEO_PLL1_BYPASS 36
+#define IMX8MQ_VIDEO_PLL1_OUT 37
+
+/* SYS1 PLL */
+#define IMX8MQ_SYS1_PLL1_REF_SEL 38
+#define IMX8MQ_SYS1_PLL1_REF_DIV 39
+#define IMX8MQ_SYS1_PLL1 40
+#define IMX8MQ_SYS1_PLL1_OUT 41
+#define IMX8MQ_SYS1_PLL1_OUT_DIV 42
+#define IMX8MQ_SYS1_PLL2 43
+#define IMX8MQ_SYS1_PLL2_DIV 44
+#define IMX8MQ_SYS1_PLL2_OUT 45
+
+/* SYS2 PLL */
+#define IMX8MQ_SYS2_PLL1_REF_SEL 46
+#define IMX8MQ_SYS2_PLL1_REF_DIV 47
+#define IMX8MQ_SYS2_PLL1 48
+#define IMX8MQ_SYS2_PLL1_OUT 49
+#define IMX8MQ_SYS2_PLL1_OUT_DIV 50
+#define IMX8MQ_SYS2_PLL2 51
+#define IMX8MQ_SYS2_PLL2_DIV 52
+#define IMX8MQ_SYS2_PLL2_OUT 53
+
+/* SYS3 PLL */
+#define IMX8MQ_SYS3_PLL1_REF_SEL 54
+#define IMX8MQ_SYS3_PLL1_REF_DIV 55
+#define IMX8MQ_SYS3_PLL1 56
+#define IMX8MQ_SYS3_PLL1_OUT 57
+#define IMX8MQ_SYS3_PLL1_OUT_DIV 58
+#define IMX8MQ_SYS3_PLL2 59
+#define IMX8MQ_SYS3_PLL2_DIV 60
+#define IMX8MQ_SYS3_PLL2_OUT 61
+
+/* DRAM PLL */
+#define IMX8MQ_DRAM_PLL1_REF_SEL 62
+#define IMX8MQ_DRAM_PLL1_REF_DIV 63
+#define IMX8MQ_DRAM_PLL1 64
+#define IMX8MQ_DRAM_PLL1_OUT 65
+#define IMX8MQ_DRAM_PLL1_OUT_DIV 66
+#define IMX8MQ_DRAM_PLL2 67
+#define IMX8MQ_DRAM_PLL2_DIV 68
+#define IMX8MQ_DRAM_PLL2_OUT 69
+
+/* SYS PLL DIV */
+#define IMX8MQ_SYS1_PLL_40M 70
+#define IMX8MQ_SYS1_PLL_80M 71
+#define IMX8MQ_SYS1_PLL_100M 72
+#define IMX8MQ_SYS1_PLL_133M 73
+#define IMX8MQ_SYS1_PLL_160M 74
+#define IMX8MQ_SYS1_PLL_200M 75
+#define IMX8MQ_SYS1_PLL_266M 76
+#define IMX8MQ_SYS1_PLL_400M 77
+#define IMX8MQ_SYS1_PLL_800M 78
+
+#define IMX8MQ_SYS2_PLL_50M 79
+#define IMX8MQ_SYS2_PLL_100M 80
+#define IMX8MQ_SYS2_PLL_125M 81
+#define IMX8MQ_SYS2_PLL_166M 82
+#define IMX8MQ_SYS2_PLL_200M 83
+#define IMX8MQ_SYS2_PLL_250M 84
+#define IMX8MQ_SYS2_PLL_333M 85
+#define IMX8MQ_SYS2_PLL_500M 86
+#define IMX8MQ_SYS2_PLL_1000M 87
+
+/* CCM ROOT clocks */
+/* A53 */
+#define IMX8MQ_CLK_A53_SRC 88
+#define IMX8MQ_CLK_A53_CG 89
+#define IMX8MQ_CLK_A53_DIV 90
+/* M4 */
+#define IMX8MQ_CLK_M4_SRC 91
+#define IMX8MQ_CLK_M4_CG 92
+#define IMX8MQ_CLK_M4_DIV 93
+/* VPU */
+#define IMX8MQ_CLK_VPU_SRC 94
+#define IMX8MQ_CLK_VPU_CG 95
+#define IMX8MQ_CLK_VPU_DIV 96
+/* GPU CORE */
+#define IMX8MQ_CLK_GPU_CORE_SRC 97
+#define IMX8MQ_CLK_GPU_CORE_CG 98
+#define IMX8MQ_CLK_GPU_CORE_DIV 99
+/* GPU SHADER */
+#define IMX8MQ_CLK_GPU_SHADER_SRC 100
+#define IMX8MQ_CLK_GPU_SHADER_CG 101
+#define IMX8MQ_CLK_GPU_SHADER_DIV 102
+
+/* BUS TYPE */
+/* MAIN AXI */
+#define IMX8MQ_CLK_MAIN_AXI 103
+/* ENET AXI */
+#define IMX8MQ_CLK_ENET_AXI 104
+/* NAND_USDHC_BUS */
+#define IMX8MQ_CLK_NAND_USDHC_BUS 105
+/* VPU BUS */
+#define IMX8MQ_CLK_VPU_BUS 106
+/* DISP_AXI */
+#define IMX8MQ_CLK_DISP_AXI 107
+/* DISP APB */
+#define IMX8MQ_CLK_DISP_APB 108
+/* DISP RTRM */
+#define IMX8MQ_CLK_DISP_RTRM 109
+/* USB_BUS */
+#define IMX8MQ_CLK_USB_BUS 110
+/* GPU_AXI */
+#define IMX8MQ_CLK_GPU_AXI 111
+/* GPU_AHB */
+#define IMX8MQ_CLK_GPU_AHB 112
+/* NOC */
+#define IMX8MQ_CLK_NOC 113
+/* NOC_APB */
+#define IMX8MQ_CLK_NOC_APB 115
+
+/* AHB */
+#define IMX8MQ_CLK_AHB 116
+/* AUDIO AHB */
+#define IMX8MQ_CLK_AUDIO_AHB 117
+
+/* DRAM_ALT */
+#define IMX8MQ_CLK_DRAM_ALT 118
+/* DRAM APB */
+#define IMX8MQ_CLK_DRAM_APB 119
+/* VPU_G1 */
+#define IMX8MQ_CLK_VPU_G1 120
+/* VPU_G2 */
+#define IMX8MQ_CLK_VPU_G2 121
+/* DISP_DTRC */
+#define IMX8MQ_CLK_DISP_DTRC 122
+/* DISP_DC8000 */
+#define IMX8MQ_CLK_DISP_DC8000 123
+/* PCIE_CTRL */
+#define IMX8MQ_CLK_PCIE1_CTRL 124
+/* PCIE_PHY */
+#define IMX8MQ_CLK_PCIE1_PHY 125
+/* PCIE_AUX */
+#define IMX8MQ_CLK_PCIE1_AUX 126
+/* DC_PIXEL */
+#define IMX8MQ_CLK_DC_PIXEL 127
+/* LCDIF_PIXEL */
+#define IMX8MQ_CLK_LCDIF_PIXEL 128
+/* SAI1~6 */
+#define IMX8MQ_CLK_SAI1 129
+
+#define IMX8MQ_CLK_SAI2 130
+
+#define IMX8MQ_CLK_SAI3 131
+
+#define IMX8MQ_CLK_SAI4 132
+
+#define IMX8MQ_CLK_SAI5 133
+
+#define IMX8MQ_CLK_SAI6 134
+/* SPDIF1 */
+#define IMX8MQ_CLK_SPDIF1 135
+/* SPDIF2 */
+#define IMX8MQ_CLK_SPDIF2 136
+/* ENET_REF */
+#define IMX8MQ_CLK_ENET_REF 137
+/* ENET_TIMER */
+#define IMX8MQ_CLK_ENET_TIMER 138
+/* ENET_PHY */
+#define IMX8MQ_CLK_ENET_PHY_REF 139
+/* NAND */
+#define IMX8MQ_CLK_NAND 140
+/* QSPI */
+#define IMX8MQ_CLK_QSPI 141
+/* USDHC1 */
+#define IMX8MQ_CLK_USDHC1 142
+/* USDHC2 */
+#define IMX8MQ_CLK_USDHC2 143
+/* I2C1 */
+#define IMX8MQ_CLK_I2C1 144
+/* I2C2 */
+#define IMX8MQ_CLK_I2C2 145
+/* I2C3 */
+#define IMX8MQ_CLK_I2C3 146
+/* I2C4 */
+#define IMX8MQ_CLK_I2C4 147
+/* UART1 */
+#define IMX8MQ_CLK_UART1 148
+/* UART2 */
+#define IMX8MQ_CLK_UART2 149
+/* UART3 */
+#define IMX8MQ_CLK_UART3 150
+/* UART4 */
+#define IMX8MQ_CLK_UART4 151
+/* USB_CORE_REF */
+#define IMX8MQ_CLK_USB_CORE_REF 152
+/* USB_PHY_REF */
+#define IMX8MQ_CLK_USB_PHY_REF 163
+/* ECSPI1 */
+#define IMX8MQ_CLK_ECSPI1 164
+/* ECSPI2 */
+#define IMX8MQ_CLK_ECSPI2 165
+/* PWM1 */
+#define IMX8MQ_CLK_PWM1 166
+/* PWM2 */
+#define IMX8MQ_CLK_PWM2 167
+/* PWM3 */
+#define IMX8MQ_CLK_PWM3 168
+/* PWM4 */
+#define IMX8MQ_CLK_PWM4 169
+/* GPT1 */
+#define IMX8MQ_CLK_GPT1 170
+/* WDOG */
+#define IMX8MQ_CLK_WDOG 171
+/* WRCLK */
+#define IMX8MQ_CLK_WRCLK 172
+/* DSI_CORE */
+#define IMX8MQ_CLK_DSI_CORE 173
+/* DSI_PHY */
+#define IMX8MQ_CLK_DSI_PHY_REF 174
+/* DSI_DBI */
+#define IMX8MQ_CLK_DSI_DBI 175
+/* DSI_ESC */
+#define IMX8MQ_CLK_DSI_ESC 176
+/* CSI1_CORE */
+#define IMX8MQ_CLK_CSI1_CORE 177
+/* CSI1_PHY */
+#define IMX8MQ_CLK_CSI1_PHY_REF 178
+/* CSI_ESC */
+#define IMX8MQ_CLK_CSI1_ESC 179
+/* CSI2_CORE */
+#define IMX8MQ_CLK_CSI2_CORE 180
+/* CSI2_PHY */
+#define IMX8MQ_CLK_CSI2_PHY_REF 181
+/* CSI2_ESC */
+#define IMX8MQ_CLK_CSI2_ESC 182
+/* PCIE2_CTRL */
+#define IMX8MQ_CLK_PCIE2_CTRL 183
+/* PCIE2_PHY */
+#define IMX8MQ_CLK_PCIE2_PHY 184
+/* PCIE2_AUX */
+#define IMX8MQ_CLK_PCIE2_AUX 185
+/* ECSPI3 */
+#define IMX8MQ_CLK_ECSPI3 186
+
+/* CCGR clocks */
+#define IMX8MQ_CLK_A53_ROOT 187
+#define IMX8MQ_CLK_DRAM_ROOT 188
+#define IMX8MQ_CLK_ECSPI1_ROOT 189
+#define IMX8MQ_CLK_ECSPI2_ROOT 190
+#define IMX8MQ_CLK_ECSPI3_ROOT 191
+#define IMX8MQ_CLK_ENET1_ROOT 192
+#define IMX8MQ_CLK_GPT1_ROOT 193
+#define IMX8MQ_CLK_I2C1_ROOT 194
+#define IMX8MQ_CLK_I2C2_ROOT 195
+#define IMX8MQ_CLK_I2C3_ROOT 196
+#define IMX8MQ_CLK_I2C4_ROOT 197
+#define IMX8MQ_CLK_M4_ROOT 198
+#define IMX8MQ_CLK_PCIE1_ROOT 199
+#define IMX8MQ_CLK_PCIE2_ROOT 200
+#define IMX8MQ_CLK_PWM1_ROOT 201
+#define IMX8MQ_CLK_PWM2_ROOT 202
+#define IMX8MQ_CLK_PWM3_ROOT 203
+#define IMX8MQ_CLK_PWM4_ROOT 204
+#define IMX8MQ_CLK_QSPI_ROOT 205
+#define IMX8MQ_CLK_SAI1_ROOT 206
+#define IMX8MQ_CLK_SAI2_ROOT 207
+#define IMX8MQ_CLK_SAI3_ROOT 208
+#define IMX8MQ_CLK_SAI4_ROOT 209
+#define IMX8MQ_CLK_SAI5_ROOT 210
+#define IMX8MQ_CLK_SAI6_ROOT 212
+#define IMX8MQ_CLK_UART1_ROOT 213
+#define IMX8MQ_CLK_UART2_ROOT 214
+#define IMX8MQ_CLK_UART3_ROOT 215
+#define IMX8MQ_CLK_UART4_ROOT 216
+#define IMX8MQ_CLK_USB1_CTRL_ROOT 217
+#define IMX8MQ_CLK_USB2_CTRL_ROOT 218
+#define IMX8MQ_CLK_USB1_PHY_ROOT 219
+#define IMX8MQ_CLK_USB2_PHY_ROOT 220
+#define IMX8MQ_CLK_USDHC1_ROOT 221
+#define IMX8MQ_CLK_USDHC2_ROOT 222
+#define IMX8MQ_CLK_WDOG1_ROOT 223
+#define IMX8MQ_CLK_WDOG2_ROOT 224
+#define IMX8MQ_CLK_WDOG3_ROOT 225
+#define IMX8MQ_CLK_GPU_ROOT 226
+#define IMX8MQ_CLK_HEVC_ROOT 227
+#define IMX8MQ_CLK_AVC_ROOT 228
+#define IMX8MQ_CLK_VP9_ROOT 229
+#define IMX8MQ_CLK_HEVC_INTER_ROOT 230
+#define IMX8MQ_CLK_DISP_ROOT 231
+#define IMX8MQ_CLK_HDMI_ROOT 232
+#define IMX8MQ_CLK_HDMI_PHY_ROOT 233
+#define IMX8MQ_CLK_VPU_DEC_ROOT 234
+#define IMX8MQ_CLK_CSI1_ROOT 235
+#define IMX8MQ_CLK_CSI2_ROOT 236
+#define IMX8MQ_CLK_RAWNAND_ROOT 237
+#define IMX8MQ_CLK_SDMA1_ROOT 238
+#define IMX8MQ_CLK_SDMA2_ROOT 239
+#define IMX8MQ_CLK_VPU_G1_ROOT 240
+#define IMX8MQ_CLK_VPU_G2_ROOT 241
+
+/* SCCG PLL GATE */
+#define IMX8MQ_SYS1_PLL_OUT 242
+#define IMX8MQ_SYS2_PLL_OUT 243
+#define IMX8MQ_SYS3_PLL_OUT 244
+#define IMX8MQ_DRAM_PLL_OUT 245
+
+#define IMX8MQ_GPT_3M_CLK 246
+
+#define IMX8MQ_CLK_IPG_ROOT 247
+#define IMX8MQ_CLK_IPG_AUDIO_ROOT 248
+#define IMX8MQ_CLK_SAI1_IPG 249
+#define IMX8MQ_CLK_SAI2_IPG 250
+#define IMX8MQ_CLK_SAI3_IPG 251
+#define IMX8MQ_CLK_SAI4_IPG 252
+#define IMX8MQ_CLK_SAI5_IPG 253
+#define IMX8MQ_CLK_SAI6_IPG 254
+
+/* DSI AHB/IPG clocks */
+/* rxesc clock */
+#define IMX8MQ_CLK_DSI_AHB 255
+/* txesc clock */
+#define IMX8MQ_CLK_DSI_IPG_DIV 256
+
+#define IMX8MQ_CLK_TMU_ROOT 265
+
+/* Display root clocks */
+#define IMX8MQ_CLK_DISP_AXI_ROOT 266
+#define IMX8MQ_CLK_DISP_APB_ROOT 267
+#define IMX8MQ_CLK_DISP_RTRM_ROOT 268
+
+#define IMX8MQ_CLK_OCOTP_ROOT 269
+
+#define IMX8MQ_CLK_DRAM_ALT_ROOT 270
+#define IMX8MQ_CLK_DRAM_CORE 271
+
+#define IMX8MQ_CLK_MU_ROOT 272
+#define IMX8MQ_VIDEO2_PLL_OUT 273
+
+#define IMX8MQ_CLK_CLKO2 274
+
+#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 275
+
+#define IMX8MQ_CLK_END 276
+#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/imx8qxp-clock.h b/include/dt-bindings/clock/imx8qxp-clock.h
new file mode 100644
index 000000000000..6fec3687f3c7
--- /dev/null
+++ b/include/dt-bindings/clock/imx8qxp-clock.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8QXP_H
+#define __DT_BINDINGS_CLOCK_IMX8QXP_H
+
+/* SCU Clocks */
+
+#define IMX8QXP_CLK_DUMMY 0
+
+/* CPU */
+#define IMX8QXP_A35_CLK 1
+
+/* LSIO SS */
+#define IMX8QXP_LSIO_MEM_CLK 2
+#define IMX8QXP_LSIO_BUS_CLK 3
+#define IMX8QXP_LSIO_PWM0_CLK 10
+#define IMX8QXP_LSIO_PWM1_CLK 11
+#define IMX8QXP_LSIO_PWM2_CLK 12
+#define IMX8QXP_LSIO_PWM3_CLK 13
+#define IMX8QXP_LSIO_PWM4_CLK 14
+#define IMX8QXP_LSIO_PWM5_CLK 15
+#define IMX8QXP_LSIO_PWM6_CLK 16
+#define IMX8QXP_LSIO_PWM7_CLK 17
+#define IMX8QXP_LSIO_GPT0_CLK 18
+#define IMX8QXP_LSIO_GPT1_CLK 19
+#define IMX8QXP_LSIO_GPT2_CLK 20
+#define IMX8QXP_LSIO_GPT3_CLK 21
+#define IMX8QXP_LSIO_GPT4_CLK 22
+#define IMX8QXP_LSIO_FSPI0_CLK 23
+#define IMX8QXP_LSIO_FSPI1_CLK 24
+
+/* Connectivity SS */
+#define IMX8QXP_CONN_AXI_CLK_ROOT 30
+#define IMX8QXP_CONN_AHB_CLK_ROOT 31
+#define IMX8QXP_CONN_IPG_CLK_ROOT 32
+#define IMX8QXP_CONN_SDHC0_CLK 40
+#define IMX8QXP_CONN_SDHC1_CLK 41
+#define IMX8QXP_CONN_SDHC2_CLK 42
+#define IMX8QXP_CONN_ENET0_ROOT_CLK 43
+#define IMX8QXP_CONN_ENET0_BYPASS_CLK 44
+#define IMX8QXP_CONN_ENET0_RGMII_CLK 45
+#define IMX8QXP_CONN_ENET1_ROOT_CLK 46
+#define IMX8QXP_CONN_ENET1_BYPASS_CLK 47
+#define IMX8QXP_CONN_ENET1_RGMII_CLK 48
+#define IMX8QXP_CONN_GPMI_BCH_IO_CLK 49
+#define IMX8QXP_CONN_GPMI_BCH_CLK 50
+#define IMX8QXP_CONN_USB2_ACLK 51
+#define IMX8QXP_CONN_USB2_BUS_CLK 52
+#define IMX8QXP_CONN_USB2_LPM_CLK 53
+
+/* HSIO SS */
+#define IMX8QXP_HSIO_AXI_CLK 60
+#define IMX8QXP_HSIO_PER_CLK 61
+
+/* Display controller SS */
+#define IMX8QXP_DC_AXI_EXT_CLK 70
+#define IMX8QXP_DC_AXI_INT_CLK 71
+#define IMX8QXP_DC_CFG_CLK 72
+#define IMX8QXP_DC0_PLL0_CLK 80
+#define IMX8QXP_DC0_PLL1_CLK 81
+#define IMX8QXP_DC0_DISP0_CLK 82
+#define IMX8QXP_DC0_DISP1_CLK 83
+
+/* MIPI-LVDS SS */
+#define IMX8QXP_MIPI_IPG_CLK 90
+#define IMX8QXP_MIPI0_PIXEL_CLK 100
+#define IMX8QXP_MIPI0_BYPASS_CLK 101
+#define IMX8QXP_MIPI0_LVDS_PIXEL_CLK 102
+#define IMX8QXP_MIPI0_LVDS_BYPASS_CLK 103
+#define IMX8QXP_MIPI0_LVDS_PHY_CLK 104
+#define IMX8QXP_MIPI0_I2C0_CLK 105
+#define IMX8QXP_MIPI0_I2C1_CLK 106
+#define IMX8QXP_MIPI0_PWM0_CLK 107
+#define IMX8QXP_MIPI1_PIXEL_CLK 108
+#define IMX8QXP_MIPI1_BYPASS_CLK 109
+#define IMX8QXP_MIPI1_LVDS_PIXEL_CLK 110
+#define IMX8QXP_MIPI1_LVDS_BYPASS_CLK 111
+#define IMX8QXP_MIPI1_LVDS_PHY_CLK 112
+#define IMX8QXP_MIPI1_I2C0_CLK 113
+#define IMX8QXP_MIPI1_I2C1_CLK 114
+#define IMX8QXP_MIPI1_PWM0_CLK 115
+
+/* IMG SS */
+#define IMX8QXP_IMG_AXI_CLK 120
+#define IMX8QXP_IMG_IPG_CLK 121
+#define IMX8QXP_IMG_PXL_CLK 122
+
+/* MIPI-CSI SS */
+#define IMX8QXP_CSI0_CORE_CLK 130
+#define IMX8QXP_CSI0_ESC_CLK 131
+#define IMX8QXP_CSI0_PWM0_CLK 132
+#define IMX8QXP_CSI0_I2C0_CLK 133
+
+/* PARALLEL CSI SS */
+#define IMX8QXP_PARALLEL_CSI_DPLL_CLK 140
+#define IMX8QXP_PARALLEL_CSI_PIXEL_CLK 141
+#define IMX8QXP_PARALLEL_CSI_MCLK_CLK 142
+
+/* VPU SS */
+#define IMX8QXP_VPU_ENC_CLK 150
+#define IMX8QXP_VPU_DEC_CLK 151
+
+/* GPU SS */
+#define IMX8QXP_GPU0_CORE_CLK 160
+#define IMX8QXP_GPU0_SHADER_CLK 161
+
+/* ADMA SS */
+#define IMX8QXP_ADMA_IPG_CLK_ROOT 165
+#define IMX8QXP_ADMA_UART0_CLK 170
+#define IMX8QXP_ADMA_UART1_CLK 171
+#define IMX8QXP_ADMA_UART2_CLK 172
+#define IMX8QXP_ADMA_UART3_CLK 173
+#define IMX8QXP_ADMA_SPI0_CLK 174
+#define IMX8QXP_ADMA_SPI1_CLK 175
+#define IMX8QXP_ADMA_SPI2_CLK 176
+#define IMX8QXP_ADMA_SPI3_CLK 177
+#define IMX8QXP_ADMA_CAN0_CLK 178
+#define IMX8QXP_ADMA_CAN1_CLK 179
+#define IMX8QXP_ADMA_CAN2_CLK 180
+#define IMX8QXP_ADMA_I2C0_CLK 181
+#define IMX8QXP_ADMA_I2C1_CLK 182
+#define IMX8QXP_ADMA_I2C2_CLK 183
+#define IMX8QXP_ADMA_I2C3_CLK 184
+#define IMX8QXP_ADMA_FTM0_CLK 185
+#define IMX8QXP_ADMA_FTM1_CLK 186
+#define IMX8QXP_ADMA_ADC0_CLK 187
+#define IMX8QXP_ADMA_PWM_CLK 188
+#define IMX8QXP_ADMA_LCD_CLK 189
+
+#define IMX8QXP_SCU_CLK_END 190
+
+/* LPCG clocks */
+
+/* LSIO SS LPCG */
+#define IMX8QXP_LSIO_LPCG_PWM0_IPG_CLK 0
+#define IMX8QXP_LSIO_LPCG_PWM0_IPG_S_CLK 1
+#define IMX8QXP_LSIO_LPCG_PWM0_IPG_HF_CLK 2
+#define IMX8QXP_LSIO_LPCG_PWM0_IPG_SLV_CLK 3
+#define IMX8QXP_LSIO_LPCG_PWM0_IPG_MSTR_CLK 4
+#define IMX8QXP_LSIO_LPCG_PWM1_IPG_CLK 5
+#define IMX8QXP_LSIO_LPCG_PWM1_IPG_S_CLK 6
+#define IMX8QXP_LSIO_LPCG_PWM1_IPG_HF_CLK 7
+#define IMX8QXP_LSIO_LPCG_PWM1_IPG_SLV_CLK 8
+#define IMX8QXP_LSIO_LPCG_PWM1_IPG_MSTR_CLK 9
+#define IMX8QXP_LSIO_LPCG_PWM2_IPG_CLK 10
+#define IMX8QXP_LSIO_LPCG_PWM2_IPG_S_CLK 11
+#define IMX8QXP_LSIO_LPCG_PWM2_IPG_HF_CLK 12
+#define IMX8QXP_LSIO_LPCG_PWM2_IPG_SLV_CLK 13
+#define IMX8QXP_LSIO_LPCG_PWM2_IPG_MSTR_CLK 14
+#define IMX8QXP_LSIO_LPCG_PWM3_IPG_CLK 15
+#define IMX8QXP_LSIO_LPCG_PWM3_IPG_S_CLK 16
+#define IMX8QXP_LSIO_LPCG_PWM3_IPG_HF_CLK 17
+#define IMX8QXP_LSIO_LPCG_PWM3_IPG_SLV_CLK 18
+#define IMX8QXP_LSIO_LPCG_PWM3_IPG_MSTR_CLK 19
+#define IMX8QXP_LSIO_LPCG_PWM4_IPG_CLK 20
+#define IMX8QXP_LSIO_LPCG_PWM4_IPG_S_CLK 21
+#define IMX8QXP_LSIO_LPCG_PWM4_IPG_HF_CLK 22
+#define IMX8QXP_LSIO_LPCG_PWM4_IPG_SLV_CLK 23
+#define IMX8QXP_LSIO_LPCG_PWM4_IPG_MSTR_CLK 24
+#define IMX8QXP_LSIO_LPCG_PWM5_IPG_CLK 25
+#define IMX8QXP_LSIO_LPCG_PWM5_IPG_S_CLK 26
+#define IMX8QXP_LSIO_LPCG_PWM5_IPG_HF_CLK 27
+#define IMX8QXP_LSIO_LPCG_PWM5_IPG_SLV_CLK 28
+#define IMX8QXP_LSIO_LPCG_PWM5_IPG_MSTR_CLK 29
+#define IMX8QXP_LSIO_LPCG_PWM6_IPG_CLK 30
+#define IMX8QXP_LSIO_LPCG_PWM6_IPG_S_CLK 31
+#define IMX8QXP_LSIO_LPCG_PWM6_IPG_HF_CLK 32
+#define IMX8QXP_LSIO_LPCG_PWM6_IPG_SLV_CLK 33
+#define IMX8QXP_LSIO_LPCG_PWM6_IPG_MSTR_CLK 34
+#define IMX8QXP_LSIO_LPCG_PWM7_IPG_CLK 35
+#define IMX8QXP_LSIO_LPCG_PWM7_IPG_S_CLK 36
+#define IMX8QXP_LSIO_LPCG_PWM7_IPG_HF_CLK 37
+#define IMX8QXP_LSIO_LPCG_PWM7_IPG_SLV_CLK 38
+#define IMX8QXP_LSIO_LPCG_PWM7_IPG_MSTR_CLK 39
+#define IMX8QXP_LSIO_LPCG_GPT0_IPG_CLK 40
+#define IMX8QXP_LSIO_LPCG_GPT0_IPG_S_CLK 41
+#define IMX8QXP_LSIO_LPCG_GPT0_IPG_HF_CLK 42
+#define IMX8QXP_LSIO_LPCG_GPT0_IPG_SLV_CLK 43
+#define IMX8QXP_LSIO_LPCG_GPT0_IPG_MSTR_CLK 44
+#define IMX8QXP_LSIO_LPCG_GPT1_IPG_CLK 45
+#define IMX8QXP_LSIO_LPCG_GPT1_IPG_S_CLK 46
+#define IMX8QXP_LSIO_LPCG_GPT1_IPG_HF_CLK 47
+#define IMX8QXP_LSIO_LPCG_GPT1_IPG_SLV_CLK 48
+#define IMX8QXP_LSIO_LPCG_GPT1_IPG_MSTR_CLK 49
+#define IMX8QXP_LSIO_LPCG_GPT2_IPG_CLK 50
+#define IMX8QXP_LSIO_LPCG_GPT2_IPG_S_CLK 51
+#define IMX8QXP_LSIO_LPCG_GPT2_IPG_HF_CLK 52
+#define IMX8QXP_LSIO_LPCG_GPT2_IPG_SLV_CLK 53
+#define IMX8QXP_LSIO_LPCG_GPT2_IPG_MSTR_CLK 54
+#define IMX8QXP_LSIO_LPCG_GPT3_IPG_CLK 55
+#define IMX8QXP_LSIO_LPCG_GPT3_IPG_S_CLK 56
+#define IMX8QXP_LSIO_LPCG_GPT3_IPG_HF_CLK 57
+#define IMX8QXP_LSIO_LPCG_GPT3_IPG_SLV_CLK 58
+#define IMX8QXP_LSIO_LPCG_GPT3_IPG_MSTR_CLK 59
+#define IMX8QXP_LSIO_LPCG_GPT4_IPG_CLK 60
+#define IMX8QXP_LSIO_LPCG_GPT4_IPG_S_CLK 61
+#define IMX8QXP_LSIO_LPCG_GPT4_IPG_HF_CLK 62
+#define IMX8QXP_LSIO_LPCG_GPT4_IPG_SLV_CLK 63
+#define IMX8QXP_LSIO_LPCG_GPT4_IPG_MSTR_CLK 64
+#define IMX8QXP_LSIO_LPCG_FSPI0_HCLK 65
+#define IMX8QXP_LSIO_LPCG_FSPI0_IPG_CLK 66
+#define IMX8QXP_LSIO_LPCG_FSPI0_IPG_S_CLK 67
+#define IMX8QXP_LSIO_LPCG_FSPI0_IPG_SFCK 68
+#define IMX8QXP_LSIO_LPCG_FSPI1_HCLK 69
+#define IMX8QXP_LSIO_LPCG_FSPI1_IPG_CLK 70
+#define IMX8QXP_LSIO_LPCG_FSPI1_IPG_S_CLK 71
+#define IMX8QXP_LSIO_LPCG_FSPI1_IPG_SFCK 72
+
+#define IMX8QXP_LSIO_LPCG_CLK_END 73
+
+/* Connectivity SS LPCG */
+#define IMX8QXP_CONN_LPCG_SDHC0_IPG_CLK 0
+#define IMX8QXP_CONN_LPCG_SDHC0_PER_CLK 1
+#define IMX8QXP_CONN_LPCG_SDHC0_HCLK 2
+#define IMX8QXP_CONN_LPCG_SDHC1_IPG_CLK 3
+#define IMX8QXP_CONN_LPCG_SDHC1_PER_CLK 4
+#define IMX8QXP_CONN_LPCG_SDHC1_HCLK 5
+#define IMX8QXP_CONN_LPCG_SDHC2_IPG_CLK 6
+#define IMX8QXP_CONN_LPCG_SDHC2_PER_CLK 7
+#define IMX8QXP_CONN_LPCG_SDHC2_HCLK 8
+#define IMX8QXP_CONN_LPCG_GPMI_APB_CLK 9
+#define IMX8QXP_CONN_LPCG_GPMI_BCH_APB_CLK 10
+#define IMX8QXP_CONN_LPCG_GPMI_BCH_IO_CLK 11
+#define IMX8QXP_CONN_LPCG_GPMI_BCH_CLK 12
+#define IMX8QXP_CONN_LPCG_APBHDMA_CLK 13
+#define IMX8QXP_CONN_LPCG_ENET0_ROOT_CLK 14
+#define IMX8QXP_CONN_LPCG_ENET0_TX_CLK 15
+#define IMX8QXP_CONN_LPCG_ENET0_AHB_CLK 16
+#define IMX8QXP_CONN_LPCG_ENET0_IPG_S_CLK 17
+#define IMX8QXP_CONN_LPCG_ENET0_IPG_CLK 18
+
+#define IMX8QXP_CONN_LPCG_ENET1_ROOT_CLK 19
+#define IMX8QXP_CONN_LPCG_ENET1_TX_CLK 20
+#define IMX8QXP_CONN_LPCG_ENET1_AHB_CLK 21
+#define IMX8QXP_CONN_LPCG_ENET1_IPG_S_CLK 22
+#define IMX8QXP_CONN_LPCG_ENET1_IPG_CLK 23
+
+#define IMX8QXP_CONN_LPCG_CLK_END 24
+
+/* ADMA SS LPCG */
+#define IMX8QXP_ADMA_LPCG_UART0_IPG_CLK 0
+#define IMX8QXP_ADMA_LPCG_UART0_BAUD_CLK 1
+#define IMX8QXP_ADMA_LPCG_UART1_IPG_CLK 2
+#define IMX8QXP_ADMA_LPCG_UART1_BAUD_CLK 3
+#define IMX8QXP_ADMA_LPCG_UART2_IPG_CLK 4
+#define IMX8QXP_ADMA_LPCG_UART2_BAUD_CLK 5
+#define IMX8QXP_ADMA_LPCG_UART3_IPG_CLK 6
+#define IMX8QXP_ADMA_LPCG_UART3_BAUD_CLK 7
+#define IMX8QXP_ADMA_LPCG_SPI0_IPG_CLK 8
+#define IMX8QXP_ADMA_LPCG_SPI1_IPG_CLK 9
+#define IMX8QXP_ADMA_LPCG_SPI2_IPG_CLK 10
+#define IMX8QXP_ADMA_LPCG_SPI3_IPG_CLK 11
+#define IMX8QXP_ADMA_LPCG_SPI0_CLK 12
+#define IMX8QXP_ADMA_LPCG_SPI1_CLK 13
+#define IMX8QXP_ADMA_LPCG_SPI2_CLK 14
+#define IMX8QXP_ADMA_LPCG_SPI3_CLK 15
+#define IMX8QXP_ADMA_LPCG_CAN0_IPG_CLK 16
+#define IMX8QXP_ADMA_LPCG_CAN0_IPG_PE_CLK 17
+#define IMX8QXP_ADMA_LPCG_CAN0_IPG_CHI_CLK 18
+#define IMX8QXP_ADMA_LPCG_CAN1_IPG_CLK 19
+#define IMX8QXP_ADMA_LPCG_CAN1_IPG_PE_CLK 20
+#define IMX8QXP_ADMA_LPCG_CAN1_IPG_CHI_CLK 21
+#define IMX8QXP_ADMA_LPCG_CAN2_IPG_CLK 22
+#define IMX8QXP_ADMA_LPCG_CAN2_IPG_PE_CLK 23
+#define IMX8QXP_ADMA_LPCG_CAN2_IPG_CHI_CLK 24
+#define IMX8QXP_ADMA_LPCG_I2C0_CLK 25
+#define IMX8QXP_ADMA_LPCG_I2C1_CLK 26
+#define IMX8QXP_ADMA_LPCG_I2C2_CLK 27
+#define IMX8QXP_ADMA_LPCG_I2C3_CLK 28
+#define IMX8QXP_ADMA_LPCG_I2C0_IPG_CLK 29
+#define IMX8QXP_ADMA_LPCG_I2C1_IPG_CLK 30
+#define IMX8QXP_ADMA_LPCG_I2C2_IPG_CLK 31
+#define IMX8QXP_ADMA_LPCG_I2C3_IPG_CLK 32
+#define IMX8QXP_ADMA_LPCG_FTM0_CLK 33
+#define IMX8QXP_ADMA_LPCG_FTM1_CLK 34
+#define IMX8QXP_ADMA_LPCG_FTM0_IPG_CLK 35
+#define IMX8QXP_ADMA_LPCG_FTM1_IPG_CLK 36
+#define IMX8QXP_ADMA_LPCG_PWM_HI_CLK 37
+#define IMX8QXP_ADMA_LPCG_PWM_IPG_CLK 38
+#define IMX8QXP_ADMA_LPCG_LCD_PIX_CLK 39
+#define IMX8QXP_ADMA_LPCG_LCD_APB_CLK 40
+
+#define IMX8QXP_ADMA_LPCG_CLK_END 41
+
+#endif /* __DT_BINDINGS_CLOCK_IMX8QXP_H */
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index a60f47b49231..5fe2923382d0 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -103,5 +103,9 @@
#define CLKID_MPLL1 94
#define CLKID_MPLL2 95
#define CLKID_NAND_CLK 112
+#define CLKID_ABP 124
+#define CLKID_PERIPH 126
+#define CLKID_AXI 128
+#define CLKID_L2_DRAM 130
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt7629-clk.h b/include/dt-bindings/clock/mt7629-clk.h
new file mode 100644
index 000000000000..ad8e6d7f0154
--- /dev/null
+++ b/include/dt-bindings/clock/mt7629-clk.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7629_H
+#define _DT_BINDINGS_CLK_MT7629_H
+
+/* TOPCKGEN */
+#define CLK_TOP_TO_U2_PHY 0
+#define CLK_TOP_TO_U2_PHY_1P 1
+#define CLK_TOP_PCIE0_PIPE_EN 2
+#define CLK_TOP_PCIE1_PIPE_EN 3
+#define CLK_TOP_SSUSB_TX250M 4
+#define CLK_TOP_SSUSB_EQ_RX250M 5
+#define CLK_TOP_SSUSB_CDR_REF 6
+#define CLK_TOP_SSUSB_CDR_FB 7
+#define CLK_TOP_SATA_ASIC 8
+#define CLK_TOP_SATA_RBC 9
+#define CLK_TOP_TO_USB3_SYS 10
+#define CLK_TOP_P1_1MHZ 11
+#define CLK_TOP_4MHZ 12
+#define CLK_TOP_P0_1MHZ 13
+#define CLK_TOP_ETH_500M 14
+#define CLK_TOP_TXCLK_SRC_PRE 15
+#define CLK_TOP_RTC 16
+#define CLK_TOP_PWM_QTR_26M 17
+#define CLK_TOP_CPUM_TCK_IN 18
+#define CLK_TOP_TO_USB3_DA_TOP 19
+#define CLK_TOP_MEMPLL 20
+#define CLK_TOP_DMPLL 21
+#define CLK_TOP_DMPLL_D4 22
+#define CLK_TOP_DMPLL_D8 23
+#define CLK_TOP_SYSPLL_D2 24
+#define CLK_TOP_SYSPLL1_D2 25
+#define CLK_TOP_SYSPLL1_D4 26
+#define CLK_TOP_SYSPLL1_D8 27
+#define CLK_TOP_SYSPLL1_D16 28
+#define CLK_TOP_SYSPLL2_D2 29
+#define CLK_TOP_SYSPLL2_D4 30
+#define CLK_TOP_SYSPLL2_D8 31
+#define CLK_TOP_SYSPLL_D5 32
+#define CLK_TOP_SYSPLL3_D2 33
+#define CLK_TOP_SYSPLL3_D4 34
+#define CLK_TOP_SYSPLL_D7 35
+#define CLK_TOP_SYSPLL4_D2 36
+#define CLK_TOP_SYSPLL4_D4 37
+#define CLK_TOP_SYSPLL4_D16 38
+#define CLK_TOP_UNIVPLL 39
+#define CLK_TOP_UNIVPLL1_D2 40
+#define CLK_TOP_UNIVPLL1_D4 41
+#define CLK_TOP_UNIVPLL1_D8 42
+#define CLK_TOP_UNIVPLL_D3 43
+#define CLK_TOP_UNIVPLL2_D2 44
+#define CLK_TOP_UNIVPLL2_D4 45
+#define CLK_TOP_UNIVPLL2_D8 46
+#define CLK_TOP_UNIVPLL2_D16 47
+#define CLK_TOP_UNIVPLL_D5 48
+#define CLK_TOP_UNIVPLL3_D2 49
+#define CLK_TOP_UNIVPLL3_D4 50
+#define CLK_TOP_UNIVPLL3_D16 51
+#define CLK_TOP_UNIVPLL_D7 52
+#define CLK_TOP_UNIVPLL_D80_D4 53
+#define CLK_TOP_UNIV48M 54
+#define CLK_TOP_SGMIIPLL_D2 55
+#define CLK_TOP_CLKXTAL_D4 56
+#define CLK_TOP_HD_FAXI 57
+#define CLK_TOP_FAXI 58
+#define CLK_TOP_F_FAUD_INTBUS 59
+#define CLK_TOP_AP2WBHIF_HCLK 60
+#define CLK_TOP_10M_INFRAO 61
+#define CLK_TOP_MSDC30_1 62
+#define CLK_TOP_SPI 63
+#define CLK_TOP_SF 64
+#define CLK_TOP_FLASH 65
+#define CLK_TOP_TO_USB3_REF 66
+#define CLK_TOP_TO_USB3_MCU 67
+#define CLK_TOP_TO_USB3_DMA 68
+#define CLK_TOP_FROM_TOP_AHB 69
+#define CLK_TOP_FROM_TOP_AXI 70
+#define CLK_TOP_PCIE1_MAC_EN 71
+#define CLK_TOP_PCIE0_MAC_EN 72
+#define CLK_TOP_AXI_SEL 73
+#define CLK_TOP_MEM_SEL 74
+#define CLK_TOP_DDRPHYCFG_SEL 75
+#define CLK_TOP_ETH_SEL 76
+#define CLK_TOP_PWM_SEL 77
+#define CLK_TOP_F10M_REF_SEL 78
+#define CLK_TOP_NFI_INFRA_SEL 79
+#define CLK_TOP_FLASH_SEL 80
+#define CLK_TOP_UART_SEL 81
+#define CLK_TOP_SPI0_SEL 82
+#define CLK_TOP_SPI1_SEL 83
+#define CLK_TOP_MSDC50_0_SEL 84
+#define CLK_TOP_MSDC30_0_SEL 85
+#define CLK_TOP_MSDC30_1_SEL 86
+#define CLK_TOP_AP2WBMCU_SEL 87
+#define CLK_TOP_AP2WBHIF_SEL 88
+#define CLK_TOP_AUDIO_SEL 89
+#define CLK_TOP_AUD_INTBUS_SEL 90
+#define CLK_TOP_PMICSPI_SEL 91
+#define CLK_TOP_SCP_SEL 92
+#define CLK_TOP_ATB_SEL 93
+#define CLK_TOP_HIF_SEL 94
+#define CLK_TOP_SATA_SEL 95
+#define CLK_TOP_U2_SEL 96
+#define CLK_TOP_AUD1_SEL 97
+#define CLK_TOP_AUD2_SEL 98
+#define CLK_TOP_IRRX_SEL 99
+#define CLK_TOP_IRTX_SEL 100
+#define CLK_TOP_SATA_MCU_SEL 101
+#define CLK_TOP_PCIE0_MCU_SEL 102
+#define CLK_TOP_PCIE1_MCU_SEL 103
+#define CLK_TOP_SSUSB_MCU_SEL 104
+#define CLK_TOP_CRYPTO_SEL 105
+#define CLK_TOP_SGMII_REF_1_SEL 106
+#define CLK_TOP_10M_SEL 107
+#define CLK_TOP_NR_CLK 108
+
+/* INFRACFG */
+#define CLK_INFRA_MUX1_SEL 0
+#define CLK_INFRA_DBGCLK_PD 1
+#define CLK_INFRA_TRNG_PD 2
+#define CLK_INFRA_DEVAPC_PD 3
+#define CLK_INFRA_APXGPT_PD 4
+#define CLK_INFRA_SEJ_PD 5
+#define CLK_INFRA_NR_CLK 6
+
+/* PERICFG */
+#define CLK_PERIBUS_SEL 0
+#define CLK_PERI_PWM1_PD 1
+#define CLK_PERI_PWM2_PD 2
+#define CLK_PERI_PWM3_PD 3
+#define CLK_PERI_PWM4_PD 4
+#define CLK_PERI_PWM5_PD 5
+#define CLK_PERI_PWM6_PD 6
+#define CLK_PERI_PWM7_PD 7
+#define CLK_PERI_PWM_PD 8
+#define CLK_PERI_AP_DMA_PD 9
+#define CLK_PERI_MSDC30_1_PD 10
+#define CLK_PERI_UART0_PD 11
+#define CLK_PERI_UART1_PD 12
+#define CLK_PERI_UART2_PD 13
+#define CLK_PERI_UART3_PD 14
+#define CLK_PERI_BTIF_PD 15
+#define CLK_PERI_I2C0_PD 16
+#define CLK_PERI_SPI0_PD 17
+#define CLK_PERI_SNFI_PD 18
+#define CLK_PERI_NFI_PD 19
+#define CLK_PERI_NFIECC_PD 20
+#define CLK_PERI_FLASH_PD 21
+#define CLK_PERI_NR_CLK 22
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIV2PLL 2
+#define CLK_APMIXED_ETH1PLL 3
+#define CLK_APMIXED_ETH2PLL 4
+#define CLK_APMIXED_SGMIPLL 5
+#define CLK_APMIXED_MAIN_CORE_EN 6
+#define CLK_APMIXED_NR_CLK 7
+
+/* SSUSBSYS */
+#define CLK_SSUSB_U2_PHY_1P_EN 0
+#define CLK_SSUSB_U2_PHY_EN 1
+#define CLK_SSUSB_REF_EN 2
+#define CLK_SSUSB_SYS_EN 3
+#define CLK_SSUSB_MCU_EN 4
+#define CLK_SSUSB_DMA_EN 5
+#define CLK_SSUSB_NR_CLK 6
+
+/* PCIESYS */
+#define CLK_PCIE_P1_AUX_EN 0
+#define CLK_PCIE_P1_OBFF_EN 1
+#define CLK_PCIE_P1_AHB_EN 2
+#define CLK_PCIE_P1_AXI_EN 3
+#define CLK_PCIE_P1_MAC_EN 4
+#define CLK_PCIE_P1_PIPE_EN 5
+#define CLK_PCIE_P0_AUX_EN 6
+#define CLK_PCIE_P0_OBFF_EN 7
+#define CLK_PCIE_P0_AHB_EN 8
+#define CLK_PCIE_P0_AXI_EN 9
+#define CLK_PCIE_P0_MAC_EN 10
+#define CLK_PCIE_P0_PIPE_EN 11
+#define CLK_PCIE_NR_CLK 12
+
+/* ETHSYS */
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_GP0_EN 3
+#define CLK_ETH_ESW_EN 4
+#define CLK_ETH_NR_CLK 5
+
+/* SGMIISYS */
+#define CLK_SGMII_TX_EN 0
+#define CLK_SGMII_RX_EN 1
+#define CLK_SGMII_CDR_REF 2
+#define CLK_SGMII_CDR_FB 3
+#define CLK_SGMII_NR_CLK 4
+
+#endif /* _DT_BINDINGS_CLK_MT7629_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index 58a242e656b1..ba84bbab5c83 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -180,6 +180,11 @@
#define USB30_MASTER_CLK_SRC 163
#define USB30_MOCK_UTMI_CLK_SRC 164
#define USB3_PHY_AUX_CLK_SRC 165
+#define GCC_USB3_CLKREF_CLK 166
+#define GCC_HDMI_CLKREF_CLK 167
+#define GCC_UFS_CLKREF_CLK 168
+#define GCC_PCIE_CLKREF_CLK 169
+#define GCC_RX1_USB2_CLKREF_CLK 170
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
@@ -204,5 +209,94 @@
#define GCC_TSIF_BCR 16
#define GCC_UFS_BCR 17
#define GCC_USB_30_BCR 18
+#define GCC_SYSTEM_NOC_BCR 19
+#define GCC_CONFIG_NOC_BCR 20
+#define GCC_AHB2PHY_EAST_BCR 21
+#define GCC_IMEM_BCR 22
+#define GCC_PIMEM_BCR 23
+#define GCC_MMSS_BCR 24
+#define GCC_QDSS_BCR 25
+#define GCC_WCSS_BCR 26
+#define GCC_BLSP1_BCR 27
+#define GCC_BLSP1_UART1_BCR 28
+#define GCC_BLSP1_UART2_BCR 29
+#define GCC_BLSP1_UART3_BCR 30
+#define GCC_CM_PHY_REFGEN1_BCR 31
+#define GCC_CM_PHY_REFGEN2_BCR 32
+#define GCC_BLSP2_BCR 33
+#define GCC_BLSP2_UART1_BCR 34
+#define GCC_BLSP2_UART2_BCR 35
+#define GCC_BLSP2_UART3_BCR 36
+#define GCC_SRAM_SENSOR_BCR 37
+#define GCC_PRNG_BCR 38
+#define GCC_TSIF_0_RESET 39
+#define GCC_TSIF_1_RESET 40
+#define GCC_TCSR_BCR 41
+#define GCC_BOOT_ROM_BCR 42
+#define GCC_MSG_RAM_BCR 43
+#define GCC_TLMM_BCR 44
+#define GCC_MPM_BCR 45
+#define GCC_SEC_CTRL_BCR 46
+#define GCC_SPMI_BCR 47
+#define GCC_SPDM_BCR 48
+#define GCC_CE1_BCR 49
+#define GCC_BIMC_BCR 50
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 51
+#define GCC_SNOC_BUS_TIMEOUT1_BCR 52
+#define GCC_SNOC_BUS_TIMEOUT3_BCR 53
+#define GCC_SNOC_BUS_TIMEOUT_EXTREF_BCR 54
+#define GCC_PNOC_BUS_TIMEOUT0_BCR 55
+#define GCC_CNOC_PERIPH_BUS_TIMEOUT1_BCR 56
+#define GCC_CNOC_PERIPH_BUS_TIMEOUT2_BCR 57
+#define GCC_CNOC_BUS_TIMEOUT0_BCR 58
+#define GCC_CNOC_BUS_TIMEOUT1_BCR 59
+#define GCC_CNOC_BUS_TIMEOUT2_BCR 60
+#define GCC_CNOC_BUS_TIMEOUT3_BCR 61
+#define GCC_CNOC_BUS_TIMEOUT4_BCR 62
+#define GCC_CNOC_BUS_TIMEOUT5_BCR 63
+#define GCC_CNOC_BUS_TIMEOUT6_BCR 64
+#define GCC_CNOC_BUS_TIMEOUT7_BCR 65
+#define GCC_APB2JTAG_BCR 66
+#define GCC_RBCPR_CX_BCR 67
+#define GCC_RBCPR_MX_BCR 68
+#define GCC_USB3_PHY_BCR 69
+#define GCC_USB3PHY_PHY_BCR 70
+#define GCC_USB3_DP_PHY_BCR 71
+#define GCC_SSC_BCR 72
+#define GCC_SSC_RESET 73
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 74
+#define GCC_PCIE_0_LINK_DOWN_BCR 75
+#define GCC_PCIE_0_PHY_BCR 76
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 77
+#define GCC_PCIE_PHY_BCR 78
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 79
+#define GCC_PCIE_PHY_CFG_AHB_BCR 80
+#define GCC_PCIE_PHY_COM_BCR 81
+#define GCC_GPU_BCR 82
+#define GCC_SPSS_BCR 83
+#define GCC_OBT_ODT_BCR 84
+#define GCC_VS_BCR 85
+#define GCC_MSS_VS_RESET 86
+#define GCC_GPU_VS_RESET 87
+#define GCC_APC0_VS_RESET 88
+#define GCC_APC1_VS_RESET 89
+#define GCC_CNOC_BUS_TIMEOUT8_BCR 90
+#define GCC_CNOC_BUS_TIMEOUT9_BCR 91
+#define GCC_CNOC_BUS_TIMEOUT10_BCR 92
+#define GCC_CNOC_BUS_TIMEOUT11_BCR 93
+#define GCC_CNOC_BUS_TIMEOUT12_BCR 94
+#define GCC_CNOC_BUS_TIMEOUT13_BCR 95
+#define GCC_CNOC_BUS_TIMEOUT14_BCR 96
+#define GCC_CNOC_BUS_TIMEOUT_EXTREF_BCR 97
+#define GCC_AGGRE1_NOC_BCR 98
+#define GCC_AGGRE2_NOC_BCR 99
+#define GCC_DCC_BCR 100
+#define GCC_QREFS_VBG_CAL_BCR 101
+#define GCC_IPA_BCR 102
+#define GCC_GLM_BCR 103
+#define GCC_SKL_BCR 104
+#define GCC_MSMPU_BCR 105
+#define GCC_QUSB2PHY_PRIM_BCR 106
+#define GCC_QUSB2PHY_SEC_BCR 107
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index b8eae5a76503..968fa65b9c42 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -197,6 +197,8 @@
#define GCC_QSPI_CORE_CLK_SRC 187
#define GCC_QSPI_CORE_CLK 188
#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189
+#define GCC_LPASS_Q6_AXI_CLK 190
+#define GCC_LPASS_SWAY_CLK 191
/* GCC Resets */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
new file mode 100644
index 000000000000..9690d901b50a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_GPU_CC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_GPU_CC_SDM845_H
+
+/* GPU_CC clock registers */
+#define GPU_CC_CX_GMU_CLK 0
+#define GPU_CC_CXO_CLK 1
+#define GPU_CC_GMU_CLK_SRC 2
+#define GPU_CC_PLL1 3
+
+/* GPU_CC Resets */
+#define GPUCC_GPU_CC_CX_BCR 0
+#define GPUCC_GPU_CC_GMU_BCR 1
+#define GPUCC_GPU_CC_XO_BCR 2
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpass-sdm845.h b/include/dt-bindings/clock/qcom,lpass-sdm845.h
new file mode 100644
index 000000000000..659050846f61
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpass-sdm845.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_LPASS_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_LPASS_SDM845_H
+
+#define LPASS_Q6SS_AHBM_AON_CLK 0
+#define LPASS_Q6SS_AHBS_AON_CLK 1
+#define LPASS_QDSP6SS_XO_CLK 2
+#define LPASS_QDSP6SS_SLEEP_CLK 3
+#define LPASS_QDSP6SS_CORE_CLK 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index c585b82b9c05..3658b0c14966 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -123,5 +123,9 @@
#define RPM_SMD_DIV_A_CLK3 73
#define RPM_SMD_LN_BB_CLK 74
#define RPM_SMD_LN_BB_A_CLK 75
+#define RPM_SMD_BIMC_GPU_CLK 76
+#define RPM_SMD_BIMC_GPU_A_CLK 77
+#define RPM_SMD_QPIC_CLK 78
+#define RPM_SMD_QPIC_CLK_A 79
#endif
diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
index 948389641565..92b3e2a95179 100644
--- a/include/dt-bindings/clock/r8a7795-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
@@ -50,7 +50,7 @@
#define R8A7795_CLK_CANFD 39
#define R8A7795_CLK_HDMI 40
#define R8A7795_CLK_CSI0 41
-#define R8A7795_CLK_CSIREF 42
+/* CLK_CSIREF was removed */
#define R8A7795_CLK_CP 43
#define R8A7795_CLK_CPEX 44
#define R8A7795_CLK_R 45
diff --git a/include/dt-bindings/clock/r8a7796-cpg-mssr.h b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
index e6087f2f7e3a..c0957cf45840 100644
--- a/include/dt-bindings/clock/r8a7796-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
@@ -56,7 +56,7 @@
#define R8A7796_CLK_CANFD 45
#define R8A7796_CLK_HDMI 46
#define R8A7796_CLK_CSI0 47
-#define R8A7796_CLK_CSIREF 48
+/* CLK_CSIREF was removed */
#define R8A7796_CLK_CP 49
#define R8A7796_CLK_CPEX 50
#define R8A7796_CLK_R 51
diff --git a/include/dt-bindings/clock/r8a77995-cpg-mssr.h b/include/dt-bindings/clock/r8a77995-cpg-mssr.h
index 1eb11acfa563..fd701c4e87cf 100644
--- a/include/dt-bindings/clock/r8a77995-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a77995-cpg-mssr.h
@@ -35,8 +35,8 @@
#define R8A77995_CLK_CRD2 24
#define R8A77995_CLK_SD0H 25
#define R8A77995_CLK_SD0 26
-#define R8A77995_CLK_SSP2 27
-#define R8A77995_CLK_SSP1 28
+/* CLK_SSP2 was removed */
+/* CLK_SSP1 was removed */
#define R8A77995_CLK_RPC 29
#define R8A77995_CLK_RPCD2 30
#define R8A77995_CLK_ZA2 31
@@ -49,5 +49,6 @@
#define R8A77995_CLK_LV0 38
#define R8A77995_CLK_LV1 39
#define R8A77995_CLK_CP 40
+#define R8A77995_CLK_CPEX 41
#endif /* __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index a82a0109faff..bcaa4559ab1b 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -172,13 +172,14 @@
#define PCLK_HDCP 232
#define PCLK_DCF 233
#define PCLK_SARADC 234
+#define PCLK_ACODECPHY 235
/* hclk gates */
#define HCLK_PERI 308
#define HCLK_TSP 309
#define HCLK_GMAC 310
#define HCLK_I2S0_8CH 311
-#define HCLK_I2S1_8CH 313
+#define HCLK_I2S1_8CH 312
#define HCLK_I2S2_2CH 313
#define HCLK_SPDIF_8CH 314
#define HCLK_VOP 315
diff --git a/include/dt-bindings/clock/sun8i-de2.h b/include/dt-bindings/clock/sun8i-de2.h
index 3bed63b524aa..7768f73b051e 100644
--- a/include/dt-bindings/clock/sun8i-de2.h
+++ b/include/dt-bindings/clock/sun8i-de2.h
@@ -15,4 +15,7 @@
#define CLK_MIXER1 7
#define CLK_WB 8
+#define CLK_BUS_ROT 9
+#define CLK_ROT 10
+
#endif /* _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ */
diff --git a/include/dt-bindings/clock/suniv-ccu-f1c100s.h b/include/dt-bindings/clock/suniv-ccu-f1c100s.h
new file mode 100644
index 000000000000..f5ac155c9c70
--- /dev/null
+++ b/include/dt-bindings/clock/suniv-ccu-f1c100s.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ *
+ * Copyright (c) 2018 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUNIV_F1C100S_H_
+#define _DT_BINDINGS_CLK_SUNIV_F1C100S_H_
+
+#define CLK_CPU 11
+
+#define CLK_BUS_DMA 14
+#define CLK_BUS_MMC0 15
+#define CLK_BUS_MMC1 16
+#define CLK_BUS_DRAM 17
+#define CLK_BUS_SPI0 18
+#define CLK_BUS_SPI1 19
+#define CLK_BUS_OTG 20
+#define CLK_BUS_VE 21
+#define CLK_BUS_LCD 22
+#define CLK_BUS_DEINTERLACE 23
+#define CLK_BUS_CSI 24
+#define CLK_BUS_TVD 25
+#define CLK_BUS_TVE 26
+#define CLK_BUS_DE_BE 27
+#define CLK_BUS_DE_FE 28
+#define CLK_BUS_CODEC 29
+#define CLK_BUS_SPDIF 30
+#define CLK_BUS_IR 31
+#define CLK_BUS_RSB 32
+#define CLK_BUS_I2S0 33
+#define CLK_BUS_I2C0 34
+#define CLK_BUS_I2C1 35
+#define CLK_BUS_I2C2 36
+#define CLK_BUS_PIO 37
+#define CLK_BUS_UART0 38
+#define CLK_BUS_UART1 39
+#define CLK_BUS_UART2 40
+
+#define CLK_MMC0 41
+#define CLK_MMC0_SAMPLE 42
+#define CLK_MMC0_OUTPUT 43
+#define CLK_MMC1 44
+#define CLK_MMC1_SAMPLE 45
+#define CLK_MMC1_OUTPUT 46
+#define CLK_I2S 47
+#define CLK_SPDIF 48
+
+#define CLK_USB_PHY0 49
+
+#define CLK_DRAM_VE 50
+#define CLK_DRAM_CSI 51
+#define CLK_DRAM_DEINTERLACE 52
+#define CLK_DRAM_TVD 53
+#define CLK_DRAM_DE_FE 54
+#define CLK_DRAM_DE_BE 55
+
+#define CLK_DE_BE 56
+#define CLK_DE_FE 57
+#define CLK_TCON 58
+#define CLK_DEINTERLACE 59
+#define CLK_TVE2_CLK 60
+#define CLK_TVE1_CLK 61
+#define CLK_TVD 62
+#define CLK_CSI 63
+#define CLK_VE 64
+#define CLK_CODEC 65
+#define CLK_AVS 66
+
+#endif
diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h
new file mode 100644
index 000000000000..4481f2d60d65
--- /dev/null
+++ b/include/dt-bindings/firmware/imx/rsrc.h
@@ -0,0 +1,559 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef __DT_BINDINGS_RSCRC_IMX_H
+#define __DT_BINDINGS_RSCRC_IMX_H
+
+/*
+ * These defines are used to indicate a resource. Resources include peripherals
+ * and bus masters (but not memory regions). Note that items in this list
+ * should never be changed or removed (only appended at the end of the list).
+ */
+
+#define IMX_SC_R_A53 0
+#define IMX_SC_R_A53_0 1
+#define IMX_SC_R_A53_1 2
+#define IMX_SC_R_A53_2 3
+#define IMX_SC_R_A53_3 4
+#define IMX_SC_R_A72 5
+#define IMX_SC_R_A72_0 6
+#define IMX_SC_R_A72_1 7
+#define IMX_SC_R_A72_2 8
+#define IMX_SC_R_A72_3 9
+#define IMX_SC_R_CCI 10
+#define IMX_SC_R_DB 11
+#define IMX_SC_R_DRC_0 12
+#define IMX_SC_R_DRC_1 13
+#define IMX_SC_R_GIC_SMMU 14
+#define IMX_SC_R_IRQSTR_M4_0 15
+#define IMX_SC_R_IRQSTR_M4_1 16
+#define IMX_SC_R_SMMU 17
+#define IMX_SC_R_GIC 18
+#define IMX_SC_R_DC_0_BLIT0 19
+#define IMX_SC_R_DC_0_BLIT1 20
+#define IMX_SC_R_DC_0_BLIT2 21
+#define IMX_SC_R_DC_0_BLIT_OUT 22
+#define IMX_SC_R_DC_0_CAPTURE0 23
+#define IMX_SC_R_DC_0_CAPTURE1 24
+#define IMX_SC_R_DC_0_WARP 25
+#define IMX_SC_R_DC_0_INTEGRAL0 26
+#define IMX_SC_R_DC_0_INTEGRAL1 27
+#define IMX_SC_R_DC_0_VIDEO0 28
+#define IMX_SC_R_DC_0_VIDEO1 29
+#define IMX_SC_R_DC_0_FRAC0 30
+#define IMX_SC_R_DC_0_FRAC1 31
+#define IMX_SC_R_DC_0 32
+#define IMX_SC_R_GPU_2_PID0 33
+#define IMX_SC_R_DC_0_PLL_0 34
+#define IMX_SC_R_DC_0_PLL_1 35
+#define IMX_SC_R_DC_1_BLIT0 36
+#define IMX_SC_R_DC_1_BLIT1 37
+#define IMX_SC_R_DC_1_BLIT2 38
+#define IMX_SC_R_DC_1_BLIT_OUT 39
+#define IMX_SC_R_DC_1_CAPTURE0 40
+#define IMX_SC_R_DC_1_CAPTURE1 41
+#define IMX_SC_R_DC_1_WARP 42
+#define IMX_SC_R_DC_1_INTEGRAL0 43
+#define IMX_SC_R_DC_1_INTEGRAL1 44
+#define IMX_SC_R_DC_1_VIDEO0 45
+#define IMX_SC_R_DC_1_VIDEO1 46
+#define IMX_SC_R_DC_1_FRAC0 47
+#define IMX_SC_R_DC_1_FRAC1 48
+#define IMX_SC_R_DC_1 49
+#define IMX_SC_R_GPU_3_PID0 50
+#define IMX_SC_R_DC_1_PLL_0 51
+#define IMX_SC_R_DC_1_PLL_1 52
+#define IMX_SC_R_SPI_0 53
+#define IMX_SC_R_SPI_1 54
+#define IMX_SC_R_SPI_2 55
+#define IMX_SC_R_SPI_3 56
+#define IMX_SC_R_UART_0 57
+#define IMX_SC_R_UART_1 58
+#define IMX_SC_R_UART_2 59
+#define IMX_SC_R_UART_3 60
+#define IMX_SC_R_UART_4 61
+#define IMX_SC_R_EMVSIM_0 62
+#define IMX_SC_R_EMVSIM_1 63
+#define IMX_SC_R_DMA_0_CH0 64
+#define IMX_SC_R_DMA_0_CH1 65
+#define IMX_SC_R_DMA_0_CH2 66
+#define IMX_SC_R_DMA_0_CH3 67
+#define IMX_SC_R_DMA_0_CH4 68
+#define IMX_SC_R_DMA_0_CH5 69
+#define IMX_SC_R_DMA_0_CH6 70
+#define IMX_SC_R_DMA_0_CH7 71
+#define IMX_SC_R_DMA_0_CH8 72
+#define IMX_SC_R_DMA_0_CH9 73
+#define IMX_SC_R_DMA_0_CH10 74
+#define IMX_SC_R_DMA_0_CH11 75
+#define IMX_SC_R_DMA_0_CH12 76
+#define IMX_SC_R_DMA_0_CH13 77
+#define IMX_SC_R_DMA_0_CH14 78
+#define IMX_SC_R_DMA_0_CH15 79
+#define IMX_SC_R_DMA_0_CH16 80
+#define IMX_SC_R_DMA_0_CH17 81
+#define IMX_SC_R_DMA_0_CH18 82
+#define IMX_SC_R_DMA_0_CH19 83
+#define IMX_SC_R_DMA_0_CH20 84
+#define IMX_SC_R_DMA_0_CH21 85
+#define IMX_SC_R_DMA_0_CH22 86
+#define IMX_SC_R_DMA_0_CH23 87
+#define IMX_SC_R_DMA_0_CH24 88
+#define IMX_SC_R_DMA_0_CH25 89
+#define IMX_SC_R_DMA_0_CH26 90
+#define IMX_SC_R_DMA_0_CH27 91
+#define IMX_SC_R_DMA_0_CH28 92
+#define IMX_SC_R_DMA_0_CH29 93
+#define IMX_SC_R_DMA_0_CH30 94
+#define IMX_SC_R_DMA_0_CH31 95
+#define IMX_SC_R_I2C_0 96
+#define IMX_SC_R_I2C_1 97
+#define IMX_SC_R_I2C_2 98
+#define IMX_SC_R_I2C_3 99
+#define IMX_SC_R_I2C_4 100
+#define IMX_SC_R_ADC_0 101
+#define IMX_SC_R_ADC_1 102
+#define IMX_SC_R_FTM_0 103
+#define IMX_SC_R_FTM_1 104
+#define IMX_SC_R_CAN_0 105
+#define IMX_SC_R_CAN_1 106
+#define IMX_SC_R_CAN_2 107
+#define IMX_SC_R_DMA_1_CH0 108
+#define IMX_SC_R_DMA_1_CH1 109
+#define IMX_SC_R_DMA_1_CH2 110
+#define IMX_SC_R_DMA_1_CH3 111
+#define IMX_SC_R_DMA_1_CH4 112
+#define IMX_SC_R_DMA_1_CH5 113
+#define IMX_SC_R_DMA_1_CH6 114
+#define IMX_SC_R_DMA_1_CH7 115
+#define IMX_SC_R_DMA_1_CH8 116
+#define IMX_SC_R_DMA_1_CH9 117
+#define IMX_SC_R_DMA_1_CH10 118
+#define IMX_SC_R_DMA_1_CH11 119
+#define IMX_SC_R_DMA_1_CH12 120
+#define IMX_SC_R_DMA_1_CH13 121
+#define IMX_SC_R_DMA_1_CH14 122
+#define IMX_SC_R_DMA_1_CH15 123
+#define IMX_SC_R_DMA_1_CH16 124
+#define IMX_SC_R_DMA_1_CH17 125
+#define IMX_SC_R_DMA_1_CH18 126
+#define IMX_SC_R_DMA_1_CH19 127
+#define IMX_SC_R_DMA_1_CH20 128
+#define IMX_SC_R_DMA_1_CH21 129
+#define IMX_SC_R_DMA_1_CH22 130
+#define IMX_SC_R_DMA_1_CH23 131
+#define IMX_SC_R_DMA_1_CH24 132
+#define IMX_SC_R_DMA_1_CH25 133
+#define IMX_SC_R_DMA_1_CH26 134
+#define IMX_SC_R_DMA_1_CH27 135
+#define IMX_SC_R_DMA_1_CH28 136
+#define IMX_SC_R_DMA_1_CH29 137
+#define IMX_SC_R_DMA_1_CH30 138
+#define IMX_SC_R_DMA_1_CH31 139
+#define IMX_SC_R_UNUSED1 140
+#define IMX_SC_R_UNUSED2 141
+#define IMX_SC_R_UNUSED3 142
+#define IMX_SC_R_UNUSED4 143
+#define IMX_SC_R_GPU_0_PID0 144
+#define IMX_SC_R_GPU_0_PID1 145
+#define IMX_SC_R_GPU_0_PID2 146
+#define IMX_SC_R_GPU_0_PID3 147
+#define IMX_SC_R_GPU_1_PID0 148
+#define IMX_SC_R_GPU_1_PID1 149
+#define IMX_SC_R_GPU_1_PID2 150
+#define IMX_SC_R_GPU_1_PID3 151
+#define IMX_SC_R_PCIE_A 152
+#define IMX_SC_R_SERDES_0 153
+#define IMX_SC_R_MATCH_0 154
+#define IMX_SC_R_MATCH_1 155
+#define IMX_SC_R_MATCH_2 156
+#define IMX_SC_R_MATCH_3 157
+#define IMX_SC_R_MATCH_4 158
+#define IMX_SC_R_MATCH_5 159
+#define IMX_SC_R_MATCH_6 160
+#define IMX_SC_R_MATCH_7 161
+#define IMX_SC_R_MATCH_8 162
+#define IMX_SC_R_MATCH_9 163
+#define IMX_SC_R_MATCH_10 164
+#define IMX_SC_R_MATCH_11 165
+#define IMX_SC_R_MATCH_12 166
+#define IMX_SC_R_MATCH_13 167
+#define IMX_SC_R_MATCH_14 168
+#define IMX_SC_R_PCIE_B 169
+#define IMX_SC_R_SATA_0 170
+#define IMX_SC_R_SERDES_1 171
+#define IMX_SC_R_HSIO_GPIO 172
+#define IMX_SC_R_MATCH_15 173
+#define IMX_SC_R_MATCH_16 174
+#define IMX_SC_R_MATCH_17 175
+#define IMX_SC_R_MATCH_18 176
+#define IMX_SC_R_MATCH_19 177
+#define IMX_SC_R_MATCH_20 178
+#define IMX_SC_R_MATCH_21 179
+#define IMX_SC_R_MATCH_22 180
+#define IMX_SC_R_MATCH_23 181
+#define IMX_SC_R_MATCH_24 182
+#define IMX_SC_R_MATCH_25 183
+#define IMX_SC_R_MATCH_26 184
+#define IMX_SC_R_MATCH_27 185
+#define IMX_SC_R_MATCH_28 186
+#define IMX_SC_R_LCD_0 187
+#define IMX_SC_R_LCD_0_PWM_0 188
+#define IMX_SC_R_LCD_0_I2C_0 189
+#define IMX_SC_R_LCD_0_I2C_1 190
+#define IMX_SC_R_PWM_0 191
+#define IMX_SC_R_PWM_1 192
+#define IMX_SC_R_PWM_2 193
+#define IMX_SC_R_PWM_3 194
+#define IMX_SC_R_PWM_4 195
+#define IMX_SC_R_PWM_5 196
+#define IMX_SC_R_PWM_6 197
+#define IMX_SC_R_PWM_7 198
+#define IMX_SC_R_GPIO_0 199
+#define IMX_SC_R_GPIO_1 200
+#define IMX_SC_R_GPIO_2 201
+#define IMX_SC_R_GPIO_3 202
+#define IMX_SC_R_GPIO_4 203
+#define IMX_SC_R_GPIO_5 204
+#define IMX_SC_R_GPIO_6 205
+#define IMX_SC_R_GPIO_7 206
+#define IMX_SC_R_GPT_0 207
+#define IMX_SC_R_GPT_1 208
+#define IMX_SC_R_GPT_2 209
+#define IMX_SC_R_GPT_3 210
+#define IMX_SC_R_GPT_4 211
+#define IMX_SC_R_KPP 212
+#define IMX_SC_R_MU_0A 213
+#define IMX_SC_R_MU_1A 214
+#define IMX_SC_R_MU_2A 215
+#define IMX_SC_R_MU_3A 216
+#define IMX_SC_R_MU_4A 217
+#define IMX_SC_R_MU_5A 218
+#define IMX_SC_R_MU_6A 219
+#define IMX_SC_R_MU_7A 220
+#define IMX_SC_R_MU_8A 221
+#define IMX_SC_R_MU_9A 222
+#define IMX_SC_R_MU_10A 223
+#define IMX_SC_R_MU_11A 224
+#define IMX_SC_R_MU_12A 225
+#define IMX_SC_R_MU_13A 226
+#define IMX_SC_R_MU_5B 227
+#define IMX_SC_R_MU_6B 228
+#define IMX_SC_R_MU_7B 229
+#define IMX_SC_R_MU_8B 230
+#define IMX_SC_R_MU_9B 231
+#define IMX_SC_R_MU_10B 232
+#define IMX_SC_R_MU_11B 233
+#define IMX_SC_R_MU_12B 234
+#define IMX_SC_R_MU_13B 235
+#define IMX_SC_R_ROM_0 236
+#define IMX_SC_R_FSPI_0 237
+#define IMX_SC_R_FSPI_1 238
+#define IMX_SC_R_IEE 239
+#define IMX_SC_R_IEE_R0 240
+#define IMX_SC_R_IEE_R1 241
+#define IMX_SC_R_IEE_R2 242
+#define IMX_SC_R_IEE_R3 243
+#define IMX_SC_R_IEE_R4 244
+#define IMX_SC_R_IEE_R5 245
+#define IMX_SC_R_IEE_R6 246
+#define IMX_SC_R_IEE_R7 247
+#define IMX_SC_R_SDHC_0 248
+#define IMX_SC_R_SDHC_1 249
+#define IMX_SC_R_SDHC_2 250
+#define IMX_SC_R_ENET_0 251
+#define IMX_SC_R_ENET_1 252
+#define IMX_SC_R_MLB_0 253
+#define IMX_SC_R_DMA_2_CH0 254
+#define IMX_SC_R_DMA_2_CH1 255
+#define IMX_SC_R_DMA_2_CH2 256
+#define IMX_SC_R_DMA_2_CH3 257
+#define IMX_SC_R_DMA_2_CH4 258
+#define IMX_SC_R_USB_0 259
+#define IMX_SC_R_USB_1 260
+#define IMX_SC_R_USB_0_PHY 261
+#define IMX_SC_R_USB_2 262
+#define IMX_SC_R_USB_2_PHY 263
+#define IMX_SC_R_DTCP 264
+#define IMX_SC_R_NAND 265
+#define IMX_SC_R_LVDS_0 266
+#define IMX_SC_R_LVDS_0_PWM_0 267
+#define IMX_SC_R_LVDS_0_I2C_0 268
+#define IMX_SC_R_LVDS_0_I2C_1 269
+#define IMX_SC_R_LVDS_1 270
+#define IMX_SC_R_LVDS_1_PWM_0 271
+#define IMX_SC_R_LVDS_1_I2C_0 272
+#define IMX_SC_R_LVDS_1_I2C_1 273
+#define IMX_SC_R_LVDS_2 274
+#define IMX_SC_R_LVDS_2_PWM_0 275
+#define IMX_SC_R_LVDS_2_I2C_0 276
+#define IMX_SC_R_LVDS_2_I2C_1 277
+#define IMX_SC_R_M4_0_PID0 278
+#define IMX_SC_R_M4_0_PID1 279
+#define IMX_SC_R_M4_0_PID2 280
+#define IMX_SC_R_M4_0_PID3 281
+#define IMX_SC_R_M4_0_PID4 282
+#define IMX_SC_R_M4_0_RGPIO 283
+#define IMX_SC_R_M4_0_SEMA42 284
+#define IMX_SC_R_M4_0_TPM 285
+#define IMX_SC_R_M4_0_PIT 286
+#define IMX_SC_R_M4_0_UART 287
+#define IMX_SC_R_M4_0_I2C 288
+#define IMX_SC_R_M4_0_INTMUX 289
+#define IMX_SC_R_M4_0_SIM 290
+#define IMX_SC_R_M4_0_WDOG 291
+#define IMX_SC_R_M4_0_MU_0B 292
+#define IMX_SC_R_M4_0_MU_0A0 293
+#define IMX_SC_R_M4_0_MU_0A1 294
+#define IMX_SC_R_M4_0_MU_0A2 295
+#define IMX_SC_R_M4_0_MU_0A3 296
+#define IMX_SC_R_M4_0_MU_1A 297
+#define IMX_SC_R_M4_1_PID0 298
+#define IMX_SC_R_M4_1_PID1 299
+#define IMX_SC_R_M4_1_PID2 300
+#define IMX_SC_R_M4_1_PID3 301
+#define IMX_SC_R_M4_1_PID4 302
+#define IMX_SC_R_M4_1_RGPIO 303
+#define IMX_SC_R_M4_1_SEMA42 304
+#define IMX_SC_R_M4_1_TPM 305
+#define IMX_SC_R_M4_1_PIT 306
+#define IMX_SC_R_M4_1_UART 307
+#define IMX_SC_R_M4_1_I2C 308
+#define IMX_SC_R_M4_1_INTMUX 309
+#define IMX_SC_R_M4_1_SIM 310
+#define IMX_SC_R_M4_1_WDOG 311
+#define IMX_SC_R_M4_1_MU_0B 312
+#define IMX_SC_R_M4_1_MU_0A0 313
+#define IMX_SC_R_M4_1_MU_0A1 314
+#define IMX_SC_R_M4_1_MU_0A2 315
+#define IMX_SC_R_M4_1_MU_0A3 316
+#define IMX_SC_R_M4_1_MU_1A 317
+#define IMX_SC_R_SAI_0 318
+#define IMX_SC_R_SAI_1 319
+#define IMX_SC_R_SAI_2 320
+#define IMX_SC_R_IRQSTR_SCU2 321
+#define IMX_SC_R_IRQSTR_DSP 322
+#define IMX_SC_R_ELCDIF_PLL 323
+#define IMX_SC_R_UNUSED6 324
+#define IMX_SC_R_AUDIO_PLL_0 325
+#define IMX_SC_R_PI_0 326
+#define IMX_SC_R_PI_0_PWM_0 327
+#define IMX_SC_R_PI_0_PWM_1 328
+#define IMX_SC_R_PI_0_I2C_0 329
+#define IMX_SC_R_PI_0_PLL 330
+#define IMX_SC_R_PI_1 331
+#define IMX_SC_R_PI_1_PWM_0 332
+#define IMX_SC_R_PI_1_PWM_1 333
+#define IMX_SC_R_PI_1_I2C_0 334
+#define IMX_SC_R_PI_1_PLL 335
+#define IMX_SC_R_SC_PID0 336
+#define IMX_SC_R_SC_PID1 337
+#define IMX_SC_R_SC_PID2 338
+#define IMX_SC_R_SC_PID3 339
+#define IMX_SC_R_SC_PID4 340
+#define IMX_SC_R_SC_SEMA42 341
+#define IMX_SC_R_SC_TPM 342
+#define IMX_SC_R_SC_PIT 343
+#define IMX_SC_R_SC_UART 344
+#define IMX_SC_R_SC_I2C 345
+#define IMX_SC_R_SC_MU_0B 346
+#define IMX_SC_R_SC_MU_0A0 347
+#define IMX_SC_R_SC_MU_0A1 348
+#define IMX_SC_R_SC_MU_0A2 349
+#define IMX_SC_R_SC_MU_0A3 350
+#define IMX_SC_R_SC_MU_1A 351
+#define IMX_SC_R_SYSCNT_RD 352
+#define IMX_SC_R_SYSCNT_CMP 353
+#define IMX_SC_R_DEBUG 354
+#define IMX_SC_R_SYSTEM 355
+#define IMX_SC_R_SNVS 356
+#define IMX_SC_R_OTP 357
+#define IMX_SC_R_VPU_PID0 358
+#define IMX_SC_R_VPU_PID1 359
+#define IMX_SC_R_VPU_PID2 360
+#define IMX_SC_R_VPU_PID3 361
+#define IMX_SC_R_VPU_PID4 362
+#define IMX_SC_R_VPU_PID5 363
+#define IMX_SC_R_VPU_PID6 364
+#define IMX_SC_R_VPU_PID7 365
+#define IMX_SC_R_VPU_UART 366
+#define IMX_SC_R_VPUCORE 367
+#define IMX_SC_R_VPUCORE_0 368
+#define IMX_SC_R_VPUCORE_1 369
+#define IMX_SC_R_VPUCORE_2 370
+#define IMX_SC_R_VPUCORE_3 371
+#define IMX_SC_R_DMA_4_CH0 372
+#define IMX_SC_R_DMA_4_CH1 373
+#define IMX_SC_R_DMA_4_CH2 374
+#define IMX_SC_R_DMA_4_CH3 375
+#define IMX_SC_R_DMA_4_CH4 376
+#define IMX_SC_R_ISI_CH0 377
+#define IMX_SC_R_ISI_CH1 378
+#define IMX_SC_R_ISI_CH2 379
+#define IMX_SC_R_ISI_CH3 380
+#define IMX_SC_R_ISI_CH4 381
+#define IMX_SC_R_ISI_CH5 382
+#define IMX_SC_R_ISI_CH6 383
+#define IMX_SC_R_ISI_CH7 384
+#define IMX_SC_R_MJPEG_DEC_S0 385
+#define IMX_SC_R_MJPEG_DEC_S1 386
+#define IMX_SC_R_MJPEG_DEC_S2 387
+#define IMX_SC_R_MJPEG_DEC_S3 388
+#define IMX_SC_R_MJPEG_ENC_S0 389
+#define IMX_SC_R_MJPEG_ENC_S1 390
+#define IMX_SC_R_MJPEG_ENC_S2 391
+#define IMX_SC_R_MJPEG_ENC_S3 392
+#define IMX_SC_R_MIPI_0 393
+#define IMX_SC_R_MIPI_0_PWM_0 394
+#define IMX_SC_R_MIPI_0_I2C_0 395
+#define IMX_SC_R_MIPI_0_I2C_1 396
+#define IMX_SC_R_MIPI_1 397
+#define IMX_SC_R_MIPI_1_PWM_0 398
+#define IMX_SC_R_MIPI_1_I2C_0 399
+#define IMX_SC_R_MIPI_1_I2C_1 400
+#define IMX_SC_R_CSI_0 401
+#define IMX_SC_R_CSI_0_PWM_0 402
+#define IMX_SC_R_CSI_0_I2C_0 403
+#define IMX_SC_R_CSI_1 404
+#define IMX_SC_R_CSI_1_PWM_0 405
+#define IMX_SC_R_CSI_1_I2C_0 406
+#define IMX_SC_R_HDMI 407
+#define IMX_SC_R_HDMI_I2S 408
+#define IMX_SC_R_HDMI_I2C_0 409
+#define IMX_SC_R_HDMI_PLL_0 410
+#define IMX_SC_R_HDMI_RX 411
+#define IMX_SC_R_HDMI_RX_BYPASS 412
+#define IMX_SC_R_HDMI_RX_I2C_0 413
+#define IMX_SC_R_ASRC_0 414
+#define IMX_SC_R_ESAI_0 415
+#define IMX_SC_R_SPDIF_0 416
+#define IMX_SC_R_SPDIF_1 417
+#define IMX_SC_R_SAI_3 418
+#define IMX_SC_R_SAI_4 419
+#define IMX_SC_R_SAI_5 420
+#define IMX_SC_R_GPT_5 421
+#define IMX_SC_R_GPT_6 422
+#define IMX_SC_R_GPT_7 423
+#define IMX_SC_R_GPT_8 424
+#define IMX_SC_R_GPT_9 425
+#define IMX_SC_R_GPT_10 426
+#define IMX_SC_R_DMA_2_CH5 427
+#define IMX_SC_R_DMA_2_CH6 428
+#define IMX_SC_R_DMA_2_CH7 429
+#define IMX_SC_R_DMA_2_CH8 430
+#define IMX_SC_R_DMA_2_CH9 431
+#define IMX_SC_R_DMA_2_CH10 432
+#define IMX_SC_R_DMA_2_CH11 433
+#define IMX_SC_R_DMA_2_CH12 434
+#define IMX_SC_R_DMA_2_CH13 435
+#define IMX_SC_R_DMA_2_CH14 436
+#define IMX_SC_R_DMA_2_CH15 437
+#define IMX_SC_R_DMA_2_CH16 438
+#define IMX_SC_R_DMA_2_CH17 439
+#define IMX_SC_R_DMA_2_CH18 440
+#define IMX_SC_R_DMA_2_CH19 441
+#define IMX_SC_R_DMA_2_CH20 442
+#define IMX_SC_R_DMA_2_CH21 443
+#define IMX_SC_R_DMA_2_CH22 444
+#define IMX_SC_R_DMA_2_CH23 445
+#define IMX_SC_R_DMA_2_CH24 446
+#define IMX_SC_R_DMA_2_CH25 447
+#define IMX_SC_R_DMA_2_CH26 448
+#define IMX_SC_R_DMA_2_CH27 449
+#define IMX_SC_R_DMA_2_CH28 450
+#define IMX_SC_R_DMA_2_CH29 451
+#define IMX_SC_R_DMA_2_CH30 452
+#define IMX_SC_R_DMA_2_CH31 453
+#define IMX_SC_R_ASRC_1 454
+#define IMX_SC_R_ESAI_1 455
+#define IMX_SC_R_SAI_6 456
+#define IMX_SC_R_SAI_7 457
+#define IMX_SC_R_AMIX 458
+#define IMX_SC_R_MQS_0 459
+#define IMX_SC_R_DMA_3_CH0 460
+#define IMX_SC_R_DMA_3_CH1 461
+#define IMX_SC_R_DMA_3_CH2 462
+#define IMX_SC_R_DMA_3_CH3 463
+#define IMX_SC_R_DMA_3_CH4 464
+#define IMX_SC_R_DMA_3_CH5 465
+#define IMX_SC_R_DMA_3_CH6 466
+#define IMX_SC_R_DMA_3_CH7 467
+#define IMX_SC_R_DMA_3_CH8 468
+#define IMX_SC_R_DMA_3_CH9 469
+#define IMX_SC_R_DMA_3_CH10 470
+#define IMX_SC_R_DMA_3_CH11 471
+#define IMX_SC_R_DMA_3_CH12 472
+#define IMX_SC_R_DMA_3_CH13 473
+#define IMX_SC_R_DMA_3_CH14 474
+#define IMX_SC_R_DMA_3_CH15 475
+#define IMX_SC_R_DMA_3_CH16 476
+#define IMX_SC_R_DMA_3_CH17 477
+#define IMX_SC_R_DMA_3_CH18 478
+#define IMX_SC_R_DMA_3_CH19 479
+#define IMX_SC_R_DMA_3_CH20 480
+#define IMX_SC_R_DMA_3_CH21 481
+#define IMX_SC_R_DMA_3_CH22 482
+#define IMX_SC_R_DMA_3_CH23 483
+#define IMX_SC_R_DMA_3_CH24 484
+#define IMX_SC_R_DMA_3_CH25 485
+#define IMX_SC_R_DMA_3_CH26 486
+#define IMX_SC_R_DMA_3_CH27 487
+#define IMX_SC_R_DMA_3_CH28 488
+#define IMX_SC_R_DMA_3_CH29 489
+#define IMX_SC_R_DMA_3_CH30 490
+#define IMX_SC_R_DMA_3_CH31 491
+#define IMX_SC_R_AUDIO_PLL_1 492
+#define IMX_SC_R_AUDIO_CLK_0 493
+#define IMX_SC_R_AUDIO_CLK_1 494
+#define IMX_SC_R_MCLK_OUT_0 495
+#define IMX_SC_R_MCLK_OUT_1 496
+#define IMX_SC_R_PMIC_0 497
+#define IMX_SC_R_PMIC_1 498
+#define IMX_SC_R_SECO 499
+#define IMX_SC_R_CAAM_JR1 500
+#define IMX_SC_R_CAAM_JR2 501
+#define IMX_SC_R_CAAM_JR3 502
+#define IMX_SC_R_SECO_MU_2 503
+#define IMX_SC_R_SECO_MU_3 504
+#define IMX_SC_R_SECO_MU_4 505
+#define IMX_SC_R_HDMI_RX_PWM_0 506
+#define IMX_SC_R_A35 507
+#define IMX_SC_R_A35_0 508
+#define IMX_SC_R_A35_1 509
+#define IMX_SC_R_A35_2 510
+#define IMX_SC_R_A35_3 511
+#define IMX_SC_R_DSP 512
+#define IMX_SC_R_DSP_RAM 513
+#define IMX_SC_R_CAAM_JR1_OUT 514
+#define IMX_SC_R_CAAM_JR2_OUT 515
+#define IMX_SC_R_CAAM_JR3_OUT 516
+#define IMX_SC_R_VPU_DEC_0 517
+#define IMX_SC_R_VPU_ENC_0 518
+#define IMX_SC_R_CAAM_JR0 519
+#define IMX_SC_R_CAAM_JR0_OUT 520
+#define IMX_SC_R_PMIC_2 521
+#define IMX_SC_R_DBLOGIC 522
+#define IMX_SC_R_HDMI_PLL_1 523
+#define IMX_SC_R_BOARD_R0 524
+#define IMX_SC_R_BOARD_R1 525
+#define IMX_SC_R_BOARD_R2 526
+#define IMX_SC_R_BOARD_R3 527
+#define IMX_SC_R_BOARD_R4 528
+#define IMX_SC_R_BOARD_R5 529
+#define IMX_SC_R_BOARD_R6 530
+#define IMX_SC_R_BOARD_R7 531
+#define IMX_SC_R_MJPEG_DEC_MP 532
+#define IMX_SC_R_MJPEG_ENC_MP 533
+#define IMX_SC_R_VPU_TS_0 534
+#define IMX_SC_R_VPU_MU_0 535
+#define IMX_SC_R_VPU_MU_1 536
+#define IMX_SC_R_VPU_MU_2 537
+#define IMX_SC_R_VPU_MU_3 538
+#define IMX_SC_R_VPU_ENC_1 539
+#define IMX_SC_R_VPU 540
+#define IMX_SC_R_LAST 541
+
+#endif /* __DT_BINDINGS_RSCRC_IMX_H */
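
The values above form an append-only numbering shared with the system controller firmware and with device trees (where they typically appear in power-domain specifiers), which is why the header comment insists they never be renumbered. A minimal, purely illustrative C sketch of a consumer referencing the raw IDs follows; the table name is a placeholder and not part of the patch:

#include <dt-bindings/firmware/imx/rsrc.h>

/*
 * Raw resource IDs end up in firmware messages and device trees, so the
 * numeric values behind these macros must stay stable (append-only list).
 */
static const unsigned int example_managed_resources[] = {
	IMX_SC_R_UART_0,
	IMX_SC_R_SDHC_0,
	IMX_SC_R_ENET_0,
};
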
diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h
index 6298fec00685..94ed3edfcc70 100644
--- a/include/dt-bindings/media/xilinx-vip.h
+++ b/include/dt-bindings/media/xilinx-vip.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Video IP Core
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DT_BINDINGS_MEDIA_XILINX_VIP_H__
diff --git a/include/dt-bindings/regulator/active-semi,8945a-regulator.h b/include/dt-bindings/regulator/active-semi,8945a-regulator.h
new file mode 100644
index 000000000000..9bdba5e3141a
--- /dev/null
+++ b/include/dt-bindings/regulator/active-semi,8945a-regulator.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 Microchip Technology, Inc. All rights reserved.
+ *
+ * Device Tree binding constants for the ACT8945A PMIC regulators
+ */
+
+#ifndef _DT_BINDINGS_REGULATOR_ACT8945A_H
+#define _DT_BINDINGS_REGULATOR_ACT8945A_H
+
+/*
+ * These constants should be used to specify regulator modes in device tree for
+ * ACT8945A regulators as follows:
+ * ACT8945A_REGULATOR_MODE_FIXED: It is specific to DCDC regulators and it
+ * specifies the usage of fixed-frequency
+ * PWM.
+ *
+ * ACT8945A_REGULATOR_MODE_NORMAL: It is specific to LDO regulators and it
+ * specifies the usage of normal mode.
+ *
+ * ACT8945A_REGULATOR_MODE_LOWPOWER: For DCDC and LDO regulators; it specifies
+ * the usage of proprietary power-saving
+ * mode.
+ */
+
+#define ACT8945A_REGULATOR_MODE_FIXED 1
+#define ACT8945A_REGULATOR_MODE_NORMAL 2
+#define ACT8945A_REGULATOR_MODE_LOWPOWER 3
+
+#endif
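
As a rough illustration of the applicability rules spelled out in the comment above (FIXED is DCDC-only, NORMAL is LDO-only, LOWPOWER applies to both), a consumer could validate a mode constant like this; the helper name and the is_dcdc flag are assumptions for the sketch, not part of the binding:

#include <dt-bindings/regulator/active-semi,8945a-regulator.h>

static inline bool example_act8945a_mode_valid(bool is_dcdc, unsigned int mode)
{
	switch (mode) {
	case ACT8945A_REGULATOR_MODE_FIXED:
		return is_dcdc;		/* fixed-frequency PWM: DCDC only */
	case ACT8945A_REGULATOR_MODE_NORMAL:
		return !is_dcdc;	/* normal mode: LDO only */
	case ACT8945A_REGULATOR_MODE_LOWPOWER:
		return true;		/* power-saving mode: DCDC and LDO */
	default:
		return false;
	}
}
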
diff --git a/include/dt-bindings/reset/sun8i-de2.h b/include/dt-bindings/reset/sun8i-de2.h
index 9526017432f0..1c36a6ac86d6 100644
--- a/include/dt-bindings/reset/sun8i-de2.h
+++ b/include/dt-bindings/reset/sun8i-de2.h
@@ -10,5 +10,6 @@
#define RST_MIXER0 0
#define RST_MIXER1 1
#define RST_WB 2
+#define RST_ROT 3
#endif /* _DT_BINDINGS_RESET_SUN8I_DE2_H_ */
diff --git a/include/dt-bindings/reset/suniv-ccu-f1c100s.h b/include/dt-bindings/reset/suniv-ccu-f1c100s.h
new file mode 100644
index 000000000000..6a4b4385fe5a
--- /dev/null
+++ b/include/dt-bindings/reset/suniv-ccu-f1c100s.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ *
+ * Copyright (C) 2018 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ */
+
+#ifndef _DT_BINDINGS_RST_SUNIV_F1C100S_H_
+#define _DT_BINDINGS_RST_SUNIV_F1C100S_H_
+
+#define RST_USB_PHY0 0
+#define RST_BUS_DMA 1
+#define RST_BUS_MMC0 2
+#define RST_BUS_MMC1 3
+#define RST_BUS_DRAM 4
+#define RST_BUS_SPI0 5
+#define RST_BUS_SPI1 6
+#define RST_BUS_OTG 7
+#define RST_BUS_VE 8
+#define RST_BUS_LCD 9
+#define RST_BUS_DEINTERLACE 10
+#define RST_BUS_CSI 11
+#define RST_BUS_TVD 12
+#define RST_BUS_TVE 13
+#define RST_BUS_DE_BE 14
+#define RST_BUS_DE_FE 15
+#define RST_BUS_CODEC 16
+#define RST_BUS_SPDIF 17
+#define RST_BUS_IR 18
+#define RST_BUS_RSB 19
+#define RST_BUS_I2S0 20
+#define RST_BUS_I2C0 21
+#define RST_BUS_I2C1 22
+#define RST_BUS_I2C2 23
+#define RST_BUS_UART0 24
+#define RST_BUS_UART1 25
+#define RST_BUS_UART2 26
+
+#endif /* _DT_BINDINGS_RST_SUNIV_F1C100S_H_ */
diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h
index e2d3892240b8..1df06f8ad5c3 100644
--- a/include/dt-bindings/sound/qcom,q6afe.h
+++ b/include/dt-bindings/sound/qcom,q6afe.h
@@ -106,6 +106,7 @@
#define QUINARY_TDM_TX_6 101
#define QUINARY_TDM_RX_7 102
#define QUINARY_TDM_TX_7 103
+#define DISPLAY_PORT_RX 104
#endif /* __DT_BINDINGS_Q6_AFE_H__ */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 6502feb9524b..33771352dcd6 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -21,7 +21,6 @@
#include <linux/clocksource.h>
#include <linux/hrtimer.h>
-#include <linux/workqueue.h>
struct arch_timer_context {
/* Registers: control register, timer value */
@@ -52,9 +51,6 @@ struct arch_timer_cpu {
/* Background timer used when the guest is not running */
struct hrtimer bg_timer;
- /* Work queued with the above timer expires */
- struct work_struct expired;
-
/* Physical timer emulation */
struct hrtimer phys_timer;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ed80f147bd50..87715f20b69a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -101,7 +101,7 @@ static inline bool has_acpi_companion(struct device *dev)
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
- ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL));
+ ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false));
}
static inline const char *acpi_dev_name(struct acpi_device *adev)
@@ -340,7 +340,14 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
+#ifdef CONFIG_PCI
void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
+#else
+static inline void acpi_penalize_sci_irq(int irq, int trigger,
+ int polarity)
+{
+}
+#endif
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
@@ -1054,6 +1061,17 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
#endif
+#if defined(CONFIG_ACPI) && IS_ENABLED(CONFIG_I2C)
+bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c);
+#else
+static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c)
+{
+ return false;
+}
+#endif
+
/* Device properties */
#ifdef CONFIG_ACPI
@@ -1313,4 +1331,14 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
}
#endif
+#ifdef CONFIG_ACPI
+extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
+#else
+static inline int
+acpi_platform_notify(struct device *dev, enum kobject_action action)
+{
+ return 0;
+}
+#endif
+
#endif /*_LINUX_ACPI_H*/
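
The new i2c_acpi_get_i2c_resource() declaration above (stubbed to return false when ACPI or I2C support is disabled) lets resource-walk code pick out I2C serial-bus descriptors. A minimal sketch of a caller is shown below; the function name is illustrative only and not part of the patch:

#include <linux/acpi.h>

/* Returns true if @ares describes an I2C serial-bus connection. */
static bool example_is_i2c_connection(struct acpi_resource *ares)
{
	struct acpi_resource_i2c_serialbus *sb;

	return i2c_acpi_get_i2c_resource(ares, &sb);
}
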
diff --git a/include/linux/adxl.h b/include/linux/adxl.h
index 2d29f55923e3..2a629acb4c3f 100644
--- a/include/linux/adxl.h
+++ b/include/linux/adxl.h
@@ -7,12 +7,7 @@
#ifndef _LINUX_ADXL_H
#define _LINUX_ADXL_H
-#ifdef CONFIG_ACPI_ADXL
const char * const *adxl_get_component_names(void);
int adxl_decode(u64 addr, u64 component_values[]);
-#else
-static inline const char * const *adxl_get_component_names(void) { return NULL; }
-static inline int adxl_decode(u64 addr, u64 component_values[]) { return -EOPNOTSUPP; }
-#endif
#endif /* _LINUX_ADXL_H */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 9334fbef7bae..a625c29a2ea2 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -115,8 +115,6 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall);
struct filename;
-extern void audit_log_session_info(struct audit_buffer *ab);
-
#define AUDIT_OFF 0
#define AUDIT_ON 1
#define AUDIT_LOCKED 2
@@ -153,8 +151,7 @@ extern void audit_log_link_denied(const char *operation);
extern void audit_log_lost(const char *message);
extern int audit_log_task_context(struct audit_buffer *ab);
-extern void audit_log_task_info(struct audit_buffer *ab,
- struct task_struct *tsk);
+extern void audit_log_task_info(struct audit_buffer *ab);
extern int audit_update_lsm_rules(void);
@@ -202,8 +199,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
}
-static inline void audit_log_task_info(struct audit_buffer *ab,
- struct task_struct *tsk)
+static inline void audit_log_task_info(struct audit_buffer *ab)
{ }
#define audit_enabled AUDIT_OFF
#endif /* CONFIG_AUDIT */
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index b2488055fd1d..7605b5919c3a 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -171,7 +171,7 @@ struct virtchnl_msg {
VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
-/* Message descriptions and data structures.*/
+/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
@@ -342,6 +342,8 @@ struct virtchnl_vsi_queue_config_info {
struct virtchnl_queue_pair_info qpair[1];
};
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
/* VIRTCHNL_OP_REQUEST_QUEUES
* VF sends this message to request the PF to allocate additional queues to
* this VF. Each VF gets a guaranteed number of queues on init but asking for
@@ -357,8 +359,6 @@ struct virtchnl_vf_res_request {
u16 num_queue_pairs;
};
-VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
-
/* VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
@@ -819,8 +819,8 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
if (msglen >= valid_len) {
struct virtchnl_tc_info *vti =
(struct virtchnl_tc_info *)msg;
- valid_len += vti->num_tc *
- sizeof(struct virtchnl_channel_info);
+ valid_len += (vti->num_tc - 1) *
+ sizeof(struct virtchnl_channel_info);
if (vti->num_tc == 0)
err_msg_format = true;
}
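
The sizing change in the hunk above appears to account for the fact that, like the other variable-length messages in this header, virtchnl_tc_info is assumed to end in a one-element array of virtchnl_channel_info: sizeof() of the outer struct already covers the first element, so only (num_tc - 1) further elements need to be added, and num_tc == 0 is rejected separately. A hedged sketch of that arithmetic:

/* Illustrative only; assumes the one-element trailing array layout. */
static size_t example_tc_info_len(unsigned int num_tc)
{
	return sizeof(struct virtchnl_tc_info) +
	       (num_tc - 1) * sizeof(struct virtchnl_channel_info);
}
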
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 056fb627edb3..7380b094dcca 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -491,35 +491,40 @@ do { \
bio_clear_flag(bio, BIO_THROTTLED);\
(bio)->bi_disk = (bdev)->bd_disk; \
(bio)->bi_partno = (bdev)->bd_partno; \
+ bio_associate_blkg(bio); \
} while (0)
#define bio_copy_dev(dst, src) \
do { \
(dst)->bi_disk = (src)->bi_disk; \
(dst)->bi_partno = (src)->bi_partno; \
+ bio_clone_blkg_association(dst, src); \
} while (0)
#define bio_dev(bio) \
disk_devt((bio)->bi_disk)
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
+void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
-static inline int bio_associate_blkcg_from_page(struct bio *bio,
- struct page *page) { return 0; }
+static inline void bio_associate_blkg_from_page(struct bio *bio,
+ struct page *page) { }
#endif
#ifdef CONFIG_BLK_CGROUP
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
-void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
+void bio_disassociate_blkg(struct bio *bio);
+void bio_associate_blkg(struct bio *bio);
+void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css);
+void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkcg(struct bio *bio,
- struct cgroup_subsys_state *blkcg_css) { return 0; }
-static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkcg_association(struct bio *dst,
- struct bio *src) { }
+static inline void bio_disassociate_blkg(struct bio *bio) { }
+static inline void bio_associate_blkg(struct bio *bio) { }
+static inline void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{ }
+static inline void bio_clone_blkg_association(struct bio *dst,
+ struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
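
With the bio.h changes above, bio_set_dev() associates a blkg for the current context and bio_copy_dev() clones the association, so submitters no longer call the removed bio_associate_blkcg() by hand. A minimal sketch of the new pattern, with placeholder names for the caller and its css argument:

/* Illustrative only: implicit association via bio_set_dev(), optional
 * override with the explicit css-based helper declared above. */
static void example_prepare_bio(struct bio *bio, struct block_device *bdev,
				struct cgroup_subsys_state *css)
{
	bio_set_dev(bio, bdev);		/* calls bio_associate_blkg() */
	if (css)
		bio_associate_blkg_from_css(bio, css);
}
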
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 6d766a19f2bb..f025fd1e22e6 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -21,6 +21,7 @@
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
+#include <linux/fs.h>
/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
@@ -122,11 +123,8 @@ struct blkcg_gq {
/* all non-root blkcg_gq's are guaranteed to have access to parent */
struct blkcg_gq *parent;
- /* request allocation list for this blkcg-q pair */
- struct request_list rl;
-
/* reference count */
- atomic_t refcnt;
+ struct percpu_ref refcnt;
/* is this blkg online? protected by both blkcg and q locks */
bool online;
@@ -184,6 +182,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
@@ -230,22 +230,62 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+/**
+ * blkcg_css - find the current css
+ *
+ * Find the css associated with either the kthread or the current task.
+ * This may return a dying css, so it is up to the caller to use tryget logic
+ * to confirm it is alive and well.
+ */
+static inline struct cgroup_subsys_state *blkcg_css(void)
+{
+ struct cgroup_subsys_state *css;
+
+ css = kthread_blkcg();
+ if (css)
+ return css;
+ return task_css(current, io_cgrp_id);
+}
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct blkcg, css) : NULL;
}
-static inline struct blkcg *bio_blkcg(struct bio *bio)
+/**
+ * __bio_blkcg - internal, inconsistent version to get blkcg
+ *
+ * DO NOT USE.
+ * This function is inconsistent and consequently is dangerous to use. The
+ * first part of the function returns a blkcg where a reference is owned by the
+ * bio. This means it does not need to be rcu protected as it cannot go away
+ * with the bio owning a reference to it. However, the latter potentially gets
+ * it from task_css(). This can race against task migration and the cgroup
+ * dying. It is also semantically different as it must be called rcu protected
+ * and is susceptible to failure when trying to get a reference to it.
+ * Therefore, it is not ok to assume that *_get() will always succeed on the
+ * blkcg returned here.
+ */
+static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
- struct cgroup_subsys_state *css;
+ if (bio && bio->bi_blkg)
+ return bio->bi_blkg->blkcg;
+ return css_to_blkcg(blkcg_css());
+}
- if (bio && bio->bi_css)
- return css_to_blkcg(bio->bi_css);
- css = kthread_blkcg();
- if (css)
- return css_to_blkcg(css);
- return css_to_blkcg(task_css(current, io_cgrp_id));
+/**
+ * bio_blkcg - grab the blkcg associated with a bio
+ * @bio: target bio
+ *
+ * This returns the blkcg associated with a bio, %NULL if not associated.
+ * Callers are expected to either handle %NULL or know association has been
+ * done prior to calling this.
+ */
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+ if (bio && bio->bi_blkg)
+ return bio->bi_blkg->blkcg;
+ return NULL;
}
static inline bool blk_cgroup_congested(void)
@@ -328,16 +368,12 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
* @q: request_queue of interest
*
* Lookup blkg for the @blkcg - @q pair. This function should be called
- * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
- * - see blk_queue_bypass_start() for details.
+ * under RCU read lock.
*/
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
struct request_queue *q)
{
WARN_ON_ONCE(!rcu_read_lock_held());
-
- if (unlikely(blk_queue_bypass(q)))
- return NULL;
return __blkg_lookup(blkcg, q, false);
}
@@ -451,26 +487,35 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
*/
static inline void blkg_get(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- atomic_inc(&blkg->refcnt);
+ percpu_ref_get(&blkg->refcnt);
}
/**
- * blkg_try_get - try and get a blkg reference
+ * blkg_tryget - try and get a blkg reference
* @blkg: blkg to get
*
* This is for use when doing an RCU lookup of the blkg. We may be in the midst
* of freeing this blkg, so we can only use it if the refcnt is not zero.
*/
-static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
+static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
- if (atomic_inc_not_zero(&blkg->refcnt))
- return blkg;
- return NULL;
+ return percpu_ref_tryget(&blkg->refcnt);
}
+/**
+ * blkg_tryget_closest - try and get a blkg ref on the closest blkg
+ * @blkg: blkg to get
+ *
+ * This walks up the blkg tree to find the closest non-dying blkg and returns
+ * the blkg it ultimately took the reference on, which may not be the one
+ * passed in.
+ */
+static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
+{
+ while (blkg && !percpu_ref_tryget(&blkg->refcnt))
+ blkg = blkg->parent;
-void __blkg_release_rcu(struct rcu_head *rcu);
+ return blkg;
+}
/**
* blkg_put - put a blkg reference
@@ -478,9 +523,7 @@ void __blkg_release_rcu(struct rcu_head *rcu);
*/
static inline void blkg_put(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- if (atomic_dec_and_test(&blkg->refcnt))
- call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+ percpu_ref_put(&blkg->refcnt);
}
/**
@@ -515,94 +558,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q, false)))
-/**
- * blk_get_rl - get request_list to use
- * @q: request_queue of interest
- * @bio: bio which will be attached to the allocated request (may be %NULL)
- *
- * The caller wants to allocate a request from @q to use for @bio. Find
- * the request_list to use and obtain a reference on it. Should be called
- * under queue_lock. This function is guaranteed to return non-%NULL
- * request_list.
- */
-static inline struct request_list *blk_get_rl(struct request_queue *q,
- struct bio *bio)
-{
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
-
- rcu_read_lock();
-
- blkcg = bio_blkcg(bio);
-
- /* bypass blkg lookup and use @q->root_rl directly for root */
- if (blkcg == &blkcg_root)
- goto root_rl;
-
- /*
- * Try to use blkg->rl. blkg lookup may fail under memory pressure
- * or if either the blkcg or queue is going away. Fall back to
- * root_rl in such cases.
- */
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg))
- goto root_rl;
-
- blkg_get(blkg);
- rcu_read_unlock();
- return &blkg->rl;
-root_rl:
- rcu_read_unlock();
- return &q->root_rl;
-}
-
-/**
- * blk_put_rl - put request_list
- * @rl: request_list to put
- *
- * Put the reference acquired by blk_get_rl(). Should be called under
- * queue_lock.
- */
-static inline void blk_put_rl(struct request_list *rl)
-{
- if (rl->blkg->blkcg != &blkcg_root)
- blkg_put(rl->blkg);
-}
-
-/**
- * blk_rq_set_rl - associate a request with a request_list
- * @rq: request of interest
- * @rl: target request_list
- *
- * Associate @rq with @rl so that accounting and freeing can know the
- * request_list @rq came from.
- */
-static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
-{
- rq->rl = rl;
-}
-
-/**
- * blk_rq_rl - return the request_list a request came from
- * @rq: request of interest
- *
- * Return the request_list @rq is allocated from.
- */
-static inline struct request_list *blk_rq_rl(struct request *rq)
-{
- return rq->rl;
-}
-
-struct request_list *__blk_queue_next_rl(struct request_list *rl,
- struct request_queue *q);
-/**
- * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
- *
- * Should be used under queue_lock.
- */
-#define blk_queue_for_each_rl(rl, q) \
- for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
-
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
int ret;
@@ -797,32 +752,34 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
struct bio *bio) { return false; }
#endif
+
+static inline void blkcg_bio_issue_init(struct bio *bio)
+{
+ bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+}
+
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio)
{
- struct blkcg *blkcg;
struct blkcg_gq *blkg;
bool throtl = false;
rcu_read_lock();
- blkcg = bio_blkcg(bio);
-
- /* associate blkcg if bio hasn't attached one */
- bio_associate_blkcg(bio, &blkcg->css);
-
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- spin_unlock_irq(q->queue_lock);
+
+ if (!bio->bi_blkg) {
+ char b[BDEVNAME_SIZE];
+
+ WARN_ONCE(1,
+ "no blkg associated for bio on block-device: %s\n",
+ bio_devname(bio, b));
+ bio_associate_blkg(bio);
}
+ blkg = bio->bi_blkg;
+
throtl = blk_throtl_bio(q, blkg, bio);
if (!throtl) {
- blkg = blkg ?: q->root_blkg;
/*
* If the bio is flagged with BIO_QUEUE_ENTERED it means this
* is a split bio and we would have already accounted for the
@@ -834,6 +791,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
+ blkcg_bio_issue_init(bio);
+
rcu_read_unlock();
return !throtl;
}
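
blkcg_bio_issue_check() now expects the bio to already carry its blkg (bio->bi_blkg), only falling back with a warning, and blkcg_bio_issue_init() stamps the issue time. A hedged sketch of the intended call order, loosely mirroring what a submission path does; my_submit_bio() is an illustrative name, not part of this patch:

	static blk_qc_t my_submit_bio(struct request_queue *q, struct bio *bio)
	{
		/* bio->bi_blkg is normally set earlier, e.g. bio_associate_blkg() */
		if (!blkcg_bio_issue_check(q, bio))
			return BLK_QC_T_NONE;	/* throttled: held back for now */

		return q->make_request_fn(q, bio);
	}
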
@@ -930,6 +889,7 @@ static inline int blkcg_activate_policy(struct request_queue *q,
static inline void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol) { }
+static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
@@ -939,12 +899,7 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
-static inline struct request_list *blk_get_rl(struct request_queue *q,
- struct bio *bio) { return &q->root_rl; }
-static inline void blk_put_rl(struct request_list *rl) { }
-static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
-static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
-
+static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio) { return true; }
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 9f4c17f0d2d8..0b1f45c62623 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -2,10 +2,10 @@
#ifndef _LINUX_BLK_MQ_PCI_H
#define _LINUX_BLK_MQ_PCI_H
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
struct pci_dev;
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
int offset);
#endif /* _LINUX_BLK_MQ_PCI_H */
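
blk_mq_pci_map_queues() now takes one per-type struct blk_mq_queue_map rather than the whole tag set, so a driver's ->map_queues callback picks which map it spreads across the device's vectors. A hedged sketch of such a callback; struct mydev and the mydev_* names are illustrative assumptions:

	static int mydev_map_queues(struct blk_mq_tag_set *set)
	{
		struct mydev *dev = set->driver_data;	/* illustrative type */

		/* spread the default queue type over the PCI MSI-X vectors */
		return blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					     dev->pdev, 0);
	}
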
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
index b4ade198007d..7b6ecf9ac4c3 100644
--- a/include/linux/blk-mq-rdma.h
+++ b/include/linux/blk-mq-rdma.h
@@ -4,7 +4,7 @@
struct blk_mq_tag_set;
struct ib_device;
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
struct ib_device *dev, int first_vec);
#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
index 69b4da262c45..687ae287e1dc 100644
--- a/include/linux/blk-mq-virtio.h
+++ b/include/linux/blk-mq-virtio.h
@@ -2,10 +2,10 @@
#ifndef _LINUX_BLK_MQ_VIRTIO_H
#define _LINUX_BLK_MQ_VIRTIO_H
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
struct virtio_device;
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec);
#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2286dc12c6bc..0e030f5f76b6 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -37,7 +37,8 @@ struct blk_mq_hw_ctx {
struct blk_mq_ctx *dispatch_from;
unsigned int dispatch_busy;
- unsigned int nr_ctx;
+ unsigned short type;
+ unsigned short nr_ctx;
struct blk_mq_ctx **ctxs;
spinlock_t dispatch_wait_lock;
@@ -74,10 +75,31 @@ struct blk_mq_hw_ctx {
struct srcu_struct srcu[0];
};
+struct blk_mq_queue_map {
+ unsigned int *mq_map;
+ unsigned int nr_queues;
+ unsigned int queue_offset;
+};
+
+enum hctx_type {
+ HCTX_TYPE_DEFAULT, /* all I/O not otherwise accounted for */
+ HCTX_TYPE_READ, /* just for READ I/O */
+ HCTX_TYPE_POLL, /* polled I/O of any kind */
+
+ HCTX_MAX_TYPES,
+};
+
struct blk_mq_tag_set {
- unsigned int *mq_map;
+ /*
+ * map[] holds ctx -> hctx mappings, one map exists for each type
+ * that the driver wishes to support. There are no restrictions
+ * on maps being of the same size, and it's perfectly legal to
+ * share maps between types.
+ */
+ struct blk_mq_queue_map map[HCTX_MAX_TYPES];
+ unsigned int nr_maps; /* nr entries in map[] */
const struct blk_mq_ops *ops;
- unsigned int nr_hw_queues;
+ unsigned int nr_hw_queues; /* nr hw queues across maps */
unsigned int queue_depth; /* max hw supported */
unsigned int reserved_tags;
unsigned int cmd_size; /* per-request extra data */
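
As the comment above notes, a driver opts into multiple maps by sizing nr_maps, and types it does not distinguish may reuse another map. A hedged sketch of the simplest, single-map tag-set setup; nr_vecs and mydev_mq_ops are illustrative names:

	set->nr_maps = 1;		/* only map[HCTX_TYPE_DEFAULT] is used */
	set->nr_hw_queues = nr_vecs;	/* total hw queues across all maps */
	set->ops = &mydev_mq_ops;	/* driver ops, illustrative name */
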
@@ -99,6 +121,7 @@ struct blk_mq_queue_data {
typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
const struct blk_mq_queue_data *);
+typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
@@ -109,11 +132,13 @@ typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int);
-typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
-typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
-typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef int (poll_fn)(struct blk_mq_hw_ctx *);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
+typedef bool (busy_fn)(struct request_queue *);
+typedef void (complete_fn)(struct request *);
struct blk_mq_ops {
@@ -123,6 +148,15 @@ struct blk_mq_ops {
queue_rq_fn *queue_rq;
/*
+ * If a driver uses bd->last to judge when to submit requests to
+ * hardware, it must define this function. In case of errors that
+ * make us stop issuing further requests, this hook serves the
+ * purpose of kicking the hardware (which the last request otherwise
+ * would have done).
+ */
+ commit_rqs_fn *commit_rqs;
+
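
The commit_rqs hook exists for drivers that only ring the doorbell when bd->last is set; if queueing stops early, the block layer can still kick already-queued requests through ->commit_rqs(). A hedged sketch of the pair; the mydev_* helpers are assumptions, not real APIs:

	static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
	{
		mydev_post(hctx->driver_data, bd->rq);	/* queue to hw ring */
		if (bd->last)
			mydev_ring_doorbell(hctx->driver_data);
		return BLK_STS_OK;
	}

	static void mydev_commit_rqs(struct blk_mq_hw_ctx *hctx)
	{
		/* batch ended without bd->last: kick the hardware now */
		mydev_ring_doorbell(hctx->driver_data);
	}
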
+ /*
* Reserve budget before queue request, once .queue_rq is
* run, it is driver's responsibility to release the
* reserved budget. Also we have to handle failure case
@@ -141,7 +175,7 @@ struct blk_mq_ops {
*/
poll_fn *poll;
- softirq_done_fn *complete;
+ complete_fn *complete;
/*
* Called when the block layer side of a hardware queue has been
@@ -165,6 +199,11 @@ struct blk_mq_ops {
/* Called from inside blk_get_request() */
void (*initialize_rq_fn)(struct request *rq);
+ /*
+ * If set, returns whether or not this queue currently is busy
+ */
+ busy_fn *busy;
+
map_queues_fn *map_queues;
#ifdef CONFIG_BLK_DEBUG_FS
@@ -218,6 +257,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+bool blk_mq_queue_inflight(struct request_queue *q);
+
enum {
/* return when out of requests */
BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
@@ -264,7 +305,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-void blk_mq_complete_request(struct request *rq);
+bool blk_mq_complete_request(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
@@ -288,24 +329,12 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
-/**
- * blk_mq_mark_complete() - Set request state to complete
- * @rq: request to set to complete state
- *
- * Returns true if request state was successfully set to complete. If
- * successful, the caller is responsibile for seeing this request is ended, as
- * blk_mq_complete_request will not work again.
- */
-static inline bool blk_mq_mark_complete(struct request *rq)
-{
- return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
- MQ_RQ_IN_FLIGHT;
-}
+unsigned int blk_mq_rq_cpu(struct request *rq);
/*
* Driver command data is immediately after the request. So subtract request
@@ -328,4 +357,14 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
+static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ if (rq->tag != -1)
+ return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
+
+ return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
+ BLK_QC_T_INTERNAL;
+}
+
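
request_to_qc_t() folds the (internal) tag and hardware queue number into the poll cookie, and blk_qc_t_to_queue_num() recovers the queue later. A small hedged sketch of the round trip:

	blk_qc_t cookie = request_to_qc_t(hctx, rq);

	/* later, e.g. in a polling path */
	if (blk_qc_t_valid(cookie)) {
		unsigned int qnum = blk_qc_t_to_queue_num(cookie);
		/* qnum matches hctx->queue_num of the submitting hw queue */
	}
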
#endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 1dcf652ba0aa..5c7e7f859a24 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -174,11 +174,11 @@ struct bio {
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
- * Optional ioc and css associated with this bio. Put on bio
- * release. Read comment on top of bio_associate_current().
+ * Represents the association of the css and request_queue for the bio.
+ * If a bio goes direct to device, it will not have a blkg as it will
+ * not have a request_queue associated with it. The reference is put
+ * on release of the bio.
*/
- struct io_context *bi_ioc;
- struct cgroup_subsys_state *bi_css;
struct blkcg_gq *bi_blkg;
struct bio_issue bi_issue;
#endif
@@ -228,6 +228,7 @@ struct bio {
#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
* of this bio. */
#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
+#define BIO_TRACKED 12 /* set if bio goes through the rq_qos path */
/* See BVEC_POOL_OFFSET below before adding new flags */
@@ -323,6 +324,8 @@ enum req_flag_bits {
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
+ __REQ_HIPRI,
+
/* for driver use */
__REQ_DRV,
__REQ_SWAP, /* swapping request. */
@@ -343,8 +346,8 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
-
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
+#define REQ_HIPRI (1ULL << __REQ_HIPRI)
#define REQ_DRV (1ULL << __REQ_DRV)
#define REQ_SWAP (1ULL << __REQ_SWAP)
@@ -422,17 +425,6 @@ static inline bool blk_qc_t_valid(blk_qc_t cookie)
return cookie != BLK_QC_T_NONE;
}
-static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
- bool internal)
-{
- blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);
-
- if (internal)
- ret |= BLK_QC_T_INTERNAL;
-
- return ret;
-}
-
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 653ae90eec0b..338604dff7d0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -58,25 +58,6 @@ struct blk_stat_callback;
typedef void (rq_end_io_fn)(struct request *, blk_status_t);
-#define BLK_RL_SYNCFULL (1U << 0)
-#define BLK_RL_ASYNCFULL (1U << 1)
-
-struct request_list {
- struct request_queue *q; /* the queue this rl belongs to */
-#ifdef CONFIG_BLK_CGROUP
- struct blkcg_gq *blkg; /* blkg this request pool belongs to */
-#endif
- /*
- * count[], starved[], and wait[] are indexed by
- * BLK_RW_SYNC/BLK_RW_ASYNC
- */
- int count[2];
- int starved[2];
- mempool_t *rq_pool;
- wait_queue_head_t wait[2];
- unsigned int flags;
-};
-
/*
* request flags */
typedef __u32 __bitwise req_flags_t;
@@ -85,8 +66,6 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_SORTED ((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* uses tagged queueing */
-#define RQF_QUEUED ((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
/* request for flush sequence */
@@ -150,8 +129,8 @@ enum mq_rq_state {
struct request {
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
- int cpu;
unsigned int cmd_flags; /* op and common flags */
req_flags_t rq_flags;
@@ -245,11 +224,7 @@ struct request {
refcount_t ref;
unsigned int timeout;
-
- /* access through blk_rq_set_deadline, blk_rq_deadline */
- unsigned long __deadline;
-
- struct list_head timeout_list;
+ unsigned long deadline;
union {
struct __call_single_data csd;
@@ -264,10 +239,6 @@ struct request {
/* for bidi */
struct request *next_rq;
-
-#ifdef CONFIG_BLK_CGROUP
- struct request_list *rl; /* rl this rq is alloced from */
-#endif
};
static inline bool blk_op_is_scsi(unsigned int op)
@@ -311,41 +282,21 @@ static inline unsigned short req_get_ioprio(struct request *req)
struct blk_queue_ctx;
-typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
-typedef int (prep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
struct bio_vec;
-typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
-typedef int (lld_busy_fn) (struct request_queue *q);
-typedef int (bsg_job_fn) (struct bsg_job *);
-typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
-typedef void (exit_rq_fn)(struct request_queue *, struct request *);
enum blk_eh_timer_return {
 BLK_EH_DONE, /* driver has completed the command */
BLK_EH_RESET_TIMER, /* reset timer and try again */
};
-typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
-
enum blk_queue_state {
Queue_down,
Queue_up,
};
-struct blk_queue_tag {
- struct request **tag_index; /* map of busy tags */
- unsigned long *tag_map; /* bit map of free/busy tags */
- int max_depth; /* what we will send to device */
- int real_max_depth; /* what the array can hold */
- atomic_t refcnt; /* map can be shared */
- int alloc_policy; /* tag allocation policy */
- int next_tag; /* next tag */
-};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
@@ -443,40 +394,15 @@ struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
- int nr_rqs[2]; /* # allocated [a]sync rqs */
- int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
- /*
- * If blkcg is not used, @q->root_rl serves all requests. If blkcg
- * is used, root blkg allocates from @q->root_rl and all other
- * blkgs from their own blkg->rl. Which one to use should be
- * determined using bio_request_list().
- */
- struct request_list root_rl;
-
- request_fn_proc *request_fn;
make_request_fn *make_request_fn;
- poll_q_fn *poll_fn;
- prep_rq_fn *prep_rq_fn;
- unprep_rq_fn *unprep_rq_fn;
- softirq_done_fn *softirq_done_fn;
- rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
- lld_busy_fn *lld_busy_fn;
- /* Called just after a request is allocated */
- init_rq_fn *init_rq_fn;
- /* Called just before a request is freed */
- exit_rq_fn *exit_rq_fn;
- /* Called from inside blk_get_request() */
- void (*initialize_rq_fn)(struct request *rq);
const struct blk_mq_ops *mq_ops;
- unsigned int *mq_map;
-
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
unsigned int nr_queues;
@@ -487,17 +413,6 @@ struct request_queue {
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
- /*
- * Dispatch queue sorting
- */
- sector_t end_sector;
- struct request *boundary_rq;
-
- /*
- * Delayed queue handling
- */
- struct delayed_work delay_work;
-
struct backing_dev_info *backing_dev_info;
/*
@@ -528,13 +443,7 @@ struct request_queue {
*/
gfp_t bounce_gfp;
- /*
- * protects queue structures from reentrancy. ->__queue_lock should
- * _never_ be used directly, it is queue private. always use
- * ->queue_lock.
- */
- spinlock_t __queue_lock;
- spinlock_t *queue_lock;
+ spinlock_t queue_lock;
/*
* queue kobject
@@ -544,7 +453,7 @@ struct request_queue {
/*
* mq queue kobject
*/
- struct kobject mq_kobj;
+ struct kobject *mq_kobj;
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity integrity;
@@ -560,27 +469,12 @@ struct request_queue {
* queue settings
*/
unsigned long nr_requests; /* Max # of requests */
- unsigned int nr_congestion_on;
- unsigned int nr_congestion_off;
- unsigned int nr_batching;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
- struct blk_queue_tag *queue_tags;
-
- unsigned int nr_sorted;
- unsigned int in_flight[2];
-
- /*
- * Number of active block driver functions for which blk_drain_queue()
- * must wait. Must be incremented around functions that unlock the
- * queue_lock internally, e.g. scsi_request_fn().
- */
- unsigned int request_fn_active;
-
unsigned int rq_timeout;
int poll_nsec;
@@ -589,7 +483,6 @@ struct request_queue {
struct timer_list timeout;
struct work_struct timeout_work;
- struct list_head timeout_list;
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
@@ -644,11 +537,9 @@ struct request_queue {
struct mutex sysfs_lock;
- int bypass_depth;
atomic_t mq_freeze_depth;
#if defined(CONFIG_BLK_DEV_BSG)
- bsg_job_fn *bsg_job_fn;
struct bsg_class_device bsg_dev;
#endif
@@ -668,12 +559,12 @@ struct request_queue {
#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
+ struct dentry *rqos_debugfs_dir;
#endif
bool mq_sysfs_init_done;
size_t cmd_size;
- void *rq_alloc_data;
struct work_struct release_work;
@@ -681,10 +572,8 @@ struct request_queue {
u64 write_hints[BLK_MAX_WRITE_HINTS];
};
-#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 1 /* queue is stopped */
#define QUEUE_FLAG_DYING 2 /* queue being torn down */
-#define QUEUE_FLAG_BYPASS 3 /* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */
@@ -717,19 +606,15 @@ struct request_queue {
(1 << QUEUE_FLAG_ADD_RANDOM))
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_POLL))
+ (1 << QUEUE_FLAG_SAME_COMP))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
-#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
-#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
@@ -756,32 +641,20 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
-static inline int queue_in_flight(struct request_queue *q)
-{
- return q->in_flight[0] + q->in_flight[1];
-}
-
static inline bool blk_account_rq(struct request *rq)
{
return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}
-#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
-/* rq->queuelist of dequeued request must be list_empty() */
-#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
-/*
- * Driver can handle struct request, if it either has an old style
- * request_fn defined, or is blk-mq based.
- */
-static inline bool queue_is_rq_based(struct request_queue *q)
+static inline bool queue_is_mq(struct request_queue *q)
{
- return q->request_fn || q->mq_ops;
+ return q->mq_ops;
}
static inline enum blk_zoned_model
@@ -839,27 +712,6 @@ static inline bool rq_is_sync(struct request *rq)
return op_is_sync(rq->cmd_flags);
}
-static inline bool blk_rl_full(struct request_list *rl, bool sync)
-{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- return rl->flags & flag;
-}
-
-static inline void blk_set_rl_full(struct request_list *rl, bool sync)
-{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- rl->flags |= flag;
-}
-
-static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
-{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- rl->flags &= ~flag;
-}
-
static inline bool rq_mergeable(struct request *rq)
{
if (blk_rq_is_passthrough(rq))
@@ -896,16 +748,6 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
return q->nr_requests;
}
-/*
- * q->prep_rq_fn return values
- */
-enum {
- BLKPREP_OK, /* serve it */
- BLKPREP_KILL, /* fatal error, kill, return -EIO */
- BLKPREP_DEFER, /* leave on queue */
- BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
-};
-
extern unsigned long blk_max_low_pfn, blk_max_pfn;
/*
@@ -977,10 +819,8 @@ extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
-extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
blk_mq_req_flags_t flags);
-extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -990,7 +830,6 @@ extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
-extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
@@ -1003,15 +842,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
-extern void blk_start_queue(struct request_queue *q);
-extern void blk_start_queue_async(struct request_queue *q);
-extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q);
-extern void __blk_run_queue_uncond(struct request_queue *q);
-extern void blk_run_queue(struct request_queue *);
-extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
@@ -1028,7 +859,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
-bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
@@ -1166,13 +997,6 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
return nr_bios;
}
-/*
- * Request issue related functions.
- */
-extern struct request *blk_peek_request(struct request_queue *q);
-extern void blk_start_request(struct request *rq);
-extern struct request *blk_fetch_request(struct request_queue *q);
-
void blk_steal_bios(struct bio_list *list, struct request *rq);
/*
@@ -1190,27 +1014,18 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
*/
extern bool blk_update_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
-extern void blk_finish_request(struct request *rq, blk_status_t error);
-extern bool blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, blk_status_t error);
extern bool __blk_end_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, blk_status_t error);
extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
-extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
-extern void blk_unprep_request(struct request *);
/*
* Access functions for manipulating queue properties
*/
-extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
- spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern int blk_init_allocated_queue(struct request_queue *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
@@ -1249,15 +1064,10 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size);
-extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
-extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
-extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
@@ -1293,8 +1103,7 @@ extern long nr_blockdev_pages(void);
bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
- spinlock_t *lock);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
@@ -1311,9 +1120,10 @@ extern void blk_set_queue_dying(struct request_queue *);
* schedule() where blk_schedule_flush_plug() is called.
*/
struct blk_plug {
- struct list_head list; /* requests */
struct list_head mq_list; /* blk-mq requests */
struct list_head cb_list; /* md requires an unplug callback */
+ unsigned short rq_count;
+ bool multiple_queues;
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
@@ -1352,31 +1162,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
struct blk_plug *plug = tsk->plug;
return plug &&
- (!list_empty(&plug->list) ||
- !list_empty(&plug->mq_list) ||
+ (!list_empty(&plug->mq_list) ||
!list_empty(&plug->cb_list));
}
-/*
- * tag stuff
- */
-extern int blk_queue_start_tag(struct request_queue *, struct request *);
-extern struct request *blk_queue_find_tag(struct request_queue *, int);
-extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
-extern void blk_queue_free_tags(struct request_queue *);
-extern int blk_queue_resize_tags(struct request_queue *, int);
-extern struct blk_queue_tag *blk_init_tags(int, int);
-extern void blk_free_tags(struct blk_queue_tag *);
-
-static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
- int tag)
-{
- if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
- return NULL;
- return bqt->tag_index[tag];
-}
-
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
@@ -1976,4 +1765,17 @@ static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
#endif /* CONFIG_BLOCK */
+static inline void blk_wake_io_task(struct task_struct *waiter)
+{
+ /*
+ * If we're polling, the task itself is doing the completions. For
+ * that case, we don't need to signal a wakeup, it's enough to just
+ * mark us as RUNNING.
+ */
+ if (waiter == current)
+ __set_current_state(TASK_RUNNING);
+ else
+ wake_up_process(waiter);
+}
+
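
blk_wake_io_task() avoids a full wakeup when the polling task is completing its own I/O. A hedged sketch of a completion handler that records the waiter and uses it; struct my_wait_data and my_end_io() are illustrative:

	struct my_wait_data {
		struct task_struct *waiter;
		bool done;
	};

	static void my_end_io(struct bio *bio)
	{
		struct my_wait_data *d = bio->bi_private;

		WRITE_ONCE(d->done, true);
		/* if waiter == current (polling), this only marks it RUNNING */
		blk_wake_io_task(d->waiter);
	}
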
#endif
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 33014ae73103..e734f163bd0b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,6 +23,7 @@ struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
+struct btf;
struct btf_type;
 /* map is generic key/value storage optionally accessible by eBPF programs */
@@ -52,6 +53,7 @@ struct bpf_map_ops {
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
int (*map_check_btf)(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
};
@@ -126,6 +128,7 @@ static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
}
int map_check_no_btf(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
@@ -268,15 +271,18 @@ struct bpf_prog_offload_ops {
int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int (*finalize)(struct bpf_verifier_env *env);
+ int (*prepare)(struct bpf_prog *prog);
+ int (*translate)(struct bpf_prog *prog);
+ void (*destroy)(struct bpf_prog *prog);
};
struct bpf_prog_offload {
struct bpf_prog *prog;
struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
void *dev_priv;
struct list_head offloads;
bool dev_state;
- const struct bpf_prog_offload_ops *dev_ops;
void *jited_image;
u32 jited_len;
};
@@ -293,9 +299,11 @@ struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
u32 max_ctx_offset;
+ u32 max_pkt_offset;
u32 stack_depth;
u32 id;
- u32 func_cnt;
+ u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
bool offload_requested;
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
@@ -312,6 +320,30 @@ struct bpf_prog_aux {
void *security;
#endif
struct bpf_prog_offload *offload;
+ struct btf *btf;
+ struct bpf_func_info *func_info;
+ /* bpf_line_info loaded from userspace. linfo->insn_off
+ * has the xlated insn offset.
+ * Both the main and sub prog share the same linfo.
+ * The subprog can access its first linfo by
+ * using the linfo_idx.
+ */
+ struct bpf_line_info *linfo;
+ /* jited_linfo is the jited addr of the linfo. It has a
+ * one to one mapping to linfo:
+ * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
+ * Both the main and sub prog share the same jited_linfo.
+ * The subprog can access its first jited_linfo by
+ * using the linfo_idx.
+ */
+ void **jited_linfo;
+ u32 func_info_cnt;
+ u32 nr_linfo;
+ /* subprog can use linfo_idx to access its first linfo and
+ * jited_linfo.
+ * main prog always has linfo_idx == 0
+ */
+ u32 linfo_idx;
union {
struct work_struct work;
struct rcu_head rcu;
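
As the comments describe, the main program and its subprograms share one linfo/jited_linfo array, with linfo_idx giving each (sub)program its starting offset. A hedged sketch of reaching the first line-info entry; first_linfo() is an illustrative helper, not part of this patch:

	static const struct bpf_line_info *first_linfo(const struct bpf_prog *prog)
	{
		const struct bpf_prog_aux *aux = prog->aux;

		if (!aux->linfo || !aux->nr_linfo)
			return NULL;
		/* main prog has linfo_idx == 0, subprogs start further in */
		return &aux->linfo[aux->linfo_idx];
	}
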
@@ -523,7 +555,8 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
}
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
+ union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
@@ -691,7 +724,8 @@ int bpf_map_offload_get_next_key(struct bpf_map *map,
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
-struct bpf_offload_dev *bpf_offload_dev_create(void);
+struct bpf_offload_dev *
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
struct net_device *netdev);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index d93e89761a8b..c233efc106c6 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -38,6 +38,7 @@ enum bpf_reg_liveness {
REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
+ REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */
};
struct bpf_reg_state {
@@ -203,6 +204,7 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
struct bpf_subprog_info {
u32 start; /* insn idx of function entry point */
+ u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
};
@@ -223,6 +225,7 @@ struct bpf_verifier_env {
bool allow_ptr_leaks;
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+ const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
u32 subprog_cnt;
@@ -245,7 +248,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
return cur_func(env)->regs;
}
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 949e9af8d9d6..9cd00a37b8d3 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -28,6 +28,7 @@
#define PHY_ID_BCM89610 0x03625cd0
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
#define PHY_ID_BCM7268 0xae025090
#define PHY_ID_BCM7271 0xae0253b0
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 6aeaf6472665..b356e0006731 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -31,6 +31,9 @@ struct device;
struct scatterlist;
struct request_queue;
+typedef int (bsg_job_fn) (struct bsg_job *);
+typedef enum blk_eh_timer_return (bsg_timeout_fn)(struct request *);
+
struct bsg_buffer {
unsigned int payload_len;
int sg_cnt;
@@ -72,7 +75,8 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, int dd_job_size);
+ bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size);
+void bsg_remove_queue(struct request_queue *q);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
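
bsg_setup_queue() now takes the timeout handler directly, and bsg_remove_queue() pairs with it for teardown. A hedged sketch of a transport driver wiring this up; the myhost_* names are illustrative:

	struct request_queue *q;

	q = bsg_setup_queue(dev, "myhost_bsg", myhost_bsg_job_fn,
			    myhost_bsg_timeout, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	/* on teardown */
	bsg_remove_queue(q);
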
diff --git a/include/linux/btf.h b/include/linux/btf.h
index e076c4697049..12502e25e767 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
struct btf;
+struct btf_member;
struct btf_type;
union bpf_attr;
@@ -46,5 +47,24 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m);
int btf_get_fd_by_id(u32 id);
u32 btf_id(const struct btf *btf);
+bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
+ const struct btf_member *m,
+ u32 expected_offset, u32 expected_size);
+
+#ifdef CONFIG_BPF_SYSCALL
+const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
+const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+#else
+static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
+ u32 type_id)
+{
+ return NULL;
+}
+static inline const char *btf_name_by_offset(const struct btf *btf,
+ u32 offset)
+{
+ return NULL;
+}
+#endif
#endif
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index a83e1f632eb7..f01623aef2f7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index cb31683bbe15..8268811a697e 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp);
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+ struct sk_buff *skb);
void can_rx_offload_reset(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 6b92b3395fa9..65a38c4a02a1 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
CEPH_FEATURE_CEPHX_V2)
-#define CEPH_FEATURES_REQUIRED_DEFAULT \
- (CEPH_FEATURE_NOSRCADDR | \
- CEPH_FEATURE_SUBSCRIBE2 | \
- CEPH_FEATURE_RECONNECT_SEQ | \
- CEPH_FEATURE_PGID64 | \
- CEPH_FEATURE_PGPOOL3 | \
- CEPH_FEATURE_OSDENC)
+#define CEPH_FEATURES_REQUIRED_DEFAULT 0
#endif
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 9d12757a65b0..9968332cceed 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -93,6 +93,8 @@ extern struct css_set init_css_set;
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 60c51871b04b..e443fa9fa859 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1,12 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * linux/include/linux/clk-provider.h
- *
* Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
@@ -601,6 +596,12 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
* @lock: register lock
*
* Clock with adjustable fractional divider affecting its output frequency.
+ *
+ * Flags:
+ * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+ * are the values read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+ * is set then the numerator and denominator are both the value read
+ * plus one.
*/
struct clk_fractional_divider {
struct clk_hw hw;
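
With CLK_FRAC_DIVIDER_ZERO_BASED set, the register stores (numerator - 1) and (denominator - 1), so a rate calculation has to add one to each raw field. A hedged sketch of that adjustment, assuming the divider's flags field and illustrative read_m()/read_n() register accessors:

	unsigned long m = read_m(fd);	/* raw numerator bitfield */
	unsigned long n = read_n(fd);	/* raw denominator bitfield */

	if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
		m++;			/* register holds value - 1 */
		n++;
	}
	rate = (unsigned long)div_u64((u64)parent_rate * m, n);
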
@@ -620,6 +621,8 @@ struct clk_fractional_divider {
#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
+#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
+
extern const struct clk_ops clk_fractional_divider_ops;
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index e0c362363c38..85f8cf9d1226 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <s.nawrocki@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/types.h>
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 88720b443cd6..056be0d03722 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -169,6 +169,10 @@ typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
+int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
+ sigset_t *set, sigset_t *oldset,
+ size_t sigsetsize);
+
struct compat_sigaction {
#ifndef __ARCH_HAS_IRIX_SIGACTION
compat_uptr_t sa_handler;
@@ -558,6 +562,12 @@ asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
struct io_event __user *events,
struct old_timespec32 __user *timeout,
const struct __compat_aio_sigset __user *usig);
+asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
+ struct io_event __user *events,
+ struct __kernel_timespec __user *timeout,
+ const struct __compat_aio_sigset __user *usig);
/* fs/cookies.c */
asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
@@ -643,11 +653,21 @@ asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *exp,
struct old_timespec32 __user *tsp,
void __user *sig);
+asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp,
+ compat_ulong_t __user *outp,
+ compat_ulong_t __user *exp,
+ struct __kernel_timespec __user *tsp,
+ void __user *sig);
asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
unsigned int nfds,
struct old_timespec32 __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
+asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds,
+ unsigned int nfds,
+ struct __kernel_timespec __user *tsp,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
/* fs/signalfd.c */
asmlinkage long compat_sys_signalfd4(int ufd,
@@ -768,6 +788,9 @@ asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
struct compat_siginfo __user *uinfo,
struct old_timespec32 __user *uts, compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese,
+ struct compat_siginfo __user *uinfo,
+ struct __kernel_timespec __user *uts, compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
/* No generic prototype for rt_sigreturn */
@@ -873,6 +896,9 @@ asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
+asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags,
+ struct __kernel_timespec __user *timeout);
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
struct old_timespec32 __user *timeout);
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index c0f5db3a9621..2010493e1040 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -143,18 +143,6 @@
#define KASAN_ABI_VERSION 3
#endif
-/*
- * Because __no_sanitize_address conflicts with inlining:
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * we do one or the other.
- */
-#ifdef CONFIG_KASAN
-#define __no_sanitize_address_or_inline \
- __no_sanitize_address __maybe_unused notrace
-#else
-#define __no_sanitize_address_or_inline inline
-#endif
-
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 18c80cfa4fc4..fc5004a4b07d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -99,13 +99,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
#define annotate_reachable() ({ \
- asm volatile("ANNOTATE_REACHABLE counter=%c0" \
- : : "i" (__COUNTER__)); \
+ asm volatile("%c0:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define annotate_unreachable() ({ \
- asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \
- : : "i" (__COUNTER__)); \
+ asm volatile("%c0:\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
})
+#define ASM_UNREACHABLE \
+ "999:\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long 999b - .\n\t" \
+ ".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
@@ -189,7 +198,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
-# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif
@@ -293,45 +302,6 @@ static inline void *offset_to_ptr(const int *off)
return (void *)((unsigned long)off + *off);
}
-#else /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-#ifndef LINKER_SCRIPT
-
-#ifdef CONFIG_STACK_VALIDATION
-.macro ANNOTATE_UNREACHABLE counter:req
-\counter:
- .pushsection .discard.unreachable
- .long \counter\()b -.
- .popsection
-.endm
-
-.macro ANNOTATE_REACHABLE counter:req
-\counter:
- .pushsection .discard.reachable
- .long \counter\()b -.
- .popsection
-.endm
-
-.macro ASM_UNREACHABLE
-999:
- .pushsection .discard.unreachable
- .long 999b - .
- .popsection
-.endm
-#else /* CONFIG_STACK_VALIDATION */
-.macro ANNOTATE_UNREACHABLE counter:req
-.endm
-
-.macro ANNOTATE_REACHABLE counter:req
-.endm
-
-.macro ASM_UNREACHABLE
-.endm
-#endif /* CONFIG_STACK_VALIDATION */
-
-#endif /* LINKER_SCRIPT */
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
/* Compile time object size, -1 for unknown */
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 6b28c1b7310c..fe07b680dd4a 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -4,22 +4,26 @@
/*
* The attributes in this file are unconditionally defined and they directly
- * map to compiler attribute(s) -- except those that are optional.
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
*
* Any other "attributes" (i.e. those that depend on a configuration option,
* on a compiler, on an architecture, on plugins, on other attributes...)
* should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
*
* This file is meant to be sorted (by actual attribute name,
* not by #define identifier). Use the __attribute__((__name__)) syntax
* (i.e. with underscores) to avoid future collisions with other macros.
- * If an attribute is optional, state the reason in the comment.
+ * Provide links to the documentation of each supported compiler, if it exists.
*/
/*
- * To check for optional attributes, we use __has_attribute, which is supported
- * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support
- * 4.6 <= gcc < 5, we implement __has_attribute by hand.
+ * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
+ * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * by hand.
*
* sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
* depending on the compiler used to build it; however, these attributes have
@@ -33,7 +37,6 @@
# define __GCC4_has_attribute___designated_init__ 0
# define __GCC4_has_attribute___externally_visible__ 1
# define __GCC4_has_attribute___noclone__ 1
-# define __GCC4_has_attribute___optimize__ 1
# define __GCC4_has_attribute___nonstring__ 0
# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
#endif
@@ -159,17 +162,11 @@
/*
* Optional: not supported by clang
- * Note: icc does not recognize gcc's no-tracer
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
- * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-optimize-function-attribute
*/
#if __has_attribute(__noclone__)
-# if __has_attribute(__optimize__)
-# define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
-# else
-# define __noclone __attribute__((__noclone__))
-# endif
+# define __noclone __attribute__((__noclone__))
#else
# define __noclone
#endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3439d7d0249a..ba814f18cb4c 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -104,59 +104,6 @@ struct ftrace_likely_data {
unsigned long constant;
};
-#endif /* __KERNEL__ */
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * The below symbols may be defined for one or more, but not ALL, of the above
- * compilers. We don't consider that to be an error, so set them to nothing.
- * For example, some of them are for compiler specific plugins.
- */
-#ifndef __latent_entropy
-# define __latent_entropy
-#endif
-
-#ifndef __randomize_layout
-# define __randomize_layout __designated_init
-#endif
-
-#ifndef __no_randomize_layout
-# define __no_randomize_layout
-#endif
-
-#ifndef randomized_struct_fields_start
-# define randomized_struct_fields_start
-# define randomized_struct_fields_end
-#endif
-
-/* Are two types/vars the same type (ignoring qualifiers)? */
-#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
-
-/* Is this type a native word size -- useful for atomic operations */
-#define __native_word(t) \
- (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
- sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-
-/* Helpers for emitting diagnostics in pragmas. */
-#ifndef __diag
-#define __diag(string)
-#endif
-
-#ifndef __diag_GCC
-#define __diag_GCC(version, severity, string)
-#endif
-
-#define __diag_push() __diag(push)
-#define __diag_pop() __diag(pop)
-
-#define __diag_ignore(compiler, version, option, comment) \
- __diag_ ## compiler(version, ignore, option)
-#define __diag_warn(compiler, version, option, comment) \
- __diag_ ## compiler(version, warn, option)
-#define __diag_error(compiler, version, option, comment) \
- __diag_ ## compiler(version, error, option)
-
#ifdef CONFIG_ENABLE_MUST_CHECK
#define __must_check __attribute__((__warn_unused_result__))
#else
@@ -211,4 +158,61 @@ struct ftrace_likely_data {
*/
#define noinline_for_stack noinline
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The below symbols may be defined for one or more, but not ALL, of the above
+ * compilers. We don't consider that to be an error, so set them to nothing.
+ * For example, some of them are for compiler specific plugins.
+ */
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
+#ifndef __randomize_layout
+# define __randomize_layout __designated_init
+#endif
+
+#ifndef __no_randomize_layout
+# define __no_randomize_layout
+#endif
+
+#ifndef randomized_struct_fields_start
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
+#endif
+
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+
+/* Is this type a native word size -- useful for atomic operations */
+#define __native_word(t) \
+ (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
+ sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+
+/* Helpers for emitting diagnostics in pragmas. */
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push() __diag(push)
+#define __diag_pop() __diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+ __diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+ __diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+ __diag_ ## compiler(version, error, option)
+
#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/cordic.h b/include/linux/cordic.h
index cf68ca4a508c..3d656f54d64f 100644
--- a/include/linux/cordic.h
+++ b/include/linux/cordic.h
@@ -18,6 +18,15 @@
#include <linux/types.h>
+#define CORDIC_ANGLE_GEN 39797
+#define CORDIC_PRECISION_SHIFT 16
+#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
+
+#define CORDIC_FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
+#define CORDIC_FLOAT(X) (((X) >= 0) \
+ ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
+ : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
+
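
CORDIC_FIXED()/CORDIC_FLOAT() convert between plain integers and the 16-bit fixed-point representation the generator works in, now that these constants live in the shared header. A small hedged sketch of the conversion, without assuming anything about the calculation API itself:

	s32 fixed = CORDIC_FIXED(45);	/* 45 << CORDIC_PRECISION_SHIFT */
	s32 back  = CORDIC_FLOAT(fixed);	/* rounds back to 45 */
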
/**
* struct cordic_iq - i/q coordinate.
*
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 882a9b9e34bc..c86d6d8bdfed 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -950,6 +950,14 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
}
#endif
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ struct cpufreq_governor *old_gov);
+#else
+static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ struct cpufreq_governor *old_gov) { }
+#endif
+
extern void arch_freq_prepare_all(void);
extern unsigned int arch_freq_get_on_cpu(int cpu);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e0cd2baa8380..fd586d0301e7 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -164,6 +164,8 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
+ CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
+ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index faed7a8977e8..4dff74f48d4b 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -33,6 +33,8 @@ struct cpuidle_state_usage {
unsigned long long disable;
unsigned long long usage;
unsigned long long time; /* in US */
+ unsigned long long above; /* Number of times it's been too deep */
+ unsigned long long below; /* Number of times it's been too shallow */
#ifdef CONFIG_SUSPEND
unsigned long long s2idle_usage;
unsigned long long s2idle_time; /* in US */
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 3634ad6fe202..902ec171fc6d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -49,7 +49,6 @@
#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
-#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
@@ -77,12 +76,6 @@
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
/*
- * This bit is set for symmetric key ciphers that have already been wrapped
- * with a generic IV generator to prevent them from being wrapped again.
- */
-#define CRYPTO_ALG_GENIV 0x00000200
-
-/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
@@ -157,7 +150,6 @@ struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
-struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -246,31 +238,16 @@ struct cipher_desc {
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
- * @givencrypt: Update the IV for encryption. With this function, a cipher
- * implementation may provide the function on how to update the IV
- * for encryption.
- * @givdecrypt: Update the IV for decryption. This is the reverse of
- * @givencrypt .
- * @geniv: The transformation implementation may use an "IV generator" provided
- * by the kernel crypto API. Several use cases have a predefined
- * approach how IVs are to be updated. For such use cases, the kernel
- * crypto API provides ready-to-use implementations that can be
- * referenced with this variable.
* @ivsize: IV size applicable for transformation. The consumer must provide an
* IV of exactly that size to perform the encrypt or decrypt operation.
*
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
+ * All fields except @ivsize are mandatory and must be filled.
*/
struct ablkcipher_alg {
int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct ablkcipher_request *req);
int (*decrypt)(struct ablkcipher_request *req);
- int (*givencrypt)(struct skcipher_givcrypt_request *req);
- int (*givdecrypt)(struct skcipher_givcrypt_request *req);
-
- const char *geniv;
unsigned int min_keysize;
unsigned int max_keysize;
@@ -284,10 +261,9 @@ struct ablkcipher_alg {
* @setkey: see struct ablkcipher_alg
* @encrypt: see struct ablkcipher_alg
* @decrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
* @ivsize: see struct ablkcipher_alg
*
- * All fields except @geniv and @ivsize are mandatory and must be filled.
+ * All fields except @ivsize are mandatory and must be filled.
*/
struct blkcipher_alg {
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
@@ -299,8 +275,6 @@ struct blkcipher_alg {
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes);
- const char *geniv;
-
unsigned int min_keysize;
unsigned int max_keysize;
unsigned int ivsize;
@@ -369,6 +343,115 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
+#ifdef CONFIG_CRYPTO_STATS
+/*
+ * struct crypto_istat_aead - statistics for AEAD algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for AEAD requests
+ */
+struct crypto_istat_aead {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_akcipher - statistics for akcipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @verify_cnt: number of verify operations
+ * @sign_cnt: number of sign requests
+ * @err_cnt: number of errors for akcipher requests
+ */
+struct crypto_istat_akcipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t verify_cnt;
+ atomic64_t sign_cnt;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_cipher - statistics for cipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for cipher requests
+ */
+struct crypto_istat_cipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_compress - statistics for compress algorithm
+ * @compress_cnt: number of compress requests
+ * @compress_tlen: total data size handled by compress requests
+ * @decompress_cnt: number of decompress requests
+ * @decompress_tlen: total data size handled by decompress requests
+ * @err_cnt: number of errors for compress requests
+ */
+struct crypto_istat_compress {
+ atomic64_t compress_cnt;
+ atomic64_t compress_tlen;
+ atomic64_t decompress_cnt;
+ atomic64_t decompress_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_hash - statistics for hash algorithm
+ * @hash_cnt: number of hash requests
+ * @hash_tlen: total data size hashed
+ * @err_cnt: number of errors for hash requests
+ */
+struct crypto_istat_hash {
+ atomic64_t hash_cnt;
+ atomic64_t hash_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_kpp - statistics for KPP algorithm
+ * @setsecret_cnt: number of setsecret operations
+ * @generate_public_key_cnt: number of generate_public_key operations
+ * @compute_shared_secret_cnt: number of compute_shared_secret operations
+ * @err_cnt: number of errors for KPP requests
+ */
+struct crypto_istat_kpp {
+ atomic64_t setsecret_cnt;
+ atomic64_t generate_public_key_cnt;
+ atomic64_t compute_shared_secret_cnt;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_rng - statistics for RNG algorithm
+ * @generate_cnt: number of RNG generate requests
+ * @generate_tlen: total data size of generated data by the RNG
+ * @seed_cnt: number of times the RNG was seeded
+ * @err_cnt: number of errors for RNG requests
+ */
+struct crypto_istat_rng {
+ atomic64_t generate_cnt;
+ atomic64_t generate_tlen;
+ atomic64_t seed_cnt;
+ atomic64_t err_cnt;
+};
+#endif /* CONFIG_CRYPTO_STATS */
#define cra_ablkcipher cra_u.ablkcipher
#define cra_blkcipher cra_u.blkcipher
@@ -454,32 +537,14 @@ struct compress_alg {
* @cra_refcnt: internally used
* @cra_destroy: internally used
*
- * All following statistics are for this crypto_alg
- * @encrypt_cnt: number of encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @compress_cnt: number of compress requests
- * @decompress_cnt: number of decompress requests
- * @generate_cnt: number of RNG generate requests
- * @seed_cnt: number of times the rng was seeded
- * @hash_cnt: number of hash requests
- * @sign_cnt: number of sign requests
- * @setsecret_cnt: number of setsecrey operation
- * @generate_public_key_cnt: number of generate_public_key operation
- * @verify_cnt: number of verify operation
- * @compute_shared_secret_cnt: number of compute_shared_secret operation
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @compress_tlen: total data size handled by compress requests
- * @decompress_tlen: total data size handled by decompress requests
- * @generate_tlen: total data size of generated data by the RNG
- * @hash_tlen: total data size hashed
- * @akcipher_err_cnt: number of error for akcipher requests
- * @cipher_err_cnt: number of error for akcipher requests
- * @compress_err_cnt: number of error for akcipher requests
- * @aead_err_cnt: number of error for akcipher requests
- * @hash_err_cnt: number of error for akcipher requests
- * @rng_err_cnt: number of error for akcipher requests
- * @kpp_err_cnt: number of error for akcipher requests
+ * @stats: union of all possible crypto_istat_xxx structures
+ * @stats.aead: statistics for AEAD algorithm
+ * @stats.akcipher: statistics for akcipher algorithm
+ * @stats.cipher: statistics for cipher algorithm
+ * @stats.compress: statistics for compress algorithm
+ * @stats.hash: statistics for hash algorithm
+ * @stats.rng: statistics for rng algorithm
+ * @stats.kpp: statistics for KPP algorithm
*
* The struct crypto_alg describes a generic Crypto API algorithm and is common
* for all of the transformations. Any variable not documented here shall not
@@ -515,46 +580,86 @@ struct crypto_alg {
struct module *cra_module;
+#ifdef CONFIG_CRYPTO_STATS
union {
- atomic_t encrypt_cnt;
- atomic_t compress_cnt;
- atomic_t generate_cnt;
- atomic_t hash_cnt;
- atomic_t setsecret_cnt;
- };
- union {
- atomic64_t encrypt_tlen;
- atomic64_t compress_tlen;
- atomic64_t generate_tlen;
- atomic64_t hash_tlen;
- };
- union {
- atomic_t akcipher_err_cnt;
- atomic_t cipher_err_cnt;
- atomic_t compress_err_cnt;
- atomic_t aead_err_cnt;
- atomic_t hash_err_cnt;
- atomic_t rng_err_cnt;
- atomic_t kpp_err_cnt;
- };
- union {
- atomic_t decrypt_cnt;
- atomic_t decompress_cnt;
- atomic_t seed_cnt;
- atomic_t generate_public_key_cnt;
- };
- union {
- atomic64_t decrypt_tlen;
- atomic64_t decompress_tlen;
- };
- union {
- atomic_t verify_cnt;
- atomic_t compute_shared_secret_cnt;
- };
- atomic_t sign_cnt;
+ struct crypto_istat_aead aead;
+ struct crypto_istat_akcipher akcipher;
+ struct crypto_istat_cipher cipher;
+ struct crypto_istat_compress compress;
+ struct crypto_istat_hash hash;
+ struct crypto_istat_rng rng;
+ struct crypto_istat_kpp kpp;
+ } stats;
+#endif /* CONFIG_CRYPTO_STATS */
} CRYPTO_MINALIGN_ATTR;
+#ifdef CONFIG_CRYPTO_STATS
+void crypto_stats_init(struct crypto_alg *alg);
+void crypto_stats_get(struct crypto_alg *alg);
+void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
+void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
+void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
+void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
+void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
+void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
+void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
+void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
+void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
+void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
+void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
+void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
+#else
+static inline void crypto_stats_init(struct crypto_alg *alg)
+{}
+static inline void crypto_stats_get(struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
+{}
+static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
+{}
+#endif
/*
* A helper struct for waiting for completion of async crypto ops
*/
@@ -800,14 +905,14 @@ static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
static inline u32 crypto_skcipher_type(u32 type)
{
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
return type;
}
static inline u32 crypto_skcipher_mask(u32 mask)
{
- mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ mask &= ~CRYPTO_ALG_TYPE_MASK;
mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
return mask;
}
@@ -973,38 +1078,6 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
return __crypto_ablkcipher_cast(req->base.tfm);
}
-static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
- } else {
- atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt);
- atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
- } else {
- atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt);
- atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen);
- }
-#endif
-}
-
/**
* crypto_ablkcipher_encrypt() - encrypt plaintext
* @req: reference to the ablkcipher_request handle that holds all information
@@ -1020,10 +1093,13 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+ struct crypto_alg *alg = crt->base->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+ crypto_stats_get(alg);
ret = crt->encrypt(req);
- crypto_stat_ablkcipher_encrypt(req, ret);
+ crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
return ret;
}
@@ -1042,10 +1118,13 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+ struct crypto_alg *alg = crt->base->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+ crypto_stats_get(alg);
ret = crt->decrypt(req);
- crypto_stat_ablkcipher_decrypt(req, ret);
+ crypto_stats_ablkcipher_decrypt(nbytes, ret, alg);
return ret;
}
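A usage sketch of the reworked statistics layout (assumes CONFIG_CRYPTO_STATS=y); the helper below is hypothetical and only shows that counters now live in the per-type stats union rather than in the previous loose unions of atomics.

#include <linux/atomic.h>
#include <linux/crypto.h>

/* Hypothetical helper: read how many encrypt requests a cipher has served. */
static u64 cipher_encrypt_count(struct crypto_alg *alg)
{
	return atomic64_read(&alg->stats.cipher.encrypt_cnt);
}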
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 450b28db9533..0dd316a74a29 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,6 +7,8 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>
+typedef unsigned long dax_entry_t;
+
struct iomap_ops;
struct dax_device;
struct dax_operations {
@@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
-bool dax_lock_mapping_entry(struct page *page);
-void dax_unlock_mapping_entry(struct page *page);
+dax_entry_t dax_lock_page(struct page *page);
+void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
int blocksize)
@@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
return -EOPNOTSUPP;
}
-static inline bool dax_lock_mapping_entry(struct page *page)
+static inline dax_entry_t dax_lock_page(struct page *page)
{
if (IS_DAX(page->mapping->host))
- return true;
- return false;
+ return ~0UL;
+ return 0;
}
-static inline void dax_unlock_mapping_entry(struct page *page)
+static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif
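A minimal usage sketch of the new cookie-based locking API (the surrounding function and the error code are hypothetical): dax_lock_page() returns 0 when the entry cannot be locked, and a non-zero cookie must be handed back to dax_unlock_page().

#include <linux/dax.h>
#include <linux/errno.h>

static int with_dax_page_locked(struct page *page)
{
	dax_entry_t cookie = dax_lock_page(page);

	if (!cookie)
		return -EBUSY;	/* illustrative error choice */

	/* ... inspect the page while the DAX entry is held ... */

	dax_unlock_page(page, cookie);
	return 0;
}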
diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h
deleted file mode 100644
index 92521471517f..000000000000
--- a/include/linux/dell-led.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DELL_LED_H__
-#define __DELL_LED_H__
-
-int dell_micmute_led_set(int on);
-
-#endif
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index e4963b0f45da..fbffa74bfc1b 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -131,6 +131,9 @@ struct devfreq_dev_profile {
* @scaling_min_freq: Limit minimum frequency requested by OPP interface
* @scaling_max_freq: Limit maximum frequency requested by OPP interface
* @stop_polling: devfreq polling status of a device.
+ * @suspend_freq: frequency of a device set during suspend phase.
+ * @resume_freq: frequency of a device set in resume phase.
+ * @suspend_count: suspend requests counter for a device.
* @total_trans: Number of devfreq transitions
* @trans_table: Statistics of devfreq transitions
* @time_in_state: Statistics of devfreq states
@@ -167,6 +170,10 @@ struct devfreq {
unsigned long scaling_max_freq;
bool stop_polling;
+ unsigned long suspend_freq;
+ unsigned long resume_freq;
+ atomic_t suspend_count;
+
/* information for device frequency transition */
unsigned int total_trans;
unsigned int *trans_table;
@@ -198,6 +205,9 @@ extern void devm_devfreq_remove_device(struct device *dev,
extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
+extern void devfreq_suspend(void);
+extern void devfreq_resume(void);
+
/**
* update_devfreq() - Reevaluate the device and configure frequency
* @devfreq: the devfreq device
@@ -324,6 +334,9 @@ static inline int devfreq_resume_device(struct devfreq *devfreq)
return 0;
}
+static inline void devfreq_suspend(void) {}
+static inline void devfreq_resume(void) {}
+
static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags)
{
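A sketch, assuming the usual system-wide PM flow, of where the new global hooks are intended to sit; the wrapper functions are hypothetical.

#include <linux/devfreq.h>

static int example_enter_system_suspend(void)
{
	/* Park every devfreq device (at suspend_freq where one is set). */
	devfreq_suspend();
	return 0;
}

static void example_leave_system_suspend(void)
{
	/* Restore resume_freq / normal DVFS operation. */
	devfreq_resume();
}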
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index 30213adbb6b9..2ad5c363d7d5 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -30,8 +30,6 @@ struct bus_type;
extern void dma_debug_add_bus(struct bus_type *bus);
-extern int dma_debug_resize_entries(u32 num_entries);
-
extern void debug_dma_map_single(struct device *dev, const void *addr,
unsigned long len);
@@ -72,17 +70,6 @@ extern void debug_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction);
-extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction);
-
-extern void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction);
-
extern void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nelems, int direction);
@@ -101,11 +88,6 @@ static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
-static inline int dma_debug_resize_entries(u32 num_entries)
-{
- return 0;
-}
-
static inline void debug_dma_map_single(struct device *dev, const void *addr,
unsigned long len)
{
@@ -174,22 +156,6 @@ static inline void debug_dma_sync_single_for_device(struct device *dev,
{
}
-static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction)
-{
-}
-
static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nelems, int direction)
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index bd73e7a91410..b7338702592a 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,8 +5,6 @@
#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>
-#define DIRECT_MAPPING_ERROR 0
-
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
@@ -50,14 +48,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return __sme_clr(__dma_to_phys(dev, daddr));
}
-#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
-void dma_mark_clean(void *addr, size_t size);
-#else
-static inline void dma_mark_clean(void *addr, size_t size)
-{
-}
-#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
-
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
@@ -67,11 +57,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir, unsigned long attrs);
+struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
+void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
int dma_direct_supported(struct device *dev, u64 mask);
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
#endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 02dba8cd033d..999e4b104410 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -541,6 +541,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
+struct dma_fence *dma_fence_get_stub(void);
u64 dma_fence_context_alloc(unsigned num);
#define DMA_FENCE_TRACE(f, fmt, args...) \
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index e8ca5e654277..e760dc5d1fa8 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -69,7 +69,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs);
-int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 15bd41447025..ba521d5506c9 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -128,13 +128,14 @@ struct dma_map_ops {
enum dma_data_direction dir);
void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
u64 (*get_required_mask)(struct device *dev);
};
-extern const struct dma_map_ops dma_direct_ops;
+#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
+
extern const struct dma_map_ops dma_virt_ops;
+extern const struct dma_map_ops dma_dummy_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -220,6 +221,69 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
}
#endif
+static inline bool dma_is_direct(const struct dma_map_ops *ops)
+{
+ return likely(!ops);
+}
+
+/*
+ * All the dma_direct_* declarations are here just for the indirect call bypass,
+ * and must not be used directly drivers!
+ */
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_direct_unmap_sg(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+}
+static inline void dma_direct_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction dir,
@@ -230,9 +294,12 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
BUG_ON(!valid_dma_direction(dir));
debug_dma_map_single(dev, ptr, size);
- addr = ops->map_page(dev, virt_to_page(ptr),
- offset_in_page(ptr), size,
- dir, attrs);
+ if (dma_is_direct(ops))
+ addr = dma_direct_map_page(dev, virt_to_page(ptr),
+ offset_in_page(ptr), size, dir, attrs);
+ else
+ addr = ops->map_page(dev, virt_to_page(ptr),
+ offset_in_page(ptr), size, dir, attrs);
debug_dma_map_page(dev, virt_to_page(ptr),
offset_in_page(ptr), size,
dir, addr, true);
@@ -247,11 +314,19 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
+ if (dma_is_direct(ops))
+ dma_direct_unmap_page(dev, addr, size, dir, attrs);
+ else if (ops->unmap_page)
ops->unmap_page(dev, addr, size, dir, attrs);
debug_dma_unmap_page(dev, addr, size, dir, true);
}
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return dma_unmap_single_attrs(dev, addr, size, dir, attrs);
+}
+
/*
* dma_maps_sg_attrs returns 0 on error and > 0 on success.
* It should never return a value < 0.
@@ -264,7 +339,10 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int ents;
BUG_ON(!valid_dma_direction(dir));
- ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ if (dma_is_direct(ops))
+ ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+ else
+ ents = ops->map_sg(dev, sg, nents, dir, attrs);
BUG_ON(ents < 0);
debug_dma_map_sg(dev, sg, nents, ents, dir);
@@ -279,7 +357,9 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
BUG_ON(!valid_dma_direction(dir));
debug_dma_unmap_sg(dev, sg, nents, dir);
- if (ops->unmap_sg)
+ if (dma_is_direct(ops))
+ dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+ else if (ops->unmap_sg)
ops->unmap_sg(dev, sg, nents, dir, attrs);
}
@@ -293,25 +373,15 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, page, offset, size, dir, attrs);
+ if (dma_is_direct(ops))
+ addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+ else
+ addr = ops->map_page(dev, page, offset, size, dir, attrs);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return addr;
}
-static inline void dma_unmap_page_attrs(struct device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
static inline dma_addr_t dma_map_resource(struct device *dev,
phys_addr_t phys_addr,
size_t size,
@@ -327,7 +397,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
addr = phys_addr;
- if (ops->map_resource)
+ if (ops && ops->map_resource)
addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
debug_dma_map_resource(dev, phys_addr, size, dir, addr);
@@ -342,7 +412,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_resource)
+ if (ops && ops->unmap_resource)
ops->unmap_resource(dev, addr, size, dir, attrs);
debug_dma_unmap_resource(dev, addr, size, dir);
}
@@ -354,11 +424,20 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
+ if (dma_is_direct(ops))
+ dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+ else if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(dev, addr, size, dir);
debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+}
+
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
@@ -366,37 +445,18 @@ static inline void dma_sync_single_for_device(struct device *dev,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
+ if (dma_is_direct(ops))
+ dma_direct_sync_single_for_device(dev, addr, size, dir);
+ else if (ops->sync_single_for_device)
ops->sync_single_for_device(dev, addr, size, dir);
debug_dma_sync_single_for_device(dev, addr, size, dir);
}
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-}
-
static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+ return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
static inline void
@@ -406,7 +466,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_cpu)
+ if (dma_is_direct(ops))
+ dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+ else if (ops->sync_sg_for_cpu)
ops->sync_sg_for_cpu(dev, sg, nelems, dir);
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
@@ -418,7 +480,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_device)
+ if (dma_is_direct(ops))
+ dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+ else if (ops->sync_sg_for_device)
ops->sync_sg_for_device(dev, sg, nelems, dir);
debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
@@ -431,16 +495,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->cache_sync)
- ops->cache_sync(dev, vaddr, size, dir);
-}
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir);
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
@@ -455,107 +511,29 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->mmap)
- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
+int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
+bool dma_in_atomic_pool(void *start, size_t size);
+void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
+bool dma_free_from_pool(void *start, size_t size);
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
dma_addr_t dma_addr, size_t size, unsigned long attrs);
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->get_sgtable)
- return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
- attrs);
- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
- attrs);
-}
-
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev) (true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- void *cpu_addr;
-
- BUG_ON(!ops);
- WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
-
- if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
- return cpu_addr;
-
- /* let the implementation decide on the zone to allocate from: */
- flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
- if (!arch_dma_alloc_attrs(&dev))
- return NULL;
- if (!ops->alloc)
- return NULL;
-
- cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
- return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!ops);
-
- if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
- return;
- /*
- * On non-coherent platforms which implement DMA-coherent buffers via
- * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
- * this far in IRQ context is a) at risk of a BUG_ON() or trying to
- * sleep on some machines, and b) an indication that the driver is
- * probably misusing the coherent API anyway.
- */
- WARN_ON(irqs_disabled());
-
- if (!ops->free || !cpu_addr)
- return;
-
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
@@ -573,43 +551,16 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
debug_dma_mapping_error(dev, dma_addr);
- if (ops->mapping_error)
- return ops->mapping_error(dev, dma_addr);
- return 0;
-}
-
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
- if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
- dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (!ops)
- return 0;
- if (!ops->dma_supported)
- return 1;
- return ops->dma_supported(dev, mask);
-}
-
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
- dma_check_mask(dev, mask);
-
- *dev->dma_mask = mask;
+ if (dma_addr == DMA_MAPPING_ERROR)
+ return -ENOMEM;
return 0;
}
-#endif
+
+int dma_supported(struct device *dev, u64 mask);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
static inline u64 dma_get_mask(struct device *dev)
{
@@ -618,21 +569,6 @@ static inline u64 dma_get_mask(struct device *dev)
return DMA_BIT_MASK(32);
}
-#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
-int dma_set_coherent_mask(struct device *dev, u64 mask);
-#else
-static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
- if (!dma_supported(dev, mask))
- return -EIO;
-
- dma_check_mask(dev, mask);
-
- dev->coherent_dma_mask = mask;
- return 0;
-}
-#endif
-
/*
* Set both the DMA mask and the coherent DMA mask to the same thing.
* Note that we don't check the return value from dma_set_coherent_mask()
@@ -676,8 +612,7 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline unsigned int dma_set_max_seg_size(struct device *dev,
- unsigned int size)
+static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
if (dev->dma_parms) {
dev->dma_parms->max_segment_size = size;
@@ -709,12 +644,13 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#endif
+/*
+ * Please always use dma_alloc_coherent instead as it already zeroes the memory!
+ */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- void *ret = dma_alloc_coherent(dev, size, dma_handle,
- flag | __GFP_ZERO);
- return ret;
+ return dma_alloc_coherent(dev, size, dma_handle, flag);
}
static inline int dma_get_cache_alignment(void)
@@ -796,7 +732,7 @@ static inline void dmam_release_declared_memory(struct device *dev)
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
- unsigned long attrs = DMA_ATTR_NO_WARN;
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
if (gfp & __GFP_NOWARN)
attrs |= DMA_ATTR_NO_WARN;
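A driver-side sketch (hypothetical function) showing that, with the single DMA_MAPPING_ERROR sentinel, mapping failures are still detected through dma_mapping_error() exactly as before.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_tx_buffer(struct device *dev, void *buf, size_t len,
				 dma_addr_t *dma)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* addr == DMA_MAPPING_ERROR */

	*dma = addr;
	return 0;
}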
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 9051b055beec..69b36ed31a99 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -38,7 +38,10 @@ pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#else
-#define arch_dma_cache_sync NULL
+static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
+ size_t size, enum dma_data_direction direction)
+{
+}
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -69,4 +72,6 @@ static inline void arch_sync_dma_for_cpu_all(struct device *dev)
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+void arch_dma_prep_coherent(struct page *page, size_t size);
+
#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 1d0c9ea8825d..342dabda9c7e 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -669,10 +669,4 @@ struct mem_ctl_info {
bool fake_inject_ue;
u16 fake_inject_count;
};
-
-/*
- * Maximum number of memory controllers in the coherent fabric.
- */
-#define EDAC_MAX_MCS 2 * MAX_NUMNODES
-
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 845174e113ce..becd5d76a207 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1000,13 +1000,11 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
-extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
extern void efi_find_mirror(void);
#else
-static inline void efi_free_boot_services(void) {}
static inline efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
@@ -1046,7 +1044,6 @@ extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
-extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params);
extern struct kobject *efi_kobj;
@@ -1167,6 +1164,8 @@ static inline bool efi_enabled(int feature)
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
extern bool efi_is_table_address(unsigned long phys_addr);
+
+extern int efi_apply_persistent_mem_reservations(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1185,6 +1184,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
{
return false;
}
+
+static inline int efi_apply_persistent_mem_reservations(void)
+{
+ return 0;
+}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -1708,9 +1712,19 @@ extern struct efi_runtime_work efi_rts_work;
extern struct workqueue_struct *efi_rts_wq;
struct linux_efi_memreserve {
- phys_addr_t next;
- phys_addr_t base;
- phys_addr_t size;
+ int size; // allocated size of the array
+ atomic_t count; // number of entries used
+ phys_addr_t next; // pa of next struct instance
+ struct {
+ phys_addr_t base;
+ phys_addr_t size;
+ } entry[0];
};
+#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \
+ (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+
+#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
+ / sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+
#endif /* _LINUX_EFI_H */
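Illustrative arithmetic for the new flexible-array layout (assuming a 64-bit build, an 8-byte phys_addr_t and no extra padding): the two helper macros convert between an allocation size and the number of reservation entries it can hold.

#include <linux/efi.h>
#include <linux/sizes.h>

static void memreserve_sizing_demo(void)
{
	/* Header is 16 bytes here, each entry 16 bytes: 16 + 8 * 16 = 144. */
	size_t bytes_for_eight = EFI_MEMRESERVE_SIZE(8);

	/* Conversely, a 4 KiB slab holds (4096 - 16) / 16 = 255 entries. */
	unsigned long entries_per_4k = EFI_MEMRESERVE_COUNT(SZ_4K);

	(void)bytes_for_eight;
	(void)entries_per_4k;
}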
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 015bb59c0331..2e9e2763bf47 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -23,74 +23,6 @@ enum elv_merge {
ELEVATOR_DISCARD_MERGE = 3,
};
-typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **,
- struct bio *);
-
-typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
-
-typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge);
-
-typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
- struct request *, struct bio *);
-
-typedef int (elevator_allow_rq_merge_fn) (struct request_queue *,
- struct request *, struct request *);
-
-typedef void (elevator_bio_merged_fn) (struct request_queue *,
- struct request *, struct bio *);
-
-typedef int (elevator_dispatch_fn) (struct request_queue *, int);
-
-typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
-typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
-typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
-
-typedef void (elevator_init_icq_fn) (struct io_cq *);
-typedef void (elevator_exit_icq_fn) (struct io_cq *);
-typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
- struct bio *, gfp_t);
-typedef void (elevator_put_req_fn) (struct request *);
-typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
-typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
-
-typedef int (elevator_init_fn) (struct request_queue *,
- struct elevator_type *e);
-typedef void (elevator_exit_fn) (struct elevator_queue *);
-typedef void (elevator_registered_fn) (struct request_queue *);
-
-struct elevator_ops
-{
- elevator_merge_fn *elevator_merge_fn;
- elevator_merged_fn *elevator_merged_fn;
- elevator_merge_req_fn *elevator_merge_req_fn;
- elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn;
- elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn;
- elevator_bio_merged_fn *elevator_bio_merged_fn;
-
- elevator_dispatch_fn *elevator_dispatch_fn;
- elevator_add_req_fn *elevator_add_req_fn;
- elevator_activate_req_fn *elevator_activate_req_fn;
- elevator_deactivate_req_fn *elevator_deactivate_req_fn;
-
- elevator_completed_req_fn *elevator_completed_req_fn;
-
- elevator_request_list_fn *elevator_former_req_fn;
- elevator_request_list_fn *elevator_latter_req_fn;
-
- elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */
- elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */
-
- elevator_set_req_fn *elevator_set_req_fn;
- elevator_put_req_fn *elevator_put_req_fn;
-
- elevator_may_queue_fn *elevator_may_queue_fn;
-
- elevator_init_fn *elevator_init_fn;
- elevator_exit_fn *elevator_exit_fn;
- elevator_registered_fn *elevator_registered_fn;
-};
-
struct blk_mq_alloc_data;
struct blk_mq_hw_ctx;
@@ -137,17 +69,14 @@ struct elevator_type
struct kmem_cache *icq_cache;
/* fields provided by elevator implementation */
- union {
- struct elevator_ops sq;
- struct elevator_mq_ops mq;
- } ops;
+ struct elevator_mq_ops ops;
+
size_t icq_size; /* see iocontext.h */
size_t icq_align; /* ditto */
struct elv_fs_entry *elevator_attrs;
char elevator_name[ELV_NAME_MAX];
const char *elevator_alias;
struct module *elevator_owner;
- bool uses_mq;
#ifdef CONFIG_BLK_DEBUG_FS
const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
@@ -175,40 +104,25 @@ struct elevator_queue
struct kobject kobj;
struct mutex sysfs_lock;
unsigned int registered:1;
- unsigned int uses_mq:1;
DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
};
/*
* block elevator interface
*/
-extern void elv_dispatch_sort(struct request_queue *, struct request *);
-extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
-extern void elv_add_request(struct request_queue *, struct request *, int);
-extern void __elv_add_request(struct request_queue *, struct request *, int);
extern enum elv_merge elv_merge(struct request_queue *, struct request **,
struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *,
enum elv_merge);
-extern void elv_bio_merged(struct request_queue *q, struct request *,
- struct bio *);
extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
-extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
-extern int elv_may_queue(struct request_queue *, unsigned int);
-extern void elv_completed_request(struct request_queue *, struct request *);
-extern int elv_set_request(struct request_queue *q, struct request *rq,
- struct bio *bio, gfp_t gfp_mask);
-extern void elv_put_request(struct request_queue *, struct request *);
-extern void elv_drain_elevator(struct request_queue *);
/*
* io scheduler registration
*/
-extern void __init load_default_elevator_module(void);
extern int elv_register(struct elevator_type *);
extern void elv_unregister(struct elevator_type *);
@@ -260,9 +174,5 @@ enum {
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
-#else /* CONFIG_BLOCK */
-
-static inline void load_default_elevator_module(void) { }
-
#endif /* CONFIG_BLOCK */
#endif
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
new file mode 100644
index 000000000000..aa027f7bcb3e
--- /dev/null
+++ b/include/linux/energy_model.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ENERGY_MODEL_H
+#define _LINUX_ENERGY_MODEL_H
+#include <linux/cpumask.h>
+#include <linux/jump_label.h>
+#include <linux/kobject.h>
+#include <linux/rcupdate.h>
+#include <linux/sched/cpufreq.h>
+#include <linux/sched/topology.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_ENERGY_MODEL
+/**
+ * em_cap_state - Capacity state of a performance domain
+ * @frequency: The CPU frequency in kHz, for consistency with CPUFreq
+ * @power: The power consumed by 1 CPU at this level, in milli-watts
+ * @cost: The cost coefficient associated with this level, used during
+ * energy calculation. Equal to: power * max_frequency / frequency
+ */
+struct em_cap_state {
+ unsigned long frequency;
+ unsigned long power;
+ unsigned long cost;
+};
+
+/**
+ * em_perf_domain - Performance domain
+ * @table: List of capacity states, in ascending order
+ * @nr_cap_states: Number of capacity states
+ * @cpus: Cpumask covering the CPUs of the domain
+ *
+ * A "performance domain" represents a group of CPUs whose performance is
+ * scaled together. All CPUs of a performance domain must have the same
+ * micro-architecture. Performance domains often have a 1-to-1 mapping with
+ * CPUFreq policies.
+ */
+struct em_perf_domain {
+ struct em_cap_state *table;
+ int nr_cap_states;
+ unsigned long cpus[0];
+};
+
+#define EM_CPU_MAX_POWER 0xFFFF
+
+struct em_data_callback {
+ /**
+ * active_power() - Provide power at the next capacity state of a CPU
+ * @power : Active power at the capacity state in mW (modified)
+ * @freq : Frequency at the capacity state in kHz (modified)
+ * @cpu : CPU for which we do this operation
+ *
+ * active_power() must find the lowest capacity state of 'cpu' above
+ * 'freq' and update 'power' and 'freq' to the matching active power
+ * and frequency.
+ *
+ * The power is the one of a single CPU in the domain, expressed in
+ * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER]
+ * range.
+ *
+ * Return 0 on success.
+ */
+ int (*active_power)(unsigned long *power, unsigned long *freq, int cpu);
+};
+#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
+
+struct em_perf_domain *em_cpu_get(int cpu);
+int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
+ struct em_data_callback *cb);
+
+/**
+ * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain
+ * @pd : performance domain for which energy has to be estimated
+ * @max_util : highest utilization among CPUs of the domain
+ * @sum_util : sum of the utilization of all CPUs in the domain
+ *
+ * Return: the sum of the energy consumed by the CPUs of the domain assuming
+ * a capacity state satisfying the max utilization of the domain.
+ */
+static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util)
+{
+ unsigned long freq, scale_cpu;
+ struct em_cap_state *cs;
+ int i, cpu;
+
+ /*
+ * In order to predict the capacity state, map the utilization of the
+ * most utilized CPU of the performance domain to a requested frequency,
+ * like schedutil.
+ */
+ cpu = cpumask_first(to_cpumask(pd->cpus));
+ scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+ cs = &pd->table[pd->nr_cap_states - 1];
+ freq = map_util_freq(max_util, cs->frequency, scale_cpu);
+
+ /*
+ * Find the lowest capacity state of the Energy Model above the
+ * requested frequency.
+ */
+ for (i = 0; i < pd->nr_cap_states; i++) {
+ cs = &pd->table[i];
+ if (cs->frequency >= freq)
+ break;
+ }
+
+ /*
+ * The capacity of a CPU in the domain at that capacity state (cs)
+ * can be computed as:
+ *
+ * cs->freq * scale_cpu
+ * cs->cap = -------------------- (1)
+ * cpu_max_freq
+ *
+ * So, ignoring the costs of idle states (which are not available in
+ * the EM), the energy consumed by this CPU at that capacity state is
+ * estimated as:
+ *
+ * cs->power * cpu_util
+ * cpu_nrg = -------------------- (2)
+ * cs->cap
+ *
+ * since 'cpu_util / cs->cap' represents its percentage of busy time.
+ *
+ * NOTE: Although the result of this computation actually is in
+ * units of power, it can be manipulated as an energy value
+ * over a scheduling period, since it is assumed to be
+ * constant during that interval.
+ *
+ * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
+ * of two terms:
+ *
+ * cs->power * cpu_max_freq cpu_util
+ * cpu_nrg = ------------------------ * --------- (3)
+ * cs->freq scale_cpu
+ *
+ * The first term is static, and is stored in the em_cap_state struct
+ * as 'cs->cost'.
+ *
+ * Since all CPUs of the domain have the same micro-architecture, they
+ * share the same 'cs->cost', and the same CPU capacity. Hence, the
+ * total energy of the domain (which is the simple sum of the energy of
+ * all of its CPUs) can be factorized as:
+ *
+ * cs->cost * \Sum cpu_util
+ * pd_nrg = ------------------------ (4)
+ * scale_cpu
+ */
+ return cs->cost * sum_util / scale_cpu;
+}
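As a hedged numeric illustration of equation (4) (numbers invented, not taken from the patch): if the selected state is the highest one, with power = 600 mW and frequency = max_frequency, then cs->cost = 600 * max_frequency / frequency = 600; with sum_util = 512 and scale_cpu = 1024, em_pd_energy() returns 600 * 512 / 1024 = 300, i.e. the domain is modelled as drawing half of that state's full-busy power over the scheduling period.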
+
+/**
+ * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain
+ * @pd : performance domain for which this must be done
+ *
+ * Return: the number of capacity states in the performance domain table
+ */
+static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+{
+ return pd->nr_cap_states;
+}
+
+#else
+struct em_perf_domain {};
+struct em_data_callback {};
+#define EM_DATA_CB(_active_power_cb) { }
+
+static inline int em_register_perf_domain(cpumask_t *span,
+ unsigned int nr_states, struct em_data_callback *cb)
+{
+ return -EINVAL;
+}
+static inline struct em_perf_domain *em_cpu_get(int cpu)
+{
+ return NULL;
+}
+static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util)
+{
+ return 0;
+}
+static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 572e11bb8696..2c0af7b00715 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -32,6 +32,7 @@
struct device;
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
unsigned char *arch_get_platform_mac_address(void);
+int nvmem_get_mac_address(struct device *dev, void *addrbuf);
u32 eth_get_headlen(void *data, unsigned int max_len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index a5a60691e48b..9e2142795335 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -37,10 +37,11 @@
/* Events that user can request to be notified on */
#define FANOTIFY_EVENTS (FAN_ACCESS | FAN_MODIFY | \
- FAN_CLOSE | FAN_OPEN)
+ FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC)
/* Events that require a permission response from user */
-#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM)
+#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
+ FAN_OPEN_EXEC_PERM)
/* Extra flags that may be reported with event or control handling of events */
#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index de629b706d1d..8c8544b375eb 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -449,6 +449,13 @@ struct sock_reuseport;
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
+#if BITS_PER_LONG == 64
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
+#else
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
+#endif /* BITS_PER_LONG == 64 */
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
@@ -668,24 +675,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size)
return size;
}
-static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
- u32 size_default)
-{
- size_default = bpf_ctx_off_adjust_machine(size_default);
- size_access = bpf_ctx_off_adjust_machine(size_access);
-
-#ifdef __LITTLE_ENDIAN
- return (off & (size_default - 1)) == 0;
-#else
- return (off & (size_default - 1)) + size_access == size_default;
-#endif
-}
-
static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
- return bpf_ctx_narrow_align_ok(off, size, size_default) &&
- size <= size_default && (size & (size - 1)) == 0;
+ return size <= size_default && (size & (size - 1)) == 0;
}
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
@@ -732,6 +725,13 @@ void bpf_prog_free(struct bpf_prog *fp);
bool bpf_opcode_in_insntable(u8 code);
+void bpf_prog_free_linfo(struct bpf_prog *prog);
+void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
+ const u32 *insn_to_jit_off);
+int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
+void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
+void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);
+
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
@@ -854,7 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
-extern int bpf_jit_limit;
+extern long bpf_jit_limit;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -866,6 +866,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
void bpf_jit_free(struct bpf_prog *fp);
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+ const struct bpf_insn *insn, bool extra_pass,
+ u64 *func_addr, bool *func_addr_fixed);
+
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 29ada609de03..ebc55098faee 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -14,4 +14,5 @@
#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/svc/misc.h>
+#include <linux/firmware/imx/svc/pm.h>
#endif /* _SC_SCI_H */
diff --git a/include/linux/firmware/imx/svc/pm.h b/include/linux/firmware/imx/svc/pm.h
new file mode 100644
index 000000000000..1f6975dd37b0
--- /dev/null
+++ b/include/linux/firmware/imx/svc/pm.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Power Management (PM) function. This includes functions for power state
+ * control, clock control, reset control, and wake-up event control.
+ *
+ * PM_SVC (SVC) Power Management Service
+ *
+ * Module for the Power Management (PM) service.
+ */
+
+#ifndef _SC_PM_API_H
+#define _SC_PM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC PM function calls.
+ */
+enum imx_sc_pm_func {
+ IMX_SC_PM_FUNC_UNKNOWN = 0,
+ IMX_SC_PM_FUNC_SET_SYS_POWER_MODE = 19,
+ IMX_SC_PM_FUNC_SET_PARTITION_POWER_MODE = 1,
+ IMX_SC_PM_FUNC_GET_SYS_POWER_MODE = 2,
+ IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE = 3,
+ IMX_SC_PM_FUNC_GET_RESOURCE_POWER_MODE = 4,
+ IMX_SC_PM_FUNC_REQ_LOW_POWER_MODE = 16,
+ IMX_SC_PM_FUNC_SET_CPU_RESUME_ADDR = 17,
+ IMX_SC_PM_FUNC_REQ_SYS_IF_POWER_MODE = 18,
+ IMX_SC_PM_FUNC_SET_CLOCK_RATE = 5,
+ IMX_SC_PM_FUNC_GET_CLOCK_RATE = 6,
+ IMX_SC_PM_FUNC_CLOCK_ENABLE = 7,
+ IMX_SC_PM_FUNC_SET_CLOCK_PARENT = 14,
+ IMX_SC_PM_FUNC_GET_CLOCK_PARENT = 15,
+ IMX_SC_PM_FUNC_RESET = 13,
+ IMX_SC_PM_FUNC_RESET_REASON = 10,
+ IMX_SC_PM_FUNC_BOOT = 8,
+ IMX_SC_PM_FUNC_REBOOT = 9,
+ IMX_SC_PM_FUNC_REBOOT_PARTITION = 12,
+ IMX_SC_PM_FUNC_CPU_START = 11,
+};
+
+/*
+ * Defines for ALL parameters
+ */
+#define IMX_SC_PM_CLK_ALL UINT8_MAX /* All clocks */
+
+/*
+ * Defines for SC PM Power Mode
+ */
+#define IMX_SC_PM_PW_MODE_OFF 0 /* Power off */
+#define IMX_SC_PM_PW_MODE_STBY 1 /* Power in standby */
+#define IMX_SC_PM_PW_MODE_LP 2 /* Power in low-power */
+#define IMX_SC_PM_PW_MODE_ON 3 /* Power on */
+
+/*
+ * Defines for SC PM CLK
+ */
+#define IMX_SC_PM_CLK_SLV_BUS 0 /* Slave bus clock */
+#define IMX_SC_PM_CLK_MST_BUS 1 /* Master bus clock */
+#define IMX_SC_PM_CLK_PER 2 /* Peripheral clock */
+#define IMX_SC_PM_CLK_PHY 3 /* Phy clock */
+#define IMX_SC_PM_CLK_MISC 4 /* Misc clock */
+#define IMX_SC_PM_CLK_MISC0 0 /* Misc 0 clock */
+#define IMX_SC_PM_CLK_MISC1 1 /* Misc 1 clock */
+#define IMX_SC_PM_CLK_MISC2 2 /* Misc 2 clock */
+#define IMX_SC_PM_CLK_MISC3 3 /* Misc 3 clock */
+#define IMX_SC_PM_CLK_MISC4 4 /* Misc 4 clock */
+#define IMX_SC_PM_CLK_CPU 2 /* CPU clock */
+#define IMX_SC_PM_CLK_PLL 4 /* PLL */
+#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */
+
+/*
+ * Defines for SC PM CLK Parent
+ */
+#define IMX_SC_PM_PARENT_XTAL 0 /* Parent is XTAL. */
+#define IMX_SC_PM_PARENT_PLL0 1 /* Parent is PLL0 */
+#define IMX_SC_PM_PARENT_PLL1 2 /* Parent is PLL1 or PLL0/2 */
+#define IMX_SC_PM_PARENT_PLL2 3 /* Parent in PLL2 or PLL0/4 */
+#define IMX_SC_PM_PARENT_BYPS 4 /* Parent is a bypass clock. */
+
+#endif /* _SC_PM_API_H */
diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h
index 9cbf0c4a6069..80821100e85f 100644
--- a/include/linux/firmware/imx/types.h
+++ b/include/linux/firmware/imx/types.h
@@ -10,558 +10,6 @@
#define _SC_TYPES_H
/*
- * This type is used to indicate a resource. Resources include peripherals
- * and bus masters (but not memory regions). Note items from list should
- * never be changed or removed (only added to at the end of the list).
- */
-enum imx_sc_rsrc {
- IMX_SC_R_A53 = 0,
- IMX_SC_R_A53_0 = 1,
- IMX_SC_R_A53_1 = 2,
- IMX_SC_R_A53_2 = 3,
- IMX_SC_R_A53_3 = 4,
- IMX_SC_R_A72 = 5,
- IMX_SC_R_A72_0 = 6,
- IMX_SC_R_A72_1 = 7,
- IMX_SC_R_A72_2 = 8,
- IMX_SC_R_A72_3 = 9,
- IMX_SC_R_CCI = 10,
- IMX_SC_R_DB = 11,
- IMX_SC_R_DRC_0 = 12,
- IMX_SC_R_DRC_1 = 13,
- IMX_SC_R_GIC_SMMU = 14,
- IMX_SC_R_IRQSTR_M4_0 = 15,
- IMX_SC_R_IRQSTR_M4_1 = 16,
- IMX_SC_R_SMMU = 17,
- IMX_SC_R_GIC = 18,
- IMX_SC_R_DC_0_BLIT0 = 19,
- IMX_SC_R_DC_0_BLIT1 = 20,
- IMX_SC_R_DC_0_BLIT2 = 21,
- IMX_SC_R_DC_0_BLIT_OUT = 22,
- IMX_SC_R_DC_0_CAPTURE0 = 23,
- IMX_SC_R_DC_0_CAPTURE1 = 24,
- IMX_SC_R_DC_0_WARP = 25,
- IMX_SC_R_DC_0_INTEGRAL0 = 26,
- IMX_SC_R_DC_0_INTEGRAL1 = 27,
- IMX_SC_R_DC_0_VIDEO0 = 28,
- IMX_SC_R_DC_0_VIDEO1 = 29,
- IMX_SC_R_DC_0_FRAC0 = 30,
- IMX_SC_R_DC_0_FRAC1 = 31,
- IMX_SC_R_DC_0 = 32,
- IMX_SC_R_GPU_2_PID0 = 33,
- IMX_SC_R_DC_0_PLL_0 = 34,
- IMX_SC_R_DC_0_PLL_1 = 35,
- IMX_SC_R_DC_1_BLIT0 = 36,
- IMX_SC_R_DC_1_BLIT1 = 37,
- IMX_SC_R_DC_1_BLIT2 = 38,
- IMX_SC_R_DC_1_BLIT_OUT = 39,
- IMX_SC_R_DC_1_CAPTURE0 = 40,
- IMX_SC_R_DC_1_CAPTURE1 = 41,
- IMX_SC_R_DC_1_WARP = 42,
- IMX_SC_R_DC_1_INTEGRAL0 = 43,
- IMX_SC_R_DC_1_INTEGRAL1 = 44,
- IMX_SC_R_DC_1_VIDEO0 = 45,
- IMX_SC_R_DC_1_VIDEO1 = 46,
- IMX_SC_R_DC_1_FRAC0 = 47,
- IMX_SC_R_DC_1_FRAC1 = 48,
- IMX_SC_R_DC_1 = 49,
- IMX_SC_R_GPU_3_PID0 = 50,
- IMX_SC_R_DC_1_PLL_0 = 51,
- IMX_SC_R_DC_1_PLL_1 = 52,
- IMX_SC_R_SPI_0 = 53,
- IMX_SC_R_SPI_1 = 54,
- IMX_SC_R_SPI_2 = 55,
- IMX_SC_R_SPI_3 = 56,
- IMX_SC_R_UART_0 = 57,
- IMX_SC_R_UART_1 = 58,
- IMX_SC_R_UART_2 = 59,
- IMX_SC_R_UART_3 = 60,
- IMX_SC_R_UART_4 = 61,
- IMX_SC_R_EMVSIM_0 = 62,
- IMX_SC_R_EMVSIM_1 = 63,
- IMX_SC_R_DMA_0_CH0 = 64,
- IMX_SC_R_DMA_0_CH1 = 65,
- IMX_SC_R_DMA_0_CH2 = 66,
- IMX_SC_R_DMA_0_CH3 = 67,
- IMX_SC_R_DMA_0_CH4 = 68,
- IMX_SC_R_DMA_0_CH5 = 69,
- IMX_SC_R_DMA_0_CH6 = 70,
- IMX_SC_R_DMA_0_CH7 = 71,
- IMX_SC_R_DMA_0_CH8 = 72,
- IMX_SC_R_DMA_0_CH9 = 73,
- IMX_SC_R_DMA_0_CH10 = 74,
- IMX_SC_R_DMA_0_CH11 = 75,
- IMX_SC_R_DMA_0_CH12 = 76,
- IMX_SC_R_DMA_0_CH13 = 77,
- IMX_SC_R_DMA_0_CH14 = 78,
- IMX_SC_R_DMA_0_CH15 = 79,
- IMX_SC_R_DMA_0_CH16 = 80,
- IMX_SC_R_DMA_0_CH17 = 81,
- IMX_SC_R_DMA_0_CH18 = 82,
- IMX_SC_R_DMA_0_CH19 = 83,
- IMX_SC_R_DMA_0_CH20 = 84,
- IMX_SC_R_DMA_0_CH21 = 85,
- IMX_SC_R_DMA_0_CH22 = 86,
- IMX_SC_R_DMA_0_CH23 = 87,
- IMX_SC_R_DMA_0_CH24 = 88,
- IMX_SC_R_DMA_0_CH25 = 89,
- IMX_SC_R_DMA_0_CH26 = 90,
- IMX_SC_R_DMA_0_CH27 = 91,
- IMX_SC_R_DMA_0_CH28 = 92,
- IMX_SC_R_DMA_0_CH29 = 93,
- IMX_SC_R_DMA_0_CH30 = 94,
- IMX_SC_R_DMA_0_CH31 = 95,
- IMX_SC_R_I2C_0 = 96,
- IMX_SC_R_I2C_1 = 97,
- IMX_SC_R_I2C_2 = 98,
- IMX_SC_R_I2C_3 = 99,
- IMX_SC_R_I2C_4 = 100,
- IMX_SC_R_ADC_0 = 101,
- IMX_SC_R_ADC_1 = 102,
- IMX_SC_R_FTM_0 = 103,
- IMX_SC_R_FTM_1 = 104,
- IMX_SC_R_CAN_0 = 105,
- IMX_SC_R_CAN_1 = 106,
- IMX_SC_R_CAN_2 = 107,
- IMX_SC_R_DMA_1_CH0 = 108,
- IMX_SC_R_DMA_1_CH1 = 109,
- IMX_SC_R_DMA_1_CH2 = 110,
- IMX_SC_R_DMA_1_CH3 = 111,
- IMX_SC_R_DMA_1_CH4 = 112,
- IMX_SC_R_DMA_1_CH5 = 113,
- IMX_SC_R_DMA_1_CH6 = 114,
- IMX_SC_R_DMA_1_CH7 = 115,
- IMX_SC_R_DMA_1_CH8 = 116,
- IMX_SC_R_DMA_1_CH9 = 117,
- IMX_SC_R_DMA_1_CH10 = 118,
- IMX_SC_R_DMA_1_CH11 = 119,
- IMX_SC_R_DMA_1_CH12 = 120,
- IMX_SC_R_DMA_1_CH13 = 121,
- IMX_SC_R_DMA_1_CH14 = 122,
- IMX_SC_R_DMA_1_CH15 = 123,
- IMX_SC_R_DMA_1_CH16 = 124,
- IMX_SC_R_DMA_1_CH17 = 125,
- IMX_SC_R_DMA_1_CH18 = 126,
- IMX_SC_R_DMA_1_CH19 = 127,
- IMX_SC_R_DMA_1_CH20 = 128,
- IMX_SC_R_DMA_1_CH21 = 129,
- IMX_SC_R_DMA_1_CH22 = 130,
- IMX_SC_R_DMA_1_CH23 = 131,
- IMX_SC_R_DMA_1_CH24 = 132,
- IMX_SC_R_DMA_1_CH25 = 133,
- IMX_SC_R_DMA_1_CH26 = 134,
- IMX_SC_R_DMA_1_CH27 = 135,
- IMX_SC_R_DMA_1_CH28 = 136,
- IMX_SC_R_DMA_1_CH29 = 137,
- IMX_SC_R_DMA_1_CH30 = 138,
- IMX_SC_R_DMA_1_CH31 = 139,
- IMX_SC_R_UNUSED1 = 140,
- IMX_SC_R_UNUSED2 = 141,
- IMX_SC_R_UNUSED3 = 142,
- IMX_SC_R_UNUSED4 = 143,
- IMX_SC_R_GPU_0_PID0 = 144,
- IMX_SC_R_GPU_0_PID1 = 145,
- IMX_SC_R_GPU_0_PID2 = 146,
- IMX_SC_R_GPU_0_PID3 = 147,
- IMX_SC_R_GPU_1_PID0 = 148,
- IMX_SC_R_GPU_1_PID1 = 149,
- IMX_SC_R_GPU_1_PID2 = 150,
- IMX_SC_R_GPU_1_PID3 = 151,
- IMX_SC_R_PCIE_A = 152,
- IMX_SC_R_SERDES_0 = 153,
- IMX_SC_R_MATCH_0 = 154,
- IMX_SC_R_MATCH_1 = 155,
- IMX_SC_R_MATCH_2 = 156,
- IMX_SC_R_MATCH_3 = 157,
- IMX_SC_R_MATCH_4 = 158,
- IMX_SC_R_MATCH_5 = 159,
- IMX_SC_R_MATCH_6 = 160,
- IMX_SC_R_MATCH_7 = 161,
- IMX_SC_R_MATCH_8 = 162,
- IMX_SC_R_MATCH_9 = 163,
- IMX_SC_R_MATCH_10 = 164,
- IMX_SC_R_MATCH_11 = 165,
- IMX_SC_R_MATCH_12 = 166,
- IMX_SC_R_MATCH_13 = 167,
- IMX_SC_R_MATCH_14 = 168,
- IMX_SC_R_PCIE_B = 169,
- IMX_SC_R_SATA_0 = 170,
- IMX_SC_R_SERDES_1 = 171,
- IMX_SC_R_HSIO_GPIO = 172,
- IMX_SC_R_MATCH_15 = 173,
- IMX_SC_R_MATCH_16 = 174,
- IMX_SC_R_MATCH_17 = 175,
- IMX_SC_R_MATCH_18 = 176,
- IMX_SC_R_MATCH_19 = 177,
- IMX_SC_R_MATCH_20 = 178,
- IMX_SC_R_MATCH_21 = 179,
- IMX_SC_R_MATCH_22 = 180,
- IMX_SC_R_MATCH_23 = 181,
- IMX_SC_R_MATCH_24 = 182,
- IMX_SC_R_MATCH_25 = 183,
- IMX_SC_R_MATCH_26 = 184,
- IMX_SC_R_MATCH_27 = 185,
- IMX_SC_R_MATCH_28 = 186,
- IMX_SC_R_LCD_0 = 187,
- IMX_SC_R_LCD_0_PWM_0 = 188,
- IMX_SC_R_LCD_0_I2C_0 = 189,
- IMX_SC_R_LCD_0_I2C_1 = 190,
- IMX_SC_R_PWM_0 = 191,
- IMX_SC_R_PWM_1 = 192,
- IMX_SC_R_PWM_2 = 193,
- IMX_SC_R_PWM_3 = 194,
- IMX_SC_R_PWM_4 = 195,
- IMX_SC_R_PWM_5 = 196,
- IMX_SC_R_PWM_6 = 197,
- IMX_SC_R_PWM_7 = 198,
- IMX_SC_R_GPIO_0 = 199,
- IMX_SC_R_GPIO_1 = 200,
- IMX_SC_R_GPIO_2 = 201,
- IMX_SC_R_GPIO_3 = 202,
- IMX_SC_R_GPIO_4 = 203,
- IMX_SC_R_GPIO_5 = 204,
- IMX_SC_R_GPIO_6 = 205,
- IMX_SC_R_GPIO_7 = 206,
- IMX_SC_R_GPT_0 = 207,
- IMX_SC_R_GPT_1 = 208,
- IMX_SC_R_GPT_2 = 209,
- IMX_SC_R_GPT_3 = 210,
- IMX_SC_R_GPT_4 = 211,
- IMX_SC_R_KPP = 212,
- IMX_SC_R_MU_0A = 213,
- IMX_SC_R_MU_1A = 214,
- IMX_SC_R_MU_2A = 215,
- IMX_SC_R_MU_3A = 216,
- IMX_SC_R_MU_4A = 217,
- IMX_SC_R_MU_5A = 218,
- IMX_SC_R_MU_6A = 219,
- IMX_SC_R_MU_7A = 220,
- IMX_SC_R_MU_8A = 221,
- IMX_SC_R_MU_9A = 222,
- IMX_SC_R_MU_10A = 223,
- IMX_SC_R_MU_11A = 224,
- IMX_SC_R_MU_12A = 225,
- IMX_SC_R_MU_13A = 226,
- IMX_SC_R_MU_5B = 227,
- IMX_SC_R_MU_6B = 228,
- IMX_SC_R_MU_7B = 229,
- IMX_SC_R_MU_8B = 230,
- IMX_SC_R_MU_9B = 231,
- IMX_SC_R_MU_10B = 232,
- IMX_SC_R_MU_11B = 233,
- IMX_SC_R_MU_12B = 234,
- IMX_SC_R_MU_13B = 235,
- IMX_SC_R_ROM_0 = 236,
- IMX_SC_R_FSPI_0 = 237,
- IMX_SC_R_FSPI_1 = 238,
- IMX_SC_R_IEE = 239,
- IMX_SC_R_IEE_R0 = 240,
- IMX_SC_R_IEE_R1 = 241,
- IMX_SC_R_IEE_R2 = 242,
- IMX_SC_R_IEE_R3 = 243,
- IMX_SC_R_IEE_R4 = 244,
- IMX_SC_R_IEE_R5 = 245,
- IMX_SC_R_IEE_R6 = 246,
- IMX_SC_R_IEE_R7 = 247,
- IMX_SC_R_SDHC_0 = 248,
- IMX_SC_R_SDHC_1 = 249,
- IMX_SC_R_SDHC_2 = 250,
- IMX_SC_R_ENET_0 = 251,
- IMX_SC_R_ENET_1 = 252,
- IMX_SC_R_MLB_0 = 253,
- IMX_SC_R_DMA_2_CH0 = 254,
- IMX_SC_R_DMA_2_CH1 = 255,
- IMX_SC_R_DMA_2_CH2 = 256,
- IMX_SC_R_DMA_2_CH3 = 257,
- IMX_SC_R_DMA_2_CH4 = 258,
- IMX_SC_R_USB_0 = 259,
- IMX_SC_R_USB_1 = 260,
- IMX_SC_R_USB_0_PHY = 261,
- IMX_SC_R_USB_2 = 262,
- IMX_SC_R_USB_2_PHY = 263,
- IMX_SC_R_DTCP = 264,
- IMX_SC_R_NAND = 265,
- IMX_SC_R_LVDS_0 = 266,
- IMX_SC_R_LVDS_0_PWM_0 = 267,
- IMX_SC_R_LVDS_0_I2C_0 = 268,
- IMX_SC_R_LVDS_0_I2C_1 = 269,
- IMX_SC_R_LVDS_1 = 270,
- IMX_SC_R_LVDS_1_PWM_0 = 271,
- IMX_SC_R_LVDS_1_I2C_0 = 272,
- IMX_SC_R_LVDS_1_I2C_1 = 273,
- IMX_SC_R_LVDS_2 = 274,
- IMX_SC_R_LVDS_2_PWM_0 = 275,
- IMX_SC_R_LVDS_2_I2C_0 = 276,
- IMX_SC_R_LVDS_2_I2C_1 = 277,
- IMX_SC_R_M4_0_PID0 = 278,
- IMX_SC_R_M4_0_PID1 = 279,
- IMX_SC_R_M4_0_PID2 = 280,
- IMX_SC_R_M4_0_PID3 = 281,
- IMX_SC_R_M4_0_PID4 = 282,
- IMX_SC_R_M4_0_RGPIO = 283,
- IMX_SC_R_M4_0_SEMA42 = 284,
- IMX_SC_R_M4_0_TPM = 285,
- IMX_SC_R_M4_0_PIT = 286,
- IMX_SC_R_M4_0_UART = 287,
- IMX_SC_R_M4_0_I2C = 288,
- IMX_SC_R_M4_0_INTMUX = 289,
- IMX_SC_R_M4_0_SIM = 290,
- IMX_SC_R_M4_0_WDOG = 291,
- IMX_SC_R_M4_0_MU_0B = 292,
- IMX_SC_R_M4_0_MU_0A0 = 293,
- IMX_SC_R_M4_0_MU_0A1 = 294,
- IMX_SC_R_M4_0_MU_0A2 = 295,
- IMX_SC_R_M4_0_MU_0A3 = 296,
- IMX_SC_R_M4_0_MU_1A = 297,
- IMX_SC_R_M4_1_PID0 = 298,
- IMX_SC_R_M4_1_PID1 = 299,
- IMX_SC_R_M4_1_PID2 = 300,
- IMX_SC_R_M4_1_PID3 = 301,
- IMX_SC_R_M4_1_PID4 = 302,
- IMX_SC_R_M4_1_RGPIO = 303,
- IMX_SC_R_M4_1_SEMA42 = 304,
- IMX_SC_R_M4_1_TPM = 305,
- IMX_SC_R_M4_1_PIT = 306,
- IMX_SC_R_M4_1_UART = 307,
- IMX_SC_R_M4_1_I2C = 308,
- IMX_SC_R_M4_1_INTMUX = 309,
- IMX_SC_R_M4_1_SIM = 310,
- IMX_SC_R_M4_1_WDOG = 311,
- IMX_SC_R_M4_1_MU_0B = 312,
- IMX_SC_R_M4_1_MU_0A0 = 313,
- IMX_SC_R_M4_1_MU_0A1 = 314,
- IMX_SC_R_M4_1_MU_0A2 = 315,
- IMX_SC_R_M4_1_MU_0A3 = 316,
- IMX_SC_R_M4_1_MU_1A = 317,
- IMX_SC_R_SAI_0 = 318,
- IMX_SC_R_SAI_1 = 319,
- IMX_SC_R_SAI_2 = 320,
- IMX_SC_R_IRQSTR_SCU2 = 321,
- IMX_SC_R_IRQSTR_DSP = 322,
- IMX_SC_R_UNUSED5 = 323,
- IMX_SC_R_UNUSED6 = 324,
- IMX_SC_R_AUDIO_PLL_0 = 325,
- IMX_SC_R_PI_0 = 326,
- IMX_SC_R_PI_0_PWM_0 = 327,
- IMX_SC_R_PI_0_PWM_1 = 328,
- IMX_SC_R_PI_0_I2C_0 = 329,
- IMX_SC_R_PI_0_PLL = 330,
- IMX_SC_R_PI_1 = 331,
- IMX_SC_R_PI_1_PWM_0 = 332,
- IMX_SC_R_PI_1_PWM_1 = 333,
- IMX_SC_R_PI_1_I2C_0 = 334,
- IMX_SC_R_PI_1_PLL = 335,
- IMX_SC_R_SC_PID0 = 336,
- IMX_SC_R_SC_PID1 = 337,
- IMX_SC_R_SC_PID2 = 338,
- IMX_SC_R_SC_PID3 = 339,
- IMX_SC_R_SC_PID4 = 340,
- IMX_SC_R_SC_SEMA42 = 341,
- IMX_SC_R_SC_TPM = 342,
- IMX_SC_R_SC_PIT = 343,
- IMX_SC_R_SC_UART = 344,
- IMX_SC_R_SC_I2C = 345,
- IMX_SC_R_SC_MU_0B = 346,
- IMX_SC_R_SC_MU_0A0 = 347,
- IMX_SC_R_SC_MU_0A1 = 348,
- IMX_SC_R_SC_MU_0A2 = 349,
- IMX_SC_R_SC_MU_0A3 = 350,
- IMX_SC_R_SC_MU_1A = 351,
- IMX_SC_R_SYSCNT_RD = 352,
- IMX_SC_R_SYSCNT_CMP = 353,
- IMX_SC_R_DEBUG = 354,
- IMX_SC_R_SYSTEM = 355,
- IMX_SC_R_SNVS = 356,
- IMX_SC_R_OTP = 357,
- IMX_SC_R_VPU_PID0 = 358,
- IMX_SC_R_VPU_PID1 = 359,
- IMX_SC_R_VPU_PID2 = 360,
- IMX_SC_R_VPU_PID3 = 361,
- IMX_SC_R_VPU_PID4 = 362,
- IMX_SC_R_VPU_PID5 = 363,
- IMX_SC_R_VPU_PID6 = 364,
- IMX_SC_R_VPU_PID7 = 365,
- IMX_SC_R_VPU_UART = 366,
- IMX_SC_R_VPUCORE = 367,
- IMX_SC_R_VPUCORE_0 = 368,
- IMX_SC_R_VPUCORE_1 = 369,
- IMX_SC_R_VPUCORE_2 = 370,
- IMX_SC_R_VPUCORE_3 = 371,
- IMX_SC_R_DMA_4_CH0 = 372,
- IMX_SC_R_DMA_4_CH1 = 373,
- IMX_SC_R_DMA_4_CH2 = 374,
- IMX_SC_R_DMA_4_CH3 = 375,
- IMX_SC_R_DMA_4_CH4 = 376,
- IMX_SC_R_ISI_CH0 = 377,
- IMX_SC_R_ISI_CH1 = 378,
- IMX_SC_R_ISI_CH2 = 379,
- IMX_SC_R_ISI_CH3 = 380,
- IMX_SC_R_ISI_CH4 = 381,
- IMX_SC_R_ISI_CH5 = 382,
- IMX_SC_R_ISI_CH6 = 383,
- IMX_SC_R_ISI_CH7 = 384,
- IMX_SC_R_MJPEG_DEC_S0 = 385,
- IMX_SC_R_MJPEG_DEC_S1 = 386,
- IMX_SC_R_MJPEG_DEC_S2 = 387,
- IMX_SC_R_MJPEG_DEC_S3 = 388,
- IMX_SC_R_MJPEG_ENC_S0 = 389,
- IMX_SC_R_MJPEG_ENC_S1 = 390,
- IMX_SC_R_MJPEG_ENC_S2 = 391,
- IMX_SC_R_MJPEG_ENC_S3 = 392,
- IMX_SC_R_MIPI_0 = 393,
- IMX_SC_R_MIPI_0_PWM_0 = 394,
- IMX_SC_R_MIPI_0_I2C_0 = 395,
- IMX_SC_R_MIPI_0_I2C_1 = 396,
- IMX_SC_R_MIPI_1 = 397,
- IMX_SC_R_MIPI_1_PWM_0 = 398,
- IMX_SC_R_MIPI_1_I2C_0 = 399,
- IMX_SC_R_MIPI_1_I2C_1 = 400,
- IMX_SC_R_CSI_0 = 401,
- IMX_SC_R_CSI_0_PWM_0 = 402,
- IMX_SC_R_CSI_0_I2C_0 = 403,
- IMX_SC_R_CSI_1 = 404,
- IMX_SC_R_CSI_1_PWM_0 = 405,
- IMX_SC_R_CSI_1_I2C_0 = 406,
- IMX_SC_R_HDMI = 407,
- IMX_SC_R_HDMI_I2S = 408,
- IMX_SC_R_HDMI_I2C_0 = 409,
- IMX_SC_R_HDMI_PLL_0 = 410,
- IMX_SC_R_HDMI_RX = 411,
- IMX_SC_R_HDMI_RX_BYPASS = 412,
- IMX_SC_R_HDMI_RX_I2C_0 = 413,
- IMX_SC_R_ASRC_0 = 414,
- IMX_SC_R_ESAI_0 = 415,
- IMX_SC_R_SPDIF_0 = 416,
- IMX_SC_R_SPDIF_1 = 417,
- IMX_SC_R_SAI_3 = 418,
- IMX_SC_R_SAI_4 = 419,
- IMX_SC_R_SAI_5 = 420,
- IMX_SC_R_GPT_5 = 421,
- IMX_SC_R_GPT_6 = 422,
- IMX_SC_R_GPT_7 = 423,
- IMX_SC_R_GPT_8 = 424,
- IMX_SC_R_GPT_9 = 425,
- IMX_SC_R_GPT_10 = 426,
- IMX_SC_R_DMA_2_CH5 = 427,
- IMX_SC_R_DMA_2_CH6 = 428,
- IMX_SC_R_DMA_2_CH7 = 429,
- IMX_SC_R_DMA_2_CH8 = 430,
- IMX_SC_R_DMA_2_CH9 = 431,
- IMX_SC_R_DMA_2_CH10 = 432,
- IMX_SC_R_DMA_2_CH11 = 433,
- IMX_SC_R_DMA_2_CH12 = 434,
- IMX_SC_R_DMA_2_CH13 = 435,
- IMX_SC_R_DMA_2_CH14 = 436,
- IMX_SC_R_DMA_2_CH15 = 437,
- IMX_SC_R_DMA_2_CH16 = 438,
- IMX_SC_R_DMA_2_CH17 = 439,
- IMX_SC_R_DMA_2_CH18 = 440,
- IMX_SC_R_DMA_2_CH19 = 441,
- IMX_SC_R_DMA_2_CH20 = 442,
- IMX_SC_R_DMA_2_CH21 = 443,
- IMX_SC_R_DMA_2_CH22 = 444,
- IMX_SC_R_DMA_2_CH23 = 445,
- IMX_SC_R_DMA_2_CH24 = 446,
- IMX_SC_R_DMA_2_CH25 = 447,
- IMX_SC_R_DMA_2_CH26 = 448,
- IMX_SC_R_DMA_2_CH27 = 449,
- IMX_SC_R_DMA_2_CH28 = 450,
- IMX_SC_R_DMA_2_CH29 = 451,
- IMX_SC_R_DMA_2_CH30 = 452,
- IMX_SC_R_DMA_2_CH31 = 453,
- IMX_SC_R_ASRC_1 = 454,
- IMX_SC_R_ESAI_1 = 455,
- IMX_SC_R_SAI_6 = 456,
- IMX_SC_R_SAI_7 = 457,
- IMX_SC_R_AMIX = 458,
- IMX_SC_R_MQS_0 = 459,
- IMX_SC_R_DMA_3_CH0 = 460,
- IMX_SC_R_DMA_3_CH1 = 461,
- IMX_SC_R_DMA_3_CH2 = 462,
- IMX_SC_R_DMA_3_CH3 = 463,
- IMX_SC_R_DMA_3_CH4 = 464,
- IMX_SC_R_DMA_3_CH5 = 465,
- IMX_SC_R_DMA_3_CH6 = 466,
- IMX_SC_R_DMA_3_CH7 = 467,
- IMX_SC_R_DMA_3_CH8 = 468,
- IMX_SC_R_DMA_3_CH9 = 469,
- IMX_SC_R_DMA_3_CH10 = 470,
- IMX_SC_R_DMA_3_CH11 = 471,
- IMX_SC_R_DMA_3_CH12 = 472,
- IMX_SC_R_DMA_3_CH13 = 473,
- IMX_SC_R_DMA_3_CH14 = 474,
- IMX_SC_R_DMA_3_CH15 = 475,
- IMX_SC_R_DMA_3_CH16 = 476,
- IMX_SC_R_DMA_3_CH17 = 477,
- IMX_SC_R_DMA_3_CH18 = 478,
- IMX_SC_R_DMA_3_CH19 = 479,
- IMX_SC_R_DMA_3_CH20 = 480,
- IMX_SC_R_DMA_3_CH21 = 481,
- IMX_SC_R_DMA_3_CH22 = 482,
- IMX_SC_R_DMA_3_CH23 = 483,
- IMX_SC_R_DMA_3_CH24 = 484,
- IMX_SC_R_DMA_3_CH25 = 485,
- IMX_SC_R_DMA_3_CH26 = 486,
- IMX_SC_R_DMA_3_CH27 = 487,
- IMX_SC_R_DMA_3_CH28 = 488,
- IMX_SC_R_DMA_3_CH29 = 489,
- IMX_SC_R_DMA_3_CH30 = 490,
- IMX_SC_R_DMA_3_CH31 = 491,
- IMX_SC_R_AUDIO_PLL_1 = 492,
- IMX_SC_R_AUDIO_CLK_0 = 493,
- IMX_SC_R_AUDIO_CLK_1 = 494,
- IMX_SC_R_MCLK_OUT_0 = 495,
- IMX_SC_R_MCLK_OUT_1 = 496,
- IMX_SC_R_PMIC_0 = 497,
- IMX_SC_R_PMIC_1 = 498,
- IMX_SC_R_SECO = 499,
- IMX_SC_R_CAAM_JR1 = 500,
- IMX_SC_R_CAAM_JR2 = 501,
- IMX_SC_R_CAAM_JR3 = 502,
- IMX_SC_R_SECO_MU_2 = 503,
- IMX_SC_R_SECO_MU_3 = 504,
- IMX_SC_R_SECO_MU_4 = 505,
- IMX_SC_R_HDMI_RX_PWM_0 = 506,
- IMX_SC_R_A35 = 507,
- IMX_SC_R_A35_0 = 508,
- IMX_SC_R_A35_1 = 509,
- IMX_SC_R_A35_2 = 510,
- IMX_SC_R_A35_3 = 511,
- IMX_SC_R_DSP = 512,
- IMX_SC_R_DSP_RAM = 513,
- IMX_SC_R_CAAM_JR1_OUT = 514,
- IMX_SC_R_CAAM_JR2_OUT = 515,
- IMX_SC_R_CAAM_JR3_OUT = 516,
- IMX_SC_R_VPU_DEC_0 = 517,
- IMX_SC_R_VPU_ENC_0 = 518,
- IMX_SC_R_CAAM_JR0 = 519,
- IMX_SC_R_CAAM_JR0_OUT = 520,
- IMX_SC_R_PMIC_2 = 521,
- IMX_SC_R_DBLOGIC = 522,
- IMX_SC_R_HDMI_PLL_1 = 523,
- IMX_SC_R_BOARD_R0 = 524,
- IMX_SC_R_BOARD_R1 = 525,
- IMX_SC_R_BOARD_R2 = 526,
- IMX_SC_R_BOARD_R3 = 527,
- IMX_SC_R_BOARD_R4 = 528,
- IMX_SC_R_BOARD_R5 = 529,
- IMX_SC_R_BOARD_R6 = 530,
- IMX_SC_R_BOARD_R7 = 531,
- IMX_SC_R_MJPEG_DEC_MP = 532,
- IMX_SC_R_MJPEG_ENC_MP = 533,
- IMX_SC_R_VPU_TS_0 = 534,
- IMX_SC_R_VPU_MU_0 = 535,
- IMX_SC_R_VPU_MU_1 = 536,
- IMX_SC_R_VPU_MU_2 = 537,
- IMX_SC_R_VPU_MU_3 = 538,
- IMX_SC_R_VPU_ENC_1 = 539,
- IMX_SC_R_VPU = 540,
- IMX_SC_R_LAST
-};
-
-/* NOTE - please add by replacing some of the UNUSED from above! */
-
-/*
* This type is used to indicate a control.
*/
enum imx_sc_ctrl {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c95c0807471f..6d52ce6af4ff 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1044,10 +1044,15 @@ bool opens_in_grace(struct net *);
* Obviously, the last two criteria only matter for POSIX locks.
*/
struct file_lock {
- struct file_lock *fl_next; /* singly linked list for this inode */
+ struct file_lock *fl_blocker; /* The lock, that is blocking us */
struct list_head fl_list; /* link into file_lock_context */
struct hlist_node fl_link; /* node in global lists */
- struct list_head fl_block; /* circular list of blocked processes */
+ struct list_head fl_blocked_requests; /* list of requests with
+ * ->fl_blocker pointing here
+ */
+ struct list_head fl_blocked_member; /* node in
+ * ->fl_blocker->fl_blocked_requests
+ */
fl_owner_t fl_owner;
unsigned int fl_flags;
unsigned char fl_type;
@@ -1119,7 +1124,7 @@ extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_unblock_lock(struct file_lock *);
+extern int locks_delete_block(struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
@@ -1209,7 +1214,7 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
return -ENOLCK;
}
-static inline int posix_unblock_lock(struct file_lock *waiter)
+static inline int locks_delete_block(struct file_lock *waiter)
{
return -ENOENT;
}
@@ -2021,7 +2026,7 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
.ki_filp = filp,
.ki_flags = iocb_flags(filp),
.ki_hint = ki_hint_validate(file_write_hint(filp)),
- .ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0),
+ .ki_ioprio = get_current_ioprio(),
};
}
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 34cf0fdd7dc7..610815e3f1aa 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
int n_pages)
{
- atomic_sub(n_pages, &op->n_pages);
- if (atomic_read(&op->n_pages) <= 0)
+ if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
fscache_op_complete(&op->op, false);
}
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index fd1ce10553bf..2ccb08cb5d6a 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -26,30 +26,46 @@ static inline int fsnotify_parent(const struct path *path, struct dentry *dentry
return __fsnotify_parent(path, dentry, mask);
}
-/* simple call site for access decisions */
+/*
+ * Simple wrapper to consolidate calls to fsnotify_parent()/fsnotify() when
+ * an event is on a path.
+ */
+static inline int fsnotify_path(struct inode *inode, const struct path *path,
+ __u32 mask)
+{
+ int ret = fsnotify_parent(path, NULL, mask);
+
+ if (ret)
+ return ret;
+ return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+}
+
+/* Simple call site for access decisions */
static inline int fsnotify_perm(struct file *file, int mask)
{
+ int ret;
const struct path *path = &file->f_path;
struct inode *inode = file_inode(file);
__u32 fsnotify_mask = 0;
- int ret;
if (file->f_mode & FMODE_NONOTIFY)
return 0;
if (!(mask & (MAY_READ | MAY_OPEN)))
return 0;
- if (mask & MAY_OPEN)
+ if (mask & MAY_OPEN) {
fsnotify_mask = FS_OPEN_PERM;
- else if (mask & MAY_READ)
- fsnotify_mask = FS_ACCESS_PERM;
- else
- BUG();
- ret = fsnotify_parent(path, NULL, fsnotify_mask);
- if (ret)
- return ret;
+ if (file->f_flags & __FMODE_EXEC) {
+ ret = fsnotify_path(inode, path, FS_OPEN_EXEC_PERM);
- return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ if (ret)
+ return ret;
+ }
+ } else if (mask & MAY_READ) {
+ fsnotify_mask = FS_ACCESS_PERM;
+ }
+
+ return fsnotify_path(inode, path, fsnotify_mask);
}
/*
@@ -180,10 +196,8 @@ static inline void fsnotify_access(struct file *file)
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ if (!(file->f_mode & FMODE_NONOTIFY))
+ fsnotify_path(inode, path, mask);
}
/*
@@ -198,10 +212,8 @@ static inline void fsnotify_modify(struct file *file)
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ if (!(file->f_mode & FMODE_NONOTIFY))
+ fsnotify_path(inode, path, mask);
}
/*
@@ -215,9 +227,10 @@ static inline void fsnotify_open(struct file *file)
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
+ if (file->f_flags & __FMODE_EXEC)
+ mask |= FS_OPEN_EXEC;
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ fsnotify_path(inode, path, mask);
}
/*
@@ -233,10 +246,8 @@ static inline void fsnotify_close(struct file *file)
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ if (!(file->f_mode & FMODE_NONOTIFY))
+ fsnotify_path(inode, path, mask);
}
/*
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 135b973e44d1..7639774e7475 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -38,6 +38,7 @@
#define FS_DELETE 0x00000200 /* Subfile was deleted */
#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
#define FS_MOVE_SELF 0x00000800 /* Self was moved */
+#define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
@@ -45,6 +46,7 @@
#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
+#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
#define FS_ISDIR 0x40000000 /* event occurred against dir */
@@ -62,11 +64,13 @@
#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
- FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM)
+ FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM | \
+ FS_OPEN_EXEC | FS_OPEN_EXEC_PERM)
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
-#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM)
+#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
+ FS_OPEN_EXEC_PERM)
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
@@ -74,7 +78,8 @@
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
- FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME)
+ FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME | \
+ FS_OPEN_EXEC | FS_OPEN_EXEC_PERM)
/* Extra flags that may be reported with event or control handling of events */
#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a397907e8d72..5c990e891d6a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -420,6 +420,9 @@ enum {
};
void arch_ftrace_update_code(int command);
+void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
+void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
+void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
struct ftrace_rec_iter;
@@ -777,8 +780,8 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);
extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
- unsigned long frame_pointer, unsigned long *retp);
+function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp);
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 821ae502d3d8..ccaef0097785 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -9,9 +9,6 @@ struct inode;
struct mm_struct;
struct task_struct;
-extern int
-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
-
/*
* Futexes are matched on equal values of this key.
* The key type depends on whether it's a shared or private mapping.
@@ -55,11 +52,6 @@ extern void exit_robust_list(struct task_struct *curr);
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
-#define futex_cmpxchg_enabled 1
-#else
-extern int futex_cmpxchg_enabled;
-#endif
#else
static inline void exit_robust_list(struct task_struct *curr)
{
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 70fc838e6773..06c0fd594097 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -17,6 +17,7 @@
#include <linux/percpu-refcount.h>
#include <linux/uuid.h>
#include <linux/blk_types.h>
+#include <asm/local.h>
#ifdef CONFIG_BLOCK
@@ -89,6 +90,7 @@ struct disk_stats {
unsigned long merges[NR_STAT_GROUPS];
unsigned long io_ticks;
unsigned long time_in_queue;
+ local_t in_flight[2];
};
#define PARTITION_META_INFO_VOLNAMELTH 64
@@ -122,14 +124,13 @@ struct hd_struct {
int make_it_fail;
#endif
unsigned long stamp;
- atomic_t in_flight[2];
#ifdef CONFIG_SMP
struct disk_stats __percpu *dkstats;
#else
struct disk_stats dkstats;
#endif
struct percpu_ref ref;
- struct rcu_head rcu_head;
+ struct rcu_work rcu_work;
};
#define GENHD_FL_REMOVABLE 1
@@ -295,8 +296,11 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
-#define __part_stat_add(cpu, part, field, addnd) \
- (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
+#define part_stat_get_cpu(part, field, cpu) \
+ (per_cpu_ptr((part)->dkstats, (cpu))->field)
+
+#define part_stat_get(part, field) \
+ part_stat_get_cpu(part, field, smp_processor_id())
#define part_stat_read(part, field) \
({ \
@@ -333,10 +337,9 @@ static inline void free_part_stats(struct hd_struct *part)
#define part_stat_lock() ({ rcu_read_lock(); 0; })
#define part_stat_unlock() rcu_read_unlock()
-#define __part_stat_add(cpu, part, field, addnd) \
- ((part)->dkstats.field += addnd)
-
-#define part_stat_read(part, field) ((part)->dkstats.field)
+#define part_stat_get(part, field) ((part)->dkstats.field)
+#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field)
+#define part_stat_read(part, field) part_stat_get(part, field)
static inline void part_stat_set_all(struct hd_struct *part, int value)
{
@@ -362,22 +365,33 @@ static inline void free_part_stats(struct hd_struct *part)
part_stat_read(part, field[STAT_WRITE]) + \
part_stat_read(part, field[STAT_DISCARD]))
-#define part_stat_add(cpu, part, field, addnd) do { \
- __part_stat_add((cpu), (part), field, addnd); \
+#define __part_stat_add(part, field, addnd) \
+ (part_stat_get(part, field) += (addnd))
+
+#define part_stat_add(part, field, addnd) do { \
+ __part_stat_add((part), field, addnd); \
if ((part)->partno) \
- __part_stat_add((cpu), &part_to_disk((part))->part0, \
+ __part_stat_add(&part_to_disk((part))->part0, \
field, addnd); \
} while (0)
-#define part_stat_dec(cpu, gendiskp, field) \
- part_stat_add(cpu, gendiskp, field, -1)
-#define part_stat_inc(cpu, gendiskp, field) \
- part_stat_add(cpu, gendiskp, field, 1)
-#define part_stat_sub(cpu, gendiskp, field, subnd) \
- part_stat_add(cpu, gendiskp, field, -subnd)
-
-void part_in_flight(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2]);
+#define part_stat_dec(gendiskp, field) \
+ part_stat_add(gendiskp, field, -1)
+#define part_stat_inc(gendiskp, field) \
+ part_stat_add(gendiskp, field, 1)
+#define part_stat_sub(gendiskp, field, subnd) \
+ part_stat_add(gendiskp, field, -subnd)
+
+#define part_stat_local_dec(gendiskp, field) \
+ local_dec(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_inc(gendiskp, field) \
+ local_inc(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_read(gendiskp, field) \
+ local_read(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_read_cpu(gendiskp, field, cpu) \
+ local_read(&(part_stat_get_cpu(gendiskp, field, cpu)))
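A hedged usage sketch of the new local_t based in-flight accounting (the call site and the 'part'/'rw' locals below are assumed for illustration and are not part of this hunk):

/* Illustrative only: account one request in flight on the current CPU. */
part_stat_lock();
part_stat_local_inc(part, in_flight[rw]);
part_stat_unlock();

/* ... and drop it again on completion: */
part_stat_lock();
part_stat_local_dec(part, in_flight[rw]);
part_stat_unlock();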
+
+unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part);
void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2]);
void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -398,8 +412,7 @@ static inline void free_part_info(struct hd_struct *part)
kfree(part->info);
}
-/* block/blk-core.c */
-extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
+void update_io_ticks(struct hd_struct *part, unsigned long now);
/* block/genhd.c */
extern void device_add_disk(struct device *parent, struct gendisk *disk,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 76f8db0b0e71..0705164f928c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -510,18 +510,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
- int node);
+ int node, bool hugepage);
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+ alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
+ alloc_pages(gfp_mask, order)
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
+ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index f2f887795d43..8aebcf822082 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -104,6 +104,7 @@ struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
+void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc);
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
int gpiod_get_direction(struct gpio_desc *desc);
@@ -172,6 +173,10 @@ int desc_to_gpio(const struct gpio_desc *desc);
struct device_node;
struct fwnode_handle;
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct device_node *node,
const char *propname, int index,
@@ -245,6 +250,15 @@ static inline void gpiod_put(struct gpio_desc *desc)
WARN_ON(1);
}
+static inline void devm_gpiod_unhinge(struct device *dev,
+ struct gpio_desc *desc)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline void gpiod_put_array(struct gpio_descs *descs)
{
might_sleep();
@@ -518,6 +532,15 @@ struct device_node;
struct fwnode_handle;
static inline
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct device_node *node,
const char *propname, int index,
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 4f3febc0f971..d2bacf502429 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -163,6 +163,9 @@ struct hdmi_avi_infoframe {
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
enum hdmi_spd_sdi {
HDMI_SPD_SDI_UNKNOWN,
@@ -194,6 +197,9 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product);
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame);
enum hdmi_audio_coding_type {
HDMI_AUDIO_CODING_TYPE_STREAM,
@@ -272,6 +278,9 @@ struct hdmi_audio_infoframe {
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
@@ -299,6 +308,9 @@ struct hdmi_vendor_infoframe {
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame);
union hdmi_vendor_any_infoframe {
struct {
@@ -330,10 +342,14 @@ union hdmi_infoframe {
struct hdmi_audio_infoframe audio;
};
-ssize_t
-hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
-int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
+ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_infoframe_check(union hdmi_infoframe *frame);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+ const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
- union hdmi_infoframe *frame);
+ const union hdmi_infoframe *frame);
#endif /* _DRM_HDMI_H */
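A minimal sketch of the split check/pack flow the new *_pack_only()/*_check() helpers enable (the buffer-size macro and the field setup are assumptions for illustration, not part of this hunk):

struct hdmi_avi_infoframe frame;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];	/* assumed pre-existing size macro */
int ret;

hdmi_avi_infoframe_init(&frame);
/* ... fill in colorimetry, video code, etc. for the current mode ... */
ret = hdmi_avi_infoframe_check(&frame);
if (!ret)
	ret = hdmi_avi_infoframe_pack_only(&frame, buf, sizeof(buf));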
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 331dc377c275..dc12f5c4b076 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @attr_usage_id: Attribute usage id as per spec
* @report_id: Report id to look for
* @flag: Synchronous or asynchronous read
+* @is_signed: If true then fields < 32 bits will be sign-extended
*
* Issues a synchronous or asynchronous read request for an input attribute.
* Returns data up to 32 bits.
@@ -190,7 +191,8 @@ enum sensor_hub_read_flags {
int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
u32 attr_usage_id, u32 report_id,
- enum sensor_hub_read_flags flag
+ enum sensor_hub_read_flags flag,
+ bool is_signed
);
/**
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 2827b87590d8..a355d61940f2 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -722,8 +722,8 @@ struct hid_usage_id {
* input will not be passed to raw_event unless hid_device_io_start is
* called.
*
- * raw_event and event should return 0 on no action performed, 1 when no
- * further processing should be done and negative on error
+ * raw_event and event should return negative on error; any other value will
+ * pass the event on to .event(). Typically, return 0 for success.
*
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
@@ -1139,34 +1139,6 @@ static inline u32 hid_report_len(struct hid_report *report)
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
-
-/**
- * struct hid_scroll_counter - Utility class for processing high-resolution
- * scroll events.
- * @dev: the input device for which events should be reported.
- * @microns_per_hi_res_unit: the amount moved by the user's finger for each
- * high-resolution unit reported by the mouse, in
- * microns.
- * @resolution_multiplier: the wheel's resolution in high-resolution mode as a
- * multiple of its lower resolution. For example, if
- * moving the wheel by one "notch" would result in a
- * value of 1 in low-resolution mode but 8 in
- * high-resolution, the multiplier is 8.
- * @remainder: counts the number of high-resolution units moved since the last
- * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
- * only be used by class methods.
- */
-struct hid_scroll_counter {
- struct input_dev *dev;
- int microns_per_hi_res_unit;
- int resolution_multiplier;
-
- int remainder;
-};
-
-void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
- int hi_res_value);
-
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
int hid_quirks_init(char **quirks_param, __u16 bus, int count);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 3892e9c8b2de..2e8957eac4d4 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * include/linux/hrtimer.h
- *
* hrtimers - High-resolution kernel timers
*
* Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
@@ -9,8 +8,6 @@
* data type definitions, declarations, prototypes
*
* Started by: Thomas Gleixner and Ingo Molnar
- *
- * For licencing details see kernel-base/COPYING
*/
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b3e24368930a..14131b6fae68 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -905,6 +905,13 @@ struct vmbus_channel {
bool probe_done;
+ /*
+ * We must offload the handling of the primary/sub channels
+ * from the single-threaded vmbus_connection.work_queue to
+ * two different workqueues, otherwise we can block
+ * vmbus_connection.work_queue and hang: see vmbus_process_offer().
+ */
+ struct work_struct add_channel_work;
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
new file mode 100644
index 000000000000..73b0982cc519
--- /dev/null
+++ b/include/linux/i3c/ccc.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_CCC_H
+#define I3C_CCC_H
+
+#include <linux/bitops.h>
+#include <linux/i3c/device.h>
+
+/* I3C CCC (Common Command Codes) related definitions */
+#define I3C_CCC_DIRECT BIT(7)
+
+#define I3C_CCC_ID(id, broadcast) \
+ ((id) | ((broadcast) ? 0 : I3C_CCC_DIRECT))
+
+/* Commands valid in both broadcast and unicast modes */
+#define I3C_CCC_ENEC(broadcast) I3C_CCC_ID(0x0, broadcast)
+#define I3C_CCC_DISEC(broadcast) I3C_CCC_ID(0x1, broadcast)
+#define I3C_CCC_ENTAS(as, broadcast) I3C_CCC_ID(0x2 + (as), broadcast)
+#define I3C_CCC_RSTDAA(broadcast) I3C_CCC_ID(0x6, broadcast)
+#define I3C_CCC_SETMWL(broadcast) I3C_CCC_ID(0x9, broadcast)
+#define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast)
+#define I3C_CCC_SETXTIME(broadcast) ((broadcast) ? 0x28 : 0x98)
+#define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 0x61 : 0xe0))
+
+/* Broadcast-only commands */
+#define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true)
+#define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true)
+#define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true)
+#define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true)
+
+/* Unicast-only commands */
+#define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false)
+#define I3C_CCC_SETNEWDA I3C_CCC_ID(0x8, false)
+#define I3C_CCC_GETMWL I3C_CCC_ID(0xb, false)
+#define I3C_CCC_GETMRL I3C_CCC_ID(0xc, false)
+#define I3C_CCC_GETPID I3C_CCC_ID(0xd, false)
+#define I3C_CCC_GETBCR I3C_CCC_ID(0xe, false)
+#define I3C_CCC_GETDCR I3C_CCC_ID(0xf, false)
+#define I3C_CCC_GETSTATUS I3C_CCC_ID(0x10, false)
+#define I3C_CCC_GETACCMST I3C_CCC_ID(0x11, false)
+#define I3C_CCC_SETBRGTGT I3C_CCC_ID(0x13, false)
+#define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false)
+#define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false)
+#define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false)
+
+#define I3C_CCC_EVENT_SIR BIT(0)
+#define I3C_CCC_EVENT_MR BIT(1)
+#define I3C_CCC_EVENT_HJ BIT(3)
+
+/**
+ * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC
+ *
+ * @events: bitmask of I3C_CCC_EVENT_xxx events.
+ *
+ * Depending on the CCC command, the specific events coming from all devices
+ * (broadcast version) or a specific device (unicast version) will be
+ * enabled (ENEC) or disabled (DISEC).
+ */
+struct i3c_ccc_events {
+ u8 events;
+};
+
+/**
+ * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC
+ *
+ * @len: maximum write length in bytes
+ *
+ * The maximum write length is only applicable to SDR private messages or
+ * extended Write CCCs (like SETXTIME).
+ */
+struct i3c_ccc_mwl {
+ __be16 len;
+};
+
+/**
+ * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC
+ *
+ * @read_len: maximum read length in bytes
+ * @ibi_len: maximum IBI payload length
+ *
+ * The maximum read length is only applicable to SDR private messages or
+ * extended Read CCCs (like GETXTIME).
+ * The IBI length is only valid if the I3C slave is IBI capable
+ * (%I3C_BCR_IBI_REQ_CAP is set).
+ */
+struct i3c_ccc_mrl {
+ __be16 read_len;
+ u8 ibi_len;
+} __packed;
+
+/**
+ * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS
+ *
+ * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is
+ * describing an I2C slave.
+ * @dcr: DCR value (not applicable to entries describing I2C devices)
+ * @lvr: LVR value (not applicable to entries describing I3C devices)
+ * @bcr: BCR value or 0 if this entry is describing an I2C slave
+ * @static_addr: static address or 0 if the device does not have a static
+ * address
+ *
+ * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc
+ * descriptors (one entry per I3C/I2C dev controlled by the master).
+ */
+struct i3c_ccc_dev_desc {
+ u8 dyn_addr;
+ union {
+ u8 dcr;
+ u8 lvr;
+ };
+ u8 bcr;
+ u8 static_addr;
+};
+
+/**
+ * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC
+ *
+ * @count: number of dev descriptors
+ * @master: descriptor describing the current master
+ * @slaves: array of descriptors describing slaves controlled by the
+ * current master
+ *
+ * Information passed to the broadcast DEFSLVS to propagate device
+ * information to all masters currently acting as slaves on the bus.
+ * This is only meaningful if you have more than one master.
+ */
+struct i3c_ccc_defslvs {
+ u8 count;
+ struct i3c_ccc_dev_desc master;
+ struct i3c_ccc_dev_desc slaves[0];
+} __packed;
+
+/**
+ * enum i3c_ccc_test_mode - enum listing all available test modes
+ *
+ * @I3C_CCC_EXIT_TEST_MODE: exit test mode
+ * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode
+ */
+enum i3c_ccc_test_mode {
+ I3C_CCC_EXIT_TEST_MODE,
+ I3C_CCC_VENDOR_TEST_MODE,
+};
+
+/**
+ * struct i3c_ccc_enttm - payload passed to ENTTM CCC
+ *
+ * @mode: one of the &enum i3c_ccc_test_mode modes
+ *
+ * Information passed to the ENTTM CCC to instruct an I3C device to enter a
+ * specific test mode.
+ */
+struct i3c_ccc_enttm {
+ u8 mode;
+};
+
+/**
+ * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs
+ *
+ * @addr: dynamic address to assign to an I3C device
+ *
+ * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the
+ * dynamic address of an I3C device.
+ */
+struct i3c_ccc_setda {
+ u8 addr;
+};
+
+/**
+ * struct i3c_ccc_getpid - payload passed to GETPID CCC
+ *
+ * @pid: 48 bits PID in big endian
+ */
+struct i3c_ccc_getpid {
+ u8 pid[6];
+};
+
+/**
+ * struct i3c_ccc_getbcr - payload passed to GETBCR CCC
+ *
+ * @bcr: BCR (Bus Characteristic Register) value
+ */
+struct i3c_ccc_getbcr {
+ u8 bcr;
+};
+
+/**
+ * struct i3c_ccc_getdcr - payload passed to GETDCR CCC
+ *
+ * @dcr: DCR (Device Characteristic Register) value
+ */
+struct i3c_ccc_getdcr {
+ u8 dcr;
+};
+
+#define I3C_CCC_STATUS_PENDING_INT(status) ((status) & GENMASK(3, 0))
+#define I3C_CCC_STATUS_PROTOCOL_ERROR BIT(5)
+#define I3C_CCC_STATUS_ACTIVITY_MODE(status) \
+ (((status) & GENMASK(7, 6)) >> 6)
+
+/**
+ * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC
+ *
+ * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more
+ * information).
+ */
+struct i3c_ccc_getstatus {
+ __be16 status;
+};
+
+/**
+ * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC
+ *
+ * @newmaster: address of the master taking bus ownership
+ */
+struct i3c_ccc_getaccmst {
+ u8 newmaster;
+};
+
+/**
+ * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor
+ *
+ * @addr: dynamic address of the bridged device
+ * @id: ID of the slave device behind the bridge
+ */
+struct i3c_ccc_bridged_slave_desc {
+ u8 addr;
+ __be16 id;
+} __packed;
+
+/**
+ * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC
+ *
+ * @count: number of bridged slaves
+ * @bslaves: bridged slave descriptors
+ */
+struct i3c_ccc_setbrgtgt {
+ u8 count;
+ struct i3c_ccc_bridged_slave_desc bslaves[0];
+} __packed;
+
+/**
+ * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers
+ */
+enum i3c_sdr_max_data_rate {
+ I3C_SDR0_FSCL_MAX,
+ I3C_SDR1_FSCL_8MHZ,
+ I3C_SDR2_FSCL_6MHZ,
+ I3C_SDR3_FSCL_4MHZ,
+ I3C_SDR4_FSCL_2MHZ,
+};
+
+/**
+ * enum i3c_tsco - clock to data turn-around
+ */
+enum i3c_tsco {
+ I3C_TSCO_8NS,
+ I3C_TSCO_9NS,
+ I3C_TSCO_10NS,
+ I3C_TSCO_11NS,
+ I3C_TSCO_12NS,
+};
+
+#define I3C_CCC_MAX_SDR_FSCL_MASK GENMASK(2, 0)
+#define I3C_CCC_MAX_SDR_FSCL(x) ((x) & I3C_CCC_MAX_SDR_FSCL_MASK)
+
+/**
+ * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC
+ *
+ * @maxwr: write limitations
+ * @maxrd: read limitations
+ * @maxrdturn: maximum read turn-around, expressed in micro-seconds and
+ * little-endian formatted
+ */
+struct i3c_ccc_getmxds {
+ u8 maxwr;
+ u8 maxrd;
+ u8 maxrdturn[3];
+} __packed;
+
+#define I3C_CCC_HDR_MODE(mode) BIT(mode)
+
+/**
+ * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC
+ *
+ * @modes: bitmap of supported HDR modes
+ */
+struct i3c_ccc_gethdrcap {
+ u8 modes;
+} __packed;
+
+/**
+ * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands
+ */
+enum i3c_ccc_setxtime_subcmd {
+ I3C_CCC_SETXTIME_ST = 0x7f,
+ I3C_CCC_SETXTIME_DT = 0xbf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb,
+ I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd,
+ I3C_CCC_SETXTIME_TPH = 0x3f,
+ I3C_CCC_SETXTIME_TU = 0x9f,
+ I3C_CCC_SETXTIME_ODR = 0x8f,
+};
+
+/**
+ * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC
+ *
+ * @subcmd: one of the sub-commands defined in &enum i3c_ccc_setxtime_subcmd
+ * @data: sub-command payload. Amount of data is determined by
+ * &i3c_ccc_setxtime->subcmd
+ */
+struct i3c_ccc_setxtime {
+ u8 subcmd;
+ u8 data[0];
+} __packed;
+
+#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0)
+#define I3C_CCC_GETXTIME_ASYNC_MODE(x) BIT((x) + 1)
+#define I3C_CCC_GETXTIME_OVERFLOW BIT(7)
+
+/**
+ * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC
+ *
+ * @supported_modes: bitmap describing supported XTIME modes
+ * @state: current status (enabled mode and overflow status)
+ * @frequency: slave's internal oscillator frequency in 500KHz steps
+ * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps
+ */
+struct i3c_ccc_getxtime {
+ u8 supported_modes;
+ u8 state;
+ u8 frequency;
+ u8 inaccuracy;
+} __packed;
+
+/**
+ * struct i3c_ccc_cmd_payload - CCC payload
+ *
+ * @len: payload length
+ * @data: payload data. This buffer must be DMA-able
+ */
+struct i3c_ccc_cmd_payload {
+ u16 len;
+ void *data;
+};
+
+/**
+ * struct i3c_ccc_cmd_dest - CCC command destination
+ *
+ * @addr: can be an I3C device address or the broadcast address if this is a
+ * broadcast CCC
+ * @payload: payload to be sent to this device or broadcasted
+ */
+struct i3c_ccc_cmd_dest {
+ u8 addr;
+ struct i3c_ccc_cmd_payload payload;
+};
+
+/**
+ * struct i3c_ccc_cmd - CCC command
+ *
+ * @rnw: true if the CCC should retrieve data from the device. Only valid for
+ * unicast commands
+ * @id: CCC command id
+ * @ndests: number of destinations. Should always be one for broadcast commands
+ * @dests: array of destinations and associated payload for this CCC. Most of
+ * the time, only one destination is provided
+ * @err: I3C error code
+ */
+struct i3c_ccc_cmd {
+ u8 rnw;
+ u8 id;
+ unsigned int ndests;
+ struct i3c_ccc_cmd_dest *dests;
+ enum i3c_error_code err;
+};
+
+#endif /* I3C_CCC_H */
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
new file mode 100644
index 000000000000..5ecb055fd375
--- /dev/null
+++ b/include/linux/i3c/device.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_DEV_H
+#define I3C_DEV_H
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kconfig.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+/**
+ * enum i3c_error_code - I3C error codes
+ *
+ * These are the standard error codes as defined by the I3C specification.
+ * When -EIO is returned by i3c_device_do_priv_xfers() or
+ * i3c_device_send_hdr_cmds() one can check the error code in
+ * &struct i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
+ * what went wrong.
+ *
+ * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
+ * related
+ * @I3C_ERROR_M0: M0 error
+ * @I3C_ERROR_M1: M1 error
+ * @I3C_ERROR_M2: M2 error
+ */
+enum i3c_error_code {
+ I3C_ERROR_UNKNOWN = 0,
+ I3C_ERROR_M0 = 1,
+ I3C_ERROR_M1,
+ I3C_ERROR_M2,
+};
+
+/**
+ * enum i3c_hdr_mode - HDR mode ids
+ * @I3C_HDR_DDR: DDR mode
+ * @I3C_HDR_TSP: TSP mode
+ * @I3C_HDR_TSL: TSL mode
+ */
+enum i3c_hdr_mode {
+ I3C_HDR_DDR,
+ I3C_HDR_TSP,
+ I3C_HDR_TSL,
+};
+
+/**
+ * struct i3c_priv_xfer - I3C SDR private transfer
+ * @rnw: encodes the transfer direction. true for a read, false for a write
+ * @len: transfer length in bytes
+ * @data: input/output buffer
+ * @data.in: input buffer. Must point to a DMA-able buffer
+ * @data.out: output buffer. Must point to a DMA-able buffer
+ * @err: I3C error code
+ */
+struct i3c_priv_xfer {
+ u8 rnw;
+ u16 len;
+ union {
+ void *in;
+ const void *out;
+ } data;
+ enum i3c_error_code err;
+};
+
+/**
+ * enum i3c_dcr - I3C DCR values
+ * @I3C_DCR_GENERIC_DEVICE: generic I3C device
+ */
+enum i3c_dcr {
+ I3C_DCR_GENERIC_DEVICE = 0,
+};
+
+#define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33)
+#define I3C_PID_RND_LOWER_32BITS(pid) (!!((pid) & BIT_ULL(32)))
+#define I3C_PID_RND_VAL(pid) ((pid) & GENMASK_ULL(31, 0))
+#define I3C_PID_PART_ID(pid) (((pid) & GENMASK_ULL(31, 16)) >> 16)
+#define I3C_PID_INSTANCE_ID(pid) (((pid) & GENMASK_ULL(15, 12)) >> 12)
+#define I3C_PID_EXTRA_INFO(pid) ((pid) & GENMASK_ULL(11, 0))
+
+#define I3C_BCR_DEVICE_ROLE(bcr) ((bcr) & GENMASK(7, 6))
+#define I3C_BCR_I3C_SLAVE (0 << 6)
+#define I3C_BCR_I3C_MASTER (1 << 6)
+#define I3C_BCR_HDR_CAP BIT(5)
+#define I3C_BCR_BRIDGE BIT(4)
+#define I3C_BCR_OFFLINE_CAP BIT(3)
+#define I3C_BCR_IBI_PAYLOAD BIT(2)
+#define I3C_BCR_IBI_REQ_CAP BIT(1)
+#define I3C_BCR_MAX_DATA_SPEED_LIM BIT(0)
+
+/**
+ * struct i3c_device_info - I3C device information
+ * @pid: Provisional ID
+ * @bcr: Bus Characteristic Register
+ * @dcr: Device Characteristic Register
+ * @static_addr: static/I2C address
+ * @dyn_addr: dynamic address
+ * @hdr_cap: supported HDR modes
+ * @max_read_ds: max read speed information
+ * @max_write_ds: max write speed information
+ * @max_ibi_len: max IBI payload length
+ * @max_read_turnaround: max read turn-around time in micro-seconds
+ * @max_read_len: max private SDR read length in bytes
+ * @max_write_len: max private SDR write length in bytes
+ *
+ * This is all basic information that should be advertised by an I3C device.
+ * Some of them are optional depending on the device type and device
+ * capabilities.
+ * For each I3C slave attached to a master with
+ * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC command
+ * to retrieve these data.
+ */
+struct i3c_device_info {
+ u64 pid;
+ u8 bcr;
+ u8 dcr;
+ u8 static_addr;
+ u8 dyn_addr;
+ u8 hdr_cap;
+ u8 max_read_ds;
+ u8 max_write_ds;
+ u8 max_ibi_len;
+ u32 max_read_turnaround;
+ u16 max_read_len;
+ u16 max_write_len;
+};
+
+/*
+ * I3C device internals are kept hidden from I3C device users. It's just
+ * simpler to refactor things when everything goes through getter/setters, and
+ * I3C device drivers should not have to worry about internal representation
+ * anyway.
+ */
+struct i3c_device;
+
+/* These macros should be used to fill i3c_device_id entries. */
+#define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART)
+
+#define I3C_DEVICE(_manufid, _partid, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .data = _drvdata, \
+ }
+
+#define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART | \
+ I3C_MATCH_EXTRA_INFO, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .extra_info = _info, \
+ .data = _drvdata, \
+ }
+
+#define I3C_CLASS(_dcr, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_DCR, \
+ .dcr = _dcr, \
+ .data = _drvdata, \
+ }
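+
+/*
+ * Illustrative sketch (not part of the API): a driver match table built with
+ * the helpers above. The manufacturer/part IDs and all foo_* names are made
+ * up for the example.
+ *
+ *	static const struct i3c_device_id foo_i3c_ids[] = {
+ *		I3C_DEVICE(0x123, 0x456, &foo_chip_data),
+ *		I3C_CLASS(I3C_DCR_GENERIC_DEVICE, NULL),
+ *		{ },
+ *	};
+ */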
+
+/**
+ * struct i3c_driver - I3C device driver
+ * @driver: inherit from device_driver
+ * @probe: I3C device probe method
+ * @remove: I3C device remove method
+ * @id_table: I3C device match table. Will be used by the framework to decide
+ * which device to bind to this driver
+ */
+struct i3c_driver {
+ struct device_driver driver;
+ int (*probe)(struct i3c_device *dev);
+ int (*remove)(struct i3c_device *dev);
+ const struct i3c_device_id *id_table;
+};
+
+static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
+{
+ return container_of(drv, struct i3c_driver, driver);
+}
+
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
+struct i3c_device *dev_to_i3cdev(struct device *dev);
+
+static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev,
+ void *data)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ dev_set_drvdata(dev, data);
+}
+
+static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ return dev_get_drvdata(dev);
+}
+
+int i3c_driver_register_with_owner(struct i3c_driver *drv,
+ struct module *owner);
+void i3c_driver_unregister(struct i3c_driver *drv);
+
+#define i3c_driver_register(__drv) \
+ i3c_driver_register_with_owner(__drv, THIS_MODULE)
+
+/**
+ * module_i3c_driver() - Register a module providing an I3C driver
+ * @__drv: the I3C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * driver.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_driver(__drv) \
+ module_driver(__drv, i3c_driver_register, i3c_driver_unregister)
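+
+/*
+ * Illustrative sketch (not a real driver): a minimal I3C device driver built
+ * on top of module_i3c_driver(). All foo_* names are made up, and a match
+ * table like the one sketched next to the I3C_DEVICE()/I3C_CLASS() macros
+ * above is assumed.
+ *
+ *	static int foo_probe(struct i3c_device *i3cdev)
+ *	{
+ *		struct foo_priv *priv;
+ *
+ *		priv = devm_kzalloc(i3cdev_to_dev(i3cdev), sizeof(*priv),
+ *				    GFP_KERNEL);
+ *		if (!priv)
+ *			return -ENOMEM;
+ *
+ *		i3cdev_set_drvdata(i3cdev, priv);
+ *		return 0;
+ *	}
+ *
+ *	static struct i3c_driver foo_driver = {
+ *		.driver = {
+ *			.name = "foo",
+ *		},
+ *		.probe = foo_probe,
+ *		.id_table = foo_i3c_ids,
+ *	};
+ *	module_i3c_driver(foo_driver);
+ */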
+
+/**
+ * i3c_i2c_driver_register() - Register an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to register
+ * @i2cdrv: the I2C driver to register
+ *
+ * This function registers both @i2cdrv and @i3cdrv, and fails if one of these
+ * registrations fails. This is mainly useful for devices that support both I2C
+ * and I3C modes.
+ * Note that when CONFIG_I3C is not enabled, this function only registers the
+ * I2C driver.
+ *
+ * Return: 0 if both registrations succeed, a negative error code otherwise.
+ */
+static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ int ret;
+
+ ret = i2c_add_driver(i2cdrv);
+ if (ret || !IS_ENABLED(CONFIG_I3C))
+ return ret;
+
+ ret = i3c_driver_register(i3cdrv);
+ if (ret)
+ i2c_del_driver(i2cdrv);
+
+ return ret;
+}
+
+/**
+ * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to unregister
+ * @i2cdrv: the I2C driver to unregister
+ *
+ * This function unregisters both @i3cdrv and @i2cdrv.
+ * Note that when CONFIG_I3C is not enabled, this function only unregisters the
+ * @i2cdrv.
+ */
+static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ if (IS_ENABLED(CONFIG_I3C))
+ i3c_driver_unregister(i3cdrv);
+
+ i2c_del_driver(i2cdrv);
+}
+
+/**
+ * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
+ * driver
+ * @__i3cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * and an I2C driver.
+ * This macro can be used even if CONFIG_I3C is disabled; in this case, only
+ * the I2C driver will be registered.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \
+ module_driver(__i3cdrv, \
+ i3c_i2c_driver_register, \
+ i3c_i2c_driver_unregister)
+
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
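+
+/*
+ * Illustrative sketch: a register-style access built out of two SDR
+ * transfers, first writing a one-byte register address, then reading the
+ * value back. reg_addr and reg_val are assumed to point to kmalloc'ed
+ * (DMA-able) buffers, as required by &struct i3c_priv_xfer, and i3cdev is
+ * the &struct i3c_device handed to the driver; error handling is elided.
+ *
+ *	struct i3c_priv_xfer xfers[2] = {
+ *		{ .rnw = false, .len = 1, .data.out = reg_addr },
+ *		{ .rnw = true, .len = 1, .data.in = reg_val },
+ *	};
+ *
+ *	ret = i3c_device_do_priv_xfers(i3cdev, xfers, 2);
+ *	if (ret)
+ *		dev_err(i3cdev_to_dev(i3cdev), "SDR transfer failed: %d\n",
+ *			ret);
+ */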
+
+void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info);
+
+struct i3c_ibi_payload {
+ unsigned int len;
+ const void *data;
+};
+
+/**
+ * struct i3c_ibi_setup - IBI setup object
+ * @max_payload_len: maximum length of the payload associated to an IBI. If one
+ * IBI appears to have a payload that is bigger than this
+ * number, the IBI will be rejected.
+ * @num_slots: number of pre-allocated IBI slots. This should be chosen so that
+ * the system never runs out of IBI slots, otherwise you'll lose
+ * IBIs.
+ * @handler: IBI handler, called every time an IBI is received. This handler
+ * is called in a workqueue context. It is allowed to sleep and send
+ * new messages on the bus, though it's recommended to keep the
+ * processing done there as fast as possible to avoid delaying
+ * processing of other IBIs queued on the same workqueue.
+ *
+ * Temporary structure used to pass information to i3c_device_request_ibi().
+ * This object can be allocated on the stack since i3c_device_request_ibi()
+ * copies every bit of information and does not use it after
+ * i3c_device_request_ibi() has returned.
+ */
+struct i3c_ibi_setup {
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+int i3c_device_request_ibi(struct i3c_device *dev,
+ const struct i3c_ibi_setup *setup);
+void i3c_device_free_ibi(struct i3c_device *dev);
+int i3c_device_enable_ibi(struct i3c_device *dev);
+int i3c_device_disable_ibi(struct i3c_device *dev);
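+
+/*
+ * Illustrative sketch of the IBI request flow from a device driver. The
+ * handler runs in workqueue context as described in &struct i3c_ibi_setup;
+ * foo_* names are made up and error handling is elided.
+ *
+ *	static void foo_ibi_handler(struct i3c_device *dev,
+ *				    const struct i3c_ibi_payload *payload)
+ *	{
+ *		dev_info(i3cdev_to_dev(dev), "IBI with %u byte payload\n",
+ *			 payload->len);
+ *	}
+ *
+ *	static const struct i3c_ibi_setup ibisetup = {
+ *		.max_payload_len = 2,
+ *		.num_slots = 10,
+ *		.handler = foo_ibi_handler,
+ *	};
+ *
+ *	ret = i3c_device_request_ibi(i3cdev, &ibisetup);
+ *	if (!ret)
+ *		ret = i3c_device_enable_ibi(i3cdev);
+ */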
+
+#endif /* I3C_DEV_H */
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
new file mode 100644
index 000000000000..f13fd8b1dd79
--- /dev/null
+++ b/include/linux/i3c/master.h
@@ -0,0 +1,648 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_MASTER_H
+#define I3C_MASTER_H
+
+#include <asm/bitsperlong.h>
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/i3c/ccc.h>
+#include <linux/i3c/device.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define I3C_HOT_JOIN_ADDR 0x2
+#define I3C_BROADCAST_ADDR 0x7e
+#define I3C_MAX_ADDR GENMASK(6, 0)
+
+struct i3c_master_controller;
+struct i3c_bus;
+struct i2c_device;
+struct i3c_device;
+
+/**
+ * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
+ * @node: node element used to insert the slot into the I2C or I3C device
+ * list
+ * @master: I3C master that instantiated this device. Will be used to do
+ * I2C/I3C transfers
+ * @master_priv: master private data assigned to the device. Can be used to
+ * add master specific information
+ *
+ * This structure describes common I3C/I2C dev information.
+ */
+struct i3c_i2c_dev_desc {
+ struct list_head node;
+ struct i3c_master_controller *master;
+ void *master_priv;
+};
+
+#define I3C_LVR_I2C_INDEX_MASK GENMASK(7, 5)
+#define I3C_LVR_I2C_INDEX(x) ((x) << 5)
+#define I3C_LVR_I2C_FM_MODE BIT(4)
+
+#define I2C_MAX_ADDR GENMASK(9, 0)
+
+/**
+ * struct i2c_dev_boardinfo - I2C device board information
+ * @node: used to insert the boardinfo object in the I2C boardinfo list
+ * @base: regular I2C board information
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ * the I2C device limitations
+ *
+ * This structure is used to attach board-level information to an I2C device.
+ * Each I2C device connected on the I3C bus should have one.
+ */
+struct i2c_dev_boardinfo {
+ struct list_head node;
+ struct i2c_board_info base;
+ u8 lvr;
+};
+
+/**
+ * struct i2c_dev_desc - I2C device descriptor
+ * @common: common part of the I2C device descriptor
+ * @boardinfo: pointer to the boardinfo attached to this I2C device
+ * @dev: I2C device object registered to the I2C framework
+ *
+ * Each I2C device connected on the bus will have an i2c_dev_desc.
+ * This object is created by the core and later attached to the controller
+ * using &struct i3c_master_controller->ops->attach_i2c_dev().
+ *
+ * &struct i2c_dev_desc is the internal representation of an I2C device
+ * connected on an I3C bus. This object is also passed to all
+ * &struct i3c_master_controller_ops hooks.
+ */
+struct i2c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ const struct i2c_dev_boardinfo *boardinfo;
+ struct i2c_client *dev;
+};
+
+/**
+ * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot
+ * @work: work associated to this slot. The IBI handler will be called from
+ * there
+ * @dev: the I3C device that has generated this IBI
+ * @len: length of the payload associated to this IBI
+ * @data: payload buffer
+ *
+ * An IBI slot is an object pre-allocated by the controller and used when an
+ * IBI comes in.
+ * Every time an IBI comes in, the I3C master driver should find a free IBI
+ * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using
+ * i3c_master_queue_ibi().
+ *
+ * How IBI slots are allocated is left to the I3C master driver, though, for
+ * simple kmalloc-based allocation, the generic IBI slot pool can be used.
+ */
+struct i3c_ibi_slot {
+ struct work_struct work;
+ struct i3c_dev_desc *dev;
+ unsigned int len;
+ void *data;
+};
+
+/**
+ * struct i3c_device_ibi_info - IBI information attached to a specific device
+ * @all_ibis_handled: used to be informed when no more IBIs are waiting to be
+ * processed. Used by i3c_device_disable_ibi() to wait for
+ * all IBIs to be dequeued
+ * @pending_ibis: counts the number of pending IBIs. Each pending IBI has its
+ * work element queued to the controller workqueue
+ * @max_payload_len: maximum payload length for an IBI coming from this device.
+ * This value is specified when calling
+ * i3c_device_request_ibi() and should not change at run
+ * time. All IBIs exceeding this limit should be
+ * rejected by the master
+ * @num_slots: number of IBI slots reserved for this device
+ * @enabled: reflect the IBI status
+ * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
+ * handler will be called from the controller workqueue, and as such
+ * is allowed to sleep (though it is recommended to process the IBI
+ * as fast as possible to not stall processing of other IBIs queued
+ * on the same workqueue).
+ * New I3C messages can be sent from the IBI handler
+ *
+ * The &struct i3c_device_ibi_info object is allocated when
+ * i3c_device_request_ibi() is called and attached to a specific device. This
+ * object is here to manage IBIs coming from a specific I3C device.
+ *
+ * Note that this structure is the generic view of the IBI management
+ * infrastructure. I3C master drivers may have their own internal
+ * representation which they can associate to the device using
+ * controller-private data.
+ */
+struct i3c_device_ibi_info {
+ struct completion all_ibis_handled;
+ atomic_t pending_ibis;
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ unsigned int enabled;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+/**
+ * struct i3c_dev_boardinfo - I3C device board information
+ * @node: used to insert the boardinfo object in the I3C boardinfo list
+ * @init_dyn_addr: initial dynamic address requested by the FW. We provide no
+ * guarantee that the device will end up using this address,
+ * but try our best to assign this specific address to the
+ * device
+ * @static_addr: static address the I3C device listens on before it's been
+ * assigned a dynamic address by the master. Will be used during
+ * bus initialization to assign it a specific dynamic address
+ * before starting DAA (Dynamic Address Assignment)
+ * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * that may be used to attach boardinfo to i3c_dev_desc when the device
+ * does not have a static address
+ * @of_node: optional DT node in case the device has been described in the DT
+ *
+ * This structure is used to attach board-level information to an I3C device.
+ * Not all I3C devices connected on the bus will have a boardinfo. It's only
+ * needed if you want to attach extra resources to a device or assign it a
+ * specific dynamic address.
+ */
+struct i3c_dev_boardinfo {
+ struct list_head node;
+ u8 init_dyn_addr;
+ u8 static_addr;
+ u64 pid;
+ struct device_node *of_node;
+};
+
+/**
+ * struct i3c_dev_desc - I3C device descriptor
+ * @common: common part of the I3C device descriptor
+ * @info: I3C device information. Will be automatically filled when you create
+ * your device with i3c_master_add_i3c_dev_locked()
+ * @ibi_lock: lock used to protect the &struct i3c_dev_desc->ibi
+ * @ibi: IBI info attached to a device. Should be NULL until
+ * i3c_device_request_ibi() is called
+ * @dev: pointer to the I3C device object exposed to I3C device drivers. This
+ * should never be accessed from I3C master controller drivers. Only core
+ * code should manipulate it when updating the dev <-> desc link or
+ * when propagating IBI events to the driver
+ * @boardinfo: pointer to the boardinfo attached to this I3C device
+ *
+ * Internal representation of an I3C device. This object is only used by the
+ * core and passed to I3C master controller drivers when they're requested to
+ * do some operations on the device.
+ * The core maintains the link between the internal I3C dev descriptor and the
+ * object exposed to the I3C device drivers (&struct i3c_device).
+ */
+struct i3c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ struct i3c_device_info info;
+ struct mutex ibi_lock;
+ struct i3c_device_ibi_info *ibi;
+ struct i3c_device *dev;
+ const struct i3c_dev_boardinfo *boardinfo;
+};
+
+/**
+ * struct i3c_device - I3C device object
+ * @dev: device object to register the I3C dev to the device model
+ * @desc: pointer to an i3c device descriptor object. This link is updated
+ * every time the I3C device is rediscovered with a different dynamic
+ * address assigned
+ * @bus: I3C bus this device is attached to
+ *
+ * I3C device object exposed to I3C device drivers. The core takes care of
+ * linking this object to the relevant &struct i3c_dev_desc one.
+ * All I3C devs on the I3C bus are represented, including I3C masters. For each
+ * of them, we have an instance of &struct i3c_device.
+ */
+struct i3c_device {
+ struct device dev;
+ struct i3c_dev_desc *desc;
+ struct i3c_bus *bus;
+};
+
+/*
+ * The I3C specification says the maximum number of devices connected on the
+ * bus is 11, but this number depends on external parameters like trace length,
+ * capacitive load per Device, and the types of Devices present on the Bus.
+ * The I3C master can also have limitations, so this number is just here as a
+ * reference and should be adjusted on a per-controller/per-board basis.
+ */
+#define I3C_BUS_MAX_DEVS 11
+
+#define I3C_BUS_MAX_I3C_SCL_RATE 12900000
+#define I3C_BUS_TYP_I3C_SCL_RATE 12500000
+#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000
+#define I3C_BUS_I2C_FM_SCL_RATE 400000
+#define I3C_BUS_TLOW_OD_MIN_NS 200
+
+/**
+ * enum i3c_bus_mode - I3C bus mode
+ * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation
+ * expected
+ * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on
+ * the bus. The only impact in this mode is that the
+ * high SCL pulse has to stay below 50ns to trick I2C
+ * devices when transmitting I3C frames
+ * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present
+ * on the bus
+ */
+enum i3c_bus_mode {
+ I3C_BUS_MODE_PURE,
+ I3C_BUS_MODE_MIXED_FAST,
+ I3C_BUS_MODE_MIXED_SLOW,
+};
+
+/**
+ * enum i3c_addr_slot_status - I3C address slot status
+ * @I3C_ADDR_SLOT_FREE: address is free
+ * @I3C_ADDR_SLOT_RSVD: address is reserved
+ * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
+ * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
+ * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
+ *
+ * On an I3C bus, addresses are assigned dynamically, and we need to know which
+ * addresses are free to use and which ones are already assigned.
+ *
+ * Addresses marked as reserved are those reserved by the I3C protocol
+ * (broadcast address, ...).
+ */
+enum i3c_addr_slot_status {
+ I3C_ADDR_SLOT_FREE,
+ I3C_ADDR_SLOT_RSVD,
+ I3C_ADDR_SLOT_I2C_DEV,
+ I3C_ADDR_SLOT_I3C_DEV,
+ I3C_ADDR_SLOT_STATUS_MASK = 3,
+};
+
+/**
+ * struct i3c_bus - I3C bus object
+ * @cur_master: I3C master currently driving the bus. Since I3C is multi-master
+ * this can change over time. Will be used to let a master
+ * know whether it needs to request bus ownership before sending
+ * a frame or not
+ * @id: bus ID. Assigned by the framework when the bus is registered
+ * @addrslots: a bitmap with 2 bits per slot to encode the address status and
+ * ease the DAA (Dynamic Address Assignment) procedure (see
+ * &enum i3c_addr_slot_status)
+ * @mode: bus mode (see &enum i3c_bus_mode)
+ * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv
+ * transfers
+ * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers
+ * @scl_rate: SCL signal rate for I3C and I2C mode
+ * @devs.i3c: contains a list of I3C device descriptors representing I3C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs.i2c: contains a list of I2C device descriptors representing I2C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs: 2 lists containing all I3C/I2C devices connected to the bus
+ * @lock: read/write lock on the bus. This is needed to protect against
+ * operations that have an impact on the whole bus and the devices
+ * connected to it. For example, when asking slaves to drop their
+ * dynamic address (RSTDAA CCC), we need to make sure no one is trying
+ * to send I3C frames to these devices.
+ * Note that this lock does not protect against concurrency between
+ * devices: several drivers can send different I3C/I2C frames through
+ * the same master in parallel. This is the responsibility of the
+ * master to guarantee that frames are actually sent sequentially and
+ * not interlaced
+ *
+ * The I3C bus is represented with its own object and not implicitly described
+ * by the I3C master to cope with the multi-master functionality, where one bus
+ * can be shared amongst several masters, each of them requesting bus ownership
+ * when they need to.
+ */
+struct i3c_bus {
+ struct i3c_dev_desc *cur_master;
+ int id;
+ unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG];
+ enum i3c_bus_mode mode;
+ struct {
+ unsigned long i3c;
+ unsigned long i2c;
+ } scl_rate;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } devs;
+ struct rw_semaphore lock;
+};
+
+/**
+ * struct i3c_master_controller_ops - I3C master methods
+ * @bus_init: hook responsible for the I3C bus initialization. You should at
+ * least call i3c_master_set_info() from there and set the bus mode.
+ * You can also put controller specific initialization in there.
+ * This method is mandatory.
+ * @bus_cleanup: cleanup everything done in
+ * &i3c_master_controller_ops->bus_init().
+ * This method is optional.
+ * @attach_i3c_dev: called every time an I3C device is attached to the bus. It
+ * can be after a DAA or when a device is statically declared
+ * by the FW, in which case it will only have a static address
+ * and the dynamic address will be 0.
+ * When this function is called, device information has not
+ * been retrieved yet.
+ * This is a good place to attach master controller specific
+ * data to I3C devices.
+ * This method is optional.
+ * @reattach_i3c_dev: called every time an I3C device has its address
+ * changed. It can be because the device has been powered
+ * down and has lost its address, or it can happen when a
+ * device had a static address and has been assigned a
+ * dynamic address with SETDASA.
+ * This method is optional.
+ * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @do_daa: do a DAA (Dynamic Address Assignment) procedure. This procedure
+ * should send an ENTDAA CCC command and then add all devices
+ * discovered during the DAA using i3c_master_add_i3c_dev_locked().
+ * All devices added with i3c_master_add_i3c_dev_locked() will then be
+ * attached or re-attached to the controller.
+ * This method is mandatory.
+ * @supports_ccc_cmd: should return true if the CCC command is supported, false
+ * otherwise.
+ * This method is optional; if not provided, the core assumes
+ * all CCC commands are supported.
+ * @send_ccc_cmd: send a CCC command
+ * This method is mandatory.
+ * @priv_xfers: do one or several private I3C SDR transfers
+ * This method is mandatory.
+ * @attach_i2c_dev: called every time an I2C device is attached to the bus.
+ * This is a good place to attach master controller specific
+ * data to I2C devices.
+ * This method is optional.
+ * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @i2c_xfers: do one or several I2C transfers. Note that, unlike i3c
+ * transfers, the core does not guarantee that buffers attached to
+ * the transfers are DMA-safe. If drivers want to have DMA-safe
+ * buffers, they should use the i2c_get_dma_safe_msg_buf()
+ * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C
+ * framework.
+ * This method is mandatory.
+ * @i2c_funcs: expose the supported I2C functionalities.
+ * This method is mandatory.
+ * @request_ibi: attach an IBI handler to an I3C device. This implies defining
+ * an IBI handler and the constraints of the IBI (maximum payload
+ * length and number of pre-allocated slots).
+ * Some controllers support less IBI-capable devices than regular
+ * devices, so this method might return -%EBUSY if there's no
+ * more space for an extra IBI registration
+ * This method is optional.
+ * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI
+ * should have been disabled with ->disable_ibi() prior to that.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called
+ * prior to ->enable_ibi(). The controller should first enable
+ * the IBI on the controller end (for example, unmask the hardware
+ * IRQ) and then send the ENEC CCC command (with the IBI flag set)
+ * to the I3C device.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI
+ * flag set and then deactivate the hardware IRQ on the
+ * controller end.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been
+ * processed by its handler. The IBI slot should be put back
+ * in the IBI slot pool so that the controller can re-use it
+ * for a future IBI
+ * This method is mandatory only if ->request_ibi is not
+ * NULL.
+ */
+struct i3c_master_controller_ops {
+ int (*bus_init)(struct i3c_master_controller *master);
+ void (*bus_cleanup)(struct i3c_master_controller *master);
+ int (*attach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr);
+ void (*detach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*do_daa)(struct i3c_master_controller *master);
+ bool (*supports_ccc_cmd)(struct i3c_master_controller *master,
+ const struct i3c_ccc_cmd *cmd);
+ int (*send_ccc_cmd)(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd);
+ int (*priv_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+ int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
+ void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
+ int (*i2c_xfers)(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers, int nxfers);
+ u32 (*i2c_funcs)(struct i3c_master_controller *master);
+ int (*request_ibi)(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_dev_desc *dev);
+ int (*enable_ibi)(struct i3c_dev_desc *dev);
+ int (*disable_ibi)(struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+};
+
+/**
+ * struct i3c_master_controller - I3C master controller object
+ * @dev: device to be registered to the device-model
+ * @this: an I3C device object representing this master. This device will be
+ * added to the list of I3C devs available on the bus
+ * @i2c: I2C adapter used for backward compatibility. This adapter is
+ * registered to the I2C subsystem to be as transparent as possible to
+ * existing I2C drivers
+ * @ops: master operations. See &struct i3c_master_controller_ops
+ * @secondary: true if the master is a secondary master
+ * @init_done: true when the bus initialization is done
+ * @boardinfo.i3c: list of I3C boardinfo objects
+ * @boardinfo.i2c: list of I2C boardinfo objects
+ * @boardinfo: board-level information attached to devices connected on the bus
+ * @bus: I3C bus exposed by this master
+ * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * drivers if they need to postpone operations that need to take place
+ * in a thread context. A typical example is Hot Join processing, which
+ * requires taking the bus lock in maintenance mode, which in turn can
+ * only be done from a sleep-able context
+ *
+ * A &struct i3c_master_controller has to be registered to the I3C subsystem
+ * through i3c_master_register(). None of &struct i3c_master_controller fields
+ * should be set manually, just pass appropriate values to
+ * i3c_master_register().
+ */
+struct i3c_master_controller {
+ struct device dev;
+ struct i3c_dev_desc *this;
+ struct i2c_adapter i2c;
+ const struct i3c_master_controller_ops *ops;
+ unsigned int secondary : 1;
+ unsigned int init_done : 1;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } boardinfo;
+ struct i3c_bus bus;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I2C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I2C devs present on the bus.
+ */
+#define i3c_bus_for_each_i2cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i2c, common.node)
+
+/**
+ * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I3C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I3C devs present on the bus.
+ */
+#define i3c_bus_for_each_i3cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
+
+int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
+ const struct i2c_msg *xfers,
+ int nxfers);
+
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_entdaa_locked(struct i3c_master_controller *master);
+int i3c_master_defslvs_locked(struct i3c_master_controller *master);
+
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr);
+
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr);
+int i3c_master_do_daa(struct i3c_master_controller *master);
+
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info);
+
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary);
+int i3c_master_unregister(struct i3c_master_controller *master);
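+
+/*
+ * Illustrative sketch (not a complete driver): skeleton of an I3C master
+ * controller driver registration. Only the mandatory hooks are listed, all
+ * foo_* names are made up, and struct foo_master is assumed to embed a
+ * &struct i3c_master_controller as its @base member.
+ *
+ *	static const struct i3c_master_controller_ops foo_ops = {
+ *		.bus_init = foo_bus_init,
+ *		.do_daa = foo_do_daa,
+ *		.send_ccc_cmd = foo_send_ccc_cmd,
+ *		.priv_xfers = foo_priv_xfers,
+ *		.i2c_xfers = foo_i2c_xfers,
+ *		.i2c_funcs = foo_i2c_funcs,
+ *	};
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct foo_master *m;
+ *
+ *		m = devm_kzalloc(&pdev->dev, sizeof(*m), GFP_KERNEL);
+ *		if (!m)
+ *			return -ENOMEM;
+ *
+ *		return i3c_master_register(&m->base, &pdev->dev, &foo_ops,
+ *					   false);
+ *	}
+ *
+ * As documented in &struct i3c_master_controller_ops, foo_bus_init() is
+ * expected to call i3c_master_set_info() and set the bus mode.
+ */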
+
+/**
+ * i3c_dev_get_master_data() - get master private data attached to an I3C
+ * device descriptor
+ * @dev: the I3C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i3c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i3c_dev_set_master_data() - attach master private data to an I3C device
+ * descriptor
+ * @dev: the I3C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i3c_dev_get_master_data().
+ */
+static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
+
+/**
+ * i2c_dev_get_master_data() - get master private data attached to an I2C
+ * device descriptor
+ * @dev: the I2C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i2c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i2c_dev_set_master_data() - attach master private data to an I2C device
+ * descriptor
+ * @dev: the I2C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i2c_dev_get_master_data().
+ */
+static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
+
+/**
+ * i3c_dev_get_master() - get master used to communicate with a device
+ * @dev: I3C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i3c_dev_get_master(struct i3c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i2c_dev_get_master() - get master used to communicate with a device
+ * @dev: I2C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i2c_dev_get_master(struct i2c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i3c_master_get_bus() - get the bus attached to a master
+ * @master: master object
+ *
+ * Return: the I3C bus @master is connected to
+ */
+static inline struct i3c_bus *
+i3c_master_get_bus(struct i3c_master_controller *master)
+{
+ return &master->bus;
+}
+
+struct i3c_generic_ibi_pool;
+
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool);
+
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool);
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+ struct i3c_ibi_slot *slot);
+
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
+
+struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
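+
+/*
+ * Illustrative sketch: how a master controller driver could use the generic
+ * IBI pool helpers from its IBI path. foo_read_payload() stands for
+ * controller-specific payload retrieval, error handling is elided, and the
+ * pool pointer would typically be stored in the per-device master private
+ * data (see i3c_dev_set_master_data()).
+ *
+ *	In ->request_ibi():  pool = i3c_generic_ibi_alloc_pool(dev, req);
+ *	In ->free_ibi():     i3c_generic_ibi_free_pool(pool);
+ *
+ *	When the controller signals an IBI:
+ *
+ *		struct i3c_ibi_slot *slot;
+ *
+ *		slot = i3c_generic_ibi_get_free_slot(pool);
+ *		if (slot) {
+ *			slot->len = foo_read_payload(slot->data);
+ *			i3c_master_queue_ibi(dev, slot);
+ *		}
+ *
+ *	In ->recycle_ibi_slot(): i3c_generic_ibi_recycle_slot(pool, slot);
+ */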
+
+#endif /* I3C_MASTER_H */
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519..8336b2f6f834 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,6 +21,7 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index c74b0321922a..e7d29ae633cd 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -10,7 +10,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/ata.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
@@ -50,6 +50,7 @@ struct ide_request {
struct scsi_request sreq;
u8 sense[SCSI_SENSE_BUFFERSIZE];
u8 type;
+ void *special;
};
static inline struct ide_request *ide_req(struct request *rq)
@@ -529,6 +530,10 @@ struct ide_drive_s {
struct request_queue *queue; /* request queue */
+ bool (*prep_rq)(struct ide_drive_s *, struct request *);
+
+ struct blk_mq_tag_set tag_set;
+
struct request *rq; /* current request */
void *driver_data; /* extra driver data */
u16 *id; /* identification info */
@@ -612,6 +617,10 @@ struct ide_drive_s {
bool sense_rq_armed;
struct request *sense_rq;
struct request_sense sense_data;
+
+ /* async sense insertion */
+ struct work_struct rq_work;
+ struct list_head rq_list;
};
typedef struct ide_drive_s ide_drive_t;
@@ -1089,6 +1098,7 @@ extern int ide_pci_clk;
int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
void ide_kill_rq(ide_drive_t *, struct request *);
+void ide_insert_request_head(ide_drive_t *, struct request *);
void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
@@ -1208,7 +1218,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
extern void ide_timer_expiry(struct timer_list *t);
extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern void do_ide_request(struct request_queue *);
+extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
void ide_init_disk(struct gendisk *, ide_drive_t *);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 0ef67f837ae1..3b04e72315e1 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -812,6 +812,8 @@ enum mesh_config_capab_flags {
IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
};
+#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
+
/**
* mesh channel switch parameters element's flag indicator
*
@@ -1617,7 +1619,7 @@ struct ieee80211_he_mcs_nss_supp {
* struct ieee80211_he_operation - HE capabilities element
*
* This structure is the "HE operation element" fields as
- * described in P802.11ax_D2.0 section 9.4.2.238
+ * described in P802.11ax_D3.0 section 9.4.2.238
*/
struct ieee80211_he_operation {
__le32 he_oper_params;
@@ -2009,17 +2011,17 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
}
/* HE Operation defines */
-#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6
-#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
-#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
-#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
-#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x00100000
-#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00200000
-#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
-#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
+#define IEEE80211_HE_OPERATION_CO_LOCATED_BSS 0x00008000
+#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
/*
* ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
@@ -2044,7 +2046,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
he_oper_params = le32_to_cpu(he_oper->he_oper_params);
if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
oper_len += 3;
- if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP)
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_LOCATED_BSS)
oper_len++;
/* Add the first byte (extension ID) to the total length */
@@ -2685,6 +2687,10 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7)
+/* Defines support for TWT Requester and TWT Responder */
+#define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5)
+#define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c20c7e197d07..627b788ba0ff 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -119,6 +119,8 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
const unsigned char *addr,
__u16 vid);
+void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
+bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
#else
static inline struct net_device *
br_fdb_find_port(const struct net_device *br_dev,
@@ -127,6 +129,16 @@ br_fdb_find_port(const struct net_device *br_dev,
{
return NULL;
}
+
+static inline void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
+{
+}
+
+static inline bool
+br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
+{
+ return false;
+}
#endif
#endif
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 83ea4df6ab81..4cca4da7a6de 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -65,8 +65,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
-#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
-#define VLAN_TAG_PRESENT VLAN_CFI_MASK
+#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define VLAN_N_VID 4096
@@ -78,10 +77,11 @@ static inline bool is_vlan_dev(const struct net_device *dev)
return dev->priv_flags & IFF_802_1Q_VLAN;
}
-#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_present(__skb) ((__skb)->vlan_present)
+#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
-#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
+#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK))
+#define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
@@ -133,6 +133,9 @@ struct vlan_pcpu_stats {
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id);
+extern int vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid,
+ void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
@@ -236,6 +239,14 @@ __vlan_find_dev_deep_rcu(struct net_device *real_dev,
return NULL;
}
+static inline int
+vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid, void *arg),
+ void *arg)
+{
+ return 0;
+}
+
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
BUG();
@@ -461,6 +472,31 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
return skb;
}
+/**
+ * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
+ * @skb: skbuff to clear
+ *
+ * Clears the VLAN information from @skb
+ */
+static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
+{
+ skb->vlan_present = 0;
+}
+
+/**
+ * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
+ * @dst: skbuff to copy to
+ * @src: skbuff to copy from
+ *
+ * Copies VLAN information from @src to @dst (for branchless code)
+ */
+static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
+{
+ dst->vlan_present = src->vlan_present;
+ dst->vlan_proto = src->vlan_proto;
+ dst->vlan_tci = src->vlan_tci;
+}
+
/*
* __vlan_hwaccel_push_inside - pushes vlan tag to the payload
* @skb: skbuff to tag
@@ -475,7 +511,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
if (likely(skb))
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
return skb;
}
@@ -491,7 +527,8 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
skb->vlan_proto = vlan_proto;
- skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
+ skb->vlan_tci = vlan_tci;
+ skb->vlan_present = 1;
}
/**
@@ -531,8 +568,6 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
}
}
-#define HAVE_VLAN_GET_TAG
-
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
new file mode 100644
index 000000000000..00d7e8e919c6
--- /dev/null
+++ b/include/linux/indirect_call_wrapper.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
+#define _LINUX_INDIRECT_CALL_WRAPPER_H
+
+#ifdef CONFIG_RETPOLINE
+
+/*
+ * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtins
+ * @f: function pointer
+ * @f$NR: builtin function names, up to $NR of them
+ * @__VA_ARGS__: arguments for @f
+ *
+ * Avoid retpoline overhead for known builtins, checking @f vs each of them and
+ * eventually invoking the builtin function directly. The functions are checked
+ * in the given order. Fall back to the indirect call if none matches.
+ */
+#define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+ likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
+ })
+#define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+ likely(f == f2) ? f2(__VA_ARGS__) : \
+ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
+ })
+
+#define INDIRECT_CALLABLE_DECLARE(f) f
+#define INDIRECT_CALLABLE_SCOPE
+
+#else
+#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALLABLE_DECLARE(f)
+#define INDIRECT_CALLABLE_SCOPE static
+#endif
+
+/*
+ * We can use INDIRECT_CALL_$NR for ipv6 related functions only if ipv6 is
+ * builtin; this macro simplifies dealing with indirect calls that only have
+ * ipv4/ipv6 alternatives.
+ */
+#if IS_BUILTIN(CONFIG_IPV6)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) \
+ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
+#elif IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
+#endif
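+
+/*
+ * Illustrative usage sketch (pt->handler and the udp_v*_foo names are made
+ * up): with the ipv4/ipv6 implementations of a hook known at build time, a
+ * hot path can avoid the retpoline penalty like this:
+ *
+ *	INDIRECT_CALLABLE_DECLARE(int udp_v6_foo(struct sk_buff *skb));
+ *	INDIRECT_CALLABLE_DECLARE(int udp_v4_foo(struct sk_buff *skb));
+ *
+ *	ret = INDIRECT_CALL_INET(pt->handler, udp_v6_foo, udp_v4_foo, skb);
+ */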
+
+#endif
diff --git a/include/linux/init.h b/include/linux/init.h
index 9c2aba1dbabf..5255069f5a9f 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -146,7 +146,6 @@ extern unsigned int reset_devices;
/* used by init/main.c */
void setup_arch(char **);
void prepare_namespace(void);
-void __init load_default_modules(void);
int __init init_rootfs(void);
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 1d6711c28271..c672f34235e7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -247,10 +247,23 @@ struct irq_affinity_notify {
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
* the MSI(-X) vector space
+ * @nr_sets: Number of entries in the *sets array
+ * @sets: Array holding the size of each affinitized interrupt set
*/
struct irq_affinity {
int pre_vectors;
int post_vectors;
+ int nr_sets;
+ int *sets;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask: cpumask to hold the affinity assignment
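+ * @is_managed: set when the interrupt affinity is managed by the kernel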
+ */
+struct irq_affinity_desc {
+ struct cpumask mask;
+ unsigned int is_managed : 1;
};
#if defined(CONFIG_SMP)
@@ -299,7 +312,9 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+
int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -333,7 +348,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
return 0;
}
-static inline struct cpumask *
+static inline struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
{
return NULL;
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 9e30ed6443db..e9bfe6972aed 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -71,6 +71,19 @@ static inline int task_nice_ioclass(struct task_struct *task)
}
/*
+ * If the calling process has set an I/O priority, use that. Otherwise, return
+ * the default I/O priority.
+ */
+static inline int get_current_ioprio(void)
+{
+ struct io_context *ioc = current->io_context;
+
+ if (ioc)
+ return ioc->ioprio;
+ return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+}
+
+/*
* For inheritance, return the highest of the two given priorities
*/
extern int ioprio_best(unsigned short aprio, unsigned short bprio);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c9bffda04a45..def2b2aac8b1 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -27,6 +27,7 @@
struct seq_file;
struct module;
struct msi_msg;
+struct irq_affinity_desc;
enum irqchip_irq_state;
/*
@@ -834,11 +835,12 @@ struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
- struct module *owner, const struct cpumask *affinity);
+ struct module *owner,
+ const struct irq_affinity_desc *affinity);
int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
unsigned int cnt, int node, struct module *owner,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node) \
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
index 630a57e55db6..4500d453a63e 100644
--- a/include/linux/irq_sim.h
+++ b/include/linux/irq_sim.h
@@ -16,7 +16,7 @@
struct irq_sim_work_ctx {
struct irq_work work;
- int irq;
+ unsigned long *pending;
};
struct irq_sim_irq_ctx {
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 89c34b200671..950e4b2458f0 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -19,7 +19,7 @@
* the association between their DT compatible string and their
* initialization function.
*
- * @name: name that must be unique accross all IRQCHIP_DECLARE of the
+ * @name: name that must be unique across all IRQCHIP_DECLARE of the
* same file.
* @compstr: compatible string of the irqchip driver
* @fn: initialization function
@@ -30,7 +30,7 @@
* This macro must be used by the different irqchip drivers to declare
* the association between their version and their initialization function.
*
- * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the
+ * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE of the
* same file.
* @subtable: Subtable to be identified in MADT
* @validate: Function to be called on that subtable to check its validity.
diff --git a/include/linux/irqchip/irq-madera.h b/include/linux/irqchip/irq-madera.h
new file mode 100644
index 000000000000..1160fa3769ae
--- /dev/null
+++ b/include/linux/irqchip/irq-madera.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interrupt support for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2016-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef IRQCHIP_MADERA_H
+#define IRQCHIP_MADERA_H
+
+#include <linux/interrupt.h>
+#include <linux/mfd/madera/core.h>
+
+#define MADERA_IRQ_FLL1_LOCK 0
+#define MADERA_IRQ_FLL2_LOCK 1
+#define MADERA_IRQ_FLL3_LOCK 2
+#define MADERA_IRQ_FLLAO_LOCK 3
+#define MADERA_IRQ_CLK_SYS_ERR 4
+#define MADERA_IRQ_CLK_ASYNC_ERR 5
+#define MADERA_IRQ_CLK_DSP_ERR 6
+#define MADERA_IRQ_HPDET 7
+#define MADERA_IRQ_MICDET1 8
+#define MADERA_IRQ_MICDET2 9
+#define MADERA_IRQ_JD1_RISE 10
+#define MADERA_IRQ_JD1_FALL 11
+#define MADERA_IRQ_JD2_RISE 12
+#define MADERA_IRQ_JD2_FALL 13
+#define MADERA_IRQ_MICD_CLAMP_RISE 14
+#define MADERA_IRQ_MICD_CLAMP_FALL 15
+#define MADERA_IRQ_DRC2_SIG_DET 16
+#define MADERA_IRQ_DRC1_SIG_DET 17
+#define MADERA_IRQ_ASRC1_IN1_LOCK 18
+#define MADERA_IRQ_ASRC1_IN2_LOCK 19
+#define MADERA_IRQ_ASRC2_IN1_LOCK 20
+#define MADERA_IRQ_ASRC2_IN2_LOCK 21
+#define MADERA_IRQ_DSP_IRQ1 22
+#define MADERA_IRQ_DSP_IRQ2 23
+#define MADERA_IRQ_DSP_IRQ3 24
+#define MADERA_IRQ_DSP_IRQ4 25
+#define MADERA_IRQ_DSP_IRQ5 26
+#define MADERA_IRQ_DSP_IRQ6 27
+#define MADERA_IRQ_DSP_IRQ7 28
+#define MADERA_IRQ_DSP_IRQ8 29
+#define MADERA_IRQ_DSP_IRQ9 30
+#define MADERA_IRQ_DSP_IRQ10 31
+#define MADERA_IRQ_DSP_IRQ11 32
+#define MADERA_IRQ_DSP_IRQ12 33
+#define MADERA_IRQ_DSP_IRQ13 34
+#define MADERA_IRQ_DSP_IRQ14 35
+#define MADERA_IRQ_DSP_IRQ15 36
+#define MADERA_IRQ_DSP_IRQ16 37
+#define MADERA_IRQ_HP1L_SC 38
+#define MADERA_IRQ_HP1R_SC 39
+#define MADERA_IRQ_HP2L_SC 40
+#define MADERA_IRQ_HP2R_SC 41
+#define MADERA_IRQ_HP3L_SC 42
+#define MADERA_IRQ_HP3R_SC 43
+#define MADERA_IRQ_SPKOUTL_SC 44
+#define MADERA_IRQ_SPKOUTR_SC 45
+#define MADERA_IRQ_HP1L_ENABLE_DONE 46
+#define MADERA_IRQ_HP1R_ENABLE_DONE 47
+#define MADERA_IRQ_HP2L_ENABLE_DONE 48
+#define MADERA_IRQ_HP2R_ENABLE_DONE 49
+#define MADERA_IRQ_HP3L_ENABLE_DONE 50
+#define MADERA_IRQ_HP3R_ENABLE_DONE 51
+#define MADERA_IRQ_SPKOUTL_ENABLE_DONE 52
+#define MADERA_IRQ_SPKOUTR_ENABLE_DONE 53
+#define MADERA_IRQ_SPK_SHUTDOWN 54
+#define MADERA_IRQ_SPK_OVERHEAT 55
+#define MADERA_IRQ_SPK_OVERHEAT_WARN 56
+#define MADERA_IRQ_GPIO1 57
+#define MADERA_IRQ_GPIO2 58
+#define MADERA_IRQ_GPIO3 59
+#define MADERA_IRQ_GPIO4 60
+#define MADERA_IRQ_GPIO5 61
+#define MADERA_IRQ_GPIO6 62
+#define MADERA_IRQ_GPIO7 63
+#define MADERA_IRQ_GPIO8 64
+#define MADERA_IRQ_DSP1_BUS_ERR 65
+#define MADERA_IRQ_DSP2_BUS_ERR 66
+#define MADERA_IRQ_DSP3_BUS_ERR 67
+#define MADERA_IRQ_DSP4_BUS_ERR 68
+#define MADERA_IRQ_DSP5_BUS_ERR 69
+#define MADERA_IRQ_DSP6_BUS_ERR 70
+#define MADERA_IRQ_DSP7_BUS_ERR 71
+
+#define MADERA_NUM_IRQ 72
+
+/*
+ * These wrapper functions are for use by other child drivers of the
+ * same parent MFD.
+ */
+static inline int madera_get_irq_mapping(struct madera *madera, int irq)
+{
+ if (!madera->irq_dev)
+ return -ENODEV;
+
+ return regmap_irq_get_virq(madera->irq_data, irq);
+}
+
+static inline int madera_request_irq(struct madera *madera, int irq,
+ const char *name,
+ irq_handler_t handler, void *data)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return irq;
+
+ return request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT, name,
+ data);
+}
+
+static inline void madera_free_irq(struct madera *madera, int irq, void *data)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+
+static inline int madera_set_irq_wake(struct madera *madera, int irq, int on)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return irq;
+
+ return irq_set_irq_wake(irq, on);
+}
+
+#endif
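
For orientation, a minimal sketch of how a child driver of the same Madera MFD might consume the wrappers above; the probe function, handler and driver naming are assumptions, not part of the patch:

#include <linux/platform_device.h>
#include <linux/irqchip/irq-madera.h>

static irqreturn_t example_hpdet_handler(int irq, void *data)
{
	struct madera *madera = data;

	dev_dbg(madera->dev, "headphone detect interrupt\n");
	return IRQ_HANDLED;
}

static int example_child_probe(struct platform_device *pdev)
{
	/* MFD children find the shared struct madera via their parent */
	struct madera *madera = dev_get_drvdata(pdev->dev.parent);
	int ret;

	/* Map the chip-level IRQ through the parent's regmap-irq domain */
	ret = madera_request_irq(madera, MADERA_IRQ_HPDET, "HPDET",
				 example_hpdet_handler, madera);
	if (ret)
		return ret;

	/* Optionally allow this interrupt to wake the system */
	madera_set_irq_wake(madera, MADERA_IRQ_HPDET, 1);

	return 0;
}
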
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 068aa46f0d55..35965f41d7be 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -43,6 +43,7 @@ struct irq_chip;
struct irq_data;
struct cpumask;
struct seq_file;
+struct irq_affinity_desc;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
@@ -266,7 +267,7 @@ extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
{
@@ -449,7 +450,8 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par
extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
- bool realloc, const struct cpumask *affinity);
+ bool realloc,
+ const struct irq_affinity_desc *affinity);
extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index b708e5169d1d..0f919d5fe84f 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -575,6 +575,7 @@ struct transaction_s
enum {
T_RUNNING,
T_LOCKED,
+ T_SWITCH,
T_FLUSH,
T_COMMIT,
T_COMMIT_DFLUSH,
@@ -662,13 +663,13 @@ struct transaction_s
/*
* Number of outstanding updates running on this transaction
- * [t_handle_lock]
+ * [none]
*/
atomic_t t_updates;
/*
* Number of buffers reserved for use by all handles in this transaction
- * handle but not yet modified. [t_handle_lock]
+ * handle but not yet modified. [none]
*/
atomic_t t_outstanding_credits;
@@ -690,7 +691,7 @@ struct transaction_s
ktime_t t_start_time;
/*
- * How many handles used this transaction? [t_handle_lock]
+ * How many handles used this transaction? [none]
*/
atomic_t t_handle_count;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 9e4e638fb505..b9b1bc5f9669 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -143,6 +143,15 @@ extern const struct kexec_file_ops * const kexec_file_loaders[];
int kexec_image_probe_default(struct kimage *image, void *buf,
unsigned long buf_len);
+int kexec_image_post_load_cleanup_default(struct kimage *image);
+
+/*
+ * If kexec_buf.mem is set to this value, kexec_locate_mem_hole()
+ * will try to allocate free memory. Arch may overwrite it.
+ */
+#ifndef KEXEC_BUF_MEM_UNKNOWN
+#define KEXEC_BUF_MEM_UNKNOWN 0
+#endif
/**
* struct kexec_buf - parameters for finding a place for a buffer in memory
@@ -174,6 +183,7 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+void * __weak arch_kexec_kernel_image_load(struct kimage *image);
int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
@@ -183,8 +193,6 @@ int __weak arch_kexec_apply_relocations(struct purgatory_info *pi,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab);
-int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(struct resource *, void *));
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
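
A short sketch of the intended use of KEXEC_BUF_MEM_UNKNOWN, assuming an arch kexec_file loader; the staged buffer and the function name are illustrative:

#include <linux/kexec.h>

/*
 * Illustrative only: stage a blob for kexec_file_load(). Leaving .mem at
 * KEXEC_BUF_MEM_UNKNOWN tells kexec_add_buffer() (via
 * kexec_locate_mem_hole()) to pick the destination address itself.
 */
static int example_stage_blob(struct kimage *image, void *buf,
			      unsigned long size)
{
	struct kexec_buf kbuf = {
		.image     = image,
		.buffer    = buf,
		.bufsz     = size,
		.memsz     = size,
		.buf_align = PAGE_SIZE,
		.mem       = KEXEC_BUF_MEM_UNKNOWN,
		.top_down  = true,
	};

	return kexec_add_buffer(&kbuf);
}
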
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e909413e4e38..e07e91daaacc 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -242,10 +242,13 @@ extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
+extern int arch_populate_kprobe_blacklist(void);
extern bool arch_kprobe_on_func_entry(unsigned long offset);
extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
+extern int kprobe_add_ksym_blacklist(unsigned long entry);
+extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end);
struct kprobe_insn_cache {
struct mutex mutex;
@@ -379,6 +382,9 @@ int enable_kprobe(struct kprobe *kp);
void dump_kprobe(struct kprobe *kp);
+void *alloc_insn_page(void);
+void free_insn_page(void *page);
+
#else /* !CONFIG_KPROBES: */
static inline int kprobes_built_in(void)
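
As a sketch of how the new blacklist helpers fit together, an architecture could implement arch_populate_kprobe_blacklist() roughly as below; the use of the __entry_text_* linker symbols is an assumption for illustration only:

#include <linux/kprobes.h>
#include <asm/sections.h>

/* Hypothetical arch hook: blacklist the low-level entry text wholesale. */
int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					 (unsigned long)__entry_text_end);
}
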
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c926698040e0..c38cc5eb7e73 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -449,6 +449,7 @@ struct kvm {
#endif
long tlbs_dirty;
struct list_head devices;
+ bool manual_dirty_log_protect;
struct dentry *debugfs_dentry;
struct kvm_stat_data **debugfs_stat_data;
struct srcu_struct srcu;
@@ -694,7 +695,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len);
+ void *data, unsigned int offset,
+ unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
@@ -753,7 +755,9 @@ int kvm_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log, int *is_dirty);
int kvm_get_dirty_log_protect(struct kvm *kvm,
- struct kvm_dirty_log *log, bool *is_dirty);
+ struct kvm_dirty_log *log, bool *flush);
+int kvm_clear_dirty_log_protect(struct kvm *kvm,
+ struct kvm_clear_dirty_log *log, bool *flush);
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
@@ -762,9 +766,13 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log);
+int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
+ struct kvm_clear_dirty_log *log);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status);
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 7393a316d9fa..5263f87e1d2c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -51,6 +51,7 @@ struct led_classdev {
#define LED_PANIC_INDICATOR BIT(20)
#define LED_BRIGHT_HW_CHANGED BIT(21)
#define LED_RETAIN_AT_SHUTDOWN BIT(22)
+#define LED_INIT_DEFAULT_TRIGGER BIT(23)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
@@ -487,4 +488,24 @@ struct led_pattern {
int brightness;
};
+enum led_audio {
+ LED_AUDIO_MUTE, /* master mute LED */
+ LED_AUDIO_MICMUTE, /* mic mute LED */
+ NUM_AUDIO_LEDS
+};
+
+#if IS_ENABLED(CONFIG_LEDS_TRIGGER_AUDIO)
+enum led_brightness ledtrig_audio_get(enum led_audio type);
+void ledtrig_audio_set(enum led_audio type, enum led_brightness state);
+#else
+static inline enum led_brightness ledtrig_audio_get(enum led_audio type)
+{
+ return LED_OFF;
+}
+static inline void ledtrig_audio_set(enum led_audio type,
+ enum led_brightness state)
+{
+}
+#endif
+
#endif /* __LINUX_LEDS_H_INCLUDED */
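
A minimal sketch of a consumer of the new audio LED trigger hooks, assuming a codec driver that mirrors its mute control; my_codec_set_mute() is hypothetical:

#include <linux/leds.h>

static void my_codec_set_mute(bool muted)
{
	/* The driver only reports state; the trigger updates bound LEDs. */
	ledtrig_audio_set(LED_AUDIO_MUTE, muted ? LED_ON : LED_OFF);
}
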
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2fdeac1a420d..5d865a5d5cdc 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -90,7 +90,7 @@ typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
dma_addr_t *);
@@ -357,6 +357,7 @@ struct nvm_geo {
u32 clba; /* sectors per chunk */
u16 csecs; /* sector size */
u16 sos; /* out-of-band area size */
+ bool ext; /* metadata in extended data buffer */
/* device write constrains */
u32 ws_min; /* minimum write size */
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 7c47b1a471d4..7e020782ade2 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -79,6 +79,12 @@
#define ALIGN __ALIGN
#define ALIGN_STR __ALIGN_STR
+#ifndef GLOBAL
+#define GLOBAL(name) \
+ .globl name ASM_NL \
+ name:
+#endif
+
#ifndef ENTRY
#define ENTRY(name) \
.globl name ASM_NL \
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index 22443d7fb5cd..a99c58866860 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -57,6 +57,15 @@ static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
__clear_bit(nr, addr);
}
+static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
+ int set)
+{
+ if (set)
+ linkmode_set_bit(nr, addr);
+ else
+ linkmode_clear_bit(nr, addr);
+}
+
static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
{
__change_bit(nr, addr);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1fd82ff99c65..c5335df2372f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -97,8 +97,6 @@ struct lock_class {
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
- unsigned int version;
-
int name_version;
const char *name;
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index bac395f1d00a..5228c62af416 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr);
-struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
- unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 517e60eecbcb..1293695245df 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -35,7 +35,7 @@ enum axp20x_variants {
#define AXP152_ALDO_OP_MODE 0x13
#define AXP152_LDO0_CTRL 0x15
#define AXP152_DCDC2_V_OUT 0x23
-#define AXP152_DCDC2_V_SCAL 0x25
+#define AXP152_DCDC2_V_RAMP 0x25
#define AXP152_DCDC1_V_OUT 0x26
#define AXP152_DCDC3_V_OUT 0x27
#define AXP152_ALDO12_V_OUT 0x28
@@ -53,7 +53,7 @@ enum axp20x_variants {
#define AXP20X_USB_OTG_STATUS 0x02
#define AXP20X_PWR_OUT_CTRL 0x12
#define AXP20X_DCDC2_V_OUT 0x23
-#define AXP20X_DCDC2_LDO3_V_SCAL 0x25
+#define AXP20X_DCDC2_LDO3_V_RAMP 0x25
#define AXP20X_DCDC3_V_OUT 0x27
#define AXP20X_LDO24_V_OUT 0x28
#define AXP20X_LDO3_V_OUT 0x29
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index b19c370fe81a..f346167c0e00 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -20,9 +20,6 @@
#define WM8994_NUM_AIF 3
struct wm8994_ldo_pdata {
- /** GPIOs to enable regulator, 0 or less if not available */
- int enable;
-
const struct regulator_init_data *init_data;
};
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 2da85b02e1c0..6fee8b1a4400 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -209,7 +209,7 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
/**
* linkmode_adv_to_mii_ctrl1000_t
- * advertising: the linkmode advertisement settings
+ * @advertising: the linkmode advertisement settings
*
* A small helper function that translates linkmode advertisement
* settings to phy autonegotiation advertisements for the
@@ -288,6 +288,25 @@ static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
}
/**
+ * mii_stat1000_mod_linkmode_lpa_t
+ * @advertising: target the linkmode advertisement settings
+ * @adv: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000 bits, when in
+ * 1000Base-T mode, to linkmode advertisement settings. Other bits in
+ * advertising are not changed.
+ */
+static inline void mii_stat1000_mod_linkmode_lpa_t(unsigned long *advertising,
+ u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising, lpa & LPA_1000HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising, lpa & LPA_1000FULL);
+}
+
+/**
* ethtool_adv_to_mii_adv_x
* @ethadv: the ethtool advertisement settings
*
@@ -354,50 +373,104 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
}
/**
+ * mii_adv_mod_linkmode_adv_t
+ * @advertising:pointer to destination link mode.
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits to
+ * linkmode advertisement settings. Leaves other bits unchanged.
+ */
+static inline void mii_adv_mod_linkmode_adv_t(unsigned long *advertising,
+ u32 adv)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ advertising, adv & ADVERTISE_10HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising, adv & ADVERTISE_10FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ advertising, adv & ADVERTISE_100HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising, adv & ADVERTISE_100FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ adv & ADVERTISE_PAUSE_CAP);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising, adv & ADVERTISE_PAUSE_ASYM);
+}
+
+/**
* mii_adv_to_linkmode_adv_t
* @advertising:pointer to destination link mode.
* @adv: value of the MII_ADVERTISE register
*
* A small helper function that translates MII_ADVERTISE bits
- * to linkmode advertisement settings.
+ * to linkmode advertisement settings. Clears the old value
+ * of advertising.
*/
static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising,
u32 adv)
{
linkmode_zero(advertising);
- if (adv & ADVERTISE_10HALF)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- advertising);
- if (adv & ADVERTISE_10FULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- advertising);
- if (adv & ADVERTISE_100HALF)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- advertising);
- if (adv & ADVERTISE_100FULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- advertising);
- if (adv & ADVERTISE_PAUSE_CAP)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
- if (adv & ADVERTISE_PAUSE_ASYM)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
+ mii_adv_mod_linkmode_adv_t(advertising, adv);
+}
+
+/**
+ * mii_lpa_to_linkmode_lpa_t
+ * @adv: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to linkmode
+ * LP advertisement settings. Clears the old value of lp_advertising.
+ */
+static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ mii_adv_to_linkmode_adv_t(lp_advertising, lpa);
+
+ if (lpa & LPA_LPACK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lp_advertising);
+
+}
+
+/**
+ * mii_lpa_mod_linkmode_lpa_t
+ * @adv: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to linkmode
+ * LP advertisement settings. Leaves other bits unchanged.
+ */
+static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ mii_adv_mod_linkmode_adv_t(lp_advertising, lpa);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lp_advertising, lpa & LPA_LPACK);
}
/**
- * ethtool_adv_to_lcl_adv_t
- * @advertising:pointer to ethtool advertising
+ * linkmode_adv_to_lcl_adv_t
+ * @advertising:pointer to linkmode advertising
*
- * A small helper function that translates ethtool advertising to LVL
+ * A small helper function that translates linkmode advertising to LVL
* pause capabilities.
*/
-static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising)
+static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
{
u32 lcl_adv = 0;
- if (advertising & ADVERTISED_Pause)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising))
lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (advertising & ADVERTISED_Asym_Pause)
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+			      advertising))
lcl_adv |= ADVERTISE_PAUSE_ASYM;
return lcl_adv;
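
To see the new *_mod_* helpers in context, here is a sketch of a PHY driver path, assuming the linkmode-converted struct phy_device from the same series; the function names are illustrative:

#include <linux/mii.h>
#include <linux/phy.h>

/* Fold the link partner's base-page ability into lp_advertising in place. */
static int example_read_lpa(struct phy_device *phydev)
{
	int lpa = phy_read(phydev, MII_LPA);

	if (lpa < 0)
		return lpa;

	mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
	return 0;
}

/* Derive the MII pause bits to advertise from the linkmode bitmap. */
static u32 example_local_pause_adv(struct phy_device *phydev)
{
	return linkmode_adv_to_lcl_adv_t(phydev->advertising);
}
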
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index dca6ab4eaa99..36e412c3d657 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -226,6 +226,7 @@ enum {
MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38,
MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39,
+ MLX4_DEV_CAP_FLAG2_SW_CQ_INIT = 1ULL << 40,
};
enum {
@@ -1136,7 +1137,8 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- unsigned vector, int collapsed, int timestamp_en);
+ unsigned int vector, int collapsed, int timestamp_en,
+ void *buf_addr, bool user_cq);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 flags, u8 usage);
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 31a750570c38..612c8c2f2466 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -60,7 +60,7 @@ struct mlx5_core_cq {
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
- struct mlx5_eq *eq;
+ struct mlx5_eq_comp *eq;
u16 uid;
};
@@ -125,9 +125,9 @@ struct mlx5_cq_modify_params {
};
enum {
- CQE_SIZE_64 = 0,
- CQE_SIZE_128 = 1,
- CQE_SIZE_128_PAD = 2,
+ CQE_STRIDE_64 = 0,
+ CQE_STRIDE_128 = 1,
+ CQE_STRIDE_128_PAD = 2,
};
#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
@@ -135,8 +135,8 @@ enum {
static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
{
- return padding_128_en ? CQE_SIZE_128_PAD :
- size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+ return padding_128_en ? CQE_STRIDE_128_PAD :
+ size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
}
static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b4c0457fbebd..8c4a820bd4c1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -212,6 +212,13 @@ enum {
MLX5_PFAULT_SUBTYPE_RDMA = 1,
};
+enum wqe_page_fault_type {
+ MLX5_WQE_PF_TYPE_RMP = 0,
+ MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
+ MLX5_WQE_PF_TYPE_RESP = 2,
+ MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
+};
+
enum {
MLX5_PERM_LOCAL_READ = 1 << 2,
MLX5_PERM_LOCAL_WRITE = 1 << 3,
@@ -294,9 +301,15 @@ enum {
MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
+/* mlx5 components can subscribe to any one of these events via
+ * mlx5_eq_notifier_register API.
+ */
enum mlx5_event {
+ /* Special value to subscribe to any event */
+ MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0,
+ /* HW events enum start: comp events are not subscribable */
MLX5_EVENT_TYPE_COMP = 0x0,
-
+ /* HW Async events enum start: subscribable events */
MLX5_EVENT_TYPE_PATH_MIG = 0x01,
MLX5_EVENT_TYPE_COMM_EST = 0x02,
MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
@@ -317,6 +330,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
+ MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -334,6 +348,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
+
+ MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1,
};
enum {
@@ -405,6 +421,7 @@ enum {
MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
MLX5_OPCODE_BIND_MW = 0x18,
MLX5_OPCODE_CONFIG_CMD = 0x1f,
+ MLX5_OPCODE_ENHANCED_MPSW = 0x29,
MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
MLX5_RECV_OPCODE_SEND = 0x01,
@@ -766,6 +783,11 @@ static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
return (cqe->op_own >> 2) & 0x3;
}
+static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
+{
+ return cqe->op_own >> 4;
+}
+
static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
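
A small illustration of the new get_cqe_opcode() helper in a completion handler; the opcode values come from the existing MLX5_CQE_* enum, everything else is an assumption:

#include <linux/mlx5/device.h>

static void example_handle_cqe(struct mlx5_cqe64 *cqe)
{
	switch (get_cqe_opcode(cqe)) {
	case MLX5_CQE_REQ:
	case MLX5_CQE_RESP_SEND:
		/* good completion */
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		/* error completion */
		break;
	default:
		break;
	}
}
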
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index aa5963b5d38e..4d16ba04790e 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -46,10 +46,11 @@
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
+#include <linux/notifier.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/srq.h>
+#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
@@ -85,18 +86,6 @@ enum {
};
enum {
- MLX5_EQ_VEC_PAGES = 0,
- MLX5_EQ_VEC_CMD = 1,
- MLX5_EQ_VEC_ASYNC = 2,
- MLX5_EQ_VEC_PFAULT = 3,
- MLX5_EQ_VEC_COMP_BASE,
-};
-
-enum {
- MLX5_MAX_IRQ_NAME = 32
-};
-
-enum {
MLX5_ATOMIC_MODE_OFFSET = 16,
MLX5_ATOMIC_MODE_IB_COMP = 1,
MLX5_ATOMIC_MODE_CX = 2,
@@ -205,16 +194,7 @@ struct mlx5_rsc_debug {
};
enum mlx5_dev_event {
- MLX5_DEV_EVENT_SYS_ERROR,
- MLX5_DEV_EVENT_PORT_UP,
- MLX5_DEV_EVENT_PORT_DOWN,
- MLX5_DEV_EVENT_PORT_INITIALIZED,
- MLX5_DEV_EVENT_LID_CHANGE,
- MLX5_DEV_EVENT_PKEY_CHANGE,
- MLX5_DEV_EVENT_GUID_CHANGE,
- MLX5_DEV_EVENT_CLIENT_REREG,
- MLX5_DEV_EVENT_PPS,
- MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
+ MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
};
enum mlx5_port_status {
@@ -222,14 +202,6 @@ enum mlx5_port_status {
MLX5_PORT_DOWN = 2,
};
-enum mlx5_eq_type {
- MLX5_EQ_TYPE_COMP,
- MLX5_EQ_TYPE_ASYNC,
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- MLX5_EQ_TYPE_PF,
-#endif
-};
-
struct mlx5_bfreg_info {
u32 *sys_pages;
int num_low_latency_bfregs;
@@ -297,6 +269,8 @@ struct mlx5_cmd_stats {
};
struct mlx5_cmd {
+ struct mlx5_nb nb;
+
void *cmd_alloc_buf;
dma_addr_t alloc_dma;
int alloc_size;
@@ -366,51 +340,6 @@ struct mlx5_frag_buf_ctrl {
u8 log_frag_strides;
};
-struct mlx5_eq_tasklet {
- struct list_head list;
- struct list_head process_list;
- struct tasklet_struct task;
- /* lock on completion tasklet list */
- spinlock_t lock;
-};
-
-struct mlx5_eq_pagefault {
- struct work_struct work;
- /* Pagefaults lock */
- spinlock_t lock;
- struct workqueue_struct *wq;
- mempool_t *pool;
-};
-
-struct mlx5_cq_table {
- /* protect radix tree */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
-
-struct mlx5_eq {
- struct mlx5_core_dev *dev;
- struct mlx5_cq_table cq_table;
- __be32 __iomem *doorbell;
- u32 cons_index;
- struct mlx5_frag_buf buf;
- int size;
- unsigned int irqn;
- u8 eqn;
- int nent;
- u64 mask;
- struct list_head list;
- int index;
- struct mlx5_rsc_debug *dbg;
- enum mlx5_eq_type type;
- union {
- struct mlx5_eq_tasklet tasklet_ctx;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct mlx5_eq_pagefault pf_ctx;
-#endif
- };
-};
-
struct mlx5_core_psv {
u32 psv_idx;
struct psv_layout {
@@ -463,36 +392,6 @@ struct mlx5_core_rsc_common {
struct completion free;
};
-struct mlx5_core_srq {
- struct mlx5_core_rsc_common common; /* must be first */
- u32 srqn;
- int max;
- size_t max_gs;
- size_t max_avail_gather;
- int wqe_shift;
- void (*event) (struct mlx5_core_srq *, enum mlx5_event);
-
- atomic_t refcount;
- struct completion free;
- u16 uid;
-};
-
-struct mlx5_eq_table {
- void __iomem *update_ci;
- void __iomem *update_arm_ci;
- struct list_head comp_eqs_list;
- struct mlx5_eq pages_eq;
- struct mlx5_eq async_eq;
- struct mlx5_eq cmd_eq;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct mlx5_eq pfault_eq;
-#endif
- int num_comp_vectors;
- /* protect EQs list
- */
- spinlock_t lock;
-};
-
struct mlx5_uars_page {
void __iomem *map;
bool wc;
@@ -542,13 +441,8 @@ struct mlx5_core_health {
};
struct mlx5_qp_table {
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
+ struct notifier_block nb;
-struct mlx5_srq_table {
/* protect radix tree
*/
spinlock_t lock;
@@ -575,11 +469,6 @@ struct mlx5_core_sriov {
int enabled_vfs;
};
-struct mlx5_irq_info {
- cpumask_var_t mask;
- char name[MLX5_MAX_IRQ_NAME];
-};
-
struct mlx5_fc_stats {
spinlock_t counters_idr_lock; /* protects counters_idr */
struct idr counters_idr;
@@ -593,10 +482,12 @@ struct mlx5_fc_stats {
unsigned long sampling_interval; /* jiffies */
};
+struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
-struct mlx5_pagefault;
+struct mlx5_devcom;
+struct mlx5_eq_table;
struct mlx5_rate_limit {
u32 rate;
@@ -619,37 +510,12 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry;
};
-enum port_module_event_status_type {
- MLX5_MODULE_STATUS_PLUGGED = 0x1,
- MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
- MLX5_MODULE_STATUS_ERROR = 0x3,
- MLX5_MODULE_STATUS_NUM = 0x3,
-};
-
-enum port_module_event_error_type {
- MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
- MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
- MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
- MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
- MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
- MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
- MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN,
- MLX5_MODULE_EVENT_ERROR_NUM,
-};
-
-struct mlx5_port_module_event_stats {
- u64 status_counters[MLX5_MODULE_STATUS_NUM];
- u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
-};
-
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
- struct mlx5_eq_table eq_table;
- struct mlx5_irq_info *irq_info;
+ struct mlx5_eq_table *eq_table;
/* pages stuff */
+ struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
struct rb_root page_root;
int fw_pages;
@@ -659,8 +525,6 @@ struct mlx5_priv {
struct mlx5_core_health health;
- struct mlx5_srq_table srq_table;
-
/* start: qp staff */
struct mlx5_qp_table qp_table;
struct dentry *qp_debugfs;
@@ -690,28 +554,18 @@ struct mlx5_priv {
struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
-
- struct list_head waiting_events_list;
- bool is_accum_events;
+ struct mlx5_events *events;
struct mlx5_flow_steering *steering;
struct mlx5_mpfs *mpfs;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
+ struct mlx5_devcom *devcom;
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
- struct mlx5_port_module_event_stats pme_stats;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- void (*pfault)(struct mlx5_core_dev *dev,
- void *context,
- struct mlx5_pagefault *pfault);
- void *pfault_ctx;
- struct srcu_struct pfault_srcu;
-#endif
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
};
@@ -736,44 +590,6 @@ enum mlx5_pagefault_type_flags {
MLX5_PFAULT_RDMA = 1 << 2,
};
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
- u32 bytes_committed;
- u32 token;
- u8 event_subtype;
- u8 type;
- union {
- /* Initiator or send message responder pagefault details. */
- struct {
- /* Received packet size, only valid for responders. */
- u32 packet_size;
- /*
- * Number of resource holding WQE, depends on type.
- */
- u32 wq_num;
- /*
- * WQE index. Refers to either the send queue or
- * receive queue, according to event_subtype.
- */
- u16 wqe_index;
- } wqe;
- /* RDMA responder pagefault details */
- struct {
- u32 r_key;
- /*
- * Received packet size, minimal size page fault
- * resolution required for forward progress.
- */
- u32 packet_size;
- u32 rdma_op_len;
- u64 rdma_va;
- } rdma;
- };
-
- struct mlx5_eq *eq;
- struct work_struct work;
-};
-
struct mlx5_td {
struct list_head tirs_list;
u32 tdn;
@@ -803,6 +619,8 @@ struct mlx5_pps {
};
struct mlx5_clock {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_nb pps_nb;
seqlock_t lock;
struct cyclecounter cycles;
struct timecounter tc;
@@ -810,7 +628,6 @@ struct mlx5_clock {
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
- struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
@@ -843,9 +660,6 @@ struct mlx5_core_dev {
/* sync interface state */
struct mutex intf_state_mutex;
unsigned long intf_state;
- void (*event) (struct mlx5_core_dev *dev,
- enum mlx5_dev_event event,
- unsigned long param);
struct mlx5_priv priv;
struct mlx5_profile *profile;
atomic_t num_qps;
@@ -859,9 +673,6 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct page *clock_info_page;
@@ -1070,13 +881,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in);
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out);
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
@@ -1095,9 +899,9 @@ int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
@@ -1108,9 +912,6 @@ void mlx5_unregister_debugfs(void);
void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1155,6 +956,9 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
+unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
+struct cpumask *
+mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
@@ -1202,23 +1006,21 @@ struct mlx5_interface {
void (*remove)(struct mlx5_core_dev *dev, void *context);
int (*attach)(struct mlx5_core_dev *dev, void *context);
void (*detach)(struct mlx5_core_dev *dev, void *context);
- void (*event)(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param);
- void (*pfault)(struct mlx5_core_dev *dev,
- void *context,
- struct mlx5_pagefault *pfault);
- void * (*get_dev)(void *context);
int protocol;
struct list_head list;
};
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
@@ -1306,10 +1108,4 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
-static inline const struct cpumask *
-mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
-{
- return dev->priv.irq_info[vector].mask;
-}
-
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
new file mode 100644
index 000000000000..00045cc4ea11
--- /dev/null
+++ b/include/linux/mlx5/eq.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef MLX5_CORE_EQ_H
+#define MLX5_CORE_EQ_H
+
+enum {
+ MLX5_EQ_PAGEREQ_IDX = 0,
+ MLX5_EQ_CMD_IDX = 1,
+ MLX5_EQ_ASYNC_IDX = 2,
+ /* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */
+ MLX5_EQ_PFAULT_IDX = 3,
+ MLX5_EQ_MAX_ASYNC_EQS,
+ /* completion eqs vector indices start here */
+ MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS,
+};
+
+#define MLX5_NUM_CMD_EQE (32)
+#define MLX5_NUM_ASYNC_EQE (0x1000)
+#define MLX5_NUM_SPARE_EQE (0x80)
+
+struct mlx5_eq;
+struct mlx5_core_dev;
+
+struct mlx5_eq_param {
+ u8 index;
+ int nent;
+ u64 mask;
+ void *context;
+ irq_handler_t handler;
+};
+
+struct mlx5_eq *
+mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
+ struct mlx5_eq_param *param);
+int
+mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+
+struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
+void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
+
+/* The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create EQs with MLX5_NUM_SPARE_EQE extra entries,
+ * so we must update our consumer index at
+ * least that often.
+ *
+ * mlx5_eq_update_cc() must be called for every EQE processed in the EQ irq handler.
+ */
+static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
+{
+ if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
+ mlx5_eq_update_ci(eq, cc, 0);
+ cc = 0;
+ }
+ return cc;
+}
+
+struct mlx5_nb {
+ struct notifier_block nb;
+ u8 event_type;
+};
+
+#define mlx5_nb_cof(ptr, type, member) \
+ (container_of(container_of(ptr, struct mlx5_nb, nb), type, member))
+
+#define MLX5_NB_INIT(name, handler, event) do { \
+ (name)->nb.notifier_call = handler; \
+ (name)->event_type = MLX5_EVENT_TYPE_##event; \
+} while (0)
+
+#endif /* MLX5_CORE_EQ_H */
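
A sketch of the subscription pattern this header enables, assuming the mlx5_eq_notifier_register() API mentioned in the device.h comment earlier in this diff (its declaration lives in driver-internal headers); the my_events structure and handler are illustrative:

#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eq.h>

struct my_events {
	struct mlx5_nb port_nb;
	struct mlx5_core_dev *dev;
};

static int my_port_event(struct notifier_block *nb,
			 unsigned long type, void *data)
{
	struct my_events *ev = mlx5_nb_cof(nb, struct my_events, port_nb);
	struct mlx5_eqe *eqe = data;

	/* type and eqe->type are both MLX5_EVENT_TYPE_PORT_CHANGE here */
	dev_dbg(&ev->dev->pdev->dev, "port event subtype %d\n",
		eqe->sub_type);
	return NOTIFY_OK;
}

static void my_events_init(struct my_events *ev, struct mlx5_core_dev *dev)
{
	ev->dev = dev;
	MLX5_NB_INIT(&ev->port_nb, my_port_event, PORT_CHANGE);
	mlx5_eq_notifier_register(dev, &ev->port_nb);
}
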
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 5660f07d3be0..9df51da04621 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -86,6 +86,11 @@ struct mlx5_flow_spec {
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
};
+enum {
+ MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0),
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
+};
+
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
@@ -96,7 +101,8 @@ struct mlx5_flow_destination {
struct {
u16 num;
u16 vhca_id;
- bool vhca_id_valid;
+ u32 reformat_id;
+ u8 flags;
} vport;
};
};
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index dbff9ff28f2c..821b751485fb 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -161,6 +161,8 @@ enum {
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
+ MLX5_CMD_OP_SET_MONITOR_COUNTER = 0x774,
+ MLX5_CMD_OP_ARM_MONITOR_COUNTER = 0x775,
MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
@@ -349,7 +351,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reformat_l3_tunnel_to_l2[0x1];
u8 reformat_l2_to_l3_tunnel[0x1];
u8 reformat_and_modify_action[0x1];
- u8 reserved_at_14[0xb];
+ u8 reserved_at_15[0xb];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
@@ -421,6 +423,16 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
};
+struct mlx5_ifc_nvgre_key_bits {
+ u8 hi[0x18];
+ u8 lo[0x8];
+};
+
+union mlx5_ifc_gre_key_bits {
+ struct mlx5_ifc_nvgre_key_bits nvgre;
+ u8 key[0x20];
+};
+
struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_0[0x8];
u8 source_sqn[0x18];
@@ -442,8 +454,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
- u8 gre_key_h[0x18];
- u8 gre_key_l[0x8];
+ union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
u8 reserved_at_b8[0x8];
@@ -582,11 +593,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
};
struct mlx5_ifc_flow_table_eswitch_cap_bits {
- u8 reserved_at_0[0x1c];
- u8 fdb_multi_path_to_table[0x1];
- u8 reserved_at_1d[0x1];
+ u8 reserved_at_0[0x1a];
u8 multi_fdb_encap[0x1];
- u8 reserved_at_1e[0x1e1];
+ u8 reserved_at_1b[0x1];
+ u8 fdb_multi_path_to_table[0x1];
+ u8 reserved_at_1d[0x3];
+
+ u8 reserved_at_20[0x1e0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
@@ -597,20 +610,28 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_800[0x7800];
};
+enum {
+ MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
+ MLX5_COUNTER_FLOW_ESWITCH = 0x1,
+};
+
struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_strip[0x1];
u8 vport_cvlan_strip[0x1];
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x18];
+ u8 reserved_at_5[0x17];
+ u8 counter_eswitch_affinity[0x1];
u8 merged_eswitch[0x1];
u8 nic_vport_node_guid_modify[0x1];
u8 nic_vport_port_guid_modify[0x1];
u8 vxlan_encap_decap[0x1];
u8 nvgre_encap_decap[0x1];
- u8 reserved_at_22[0x9];
+ u8 reserved_at_22[0x1];
+ u8 log_max_fdb_encap_uplink[0x5];
+ u8 reserved_at_21[0x3];
u8 log_max_packet_reformat_context[0x5];
u8 reserved_2b[0x6];
u8 max_encap_header_size[0xa];
@@ -829,7 +850,7 @@ struct mlx5_ifc_vector_calc_cap_bits {
struct mlx5_ifc_calc_op calc2;
struct mlx5_ifc_calc_op calc3;
- u8 reserved_at_e0[0x720];
+ u8 reserved_at_c0[0x720];
};
enum {
@@ -883,6 +904,10 @@ enum {
MLX5_CAP_UMR_FENCE_NONE = 0x2,
};
+enum {
+ MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x30];
u8 vhca_id[0x10];
@@ -1043,7 +1068,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vector_calc[0x1];
u8 umr_ptr_rlky[0x1];
u8 imaicl[0x1];
- u8 reserved_at_232[0x4];
+ u8 qp_packet_based[0x1];
+ u8 reserved_at_233[0x3];
u8 qkv[0x1];
u8 pkv[0x1];
u8 set_deth_sqpn[0x1];
@@ -1193,7 +1219,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 num_vhca_ports[0x8];
u8 reserved_at_618[0x6];
u8 sw_owner_id[0x1];
- u8 reserved_at_61f[0x1e1];
+ u8 reserved_at_61f[0x1];
+
+ u8 max_num_of_monitor_counters[0x10];
+ u8 num_ppcnt_monitor_counters[0x10];
+
+ u8 reserved_at_640[0x10];
+ u8 num_q_monitor_counters[0x10];
+
+ u8 reserved_at_660[0x40];
+
+ u8 uctx_cap[0x20];
+
+ u8 reserved_at_6c0[0x140];
};
enum mlx5_flow_destination_type {
@@ -1209,8 +1247,10 @@ enum mlx5_flow_destination_type {
struct mlx5_ifc_dest_format_struct_bits {
u8 destination_type[0x8];
u8 destination_id[0x18];
+
u8 destination_eswitch_owner_vhca_id_valid[0x1];
- u8 reserved_at_21[0xf];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0xe];
u8 destination_eswitch_owner_vhca_id[0x10];
};
@@ -1220,6 +1260,14 @@ struct mlx5_ifc_flow_counter_list_bits {
u8 reserved_at_20[0x20];
};
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_struct_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
@@ -2249,7 +2297,8 @@ struct mlx5_ifc_qpc_bits {
u8 st[0x8];
u8 reserved_at_10[0x3];
u8 pm_state[0x2];
- u8 reserved_at_15[0x3];
+ u8 reserved_at_15[0x1];
+ u8 req_e2e_credit_mode[0x2];
u8 offload_type[0x4];
u8 end_padding_mode[0x2];
u8 reserved_at_1e[0x2];
@@ -2440,7 +2489,8 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_60[0x10];
u8 action[0x10];
- u8 reserved_at_80[0x8];
+ u8 extended_destination[0x1];
+ u8 reserved_at_80[0x7];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
@@ -2473,14 +2523,15 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 wq_signature[0x1];
u8 cont_srq[0x1];
- u8 dbr_umem_valid[0x1];
+ u8 reserved_at_22[0x1];
u8 rlky[0x1];
u8 basic_cyclic_rcv_wqe[0x1];
u8 log_rq_stride[0x3];
u8 xrcd[0x18];
u8 page_offset[0x6];
- u8 reserved_at_46[0x2];
+ u8 reserved_at_46[0x1];
+ u8 dbr_umem_valid[0x1];
u8 cqn[0x18];
u8 reserved_at_60[0x20];
@@ -3795,6 +3846,83 @@ enum {
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
};
+struct mlx5_ifc_arm_monitor_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_arm_monitor_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT = 0x0,
+ MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1,
+};
+
+enum mlx5_monitor_counter_ppcnt {
+ MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0x0,
+ MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0x1,
+ MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0x2,
+ MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3,
+ MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0x4,
+ MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0x5,
+};
+
+enum {
+ MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4,
+};
+
+struct mlx5_ifc_monitor_counter_output_bits {
+ u8 reserved_at_0[0x4];
+ u8 type[0x4];
+ u8 reserved_at_8[0x8];
+ u8 counter[0x10];
+
+ u8 counter_group_id[0x20];
+};
+
+#define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6)
+#define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1 (1)
+#define MLX5_CMD_SET_MONITOR_NUM_COUNTER (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\
+ MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1)
+
+struct mlx5_ifc_set_monitor_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 num_of_counters[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER];
+};
+
+struct mlx5_ifc_set_monitor_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_query_vport_state_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -4660,7 +4788,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
- MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -5566,7 +5694,7 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
struct mlx5_ifc_modify_nic_vport_field_select_bits {
u8 reserved_at_0[0x12];
u8 affiliation[0x1];
- u8 reserved_at_e[0x1];
+ u8 reserved_at_13[0x1];
u8 disable_uc_local_lb[0x1];
u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
@@ -6689,9 +6817,12 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
- u8 reserved_at_280[0x40];
+ u8 reserved_at_280[0x60];
+
u8 xrc_srq_umem_valid[0x1];
- u8 reserved_at_2c1[0x5bf];
+ u8 reserved_at_2e1[0x1f];
+
+ u8 reserved_at_300[0x580];
u8 pas[0][0x40];
};
@@ -8160,7 +8291,9 @@ struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
u8 port_access_reg_cap_mask_31_to_13[0x13];
u8 pbmc[0x1];
u8 pptb[0x1];
- u8 port_access_reg_cap_mask_10_to_0[0xb];
+ u8 port_access_reg_cap_mask_10_to_09[0x2];
+ u8 ppcnt[0x1];
+ u8 port_access_reg_cap_mask_07_to_00[0x8];
};
struct mlx5_ifc_pcam_reg_bits {
@@ -9024,7 +9157,7 @@ struct mlx5_ifc_dcbx_param_bits {
u8 dcbx_cee_cap[0x1];
u8 dcbx_ieee_cap[0x1];
u8 dcbx_standby_cap[0x1];
- u8 reserved_at_0[0x5];
+ u8 reserved_at_3[0x5];
u8 port_number[0x8];
u8 reserved_at_10[0xa];
u8 max_application_table_size[6];
@@ -9272,7 +9405,9 @@ struct mlx5_ifc_umem_bits {
struct mlx5_ifc_uctx_bits {
u8 modify_field_select[0x40];
- u8 reserved_at_40[0x1c0];
+ u8 cap[0x20];
+
+ u8 reserved_at_60[0x1a0];
};
struct mlx5_ifc_create_umem_in_bits {
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 34aed6032f86..bf4bc01ffb0c 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -107,9 +107,6 @@ enum mlx5e_connector_type {
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
-#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
-#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
-
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index fbe322c966bc..b26ea9077384 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -596,6 +596,11 @@ int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
int reset, void *out, int out_size);
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type);
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
+
static inline const char *mlx5_qp_type_str(int type)
{
switch (type) {
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
deleted file mode 100644
index 1b1f3c20c6a3..000000000000
--- a/include/linux/mlx5/srq.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_SRQ_H
-#define MLX5_SRQ_H
-
-#include <linux/mlx5/driver.h>
-
-enum {
- MLX5_SRQ_FLAG_ERR = (1 << 0),
- MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
- MLX5_SRQ_FLAG_RNDV = (1 << 2),
-};
-
-struct mlx5_srq_attr {
- u32 type;
- u32 flags;
- u32 log_size;
- u32 wqe_shift;
- u32 log_page_size;
- u32 wqe_cnt;
- u32 srqn;
- u32 xrcd;
- u32 page_offset;
- u32 cqn;
- u32 pd;
- u32 lwm;
- u32 user_index;
- u64 db_record;
- __be64 *pas;
- u32 tm_log_list_size;
- u32 tm_next_tag;
- u32 tm_hw_phase_cnt;
- u32 tm_sw_phase_cnt;
- u16 uid;
-};
-
-struct mlx5_core_dev;
-
-void mlx5_init_srq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
-
-#endif /* MLX5_SRQ_H */
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 7f5ca2cd3a32..a261d5528ff7 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -58,17 +58,6 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
int inlen);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
-int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn);
-int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
-int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
-int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn);
-int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fcf9cc9d535f..5411de93a363 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1744,11 +1744,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif
@@ -1768,11 +1772,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5ed8f6292a53..2c471a2c43fa 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -206,6 +206,11 @@ struct page {
#endif
} _struct_page_alignment;
+/*
+ * Used for sizing the vmemmap region on some architectures
+ */
+#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
+
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 4224902a8e22..4332199c71c2 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -42,6 +42,7 @@
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373
+#define SDIO_DEVICE_ID_CYPRESS_43012 43012
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 847705a6d0ec..077d797d1f60 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -314,7 +314,7 @@ enum zone_type {
* Architecture Limit
* ---------------------------
* parisc, ia64, sparc <4G
- * s390 <2G
+ * s390, powerpc <2G
* arm Various
* alpha Unlimited or 0-16MB.
*
@@ -783,6 +783,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif
+#if defined(CONFIG_SPARSEMEM)
+void memblocks_present(void);
+#else
+static inline void memblocks_present(void) {}
+#endif
+
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 01797cb4587e..f9bd2f34b99f 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -448,6 +448,23 @@ struct pci_epf_device_id {
kernel_ulong_t driver_data;
};
+/* i3c */
+
+#define I3C_MATCH_DCR 0x1
+#define I3C_MATCH_MANUF 0x2
+#define I3C_MATCH_PART 0x4
+#define I3C_MATCH_EXTRA_INFO 0x8
+
+struct i3c_device_id {
+ __u8 match_flags;
+ __u8 dcr;
+ __u16 manuf_id;
+ __u16 part_id;
+ __u16 extra_info;
+
+ const void *data;
+};
+
/* spi */
#define SPI_NAME_SIZE 32
@@ -565,7 +582,7 @@ struct platform_device_id {
/**
* struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
* @phy_id: The result of
- * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask
+ * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask
* for this PHY type
* @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
* is used to terminate an array of struct mdio_device_id.
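
A sketch of a match table built on the new i3c_device_id entry added above; the I3C core provides convenience macros for this elsewhere, so the raw field usage and the IDs below are purely illustrative:

#include <linux/mod_devicetable.h>

static const struct i3c_device_id example_i3c_ids[] = {
	{
		.match_flags = I3C_MATCH_MANUF | I3C_MATCH_PART,
		.manuf_id = 0x01f9,	/* made-up manufacturer ID */
		.part_id = 0x1234,	/* made-up part ID */
	},
	{ /* sentinel */ }
};
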
diff --git a/include/linux/module.h b/include/linux/module.h
index fce6b4335e36..d5453eb5a68b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -432,6 +432,10 @@ struct module {
unsigned int num_tracepoints;
tracepoint_ptr_t *tracepoints_ptrs;
#endif
+#ifdef CONFIG_BPF_EVENTS
+ unsigned int num_bpf_raw_events;
+ struct bpf_raw_event_map *bpf_raw_events;
+#endif
#ifdef HAVE_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
@@ -486,6 +490,13 @@ struct module {
#define MODULE_ARCH_INIT {}
#endif
+#ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
+static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
+{
+ return sym->st_value;
+}
+#endif
+
extern struct mutex module_mutex;
/* FIXME: It'd be nice to isolate modules during init, too, so they
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 0e9c50052ff3..784fb52b9900 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -76,7 +76,7 @@ struct msi_desc {
unsigned int nvec_used;
struct device *dev;
struct msi_msg msg;
- struct cpumask *affinity;
+ struct irq_affinity_desc *affinity;
union {
/* PCI MSI/X specific data */
@@ -116,6 +116,8 @@ struct msi_desc {
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+#define for_each_msi_entry_safe(desc, tmp, dev) \
+ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
@@ -136,7 +138,7 @@ static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
#endif /* CONFIG_PCI_MSI */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
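
The new _safe iterator exists so entries can be removed while walking the device's MSI list; a minimal teardown sketch (the function name is an assumption):

#include <linux/msi.h>

static void example_free_msi_list(struct device *dev)
{
	struct msi_desc *desc, *tmp;

	for_each_msi_entry_safe(desc, tmp, dev) {
		list_del(&desc->list);
		free_msi_entry(desc);
	}
}
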
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 9b57a9b1b081..cbf77168658c 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -377,6 +377,7 @@ struct cfi_fixup {
#define CFI_MFR_SHARP 0x00B0
#define CFI_MFR_SST 0x00BF
#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_MICRON 0x002C /* Micron */
#define CFI_MFR_TOSHIBA 0x0098
#define CFI_MFR_WINBOND 0x00DA
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cd0be91bdefa..ba8fa9072aca 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -207,6 +207,7 @@ struct mtd_debug_info {
struct mtd_info {
u_char type;
uint32_t flags;
+ uint32_t orig_flags; /* Flags as before running mtd checks */
uint64_t size; // Total size of the MTD
/* "Major" erase size for the device. Naïve users may take this
@@ -386,7 +387,7 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
return dev_of_node(&mtd->dev);
}
-static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index abe975c87b90..7f53ece2c039 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
*/
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
- return (u64)nand->memorg.luns_per_target *
- nand->memorg.eraseblocks_per_lun *
- nand->memorg.pages_per_eraseblock;
+ return nand->memorg.ntargets * nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun;
}
/**
@@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
}
/**
- * nanddev_pos_next_eraseblock() - Move a position to the next page
+ * nanddev_pos_next_page() - Move a position to the next page
* @nand: NAND device
* @pos: the position to update
*
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index e10b126e148f..33e240acdc6d 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -203,9 +203,12 @@ enum nand_ecc_algo {
*/
#define NAND_IS_BOOT_MEDIUM 0x00400000
-/* Options set by nand scan */
-/* Nand scan has allocated controller struct */
-#define NAND_CONTROLLER_ALLOC 0x80000000
+/*
+ * Do not try to tweak the timings at runtime. This is needed when the
+ * controller initializes the timings on itself or when it relies on
+ * configuration done by the bootloader.
+ */
+#define NAND_KEEP_TIMINGS 0x00800000
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
@@ -245,49 +248,6 @@ struct nand_id {
};
/**
- * struct nand_controller_ops - Controller operations
- *
- * @attach_chip: this method is called after the NAND detection phase after
- * flash ID and MTD fields such as erase size, page size and OOB
- * size have been set up. ECC requirements are available if
- * provided by the NAND chip or device tree. Typically used to
- * choose the appropriate ECC configuration and allocate
- * associated resources.
- * This hook is optional.
- * @detach_chip: free all resources allocated/claimed in
- * nand_controller_ops->attach_chip().
- * This hook is optional.
- */
-struct nand_controller_ops {
- int (*attach_chip)(struct nand_chip *chip);
- void (*detach_chip)(struct nand_chip *chip);
-};
-
-/**
- * struct nand_controller - Structure used to describe a NAND controller
- *
- * @lock: protection lock
- * @active: the mtd device which holds the controller currently
- * @wq: wait queue to sleep on if a NAND operation is in
- * progress used instead of the per chip wait queue
- * when a hw controller is available.
- * @ops: NAND controller operations.
- */
-struct nand_controller {
- spinlock_t lock;
- struct nand_chip *active;
- wait_queue_head_t wq;
- const struct nand_controller_ops *ops;
-};
-
-static inline void nand_controller_init(struct nand_controller *nfc)
-{
- nfc->active = NULL;
- spin_lock_init(&nfc->lock);
- init_waitqueue_head(&nfc->wq);
-}
-
-/**
* struct nand_ecc_step_info - ECC step information of ECC engine
* @stepsize: data bytes per ECC step
* @strengths: array of supported strengths
@@ -879,18 +839,21 @@ struct nand_op_parser {
/**
* struct nand_operation - NAND operation descriptor
+ * @cs: the CS line to select for this NAND operation
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
* The actual operation structure that will be passed to chip->exec_op().
*/
struct nand_operation {
+ unsigned int cs;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
-#define NAND_OPERATION(_instrs) \
+#define NAND_OPERATION(_cs, _instrs) \
{ \
+ .cs = _cs, \
.instrs = _instrs, \
.ninstrs = ARRAY_SIZE(_instrs), \
}
@@ -898,11 +861,68 @@ struct nand_operation {
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
+/**
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase after
+ * flash ID and MTD fields such as erase size, page size and OOB
+ * size have been set up. ECC requirements are available if
+ * provided by the NAND chip or device tree. Typically used to
+ * choose the appropriate ECC configuration and allocate
+ * associated resources.
+ * This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ * nand_controller_ops->attach_chip().
+ * This hook is optional.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces chip->legacy.cmdfunc(),
+ * chip->legacy.{read,write}_{buf,byte,word}(),
+ * chip->legacy.dev_ready() and chip->legacy.waitfunc().
+ * @setup_data_interface: setup the data interface and timing. If
+ * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
+ * means the configuration should not be applied but
+ * only checked.
+ * This hook is optional.
+ */
+struct nand_controller_ops {
+ int (*attach_chip)(struct nand_chip *chip);
+ void (*detach_chip)(struct nand_chip *chip);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
+ int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf);
+};
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
+ * @lock: protection lock
+ * @active: the mtd device which holds the controller currently
+ * @wq: wait queue to sleep on if a NAND operation is in
+ * progress, used instead of the per-chip wait queue
+ * when a hw controller is available.
+ * @ops: NAND controller operations.
+ */
+struct nand_controller {
+ spinlock_t lock;
+ struct nand_chip *active;
+ wait_queue_head_t wq;
+ const struct nand_controller_ops *ops;
+};
+
+static inline void nand_controller_init(struct nand_controller *nfc)
+{
+ nfc->active = NULL;
+ spin_lock_init(&nfc->lock);
+ init_waitqueue_head(&nfc->wq);
+}
/**
* struct nand_legacy - NAND chip legacy fields/hooks
* @IO_ADDR_R: address to read the 8 I/O lines of the flash device
* @IO_ADDR_W: address to write the 8 I/O lines of the flash device
+ * @select_chip: select/deselect a specific target/die
* @read_byte: read one byte from the chip
* @write_byte: write a single byte to the chip on the low 8 I/O lines
* @write_buf: write data from the buffer to the chip
@@ -921,6 +941,8 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* @get_features: get the NAND chip features
* @chip_delay: chip dependent delay for transferring data from array to read
* regs (tR).
+ * @dummy_controller: dummy controller implementation for drivers that can
+ * only control a single chip
*
* If you look at this structure you're already wrong. These fields/hooks are
* all deprecated.
@@ -928,6 +950,7 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
struct nand_legacy {
void __iomem *IO_ADDR_R;
void __iomem *IO_ADDR_W;
+ void (*select_chip)(struct nand_chip *chip, int cs);
u8 (*read_byte)(struct nand_chip *chip);
void (*write_byte)(struct nand_chip *chip, u8 byte);
void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
@@ -945,6 +968,7 @@ struct nand_legacy {
int (*get_features)(struct nand_chip *chip, int feature_addr,
u8 *subfeature_para);
int chip_delay;
+ struct nand_controller dummy_controller;
};
/**
@@ -955,17 +979,10 @@ struct nand_legacy {
* you're modifying an existing driver that is using those
* fields/hooks, you should consider reworking the driver
* avoid using them.
- * @select_chip: [REPLACEABLE] select chip nr
- * @exec_op: controller specific method to execute NAND operations.
- * This method replaces ->cmdfunc(),
- * ->legacy.{read,write}_{buf,byte,word}(),
- * ->legacy.dev_ready() and ->waifunc().
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buf_align: minimum buffer alignment required by a platform
- * @dummy_controller: dummy controller implementation for drivers that can
- * only control a single chip
* @state: [INTERN] the current state of the NAND device
* @oob_poi: "poison value buffer," used for laying out OOB data
* before writing
@@ -1012,11 +1029,11 @@ struct nand_legacy {
* this nand device will encounter their life times.
* @blocks_per_die: [INTERN] The number of PEBs in a die
* @data_interface: [INTERN] NAND interface timing information
+ * @cur_cs: currently selected target. -1 means no target selected,
+ * otherwise we should always have cur_cs >= 0 &&
+ * cur_cs < numchips. NAND Controller drivers should not
+ * modify this value, but they're allowed to read it.
* @read_retries: [INTERN] the number of read retry modes supported
- * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
- * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
- * means the configuration should not be applied but
- * only checked.
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -1037,13 +1054,7 @@ struct nand_chip {
struct nand_legacy legacy;
- void (*select_chip)(struct nand_chip *chip, int cs);
- int (*exec_op)(struct nand_chip *chip,
- const struct nand_operation *op,
- bool check_only);
int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
- int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
- const struct nand_data_interface *conf);
unsigned int options;
unsigned int bbt_options;
@@ -1073,6 +1084,8 @@ struct nand_chip {
struct nand_data_interface data_interface;
+ int cur_cs;
+
int read_retries;
flstate_t state;
@@ -1082,7 +1095,6 @@ struct nand_chip {
struct nand_ecc_ctrl ecc;
unsigned long buf_align;
- struct nand_controller dummy_controller;
uint8_t *bbt;
struct nand_bbt_descr *bbt_td;
@@ -1098,15 +1110,6 @@ struct nand_chip {
} manufacturer;
};
-static inline int nand_exec_op(struct nand_chip *chip,
- const struct nand_operation *op)
-{
- if (!chip->exec_op)
- return -ENOTSUPP;
-
- return chip->exec_op(chip, op, false);
-}
-
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
@@ -1345,5 +1348,12 @@ void nand_release(struct nand_chip *chip);
* instruction and have no physical pin to check it.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+struct gpio_desc;
+int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
+ unsigned long timeout_ms);
+
+/* Select/deselect a NAND target. */
+void nand_select_target(struct nand_chip *chip, unsigned int cs);
+void nand_deselect_target(struct nand_chip *chip);
#endif /* __LINUX_MTD_RAWNAND_H */
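With ->exec_op() and ->setup_data_interface() now living in nand_controller_ops, and NAND_OPERATION() carrying the target CS, a controller driver hooks in roughly as sketched below. The acme_* symbols are placeholders and the exec_op body is elided:

static int acme_exec_op(struct nand_chip *chip,
			const struct nand_operation *op, bool check_only)
{
	/* Select op->cs, then execute op->instrs[0..op->ninstrs - 1]. */
	return 0;
}

static const struct nand_controller_ops acme_nand_controller_ops = {
	.exec_op = acme_exec_op,
};

static int acme_reset_target(struct nand_chip *chip)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_RESET, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

	return acme_exec_op(chip, &op, false);
}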
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index c759d403cbc0..78fc2d4218c8 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -1,20 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* SuperH FLCTL nand controller
*
* Copyright © 2008 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __SH_FLCTL_H__
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 7f0c7303575e..fa2d89e38e40 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MTD_SPI_NOR_H
@@ -23,7 +19,8 @@
#define SNOR_MFR_ATMEL CFI_MFR_ATMEL
#define SNOR_MFR_GIGADEVICE 0xc8
#define SNOR_MFR_INTEL CFI_MFR_INTEL
-#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */
+#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
+#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
#define SNOR_MFR_SPANSION CFI_MFR_AMD
#define SNOR_MFR_SST CFI_MFR_SST
@@ -236,6 +233,8 @@ enum spi_nor_option_flags {
SNOR_F_READY_XSR_RDY = BIT(4),
SNOR_F_USE_CLSR = BIT(5),
SNOR_F_BROKEN_RESET = BIT(6),
+ SNOR_F_4B_OPCODES = BIT(7),
+ SNOR_F_HAS_4BAIT = BIT(8),
};
/**
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 088ff96c3eb6..b92e2aa955b6 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -194,8 +194,10 @@ struct spinand_manufacturer {
};
/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
/**
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index c79e859408e6..fd458389f7d1 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim,
}
/* fall through */
case NET_DIM_START_MEASURE:
+ net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr,
+ &dim->start_sample);
dim->state = NET_DIM_MEASURE_IN_PROGRESS;
break;
case NET_DIM_APPLY_NEW_PROFILE:
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index dc1d9ed33b31..1377d085ef99 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -845,6 +845,8 @@ enum tc_setup_type {
TC_SETUP_QDISC_PRIO,
TC_SETUP_QDISC_MQ,
TC_SETUP_QDISC_ETF,
+ TC_SETUP_ROOT_QDISC,
+ TC_SETUP_QDISC_GRED,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -863,9 +865,6 @@ enum bpf_netdev_command {
XDP_QUERY_PROG,
XDP_QUERY_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */
- BPF_OFFLOAD_VERIFIER_PREP,
- BPF_OFFLOAD_TRANSLATE,
- BPF_OFFLOAD_DESTROY,
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
XDP_QUERY_XSK_UMEM,
@@ -891,15 +890,6 @@ struct netdev_bpf {
/* flags with which program was installed */
u32 prog_flags;
};
- /* BPF_OFFLOAD_VERIFIER_PREP */
- struct {
- struct bpf_prog *prog;
- const struct bpf_prog_offload_ops *ops; /* callee set */
- } verifier;
- /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
- struct {
- struct bpf_prog *prog;
- } offload;
/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
struct {
struct bpf_offloaded_map *offmap;
@@ -1175,7 +1165,7 @@ struct dev_ifalias {
* entries to skb and update idx with the number of entries.
*
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
- * u16 flags)
+ * u16 flags, struct netlink_ext_ack *extack)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
* struct net_device *dev, u32 filter_mask,
* int nlflags)
@@ -1397,10 +1387,16 @@ struct net_device_ops {
struct net_device *dev,
struct net_device *filter_dev,
int *idx);
-
+ int (*ndo_fdb_get)(struct sk_buff *skb,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u32 portid, u32 seq,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
@@ -2388,13 +2384,13 @@ struct pcpu_sw_netstats {
u64 tx_packets;
u64 tx_bytes;
struct u64_stats_sync syncp;
-};
+} __aligned(4 * sizeof(u64));
struct pcpu_lstats {
u64 packets;
u64 bytes;
struct u64_stats_sync syncp;
-};
+} __aligned(2 * sizeof(u64));
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
@@ -2459,7 +2455,8 @@ enum netdev_cmd {
NETDEV_REGISTER,
NETDEV_UNREGISTER,
NETDEV_CHANGEMTU, /* notify after mtu change happened */
- NETDEV_CHANGEADDR,
+ NETDEV_CHANGEADDR, /* notify after the address change */
+ NETDEV_PRE_CHANGEADDR, /* notify before the address change */
NETDEV_GOING_DOWN,
NETDEV_CHANGENAME,
NETDEV_FEAT_CHANGE,
@@ -2521,6 +2518,11 @@ struct netdev_notifier_changelowerstate_info {
void *lower_state_info; /* is lower dev state */
};
+struct netdev_notifier_pre_changeaddr_info {
+ struct netdev_notifier_info info; /* must be first */
+ const unsigned char *dev_addr;
+};
+
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
@@ -2615,7 +2617,7 @@ struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
-int dev_open(struct net_device *dev);
+int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void dev_close(struct net_device *dev);
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
@@ -3190,6 +3192,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
#endif
}
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ if (xmit_more) {
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+#endif
+ return netif_tx_queue_stopped(dev_queue);
+ }
+ netdev_tx_sent_queue(dev_queue, bytes);
+ return true;
+}
+
/**
* netdev_sent_queue - report the number of bytes queued to hardware
* @dev: network device
@@ -3204,6 +3226,14 @@ static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}
+static inline bool __netdev_sent_queue(struct net_device *dev,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
+ xmit_more);
+}
+
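The two helpers above let a driver queue BQL bytes for every skb of a batch while deferring the stop test to the stack, ringing the doorbell only when required. A hedged sketch of the call pattern (acme_ring_doorbell() is a hypothetical device-specific helper):

void acme_ring_doorbell(struct net_device *dev);	/* hypothetical */

static void acme_tx_account(struct net_device *dev, struct netdev_queue *txq,
			    unsigned int bytes, bool xmit_more)
{
	/* Only kick the NIC when the helper says the doorbell is needed. */
	if (__netdev_tx_sent_queue(txq, bytes, xmit_more))
		acme_ring_doorbell(dev);
}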
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned int pkts, unsigned int bytes)
{
@@ -3593,8 +3623,10 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
int dev_ifconf(struct net *net, struct ifconf *, int);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
-int __dev_change_flags(struct net_device *, unsigned int flags);
-int dev_change_flags(struct net_device *, unsigned int);
+int __dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
+int dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
@@ -3607,7 +3639,10 @@ int dev_set_mtu_ext(struct net_device *dev, int mtu,
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
-int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack);
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
@@ -4048,6 +4083,16 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
int (*sync)(struct net_device *, const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *));
+int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*sync)(struct net_device *,
+ const unsigned char *, int),
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int));
+void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
@@ -4312,9 +4357,10 @@ static inline bool can_checksum_protocol(netdev_features_t features,
}
#ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
-static inline void netdev_rx_csum_fault(struct net_device *dev)
+static inline void netdev_rx_csum_fault(struct net_device *dev,
+ struct sk_buff *skb)
{
}
#endif
@@ -4340,7 +4386,7 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi
struct netdev_queue *txq, bool more)
{
const struct net_device_ops *ops = dev->netdev_ops;
- int rc;
+ netdev_tx_t rc;
rc = __netdev_start_xmit(ops, skb, dev, more);
if (rc == NETDEV_TX_OK)
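NETDEV_PRE_CHANGEADDR and dev_pre_changeaddr_notify() let listeners veto a MAC address change before it is applied, reporting the reason through the extack now threaded into dev_set_mac_address() and friends. A rough, hypothetical notifier (needs <linux/etherdevice.h> for is_valid_ether_addr()):

static int acme_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct netdev_notifier_pre_changeaddr_info *prei = ptr;

	if (event != NETDEV_PRE_CHANGEADDR)
		return NOTIFY_DONE;

	if (!is_valid_ether_addr(prei->dev_addr)) {
		NL_SET_ERR_MSG(netdev_notifier_info_to_extack(&prei->info),
			       "Refusing zero or multicast MAC address");
		return notifier_from_errno(-EADDRNOTAVAIL);
	}

	return NOTIFY_DONE;
}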
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 34fc80f3eb90..f2e1e6b13ca4 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -303,18 +303,18 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
/* Netlink CB args */
enum {
IPSET_CB_NET = 0, /* net namespace */
+ IPSET_CB_PROTO, /* ipset protocol */
IPSET_CB_DUMP, /* dump single set/all sets */
IPSET_CB_INDEX, /* set index */
IPSET_CB_PRIVATE, /* set private data */
IPSET_CB_ARG0, /* type specific */
- IPSET_CB_ARG1,
};
/* register and unregister set references */
extern ip_set_id_t ip_set_get_byname(struct net *net,
const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 8e2bab1e8e90..70877f8de7e9 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
rcu_assign_pointer(comment->c, c);
}
-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
+/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
if (!c)
return 0;
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index b8d95564bd53..6989e2e4eabf 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
struct nf_conntrack_tuple tuple;
};
+enum grep_conntrack {
+ GRE_CT_UNREPLIED,
+ GRE_CT_REPLIED,
+ GRE_CT_MAX
+};
+
+struct netns_proto_gre {
+ struct nf_proto_net nf;
+ rwlock_t keymap_lock;
+ struct list_head keymap_list;
+ unsigned int gre_timeouts[GRE_CT_MAX];
+};
+
/* add new tuple->key_reply pair to keymap */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
@@ -28,7 +41,5 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
-void nf_nat_need_gre(void);
-
#endif /* __KERNEL__ */
#endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 4a520d3304a2..cf09ab37b45b 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -62,18 +62,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
}
#endif /* CONFIG_PROVE_LOCKING */
-/*
- * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
- *
- * @p: The pointer to read, prior to dereferencing
- * @ss: The nfnetlink subsystem ID
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
- */
-#define nfnl_dereference(p, ss) \
- rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
-
#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index fa0686500970..5f2614d02e03 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,43 +17,58 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
skb_dst_drop(skb);
}
+static inline struct nf_bridge_info *
+nf_bridge_info_get(const struct sk_buff *skb)
+{
+ return skb_ext_find(skb, SKB_EXT_BRIDGE_NF);
+}
+
+static inline bool nf_bridge_info_exists(const struct sk_buff *skb)
+{
+ return skb_ext_exist(skb, SKB_EXT_BRIDGE_NF);
+}
+
static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- if (skb->nf_bridge == NULL)
+ if (!nf_bridge)
return 0;
- nf_bridge = skb->nf_bridge;
return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
}
static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- if (skb->nf_bridge == NULL)
+ if (!nf_bridge)
return 0;
- nf_bridge = skb->nf_bridge;
return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
}
static inline struct net_device *
nf_bridge_get_physindev(const struct sk_buff *skb)
{
- return skb->nf_bridge ? skb->nf_bridge->physindev : NULL;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge ? nf_bridge->physindev : NULL;
}
static inline struct net_device *
nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
- return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge ? nf_bridge->physoutdev : NULL;
}
static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
{
- return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge && nf_bridge->in_prerouting;
}
#else
#define br_drop_fake_rtable(skb) do { } while (0)
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 4da90a6ab536..4e8add270200 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -34,8 +34,8 @@ struct netlink_skb_parms {
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
-extern void netlink_table_grab(void);
-extern void netlink_table_ungrab(void);
+void netlink_table_grab(void);
+void netlink_table_ungrab(void);
#define NL_CFG_F_NONROOT_RECV (1 << 0)
#define NL_CFG_F_NONROOT_SEND (1 << 1)
@@ -51,7 +51,7 @@ struct netlink_kernel_cfg {
bool (*compare)(struct net *net, struct sock *sk);
};
-extern struct sock *__netlink_kernel_create(struct net *net, int unit,
+struct sock *__netlink_kernel_create(struct net *net, int unit,
struct module *module,
struct netlink_kernel_cfg *cfg);
static inline struct sock *
@@ -110,24 +110,33 @@ struct netlink_ext_ack {
} \
} while (0)
-extern void netlink_kernel_release(struct sock *sk);
-extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
-extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
-extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
-extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
- const struct netlink_ext_ack *extack);
-extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-
-extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
-extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
- __u32 group, gfp_t allocation);
-extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
- __u32 portid, __u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
- void *filter_data);
-extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
-extern int netlink_register_notifier(struct notifier_block *nb);
-extern int netlink_unregister_notifier(struct notifier_block *nb);
+static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
+ u64 cookie)
+{
+ u64 __cookie = cookie;
+
+ memcpy(extack->cookie, &__cookie, sizeof(__cookie));
+ extack->cookie_len = sizeof(__cookie);
+}
+
+void netlink_kernel_release(struct sock *sk);
+int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
+int netlink_change_ngroups(struct sock *sk, unsigned int groups);
+void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
+void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ const struct netlink_ext_ack *extack);
+int netlink_has_listeners(struct sock *sk, unsigned int group);
+
+int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
+ __u32 group, gfp_t allocation);
+int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
+ __u32 portid, __u32 group, gfp_t allocation,
+ int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
+ void *filter_data);
+int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
+int netlink_register_notifier(struct notifier_block *nb);
+int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
@@ -203,7 +212,7 @@ struct netlink_dump_control {
u16 min_dump_alloc;
};
-extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control);
static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
@@ -222,8 +231,8 @@ struct netlink_tap {
struct list_head list;
};
-extern int netlink_add_tap(struct netlink_tap *nt);
-extern int netlink_remove_tap(struct netlink_tap *nt);
+int netlink_add_tap(struct netlink_tap *nt);
+int netlink_remove_tap(struct netlink_tap *nt);
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
struct user_namespace *ns, int cap);
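nl_set_extack_cookie_u64() above copies a caller-chosen 64-bit value into the extack cookie so it comes back to user space with the ack. A minimal, hypothetical doit handler:

static int acme_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	u64 handle = 0x1234;	/* arbitrary object handle for illustration */

	nl_set_extack_cookie_u64(extack, handle);
	return 0;
}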
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 08f9247e9827..9003e29cde46 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
int watchdog_nmi_probe(void);
+int watchdog_nmi_enable(unsigned int cpu);
+void watchdog_nmi_disable(unsigned int cpu);
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 496ff759f84c..91745cc3704c 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -403,7 +403,6 @@ struct nvme_fc_port_template {
void **handle);
void (*delete_queue)(struct nvme_fc_local_port *,
unsigned int qidx, void *handle);
- void (*poll_queue)(struct nvme_fc_local_port *, void *handle);
int (*ls_req)(struct nvme_fc_local_port *,
struct nvme_fc_remote_port *,
struct nvmefc_ls_req *);
@@ -649,22 +648,6 @@ enum {
* sequence in one LLDD operation. Errors during Data
* sequence transmit must not allow RSP sequence to be sent.
*/
- NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1),
- /* Bit 2: When 0, the LLDD is calling the cmd rcv handler
- * in a non-isr context, allowing the transport to finish
- * op completion in the calling context. When 1, the LLDD
- * is calling the cmd rcv handler in an ISR context,
- * requiring the transport to transition to a workqueue
- * for op completion.
- */
- NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2),
- /* Bit 3: When 0, the LLDD is calling the op done handler
- * in a non-isr context, allowing the transport to finish
- * op completion in the calling context. When 1, the LLDD
- * is calling the op done handler in an ISR context,
- * requiring the transport to transition to a workqueue
- * for op completion.
- */
};
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
new file mode 100644
index 000000000000..03d87c0550a9
--- /dev/null
+++ b/include/linux/nvme-tcp.h
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics TCP protocol header.
+ * Copyright (c) 2018 Lightbits Labs. All rights reserved.
+ */
+
+#ifndef _LINUX_NVME_TCP_H
+#define _LINUX_NVME_TCP_H
+
+#include <linux/nvme.h>
+
+#define NVME_TCP_DISC_PORT 8009
+#define NVME_TCP_ADMIN_CCSZ SZ_8K
+#define NVME_TCP_DIGEST_LENGTH 4
+
+enum nvme_tcp_pfv {
+ NVME_TCP_PFV_1_0 = 0x0,
+};
+
+enum nvme_tcp_fatal_error_status {
+ NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
+ NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
+ NVME_TCP_FES_HDR_DIGEST_ERR = 0x03,
+ NVME_TCP_FES_DATA_OUT_OF_RANGE = 0x04,
+ NVME_TCP_FES_R2T_LIMIT_EXCEEDED = 0x05,
+ NVME_TCP_FES_DATA_LIMIT_EXCEEDED = 0x05,
+ NVME_TCP_FES_UNSUPPORTED_PARAM = 0x06,
+};
+
+enum nvme_tcp_digest_option {
+ NVME_TCP_HDR_DIGEST_ENABLE = (1 << 0),
+ NVME_TCP_DATA_DIGEST_ENABLE = (1 << 1),
+};
+
+enum nvme_tcp_pdu_type {
+ nvme_tcp_icreq = 0x0,
+ nvme_tcp_icresp = 0x1,
+ nvme_tcp_h2c_term = 0x2,
+ nvme_tcp_c2h_term = 0x3,
+ nvme_tcp_cmd = 0x4,
+ nvme_tcp_rsp = 0x5,
+ nvme_tcp_h2c_data = 0x6,
+ nvme_tcp_c2h_data = 0x7,
+ nvme_tcp_r2t = 0x9,
+};
+
+enum nvme_tcp_pdu_flags {
+ NVME_TCP_F_HDGST = (1 << 0),
+ NVME_TCP_F_DDGST = (1 << 1),
+ NVME_TCP_F_DATA_LAST = (1 << 2),
+ NVME_TCP_F_DATA_SUCCESS = (1 << 3),
+};
+
+/**
+ * struct nvme_tcp_hdr - nvme tcp pdu common header
+ *
+ * @type: pdu type
+ * @flags: pdu specific flags
+ * @hlen: pdu header length
+ * @pdo: pdu data offset
+ * @plen: pdu wire byte length
+ */
+struct nvme_tcp_hdr {
+ __u8 type;
+ __u8 flags;
+ __u8 hlen;
+ __u8 pdo;
+ __le32 plen;
+};
+
+/**
+ * struct nvme_tcp_icreq_pdu - nvme tcp initialize connection request pdu
+ *
+ * @hdr: pdu generic header
+ * @pfv: pdu version format
+ * @hpda: host pdu data alignment (dwords, 0's based)
+ * @digest: digest types enabled
+ * @maxr2t: maximum r2ts per request supported
+ */
+struct nvme_tcp_icreq_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 pfv;
+ __u8 hpda;
+ __u8 digest;
+ __le32 maxr2t;
+ __u8 rsvd2[112];
+};
+
+/**
+ * struct nvme_tcp_icresp_pdu - nvme tcp initialize connection response pdu
+ *
+ * @hdr: pdu common header
+ * @pfv: pdu version format
+ * @cpda: controller pdu data alignment (dwords, 0's based)
+ * @digest: digest types enabled
+ * @maxdata: maximum data capsules per r2t supported
+ */
+struct nvme_tcp_icresp_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 pfv;
+ __u8 cpda;
+ __u8 digest;
+ __le32 maxdata;
+ __u8 rsvd[112];
+};
+
+/**
+ * struct nvme_tcp_term_pdu - nvme tcp terminate connection pdu
+ *
+ * @hdr: pdu common header
+ * @fes: fatal error status
+ * @fei: fatal error information
+ */
+struct nvme_tcp_term_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 fes;
+ __le32 fei;
+ __u8 rsvd[8];
+};
+
+/**
+ * struct nvme_tcp_cmd_pdu - nvme tcp command capsule pdu
+ *
+ * @hdr: pdu common header
+ * @cmd: nvme command
+ */
+struct nvme_tcp_cmd_pdu {
+ struct nvme_tcp_hdr hdr;
+ struct nvme_command cmd;
+};
+
+/**
+ * struct nvme_tcp_rsp_pdu - nvme tcp response capsule pdu
+ *
+ * @hdr: pdu common header
+ * @cqe: nvme completion queue entry
+ */
+struct nvme_tcp_rsp_pdu {
+ struct nvme_tcp_hdr hdr;
+ struct nvme_completion cqe;
+};
+
+/**
+ * struct nvme_tcp_r2t_pdu - nvme tcp ready-to-transfer pdu
+ *
+ * @hdr: pdu common header
+ * @command_id: nvme command identifier which this relates to
+ * @ttag: transfer tag (controller generated)
+ * @r2t_offset: offset from the start of the command data
+ * @r2t_length: length the host is allowed to send
+ */
+struct nvme_tcp_r2t_pdu {
+ struct nvme_tcp_hdr hdr;
+ __u16 command_id;
+ __u16 ttag;
+ __le32 r2t_offset;
+ __le32 r2t_length;
+ __u8 rsvd[4];
+};
+
+/**
+ * struct nvme_tcp_data_pdu - nvme tcp data pdu
+ *
+ * @hdr: pdu common header
+ * @command_id: nvme command identifier which this relates to
+ * @ttag: transfer tag (controller generated)
+ * @data_offset: offset from the start of the command data
+ * @data_length: length of the data stream
+ */
+struct nvme_tcp_data_pdu {
+ struct nvme_tcp_hdr hdr;
+ __u16 command_id;
+ __u16 ttag;
+ __le32 data_offset;
+ __le32 data_length;
+ __u8 rsvd[4];
+};
+
+union nvme_tcp_pdu {
+ struct nvme_tcp_icreq_pdu icreq;
+ struct nvme_tcp_icresp_pdu icresp;
+ struct nvme_tcp_cmd_pdu cmd;
+ struct nvme_tcp_rsp_pdu rsp;
+ struct nvme_tcp_r2t_pdu r2t;
+ struct nvme_tcp_data_pdu data;
+};
+
+#endif /* _LINUX_NVME_TCP_H */
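As a hedged illustration of these PDU definitions (not lifted from any driver), a host-side initialize-connection request could be prepared like this:

static void acme_prep_icreq(struct nvme_tcp_icreq_pdu *icreq,
			    bool hdr_digest, bool data_digest)
{
	memset(icreq, 0, sizeof(*icreq));
	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(sizeof(*icreq));
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->hpda = 0;	/* no extra host PDU data alignment */
	icreq->maxr2t = 0;	/* 0's based: one outstanding R2T */
	if (hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
}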
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 818dbe9331be..bbcc83886899 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -52,15 +52,20 @@ enum {
enum {
NVMF_TRTYPE_RDMA = 1, /* RDMA */
NVMF_TRTYPE_FC = 2, /* Fibre Channel */
+ NVMF_TRTYPE_TCP = 3, /* TCP/IP */
NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */
NVMF_TRTYPE_MAX,
};
/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
- NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
- NVMF_TREQ_REQUIRED = 1, /* Required */
- NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
+ NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
+ NVMF_TREQ_REQUIRED = 1, /* Required */
+ NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
+#define NVME_TREQ_SECURE_CHANNEL_MASK \
+ (NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED)
+
+ NVMF_TREQ_DISABLE_SQFLOW = (1 << 2), /* Supports SQ flow control disable */
};
/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
@@ -198,6 +203,11 @@ enum {
NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
};
+enum nvme_ctrl_attr {
+ NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
+ NVME_CTRL_ATTR_TBKAS = (1 << 6),
+};
+
struct nvme_id_ctrl {
__le16 vid;
__le16 ssvid;
@@ -214,7 +224,11 @@ struct nvme_id_ctrl {
__le32 rtd3e;
__le32 oaes;
__le32 ctratt;
- __u8 rsvd100[156];
+ __u8 rsvd100[28];
+ __le16 crdt1;
+ __le16 crdt2;
+ __le16 crdt3;
+ __u8 rsvd134[122];
__le16 oacs;
__u8 acl;
__u8 aerl;
@@ -481,12 +495,21 @@ enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
NVME_AER_NOTICE_ANA = 0x03,
+ NVME_AER_NOTICE_DISC_CHANGED = 0xf0,
};
enum {
- NVME_AEN_CFG_NS_ATTR = 1 << 8,
- NVME_AEN_CFG_FW_ACT = 1 << 9,
- NVME_AEN_CFG_ANA_CHANGE = 1 << 11,
+ NVME_AEN_BIT_NS_ATTR = 8,
+ NVME_AEN_BIT_FW_ACT = 9,
+ NVME_AEN_BIT_ANA_CHANGE = 11,
+ NVME_AEN_BIT_DISC_CHANGE = 31,
+};
+
+enum {
+ NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR,
+ NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT,
+ NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE,
+ NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE,
};
struct nvme_lba_range_type {
@@ -639,7 +662,12 @@ struct nvme_common_command {
__le32 cdw2[2];
__le64 metadata;
union nvme_data_ptr dptr;
- __le32 cdw10[6];
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
};
struct nvme_rw_command {
@@ -738,6 +766,15 @@ enum {
NVME_HOST_MEM_RETURN = (1 << 1),
};
+struct nvme_feat_host_behavior {
+ __u8 acre;
+ __u8 resv1[511];
+};
+
+enum {
+ NVME_ENABLE_ACRE = 1,
+};
+
/* Admin commands */
enum nvme_admin_opcode {
@@ -792,6 +829,7 @@ enum {
NVME_FEAT_RRL = 0x12,
NVME_FEAT_PLM_CONFIG = 0x13,
NVME_FEAT_PLM_WINDOW = 0x14,
+ NVME_FEAT_HOST_BEHAVIOR = 0x16,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -1030,6 +1068,10 @@ struct nvmf_disc_rsp_page_hdr {
struct nvmf_disc_rsp_page_entry entries[0];
};
+enum {
+ NVME_CONNECT_DISABLE_SQFLOW = (1 << 2),
+};
+
struct nvmf_connect_command {
__u8 opcode;
__u8 resv1;
@@ -1126,6 +1168,20 @@ struct nvme_command {
};
};
+struct nvme_error_slot {
+ __le64 error_count;
+ __le16 sqid;
+ __le16 cmdid;
+ __le16 status_field;
+ __le16 param_error_location;
+ __le64 lba;
+ __le32 nsid;
+ __u8 vs;
+ __u8 resv[3];
+ __le64 cs;
+ __u8 resv2[24];
+};
+
static inline bool nvme_is_write(struct nvme_command *cmd)
{
/*
@@ -1243,6 +1299,7 @@ enum {
NVME_SC_ANA_TRANSITION = 0x303,
NVME_SC_HOST_PATH_ERROR = 0x370,
+ NVME_SC_CRD = 0x1800,
NVME_SC_DNR = 0x4000,
};
diff --git a/include/linux/objagg.h b/include/linux/objagg.h
new file mode 100644
index 000000000000..34f38c186ea0
--- /dev/null
+++ b/include/linux/objagg.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _OBJAGG_H
+#define _OBJAGG_H
+
+struct objagg_ops {
+ size_t obj_size;
+ void * (*delta_create)(void *priv, void *parent_obj, void *obj);
+ void (*delta_destroy)(void *priv, void *delta_priv);
+ void * (*root_create)(void *priv, void *obj);
+ void (*root_destroy)(void *priv, void *root_priv);
+};
+
+struct objagg;
+struct objagg_obj;
+
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj);
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj);
+const void *objagg_obj_raw(const struct objagg_obj *objagg_obj);
+
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj);
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj);
+struct objagg *objagg_create(const struct objagg_ops *ops, void *priv);
+void objagg_destroy(struct objagg *objagg);
+
+struct objagg_obj_stats {
+ unsigned int user_count;
+ unsigned int delta_user_count; /* includes delta object users */
+};
+
+struct objagg_obj_stats_info {
+ struct objagg_obj_stats stats;
+ struct objagg_obj *objagg_obj; /* associated object */
+ bool is_root;
+};
+
+struct objagg_stats {
+ unsigned int stats_info_count;
+ struct objagg_obj_stats_info stats_info[];
+};
+
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg);
+void objagg_stats_put(const struct objagg_stats *objagg_stats);
+
+#endif
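objagg aggregates objects by keeping a few "root" objects and expressing the rest as deltas against them, with the four callbacks deciding how roots and deltas are built. A rough usage sketch; struct acme_key and the acme_* callbacks are placeholders the user has to supply, and ERR_PTR-style returns are assumed:

struct acme_key { u32 a, b; };

void *acme_root_create(void *priv, void *obj);
void acme_root_destroy(void *priv, void *root_priv);
void *acme_delta_create(void *priv, void *parent_obj, void *obj);
void acme_delta_destroy(void *priv, void *delta_priv);

static const struct objagg_ops acme_objagg_ops = {
	.obj_size	= sizeof(struct acme_key),
	.delta_create	= acme_delta_create,
	.delta_destroy	= acme_delta_destroy,
	.root_create	= acme_root_create,
	.root_destroy	= acme_root_destroy,
};

static int acme_use_objagg(void *priv, struct acme_key *key)
{
	struct objagg *objagg;
	struct objagg_obj *obj;
	int err;

	objagg = objagg_create(&acme_objagg_ops, priv);
	if (IS_ERR(objagg))
		return PTR_ERR(objagg);

	obj = objagg_obj_get(objagg, key);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		objagg_destroy(objagg);
		return err;
	}

	/* objagg_obj_root_priv()/objagg_obj_delta_priv() tell root from delta. */

	objagg_obj_put(objagg, obj);
	objagg_destroy(objagg);
	return 0;
}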
diff --git a/include/linux/of.h b/include/linux/of.h
index a5aee3c438ad..0fe5bef81a7e 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -66,7 +66,6 @@ struct device_node {
unsigned long _flags;
void *data;
#if defined(CONFIG_SPARC)
- const char *path_component_name;
unsigned int unique_id;
struct of_irq_controller *irq_trans;
#endif
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index b9cd9ebdf9b9..a713e5d156d8 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -76,6 +76,7 @@ extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
+extern void __init early_init_dt_scan_chosen_arch(unsigned long node);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 90d81ee9e6a0..9cd72aab76fe 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -13,7 +13,6 @@
struct net_device;
extern int of_get_phy_mode(struct device_node *np);
extern const void *of_get_mac_address(struct device_node *np);
-extern int of_get_nvmem_mac_address(struct device_node *np, void *addr);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np)
@@ -26,11 +25,6 @@ static inline const void *of_get_mac_address(struct device_node *np)
return NULL;
}
-static inline int of_get_nvmem_mac_address(struct device_node *np, void *addr)
-{
- return -ENODEV;
-}
-
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
{
return NULL;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 11c71c4ecf75..51a5a5217667 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1960,7 +1960,11 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);
int pcibios_add_device(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
+#ifdef CONFIG_PCI
void pcibios_penalize_isa_irq(int irq, int active);
+#else
+static inline void pcibios_penalize_isa_irq(int irq, int active) {}
+#endif
int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 69f0abe1ba1a..d86d5a2477fc 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -545,6 +545,9 @@
#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -2359,6 +2362,8 @@
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+#define PCI_VENDOR_ID_USR 0x16ec
+
#define PCI_VENDOR_ID_VITESSE 0x1725
#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
diff --git a/include/linux/pe.h b/include/linux/pe.h
index 143ce75be5f0..3482b18a48b5 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -166,7 +166,7 @@ struct mz_hdr {
uint16_t oem_info; /* oem specific */
uint16_t reserved1[10]; /* reserved */
uint32_t peaddr; /* address of pe header */
- char message[64]; /* message to print */
+ char message[]; /* message to print */
};
struct mz_reloc {
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 79b99d653e03..71b75643c432 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -41,7 +41,7 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
* cannot both change sem->state from readers_fast and start checking
* counters while we are here. So if we see !sem->state, we know that
* the writer won't be checking until we're past the preempt_enable()
- * and that one the synchronize_sched() is done, the writer will see
+ * and that once the synchronize_rcu() is done, the writer will see
* anything we did within this RCU-sched read-side critical section.
*/
__this_cpu_inc(*sem->read_count);
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index bf309ff6f244..4641e850b204 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -102,8 +102,10 @@ struct arm_pmu {
int (*filter_match)(struct perf_event *event);
int num_events;
bool secure_access; /* 32-bit ARM only */
-#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
+#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
+ DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
struct hlist_node node;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 53c500f0ca79..1d5c551a5add 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -262,8 +262,8 @@ struct pmu {
*/
int capabilities;
- int * __percpu pmu_disable_count;
- struct perf_cpu_context * __percpu pmu_cpu_context;
+ int __percpu *pmu_disable_count;
+ struct perf_cpu_context __percpu *pmu_cpu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3ea87f774a76..da039f211c22 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -58,6 +58,11 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
+extern const int phy_10_100_features_array[4];
+extern const int phy_basic_t1_features_array[2];
+extern const int phy_gbit_features_array[2];
+extern const int phy_10gbit_features_array[1];
+
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
* or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
@@ -66,9 +71,8 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
#define PHY_POLL -1
#define PHY_IGNORE_INTERRUPT -2
-#define PHY_HAS_INTERRUPT 0x00000001
-#define PHY_IS_INTERNAL 0x00000002
-#define PHY_RST_AFTER_CLK_EN 0x00000004
+#define PHY_IS_INTERNAL 0x00000001
+#define PHY_RST_AFTER_CLK_EN 0x00000002
#define MDIO_DEVICE_IS_PHY 0x80000000
/* Interface Mode definitions */
@@ -178,7 +182,6 @@ static inline const char *phy_modes(phy_interface_t interface)
#define PHY_INIT_TIMEOUT 100000
#define PHY_STATE_TIME 1
#define PHY_FORCE_TIMEOUT 10
-#define PHY_AN_TIMEOUT 10
#define PHY_MAX_ADDR 32
@@ -264,57 +267,27 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
-#define PHY_INTERRUPT_DISABLED 0x0
-#define PHY_INTERRUPT_ENABLED 0x80000000
+#define PHY_INTERRUPT_DISABLED false
+#define PHY_INTERRUPT_ENABLED true
/* PHY state machine states:
*
* DOWN: PHY device and driver are not ready for anything. probe
* should be called if and only if the PHY is in this state,
* given that the PHY device exists.
- * - PHY driver probe function will, depending on the PHY, set
- * the state to STARTING or READY
- *
- * STARTING: PHY device is coming up, and the ethernet driver is
- * not ready. PHY drivers may set this in the probe function.
- * If they do, they are responsible for making sure the state is
- * eventually set to indicate whether the PHY is UP or READY,
- * depending on the state when the PHY is done starting up.
- * - PHY driver will set the state to READY
- * - start will set the state to PENDING
+ * - PHY driver probe function will set the state to READY
*
* READY: PHY is ready to send and receive packets, but the
* controller is not. By default, PHYs which do not implement
- * probe will be set to this state by phy_probe(). If the PHY
- * driver knows the PHY is ready, and the PHY state is STARTING,
- * then it sets this STATE.
+ * probe will be set to this state by phy_probe().
* - start will set the state to UP
*
- * PENDING: PHY device is coming up, but the ethernet driver is
- * ready. phy_start will set this state if the PHY state is
- * STARTING.
- * - PHY driver will set the state to UP when the PHY is ready
- *
* UP: The PHY and attached device are ready to do work.
* Interrupts should be started here.
- * - timer moves to AN
- *
- * AN: The PHY is currently negotiating the link state. Link is
- * therefore down for now. phy_timer will set this state when it
- * detects the state is UP. config_aneg will set this state
- * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
- * - If autonegotiation finishes, but there's no link, it sets
- * the state to NOLINK.
- * - If aneg finishes with link, it sets the state to RUNNING,
- * and calls adjust_link
- * - If autonegotiation did not finish after an arbitrary amount
- * of time, autonegotiation should be tried again if the PHY
- * supports "magic" autonegotiation (back to AN)
- * - If it didn't finish, and no magic_aneg, move to FORCING.
+ * - timer moves to NOLINK or RUNNING
*
* NOLINK: PHY is up, but not currently plugged in.
- * - If the timer notes that the link comes back, we move to RUNNING
- * - config_aneg moves to AN
+ * - irq or timer will set RUNNING if link comes back
* - phy_stop moves to HALTED
*
* FORCING: PHY is being configured with forced settings
@@ -325,11 +298,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
*
* RUNNING: PHY is currently up, running, and possibly sending
* and/or receiving packets
- * - timer will set CHANGELINK if we're polling (this ensures the
- * link state is polled every other cycle of this state machine,
- * which makes it every other second)
- * - irq will set CHANGELINK
- * - config_aneg will set AN
+ * - irq or timer will set NOLINK if link goes down
* - phy_stop moves to HALTED
*
* CHANGELINK: PHY experienced a change in link state
@@ -349,16 +318,13 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
*/
enum phy_state {
PHY_DOWN = 0,
- PHY_STARTING,
PHY_READY,
- PHY_PENDING,
+ PHY_HALTED,
PHY_UP,
- PHY_AN,
PHY_RUNNING,
PHY_NOLINK,
PHY_FORCING,
PHY_CHANGELINK,
- PHY_HALTED,
PHY_RESUMING
};
@@ -390,7 +356,6 @@ struct phy_c45_device_ids {
* giving up on the current attempt at acquiring a link
* irq: IRQ number of the PHY's interrupt (-1 if none)
* phy_timer: The timer for handling the state machine
- * phy_queue: A work_queue for the phy_mac_interrupt
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
@@ -427,6 +392,9 @@ struct phy_device {
/* The most recently read link state */
unsigned link:1;
+ /* Interrupts are enabled */
+ unsigned interrupts:1;
+
enum phy_state state;
u32 dev_flags;
@@ -442,14 +410,11 @@ struct phy_device {
int pause;
int asym_pause;
- /* Enabled Interrupts */
- u32 interrupts;
-
- /* Union of PHY and Attached devices' supported modes */
- /* See mii.h for more info */
- u32 supported;
- u32 advertising;
- u32 lp_advertising;
+ /* Union of PHY and Attached devices' supported link modes */
+ /* See ethtool.h for more info */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
/* Energy efficient ethernet modes which should be prohibited */
u32 eee_broken_modes;
@@ -475,7 +440,6 @@ struct phy_device {
void *priv;
/* Interrupt and Polling infrastructure */
- struct work_struct phy_queue;
struct delayed_work state_queue;
struct mutex lock;
@@ -674,6 +638,10 @@ struct phy_driver {
#define PHY_ANY_ID "MATCH ANY PHY"
#define PHY_ANY_UID 0xffffffff
+#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
+#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
+#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
@@ -697,9 +665,31 @@ struct phy_setting {
const struct phy_setting *
phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
- size_t maxbit, bool exact);
+ bool exact);
size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask, size_t maxbit);
+ unsigned long *mask);
+
+static inline bool __phy_is_started(struct phy_device *phydev)
+{
+ WARN_ON(!mutex_is_locked(&phydev->lock));
+
+ return phydev->state >= PHY_UP;
+}
+
+/**
+ * phy_is_started - Convenience function to check whether PHY is started
+ * @phydev: The phy_device struct
+ */
+static inline bool phy_is_started(struct phy_device *phydev)
+{
+ bool started;
+
+ mutex_lock(&phydev->lock);
+ started = __phy_is_started(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return started;
+}
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
@@ -1050,11 +1040,9 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_state_machine(struct work_struct *work);
-void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
-void phy_trigger_machine(struct phy_device *phydev);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd);
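Not part of the diff itself: a minimal sketch of how a PHY driver might use the new PHY_ID_MATCH_*() helpers and the ethtool link-mode bitmaps introduced above. The device ID is invented and the callbacks are illustrative; linkmode_set_bit() comes from <linux/linkmode.h>.

#include <linux/phy.h>
#include <linux/linkmode.h>
#include <linux/module.h>

/* Made-up OUI/model value, matched on vendor+model while ignoring revision */
#define EXAMPLE_PHY_ID	0x004d0000

static int example_config_init(struct phy_device *phydev)
{
	/* advertising is now an ethtool link-mode bitmap, not a u32 mask */
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			 phydev->advertising);
	return 0;
}

static struct phy_driver example_drivers[] = {
	{
		PHY_ID_MATCH_MODEL(EXAMPLE_PHY_ID),
		.name		= "Example Gigabit PHY",
		.config_init	= example_config_init,
	},
};
module_phy_driver(example_drivers);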
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index ee54453a40a0..9525567b1951 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,6 +13,7 @@ struct fixed_phy_status {
struct device_node;
#if IS_ENABLED(CONFIG_FIXED_PHY)
+extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
extern int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status,
int link_gpio);
@@ -47,6 +48,10 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
{
return -ENODEV;
}
+static inline int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index b37b05bfd1a6..4587ce362535 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -20,7 +20,7 @@ struct phy_device;
#include <linux/leds.h>
#include <linux/phy.h>
-#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
+#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 11
#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
FIELD_SIZEOF(struct mdio_device, addr)+\
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 85ad68f9206a..7fe80f1c7e08 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -79,6 +79,7 @@ struct davinci_mcasp_pdata {
/* McASP specific fields */
int tdm_slots;
u8 op_mode;
+ u8 dismod;
u8 num_serializer;
u8 *serial_dir;
u8 version;
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index f92a47e18034..a93841bfb9f7 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -17,6 +17,8 @@
#define __DAVINCI_GPIO_PLATFORM_H
struct davinci_gpio_platform_data {
+ bool no_auto_base;
+ u32 base;
u32 ngpio;
u32 gpio_unbanked;
};
diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h
new file mode 100644
index 000000000000..13874fa6e767
--- /dev/null
+++ b/include/linux/platform_data/mdio-gpio.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MDIO-GPIO bus platform data structure
+ */
+
+#ifndef __LINUX_MDIO_GPIO_PDATA_H
+#define __LINUX_MDIO_GPIO_PDATA_H
+
+struct mdio_gpio_platform_data {
+ u32 phy_mask;
+ u32 phy_ignore_ta_mask;
+};
+
+#endif /* __LINUX_MDIO_GPIO_PDATA_H */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index e723b78d8357..0bd9de116826 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -26,6 +26,7 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/completion.h>
/*
@@ -608,7 +609,7 @@ struct dev_pm_info {
unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM
- struct timer_list suspend_timer;
+ struct hrtimer suspend_timer;
unsigned long timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
@@ -631,7 +632,7 @@ struct dev_pm_info {
enum rpm_status runtime_status;
int runtime_error;
int autosuspend_delay;
- unsigned long last_busy;
+ u64 last_busy;
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 3b5d7280e52e..dd364abb649a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -73,6 +73,7 @@ struct genpd_power_state {
struct genpd_lock_ops;
struct dev_pm_opp;
+struct opp_table;
struct generic_pm_domain {
struct device dev;
@@ -94,6 +95,7 @@ struct generic_pm_domain {
unsigned int performance_state; /* Aggregated max performance state */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
+ struct opp_table *opp_table; /* OPP table of the genpd */
unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
@@ -134,6 +136,10 @@ struct gpd_link {
struct list_head master_node;
struct generic_pm_domain *slave;
struct list_head slave_node;
+
+ /* Sub-domain's per-master domain performance state */
+ unsigned int performance_state;
+ unsigned int prev_performance_state;
};
struct gpd_timing_data {
@@ -258,8 +264,8 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent,
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
-unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *np);
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -300,8 +306,8 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
}
static inline unsigned int
-of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *np)
+pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp)
{
return 0;
}
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 5d399eeef172..0a2a88e5a383 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -126,6 +126,9 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
void dev_pm_opp_put_clkname(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
+struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index);
+void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev);
+int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
@@ -272,6 +275,18 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const
static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
+static inline struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev) {}
+
+static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate)
+{
+ return -ENOTSUPP;
+}
+
static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
return -ENOTSUPP;
@@ -305,8 +320,8 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
-struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
+int of_get_required_opp_performance_state(struct device_node *np, int index);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
@@ -341,13 +356,13 @@ static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device
return NULL;
}
-static inline struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np)
+static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
return NULL;
}
-static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
+static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
{
- return NULL;
+ return -ENOTSUPP;
}
#endif
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index f0fc4700b6ff..54af4eef169f 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -51,7 +51,7 @@ extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
-extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
- WRITE_ONCE(dev->power.last_busy, jiffies);
+ WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
}
static inline bool pm_runtime_is_irq_safe(struct device *dev)
@@ -168,7 +168,7 @@ static inline void __pm_runtime_use_autosuspend(struct device *dev,
bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
int delay) {}
-static inline unsigned long pm_runtime_autosuspend_expiration(
+static inline u64 pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index 7b81dad712de..d0b37e937037 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OMAP Smartreflex Defines and Routines
*
@@ -11,10 +12,6 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Lesly A M <x0080970@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __POWER_SMARTREFLEX_H
@@ -303,9 +300,6 @@ void omap_sr_enable(struct voltagedomain *voltdm);
void omap_sr_disable(struct voltagedomain *voltdm);
void omap_sr_disable_reset_volt(struct voltagedomain *voltdm);
-/* API to register the pmic specific data with the smartreflex driver. */
-void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data);
-
/* Smartreflex driver hooks to be called from Smartreflex class driver */
int sr_enable(struct omap_sr *sr, unsigned long volt);
void sr_disable(struct omap_sr *sr);
@@ -320,7 +314,5 @@ static inline void omap_sr_enable(struct voltagedomain *voltdm) {}
static inline void omap_sr_disable(struct voltagedomain *voltdm) {}
static inline void omap_sr_disable_reset_volt(
struct voltagedomain *voltdm) {}
-static inline void omap_sr_register_pmic(
- struct omap_sr_pmic_data *pmic_data) {}
#endif
#endif
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index c01813c3fbe9..dd92b1a93919 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -53,9 +53,6 @@
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-/* We use the MSB mostly because its available */
-#define PREEMPT_NEED_RESCHED 0x80000000
-
#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/*
diff --git a/include/linux/printk.h b/include/linux/printk.h
index cf3eccfe1543..55aa96975fa2 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -166,11 +166,6 @@ int vprintk_emit(int facility, int level,
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
-asmlinkage __printf(5, 6) __cold
-int printk_emit(int facility, int level,
- const char *dict, size_t dictlen,
- const char *fmt, ...);
-
asmlinkage __printf(1, 2) __cold
int printk(const char *fmt, ...);
diff --git a/include/linux/property.h b/include/linux/property.h
index ac8a1ebc4c1b..3789ec755fb6 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -311,4 +311,16 @@ fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+/* -------------------------------------------------------------------------- */
+/* Software fwnode support - when HW description is incomplete or missing */
+
+bool is_software_node(const struct fwnode_handle *fwnode);
+
+int software_node_notify(struct device *dev, unsigned long action);
+
+struct fwnode_handle *
+fwnode_create_software_node(const struct property_entry *properties,
+ const struct fwnode_handle *parent);
+void fwnode_remove_software_node(struct fwnode_handle *fwnode);
+
#endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 8e0725aac0aa..7006008d5b72 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_PSI_H
#define _LINUX_PSI_H
+#include <linux/jump_label.h>
#include <linux/psi_types.h>
#include <linux/sched.h>
@@ -9,7 +10,7 @@ struct css_set;
#ifdef CONFIG_PSI
-extern bool psi_disabled;
+extern struct static_key_false psi_disabled;
void psi_init(void);
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index a15bc4d48752..b146181e8709 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -26,27 +26,38 @@
#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/semaphore.h>
#include <linux/time.h>
#include <linux/types.h>
struct module;
-/* pstore record types (see fs/pstore/inode.c for filename templates) */
+/*
+ * pstore record types (see fs/pstore/platform.c for pstore_type_names[])
+ * These values may be written to storage (see EFI vars backend), so
+ * they are kind of an ABI. Be careful changing the mappings.
+ */
enum pstore_type_id {
+ /* Frontend storage types */
PSTORE_TYPE_DMESG = 0,
PSTORE_TYPE_MCE = 1,
PSTORE_TYPE_CONSOLE = 2,
PSTORE_TYPE_FTRACE = 3,
- /* PPC64 partition types */
+
+ /* PPC64-specific partition types */
PSTORE_TYPE_PPC_RTAS = 4,
PSTORE_TYPE_PPC_OF = 5,
PSTORE_TYPE_PPC_COMMON = 6,
PSTORE_TYPE_PMSG = 7,
PSTORE_TYPE_PPC_OPAL = 8,
- PSTORE_TYPE_UNKNOWN = 255
+
+ /* End of the list */
+ PSTORE_TYPE_MAX
};
+const char *pstore_type_to_name(enum pstore_type_id type);
+enum pstore_type_id pstore_name_to_type(const char *name);
+
struct pstore_info;
/**
* struct pstore_record - details of a pstore record entry
@@ -85,12 +96,15 @@ struct pstore_record {
/**
* struct pstore_info - backend pstore driver structure
*
- * @owner: module which is repsonsible for this backend driver
+ * @owner: module which is responsible for this backend driver
* @name: name of the backend driver
*
- * @buf_lock: spinlock to serialize access to @buf
+ * @buf_lock: semaphore to serialize access to @buf
* @buf: preallocated crash dump buffer
- * @bufsize: size of @buf available for crash dump writes
+ * @bufsize: size of @buf available for crash dump bytes (must match
+ * smallest number of bytes available for writing to a
+ * backend entry, since compressed bytes don't take kindly
+ * to being truncated)
*
* @read_mutex: serializes @open, @read, @close, and @erase callbacks
* @flags: bitfield of frontends the backend can accept writes for
@@ -170,7 +184,7 @@ struct pstore_info {
struct module *owner;
char *name;
- spinlock_t buf_lock;
+ struct semaphore buf_lock;
char *buf;
size_t bufsize;
@@ -189,14 +203,13 @@ struct pstore_info {
};
/* Supported frontends */
-#define PSTORE_FLAGS_DMESG (1 << 0)
-#define PSTORE_FLAGS_CONSOLE (1 << 1)
-#define PSTORE_FLAGS_FTRACE (1 << 2)
-#define PSTORE_FLAGS_PMSG (1 << 3)
+#define PSTORE_FLAGS_DMESG BIT(0)
+#define PSTORE_FLAGS_CONSOLE BIT(1)
+#define PSTORE_FLAGS_FTRACE BIT(2)
+#define PSTORE_FLAGS_PMSG BIT(3)
extern int pstore_register(struct pstore_info *);
extern void pstore_unregister(struct pstore_info *);
-extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
struct pstore_ftrace_record {
unsigned long ip;
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 602d64725222..337971c41980 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/pstore.h>
#include <linux/types.h>
/*
@@ -30,6 +31,11 @@
* PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
*/
#define PRZ_FLAG_NO_LOCK BIT(0)
+/*
+ * If a PRZ should only have a single-boot lifetime, this marks it as
+ * getting wiped after its contents get copied out after boot.
+ */
+#define PRZ_FLAG_ZAP_OLD BIT(1)
struct persistent_ram_buffer;
struct rs_control;
@@ -42,17 +48,55 @@ struct persistent_ram_ecc_info {
uint16_t *par;
};
+/**
+ * struct persistent_ram_zone - Details of a persistent RAM zone (PRZ)
+ * used as a pstore backend
+ *
+ * @paddr: physical address of the mapped RAM area
+ * @size: size of mapping
+ * @label: unique name of this PRZ
+ * @type: frontend type for this PRZ
+ * @flags: holds PRZ_FLAG_* bits
+ *
+ * @buffer_lock:
+ * locks access to @buffer "size" bytes and "start" offset
+ * @buffer:
+ * pointer to actual RAM area managed by this PRZ
+ * @buffer_size:
+ * bytes in @buffer->data (not including any trailing ECC bytes)
+ *
+ * @par_buffer:
+ * pointer into @buffer->data containing ECC bytes for @buffer->data
+ * @par_header:
+ * pointer into @buffer->data containing ECC bytes for @buffer header
+ * (i.e. all fields up to @data)
+ * @rs_decoder:
+ * RSLIB instance for doing ECC calculations
+ * @corrected_bytes:
+ * ECC corrected bytes accounting since boot
+ * @bad_blocks:
+ * ECC uncorrectable bytes accounting since boot
+ * @ecc_info:
+ * ECC configuration details
+ *
+ * @old_log:
+ * saved copy of @buffer->data prior to most recent wipe
+ * @old_log_size:
+ * bytes contained in @old_log
+ *
+ */
struct persistent_ram_zone {
phys_addr_t paddr;
size_t size;
void *vaddr;
char *label;
- struct persistent_ram_buffer *buffer;
- size_t buffer_size;
+ enum pstore_type_id type;
u32 flags;
+
raw_spinlock_t buffer_lock;
+ struct persistent_ram_buffer *buffer;
+ size_t buffer_size;
- /* ECC correction */
char *par_buffer;
char *par_header;
struct rs_control *rs_decoder;
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 51349d124ee5..7121bbe76979 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -39,6 +39,15 @@ struct ptp_clock_request {
};
struct system_device_crosststamp;
+
+/**
+ * struct ptp_system_timestamp - system time corresponding to a PHC timestamp
+ */
+struct ptp_system_timestamp {
+ struct timespec64 pre_ts;
+ struct timespec64 post_ts;
+};
+
/**
 * struct ptp_clock_info - describes a PTP hardware clock
*
@@ -73,8 +82,18 @@ struct system_device_crosststamp;
* parameter delta: Desired change in nanoseconds.
*
* @gettime64: Reads the current time from the hardware clock.
+ * This method is deprecated. New drivers should implement
+ * the @gettimex64 method instead.
* parameter ts: Holds the result.
*
+ * @gettimex64: Reads the current time from the hardware clock and optionally
+ * also the system clock.
+ * parameter ts: Holds the PHC timestamp.
+ * parameter sts: If not NULL, it holds a pair of timestamps from
+ * the system clock. The first reading is made right before
+ * reading the lowest bits of the PHC timestamp and the second
+ * reading immediately follows that.
+ *
* @getcrosststamp: Reads the current time from the hardware clock and
* system clock simultaneously.
* parameter cts: Contains timestamp (device,system) pair,
@@ -124,6 +143,8 @@ struct ptp_clock_info {
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts);
int (*getcrosststamp)(struct ptp_clock_info *ptp,
struct system_device_crosststamp *cts);
int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
@@ -247,4 +268,16 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp,
#endif
+static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts)
+{
+ if (sts)
+ ktime_get_real_ts64(&sts->pre_ts);
+}
+
+static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts)
+{
+ if (sts)
+ ktime_get_real_ts64(&sts->post_ts);
+}
+
#endif
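Not from the patch: a sketch of how a driver's gettimex64() callback is expected to bracket the hardware read with ptp_read_system_prets()/ptp_read_system_postts(); example_read_phc() stands in for the real register access.

static int example_gettimex64(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	u64 ns;

	/* First system timestamp, taken right before latching the PHC */
	ptp_read_system_prets(sts);
	ns = example_read_phc(ptp);		/* hypothetical HW access */
	/* Second system timestamp, immediately after the PHC read */
	ptp_read_system_postts(sts);

	*ts = ns_to_timespec64(ns);
	return 0;
}

static struct ptp_clock_info example_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "example-phc",
	.gettimex64	= example_gettimex64,
	/* settime64, adjfine, ... as before */
};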
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 6c2ffed907f5..edb9b040c94c 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -64,15 +64,12 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_NOAUDIT 0x04
#define PTRACE_MODE_FSCREDS 0x08
#define PTRACE_MODE_REALCREDS 0x10
-#define PTRACE_MODE_SCHED 0x20
-#define PTRACE_MODE_IBPB 0x40
/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
-#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
/**
* ptrace_may_access - check whether the caller is permitted to access
@@ -90,20 +87,6 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
*/
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
-/**
- * ptrace_may_access - check whether the caller is permitted to access
- * a target task.
- * @task: target task
- * @mode: selects type of access and caller credentials
- *
- * Returns true on success, false on denial.
- *
- * Similar to ptrace_may_access(). Only to be called from context switch
- * code. Does not call into audit and the regular LSM hooks due to locking
- * constraints.
- */
-extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
-
static inline int ptrace_reparented(struct task_struct *child)
{
return !same_thread_group(child->real_parent, child->parent);
@@ -428,4 +411,5 @@ extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);
+extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
#endif
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 56518adc31dd..d5199b507d79 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -349,42 +349,6 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
}
/**
- * pwm_set_polarity() - configure the polarity of a PWM signal
- * @pwm: PWM device
- * @polarity: new polarity of the PWM signal
- *
- * Note that the polarity cannot be configured while the PWM device is
- * enabled.
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-static inline int pwm_set_polarity(struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct pwm_state state;
-
- if (!pwm)
- return -EINVAL;
-
- pwm_get_state(pwm, &state);
- if (state.polarity == polarity)
- return 0;
-
- /*
- * Changing the polarity of a running PWM without adjusting the
- * dutycycle/period value is a bit risky (can introduce glitches).
- * Return -EBUSY in this case.
- * Note that this is allowed when using pwm_apply_state() because
- * the user specifies all the parameters.
- */
- if (state.enabled)
- return -EBUSY;
-
- state.polarity = polarity;
- return pwm_apply_state(pwm, &state);
-}
-
-/**
* pwm_enable() - start a PWM output toggling
* @pwm: PWM device
*
@@ -483,12 +447,6 @@ static inline int pwm_capture(struct pwm_device *pwm,
return -EINVAL;
}
-static inline int pwm_set_polarity(struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- return -ENOTSUPP;
-}
-
static inline int pwm_enable(struct pwm_device *pwm)
{
return -EINVAL;
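Not part of the patch: with pwm_set_polarity() removed, polarity changes go through the atomic API instead. A minimal sketch, assuming an already requested pwm_device:

static int example_invert_pwm(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);
	/* Polarity, period and duty cycle are applied in one atomic call,
	 * which is why the old helper could be dropped.
	 */
	state.polarity = PWM_POLARITY_INVERSED;
	return pwm_apply_state(pwm, &state);
}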
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index a47321a0d572..91c536a01b56 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -47,6 +47,7 @@
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
@@ -448,11 +449,24 @@ struct qed_mfw_tlv_iscsi {
bool tx_bytes_set;
};
+enum qed_db_rec_width {
+ DB_REC_WIDTH_32B,
+ DB_REC_WIDTH_64B,
+};
+
+enum qed_db_rec_space {
+ DB_REC_KERNEL,
+ DB_REC_USER,
+};
+
#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
(void __iomem *)(reg_addr))
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
+#define DIRECT_REG_WR64(reg_addr, val) writeq((u32)val, \
+ (void __iomem *)(reg_addr))
+
#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
#define QED_DEFAULT_TX_USECS 48
@@ -1015,6 +1029,33 @@ struct qed_common_ops {
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
+/**
+ * @brief db_recovery_add - add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address of where db_data is stored
+ * @param db_width - doorbell is 32b or 64b
+ * @param db_space - doorbell recovery addresses are user or kernel space
+ */
+ int (*db_recovery_add)(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space);
+
+/**
+ * @brief db_recovery_del - remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address where db_data is stored. Serves as key for the
+ * entry to delete.
+ */
+ int (*db_recovery_del)(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data);
/**
* @brief update_drv_state - API to inform the change in the driver state.
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 8a16c3eb3dd0..c0578ba23c1a 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -31,21 +31,4 @@ do { \
#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
-/**
- * synchronize_rcu_mult - Wait concurrently for multiple grace periods
- * @...: List of call_rcu() functions for different grace periods to wait on
- *
- * This macro waits concurrently for multiple types of RCU grace periods.
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
- * on concurrent RCU and RCU-tasks grace periods. Waiting on a give SRCU
- * domain requires you to write a wrapper function for that SRCU domain's
- * call_srcu() function, supplying the corresponding srcu_struct.
- *
- * If Tiny RCU, tell _wait_rcu_gp() does not bother waiting for RCU,
- * given that anywhere synchronize_rcu_mult() can be called is automatically
- * a grace period.
- */
-#define synchronize_rcu_mult(...) \
- _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
-
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index a367d59c301d..1781b6cb793c 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1089,27 +1089,48 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
+/**
+ * struct regmap_irq_type - IRQ type definitions.
+ *
+ * @type_reg_offset: Offset register for the irq type setting.
+ * @type_rising_val: Register value to configure RISING type irq.
+ * @type_falling_val: Register value to configure FALLING type irq.
+ * @type_level_low_val: Register value to configure LEVEL_LOW type irq.
+ * @type_level_high_val: Register value to configure LEVEL_HIGH type irq.
+ * @types_supported: logical OR of IRQ_TYPE_* flags indicating supported types.
+ */
+struct regmap_irq_type {
+ unsigned int type_reg_offset;
+ unsigned int type_reg_mask;
+ unsigned int type_rising_val;
+ unsigned int type_falling_val;
+ unsigned int type_level_low_val;
+ unsigned int type_level_high_val;
+ unsigned int types_supported;
+};
/**
* struct regmap_irq - Description of an IRQ for the generic regmap irq_chip.
*
* @reg_offset: Offset of the status/mask register within the bank
* @mask: Mask used to flag/control the register.
- * @type_reg_offset: Offset register for the irq type setting.
- * @type_rising_mask: Mask bit to configure RISING type irq.
- * @type_falling_mask: Mask bit to configure FALLING type irq.
+ * @type: IRQ trigger type setting details if supported.
*/
struct regmap_irq {
unsigned int reg_offset;
unsigned int mask;
- unsigned int type_reg_offset;
- unsigned int type_rising_mask;
- unsigned int type_falling_mask;
+ struct regmap_irq_type type;
};
#define REGMAP_IRQ_REG(_irq, _off, _mask) \
[_irq] = { .reg_offset = (_off), .mask = (_mask) }
+#define REGMAP_IRQ_REG_LINE(_id, _reg_bits) \
+ [_id] = { \
+ .mask = BIT((_id) % (_reg_bits)), \
+ .reg_offset = (_id) / (_reg_bits), \
+ }
+
/**
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
@@ -1131,6 +1152,12 @@ struct regmap_irq {
* @ack_invert: Inverted ack register: cleared bits for ack.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
* @type_invert: Invert the type flags.
+ * @type_in_mask: Use the mask registers for controlling irq type. For
+ * interrupts defining type_rising/falling_mask use mask_base
+ * for edge configuration and never update bits in type_base.
+ * @clear_on_unmask: For chips with interrupts cleared on read: read the status
+ * registers before unmasking interrupts to clear any bits
+ * set when they were masked.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
*
* @num_regs: Number of registers in each control bank.
@@ -1169,6 +1196,8 @@ struct regmap_irq_chip {
bool wake_invert:1;
bool runtime_pm:1;
bool type_invert:1;
+ bool type_in_mask:1;
+ bool clear_on_unmask:1;
int num_regs;
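A sketch (not in the patch) of an irq_chip description using the new REGMAP_IRQ_REG_LINE() helper and per-IRQ trigger-type data; all register offsets and values below are invented for illustration.

/* IRQ n lives in 8-bit register n/8, bit n%8 */
static const struct regmap_irq example_irqs[] = {
	REGMAP_IRQ_REG_LINE(0, 8),
	REGMAP_IRQ_REG_LINE(1, 8),
	[2] = {
		.reg_offset = 0,
		.mask = BIT(2),
		.type = {
			.type_reg_offset  = 0x10,	/* invented */
			.type_reg_mask    = 0x03,
			.type_rising_val  = 0x01,
			.type_falling_val = 0x02,
			.types_supported  = IRQ_TYPE_EDGE_RISING |
					    IRQ_TYPE_EDGE_FALLING,
		},
	},
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.status_base	= 0x00,		/* invented */
	.mask_base	= 0x04,		/* invented */
	.num_regs	= 1,
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
};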
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 25602afd4844..f3f76051e8b0 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -508,7 +508,7 @@ static inline int regulator_get_error_flags(struct regulator *regulator,
static inline int regulator_set_load(struct regulator *regulator, int load_uA)
{
- return REGULATOR_MODE_NORMAL;
+ return 0;
}
static inline int regulator_allow_bypass(struct regulator *regulator,
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index a9c030192147..389bcaf7900f 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -15,11 +15,12 @@
#ifndef __LINUX_REGULATOR_DRIVER_H_
#define __LINUX_REGULATOR_DRIVER_H_
-#define MAX_COUPLED 4
+#define MAX_COUPLED 2
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>
+#include <linux/ww_mutex.h>
struct gpio_desc;
struct regmap;
@@ -462,7 +463,7 @@ struct regulator_dev {
struct coupling_desc coupling_desc;
struct blocking_notifier_head notifier;
- struct mutex mutex; /* consumer lock */
+ struct ww_mutex mutex; /* consumer lock */
struct task_struct *mutex_owner;
int ref_cnt;
struct module *owner;
@@ -473,7 +474,6 @@ struct regulator_dev {
struct regmap *regmap;
struct delayed_work disable_work;
- int deferred_disables;
void *reg_data; /* regulator_dev data */
@@ -545,4 +545,7 @@ int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
bool enable);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
+void regulator_lock(struct regulator_dev *rdev);
+void regulator_unlock(struct regulator_dev *rdev);
+
#endif
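Not from the patch: since rdev->mutex is now a ww_mutex, drivers should take it through the new regulator_lock()/regulator_unlock() helpers rather than locking the mutex directly. A hedged sketch of an over-current interrupt handler doing so:

static irqreturn_t example_oc_irq(int irq, void *data)
{
	struct regulator_dev *rdev = data;

	/* The helpers hide the ww_mutex details from drivers */
	regulator_lock(rdev);
	regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_CURRENT, NULL);
	regulator_unlock(rdev);

	return IRQ_HANDLED;
}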
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index a459a5e973a7..1d34a70ffda2 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -158,6 +158,9 @@ struct regulation_constraints {
/* used for coupled regulators */
int max_spread;
+ /* used for changing voltage in steps */
+ int max_uV_step;
+
/* valid regulator operating modes for this machine */
unsigned int valid_modes_mask;
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index cb5aecd40f07..331d7d940c7a 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -33,7 +33,8 @@
#define PFUZE100_VGEN4 12
#define PFUZE100_VGEN5 13
#define PFUZE100_VGEN6 14
-#define PFUZE100_MAX_REGULATOR 15
+#define PFUZE100_COIN 15
+#define PFUZE100_MAX_REGULATOR 16
#define PFUZE200_SW1AB 0
#define PFUZE200_SW2 1
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 02166e815afb..2f0ffca35780 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -68,7 +68,6 @@ struct reservation_object_list {
* @seq: sequence count for managing RCU read-side synchronization
* @fence_excl: the exclusive fence, if there is one currently
* @fence: list of current shared fences
- * @staged: staged copy of shared fences for RCU updates
*/
struct reservation_object {
struct ww_mutex lock;
@@ -76,7 +75,6 @@ struct reservation_object {
struct dma_fence __rcu *fence_excl;
struct reservation_object_list __rcu *fence;
- struct reservation_object_list *staged;
};
#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
@@ -95,7 +93,6 @@ reservation_object_init(struct reservation_object *obj)
__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
- obj->staged = NULL;
}
/**
@@ -124,7 +121,6 @@ reservation_object_fini(struct reservation_object *obj)
kfree(fobj);
}
- kfree(obj->staged);
ww_mutex_destroy(&obj->lock);
}
@@ -218,6 +214,11 @@ reservation_object_trylock(struct reservation_object *obj)
static inline void
reservation_object_unlock(struct reservation_object *obj)
{
+#ifdef CONFIG_DEBUG_MUTEXES
+ /* Test shared fence slot reservation */
+ if (obj->fence)
+ obj->fence->shared_max = obj->fence->shared_count;
+#endif
ww_mutex_unlock(&obj->lock);
}
@@ -265,7 +266,8 @@ reservation_object_get_excl_rcu(struct reservation_object *obj)
return fence;
}
-int reservation_object_reserve_shared(struct reservation_object *obj);
+int reservation_object_reserve_shared(struct reservation_object *obj,
+ unsigned int num_fences);
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct dma_fence *fence);
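A sketch (not in the patch) of the reserve-then-add pattern now that reservation_object_reserve_shared() takes the number of fence slots to pre-allocate:

static int example_add_shared(struct reservation_object *resv,
			      struct dma_fence *fence)
{
	int ret;

	ret = reservation_object_lock(resv, NULL);
	if (ret)
		return ret;

	/* Reserve one slot up front so the add below cannot fail */
	ret = reservation_object_reserve_shared(resv, 1);
	if (!ret)
		reservation_object_add_shared_fence(resv, fence);

	reservation_object_unlock(resv);
	return ret;
}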
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index eb7111039247..20f9c6af7473 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -75,8 +75,19 @@ struct bucket_table {
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
+/*
+ * NULLS_MARKER() expects a hash value with the low
+ * bits mostly likely to be significant, and it discards
+ * the msb.
+ * We git it an address, in which the bottom 2 bits are
+ * always 0, and the msb might be significant.
+ * So we shift the address down one bit to align with
+ * expectations and avoid losing a significant bit.
+ */
+#define RHT_NULLS_MARKER(ptr) \
+ ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr) \
- ((ptr) = (typeof(ptr)) NULLS_MARKER(0))
+ ((ptr) = RHT_NULLS_MARKER(&(ptr)))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
@@ -471,6 +482,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
+ struct rhash_head __rcu * const *head;
struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -478,13 +490,19 @@ static inline struct rhash_head *__rhashtable_lookup(
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
- rht_for_each_rcu(he, tbl, hash) {
- if (params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, he)) :
- rhashtable_compare(&arg, rht_obj(ht, he)))
- continue;
- return he;
- }
+ head = rht_bucket(tbl, hash);
+ do {
+ rht_for_each_rcu_continue(he, *head, tbl, hash) {
+ if (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+ rhashtable_compare(&arg, rht_obj(ht, he)))
+ continue;
+ return he;
+ }
+ /* An object might have been moved to a different hash chain,
+ * while we walk along it - better check and retry.
+ */
+ } while (he != RHT_NULLS_MARKER(head));
/* Ensure we see any new tables. */
smp_rmb();
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 804a50983ec5..14d558146aea 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -30,14 +30,24 @@ struct seq_file;
*/
struct sbitmap_word {
/**
- * @word: The bitmap word itself.
+ * @depth: Number of bits being used in @word/@cleared
*/
- unsigned long word;
+ unsigned long depth;
/**
- * @depth: Number of bits being used in @word.
+ * @word: word holding free bits
*/
- unsigned long depth;
+ unsigned long word ____cacheline_aligned_in_smp;
+
+ /**
+ * @cleared: word holding cleared bits
+ */
+ unsigned long cleared ____cacheline_aligned_in_smp;
+
+ /**
+ * @swap_lock: Held while swapping word <-> cleared
+ */
+ spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
@@ -125,6 +135,11 @@ struct sbitmap_queue {
*/
struct sbq_wait_state *ws;
+ /*
+ * @ws_active: count of currently active ws waitqueues
+ */
+ atomic_t ws_active;
+
/**
* @round_robin: Allocate bits in strict round-robin order.
*/
@@ -250,12 +265,14 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb,
nr = SB_NR_TO_BIT(sb, start);
while (scanned < sb->depth) {
- struct sbitmap_word *word = &sb->map[index];
- unsigned int depth = min_t(unsigned int, word->depth - nr,
+ unsigned long word;
+ unsigned int depth = min_t(unsigned int,
+ sb->map[index].depth - nr,
sb->depth - scanned);
scanned += depth;
- if (!word->word)
+ word = sb->map[index].word & ~sb->map[index].cleared;
+ if (!word)
goto next;
/*
@@ -265,7 +282,7 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb,
*/
depth += nr;
while (1) {
- nr = find_next_bit(&word->word, depth, nr);
+ nr = find_next_bit(&word, depth, nr);
if (nr >= depth)
break;
if (!fn(sb, (index << sb->shift) + nr, data))
@@ -310,6 +327,19 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
+/*
+ * This one is special, since it doesn't actually clear the bit, rather it
+ * sets the corresponding bit in the ->cleared mask instead. Paired with
+ * the caller doing sbitmap_batch_clear() if a given index is full, which
+ * will clear the previously freed entries in the corresponding ->word.
+ */
+static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
+{
+ unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;
+
+ set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
+}
+
static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
unsigned int bitnr)
{
@@ -321,8 +351,6 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
-unsigned int sbitmap_weight(const struct sbitmap *sb);
-
/**
* sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
* @sb: Bitmap to show.
@@ -531,4 +559,45 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
*/
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
+struct sbq_wait {
+ struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */
+ struct wait_queue_entry wait;
+};
+
+#define DEFINE_SBQ_WAIT(name) \
+ struct sbq_wait name = { \
+ .sbq = NULL, \
+ .wait = { \
+ .private = current, \
+ .func = autoremove_wake_function, \
+ .entry = LIST_HEAD_INIT((name).wait.entry), \
+ } \
+ }
+
+/*
+ * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
+ * internal state.
+ */
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait, int state);
+
+/*
+ * Must be paired with sbitmap_prepare_to_wait().
+ */
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait);
+
+/*
+ * Wrapper around add_wait_queue(), which maintains some extra internal state
+ */
+void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait);
+
+/*
+ * Must be paired with sbitmap_add_wait_queue()
+ */
+void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);
+
#endif /* __LINUX_SCALE_BITMAP_H */
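Not part of the patch: a rough sketch of how the new sbq_wait helpers are meant to be used by a tag allocator, modelled loosely on blk-mq's wait loop; __sbitmap_queue_get() is the existing allocation primitive.

static int example_get_tag(struct sbitmap_queue *sbq,
			   struct sbq_wait_state *ws)
{
	DEFINE_SBQ_WAIT(wait);
	int nr;

	do {
		/* Also accounts the waiter in sbq->ws_active */
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);

		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			break;

		io_schedule();
	} while (1);

	sbitmap_finish_wait(sbq, ws, &wait);
	return nr;
}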
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 093aa57120b0..b96f0d0b5b8f 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -324,10 +324,10 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
* Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
* is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
*/
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-#define SG_MAX_SEGMENTS 2048
-#else
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define SG_MAX_SEGMENTS SG_CHUNK_SIZE
+#else
+#define SG_MAX_SEGMENTS 2048
#endif
#ifdef CONFIG_SG_POOL
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a51c13c2b1a0..89541d248893 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -176,7 +176,7 @@ struct task_group;
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
* However, with slightly different timing the wakeup TASK_RUNNING store can
- * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
* a problem either because that will result in one extra go around the loop
* and our @cond test will save the day.
*
@@ -515,7 +515,7 @@ struct sched_dl_entity {
/*
* Actual scheduling parameters. Initialized with the values above,
- * they are continously updated during task execution. Note that
+ * they are continuously updated during task execution. Note that
* the remaining runtime could be < 0 in case we are in overrun.
*/
s64 runtime; /* Remaining runtime for this instance */
@@ -572,8 +572,10 @@ union rcu_special {
struct {
u8 blocked;
u8 need_qs;
+ u8 exp_hint; /* Hint for performance. */
+ u8 pad; /* No garbage from compiler! */
} b; /* Bits. */
- u16 s; /* Set of bits. */
+ u32 s; /* Set of bits. */
};
enum perf_event_task_context {
@@ -993,7 +995,7 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_RESCTRL
u32 closid;
u32 rmid;
#endif
@@ -1116,6 +1118,7 @@ struct task_struct {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
+ int curr_ret_depth;
/* Stack of return addresses for return function tracing: */
struct ftrace_ret_stack *ret_stack;
@@ -1453,6 +1456,8 @@ static inline bool is_percpu_thread(void)
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
+#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
@@ -1484,6 +1489,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index 59667444669f..afa940cd50dc 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -20,6 +20,12 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
void (*func)(struct update_util_data *data, u64 time,
unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
+
+static inline unsigned long map_util_freq(unsigned long util,
+ unsigned long freq, unsigned long cap)
+{
+ return (freq + (freq >> 2)) * util / cap;
+}
#endif /* CONFIG_CPU_FREQ */
#endif /* _LINUX_SCHED_CPUFREQ_H */
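For reference (not part of the patch): map_util_freq() applies the usual 25% headroom, computing (freq + freq/4) * util / cap. For example, with util = 614, cap = 1024 and a maximum frequency of 2,000,000 kHz, it returns (2,000,000 + 500,000) * 614 / 1024 ≈ 1,499,023 kHz, i.e. roughly 75% of the maximum for a ~60% utilized CPU.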
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index 4a6582c27dea..b0fb1446fe04 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -16,7 +16,7 @@ enum hk_flags {
};
#ifdef CONFIG_CPU_ISOLATION
-DECLARE_STATIC_KEY_FALSE(housekeeping_overriden);
+DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
extern int housekeeping_any_cpu(enum hk_flags flags);
extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
@@ -43,7 +43,7 @@ static inline void housekeeping_init(void) { }
static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
{
#ifdef CONFIG_CPU_ISOLATION
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
return housekeeping_test_cpu(cpu, flags);
#endif
return true;
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index aebb370a0006..3bfa6a0cbba4 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -153,7 +153,7 @@ static inline gfp_t current_gfp_context(gfp_t flags)
{
/*
* NOIO implies both NOIO and NOFS and it is a weaker context
- * so always make sure it makes precendence
+ * so always make sure it makes precedence
*/
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
flags &= ~(__GFP_IO | __GFP_FS);
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 000000000000..59d3736c454c
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/static_key.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern struct static_key_false sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+ return static_branch_likely(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+void arch_smt_update(void);
+
+#endif
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index f30954cc059d..568286411b43 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -8,7 +8,7 @@
* Various counters maintained by the scheduler and fork(),
* exposed via /proc, sys.c or used by drivers via these APIs.
*
- * ( Note that all these values are aquired without locking,
+ * ( Note that all these values are acquired without locking,
* so they can only be relied on in narrow circumstances. )
*/
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 6b9976180c1e..c31d3a47a47c 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -89,7 +89,6 @@ struct sched_domain {
unsigned int newidle_idx;
unsigned int wake_idx;
unsigned int forkexec_idx;
- unsigned int smt_gain;
int nohz_idle; /* NOHZ IDLE status */
int flags; /* See SD_* */
@@ -202,6 +201,14 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl);
# define SD_INIT_NAME(type)
#endif
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
+
#else /* CONFIG_SMP */
struct sched_domain_attr;
@@ -217,6 +224,14 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
return true;
}
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
+
#endif /* !CONFIG_SMP */
static inline int task_node(const struct task_struct *p)
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index d37518e89db2..d9d9de3fcf8e 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -224,7 +224,7 @@ struct sfp_eeprom_ext {
*
* See the SFF-8472 specification and related documents for the definition
* of these structure members. This can be obtained from
- * ftp://ftp.seagate.com/sff
+ * https://www.snia.org/technology-communities/sff/specifications
*/
struct sfp_eeprom_id {
struct sfp_eeprom_base base;
diff --git a/include/linux/signal.h b/include/linux/signal.h
index f428e86f4800..cc7e2c1cd444 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -273,6 +273,10 @@ extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern int sigprocmask(int, sigset_t *, sigset_t *);
+extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
+ sigset_t *oldset, size_t sigsetsize);
+extern void restore_user_sigmask(const void __user *usigmask,
+ sigset_t *sigsaved);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
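Not part of the patch: set_user_sigmask()/restore_user_sigmask() are meant to bracket a syscall's wait, as in this rough sketch of a *_pwait style handler; do_example_wait() is a placeholder.

static long example_pwait(const sigset_t __user *usigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	long ret;

	/* Copy in and temporarily install the caller-supplied mask */
	ret = set_user_sigmask(usigmask, &ksigmask, &sigsaved, sigsetsize);
	if (ret)
		return ret;

	ret = do_example_wait();	/* placeholder for the actual wait */

	/* Restore the original mask (handling a pending signal correctly) */
	restore_user_sigmask(usigmask, &sigsaved);
	return ret;
}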
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0ba687454267..93f56fddd92a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -245,6 +245,7 @@ struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
+struct skb_ext;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
@@ -254,7 +255,6 @@ struct nf_conntrack {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
- refcount_t use;
enum {
BRNF_PROTO_UNCHANGED,
BRNF_PROTO_8021Q,
@@ -481,10 +481,11 @@ static inline void sock_zerocopy_get(struct ubuf_info *uarg)
}
void sock_zerocopy_put(struct ubuf_info *uarg);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg);
+void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
+int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg);
@@ -615,6 +616,8 @@ typedef unsigned char *sk_buff_data_t;
* @pkt_type: Packet class
* @fclone: skbuff clone status
* @ipvs_property: skbuff is owned by ipvs
+ * @offload_fwd_mark: Packet was L2-forwarded in hardware
+ * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @tc_redirected: packet was redirected by a tc action
@@ -633,6 +636,7 @@ typedef unsigned char *sk_buff_data_t;
* @queue_mapping: Queue mapping for multiqueue devices
* @xmit_more: More SKBs are pending for this queue
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ * @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -662,6 +666,7 @@ typedef unsigned char *sk_buff_data_t;
* @data: Data head pointer
* @truesize: Buffer size
* @users: User count - see {datagram,tcp}.c
+ * @extensions: allocated extensions, valid if active_extensions is nonzero
*/
struct sk_buff {
@@ -709,15 +714,9 @@ struct sk_buff {
struct list_head tcp_tsorted_anchor;
};
-#ifdef CONFIG_XFRM
- struct sec_path *sp;
-#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
unsigned long _nfct;
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- struct nf_bridge_info *nf_bridge;
-#endif
unsigned int len,
data_len;
__u16 mac_len,
@@ -744,7 +743,9 @@ struct sk_buff {
head_frag:1,
xmit_more:1,
pfmemalloc:1;
-
+#ifdef CONFIG_SKB_EXTENSIONS
+ __u8 active_extensions;
+#endif
/* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header()
*/
@@ -777,6 +778,14 @@ struct sk_buff {
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_VLAN_PRESENT_BIT 7
+#else
+#define PKT_VLAN_PRESENT_BIT 0
+#endif
+#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
+ __u8 __pkt_vlan_present_offset[0];
+ __u8 vlan_present:1;
__u8 csum_complete_sw:1;
__u8 csum_level:2;
__u8 csum_not_inet:1;
@@ -784,13 +793,13 @@ struct sk_buff {
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
- __u8 ipvs_property:1;
+ __u8 ipvs_property:1;
__u8 inner_protocol_type:1;
__u8 remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
__u8 offload_fwd_mark:1;
- __u8 offload_mr_fwd_mark:1;
+ __u8 offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
__u8 tc_skip_classify:1;
@@ -858,6 +867,11 @@ struct sk_buff {
*data;
unsigned int truesize;
refcount_t users;
+
+#ifdef CONFIG_SKB_EXTENSIONS
+ /* only useable after checking ->active_extensions != 0 */
+ struct skb_ext *extensions;
+#endif
};
#ifdef __KERNEL__
@@ -1317,15 +1331,35 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
return is_zcopy ? skb_uarg(skb) : NULL;
}
-static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
+static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool *have_ref)
{
if (skb && uarg && !skb_zcopy(skb)) {
- sock_zerocopy_get(uarg);
+ if (unlikely(have_ref && *have_ref))
+ *have_ref = false;
+ else
+ sock_zerocopy_get(uarg);
skb_shinfo(skb)->destructor_arg = uarg;
skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}
}
+static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
+{
+ skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
+ skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+}
+
+static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
+{
+ return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
+}
+
+static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
+{
+ return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
+}
+
/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
@@ -1335,7 +1369,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
if (uarg->callback == sock_zerocopy_callback) {
uarg->zerocopy = uarg->zerocopy && zerocopy;
sock_zerocopy_put(uarg);
- } else {
+ } else if (!skb_zcopy_is_nouarg(skb)) {
uarg->callback(uarg, zerocopy);
}
@@ -1349,7 +1383,7 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
struct ubuf_info *uarg = skb_zcopy(skb);
if (uarg) {
- sock_zerocopy_put_abort(uarg);
+ sock_zerocopy_put_abort(uarg, false);
skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
}
}
@@ -1725,8 +1759,6 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
* The "__skb_xxxx()" functions are the non-atomic ones that
* can only be called with interrupts disabled.
*/
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
- struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
@@ -2508,10 +2540,8 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len);
static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
- if (unlikely(skb_is_nonlinear(skb))) {
- WARN_ON(1);
+ if (WARN_ON(skb_is_nonlinear(skb)))
return;
- }
skb->len = len;
skb_set_tail_pointer(skb, len);
}
@@ -3309,6 +3339,9 @@ static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
struct msghdr *msg);
+int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len,
+ struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
@@ -3329,7 +3362,6 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
-int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -3870,18 +3902,97 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
atomic_inc(&nfct->use);
}
#endif
+
+#ifdef CONFIG_SKB_EXTENSIONS
+enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+ SKB_EXT_BRIDGE_NF,
+#endif
+#ifdef CONFIG_XFRM
+ SKB_EXT_SEC_PATH,
+#endif
+ SKB_EXT_NUM, /* must be last */
+};
+
+/**
+ * struct skb_ext - sk_buff extensions
+ * @refcnt: 1 on allocation, deallocated on 0
+ * @offset: offset to add to @data to obtain extension address
+ * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
+ * @data: start of extension data, variable sized
+ *
+ * Note: offsets/lengths are stored in chunks of 8 bytes, which allows
+ * 'u8' fields to cover up to 2kb worth of extension data.
+ */
+struct skb_ext {
+ refcount_t refcnt;
+ u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
+ u8 chunks; /* same */
+ char data[0] __aligned(8);
+};
+
+void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
+void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
+void __skb_ext_put(struct skb_ext *ext);
+
+static inline void skb_ext_put(struct sk_buff *skb)
{
- if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
- kfree(nf_bridge);
+ if (skb->active_extensions)
+ __skb_ext_put(skb->extensions);
}
-static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
+
+static inline void __skb_ext_copy(struct sk_buff *dst,
+ const struct sk_buff *src)
+{
+ dst->active_extensions = src->active_extensions;
+
+ if (src->active_extensions) {
+ struct skb_ext *ext = src->extensions;
+
+ refcount_inc(&ext->refcnt);
+ dst->extensions = ext;
+ }
+}
+
+static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
+{
+ skb_ext_put(dst);
+ __skb_ext_copy(dst, src);
+}
+
+static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
{
- if (nf_bridge)
- refcount_inc(&nf_bridge->use);
+ return !!ext->offset[i];
}
-#endif /* CONFIG_BRIDGE_NETFILTER */
+
+static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
+{
+ return skb->active_extensions & (1 << id);
+}
+
+static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
+{
+ if (skb_ext_exist(skb, id))
+ __skb_ext_del(skb, id);
+}
+
+static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
+{
+ if (skb_ext_exist(skb, id)) {
+ struct skb_ext *ext = skb->extensions;
+
+ return (void *)ext + (ext->offset[id] << 3);
+ }
+
+ return NULL;
+}
+#else
+static inline void skb_ext_put(struct sk_buff *skb) {}
+static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
+static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
+static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+#endif /* CONFIG_SKB_EXTENSIONS */
+
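As a quick illustration of the helpers added above, a hedged sketch of how an in-kernel user would attach and later look up an extension; SKB_EXT_SEC_PATH is used as the example id and error handling is kept minimal:

	/* Sketch only: attach an extension, look it up, drop it when done. */
	static int use_secpath_ext(struct sk_buff *skb)
	{
		struct sec_path *sp;

		sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);	/* alloc/grow ->extensions */
		if (!sp)
			return -ENOMEM;

		/* ... fill in *sp ... */

		sp = skb_ext_find(skb, SKB_EXT_SEC_PATH);	/* base + (offset[id] << 3) */
		if (sp)
			skb_ext_del(skb, SKB_EXT_SEC_PATH);	/* clear bit, maybe free */

		return 0;
	}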
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -3889,8 +4000,7 @@ static inline void nf_reset(struct sk_buff *skb)
skb->_nfct = 0;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- nf_bridge_put(skb->nf_bridge);
- skb->nf_bridge = NULL;
+ skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
#endif
}
@@ -3908,7 +4018,7 @@ static inline void ipvs_reset(struct sk_buff *skb)
#endif
}
-/* Note: This doesn't put any conntrack and bridge info in dst. */
+/* Note: This doesn't put any conntrack info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy)
{
@@ -3916,10 +4026,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
dst->_nfct = src->_nfct;
nf_conntrack_get(skb_nfct(src));
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- dst->nf_bridge = src->nf_bridge;
- nf_bridge_get(src->nf_bridge);
-#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
if (copy)
dst->nf_trace = src->nf_trace;
@@ -3931,9 +4037,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb_nfct(dst));
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- nf_bridge_put(dst->nf_bridge);
-#endif
__nf_copy(dst, src, true);
}
@@ -3955,12 +4058,19 @@ static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
+static inline int secpath_exists(const struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+ return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
+#else
+ return 0;
+#endif
+}
+
static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
return !skb->destructor &&
-#if IS_ENABLED(CONFIG_XFRM)
- !skb->sp &&
-#endif
+ !secpath_exists(skb) &&
!skb_nfct(skb) &&
!skb->_skb_refdst &&
!skb_has_frag_list(skb);
@@ -4006,10 +4116,10 @@ static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
return skb->dst_pending_confirm != 0;
}
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
- return skb->sp;
+ return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
return NULL;
#endif
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 2a11e9d91dfa..178a3933a71b 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -36,6 +36,7 @@ struct sk_msg_sg {
struct scatterlist data[MAX_MSG_FRAGS + 1];
};
+/* UAPI in filter.c depends on struct sk_msg_sg being the first element. */
struct sk_msg {
struct sk_msg_sg sg;
void *data;
@@ -416,6 +417,14 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
sk_psock_drop(sk, psock);
}
+static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
+{
+ if (psock->parser.enabled)
+ psock->parser.saved_data_ready(sk);
+ else
+ sk->sk_data_ready(sk);
+}
+
static inline void psock_set_prog(struct bpf_prog **pprog,
struct bpf_prog *prog)
{
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8b571e9b9f76..ab2041a00e01 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -286,6 +286,7 @@ struct ucred {
#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
#define MSG_MORE 0x8000 /* Sender will send more */
#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
+#define MSG_SENDPAGE_NOPOLICY 0x10000  /* sendpage() internal : do not apply policy */
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
@@ -348,7 +349,8 @@ struct ucred {
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
-struct timespec64;
+struct __kernel_timespec;
+struct old_timespec32;
/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff
* forbid_cmsg_compat==false
@@ -357,8 +359,10 @@ extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg,
unsigned int flags, bool forbid_cmsg_compat);
extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg,
unsigned int flags, bool forbid_cmsg_compat);
-extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
- unsigned int flags, struct timespec64 *timeout);
+extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
+ unsigned int vlen, unsigned int flags,
+ struct __kernel_timespec __user *timeout,
+ struct old_timespec32 __user *timeout32);
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
bool forbid_cmsg_compat);
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 9ec4c147abbc..b0674e330ef6 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -25,6 +25,7 @@ struct dma_chan;
struct pxa2xx_spi_master {
u16 num_chipselect;
u8 enable_dma;
+ bool is_slave;
/* DMA engine specific config */
bool (*dma_filter)(struct dma_chan *chan, void *param);
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 69ee30456864..3fe24500c5ee 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -57,10 +57,12 @@
/**
* enum spi_mem_data_dir - describes the direction of a SPI memory data
* transfer from the controller perspective
+ * @SPI_MEM_NO_DATA: no data transferred
* @SPI_MEM_DATA_IN: data coming from the SPI memory
- * @SPI_MEM_DATA_OUT: data sent the SPI memory
+ * @SPI_MEM_DATA_OUT: data sent to the SPI memory
*/
enum spi_mem_data_dir {
+ SPI_MEM_NO_DATA,
SPI_MEM_DATA_IN,
SPI_MEM_DATA_OUT,
};
@@ -123,6 +125,49 @@ struct spi_mem_op {
}
/**
+ * struct spi_mem_dirmap_info - Direct mapping information
+ * @op_tmpl: operation template that should be used by the direct mapping when
+ * the memory device is accessed
+ * @offset: absolute offset this direct mapping is pointing to
+ * @length: length in byte of this direct mapping
+ *
+ * These information are used by the controller specific implementation to know
+ * the portion of memory that is directly mapped and the spi_mem_op that should
+ * be used to access the device.
+ * A direct mapping is only valid for one direction (read or write) and this
+ * direction is directly encoded in the ->op_tmpl.data.dir field.
+ */
+struct spi_mem_dirmap_info {
+ struct spi_mem_op op_tmpl;
+ u64 offset;
+ u64 length;
+};
+
+/**
+ * struct spi_mem_dirmap_desc - Direct mapping descriptor
+ * @mem: the SPI memory device this direct mapping is attached to
+ * @info: information passed at direct mapping creation time
+ * @nodirmap: set to 1 if the SPI controller does not implement
+ * ->mem_ops->dirmap_create() or when this function returned an
+ * error. If @nodirmap is true, all spi_mem_dirmap_{read,write}()
+ * calls will use spi_mem_exec_op() to access the memory. This is a
+ * degraded mode that allows spi_mem drivers to use the same code
+ * no matter whether the controller supports direct mapping or not
+ * @priv: field pointing to controller specific data
+ *
+ * Common part of a direct mapping descriptor. This object is created by
+ * spi_mem_dirmap_create() and the controller's ->dirmap_create() implementation
+ * can create/attach direct mapping resources to the descriptor in the ->priv
+ * field.
+ */
+struct spi_mem_dirmap_desc {
+ struct spi_mem *mem;
+ struct spi_mem_dirmap_info info;
+ unsigned int nodirmap;
+ void *priv;
+};
+
+/**
* struct spi_mem - describes a SPI memory device
* @spi: the underlying SPI device
* @drvpriv: spi_mem_driver private data
@@ -177,10 +222,32 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* Note that if the implementation of this function allocates memory
* dynamically, then it should do so with devm_xxx(), as we don't
* have a ->free_name() function.
+ * @dirmap_create: create a direct mapping descriptor that can later be used to
+ * access the memory device. This method is optional
+ * @dirmap_destroy: destroy a memory descriptor previously created by
+ * ->dirmap_create()
+ * @dirmap_read: read data from the memory device using the direct mapping
+ * created by ->dirmap_create(). The function can return less
+ * data than requested (for example when the request is crossing
+ * the currently mapped area), and the caller of
+ * spi_mem_dirmap_read() is responsible for calling it again in
+ * this case.
+ * @dirmap_write: write data to the memory device using the direct mapping
+ * created by ->dirmap_create(). The function can return less
+ * data than requested (for example when the request is crossing
+ * the currently mapped area), and the caller of
+ * spi_mem_dirmap_write() is responsible for calling it again in
+ * this case.
*
 * This interface should be implemented by SPI controllers providing a
 * high-level interface to execute SPI memory operations, which is usually the
* case for QSPI controllers.
+ *
+ * Note on ->dirmap_{read,write}(): drivers should avoid accessing the direct
+ * mapping from the CPU because doing that can stall the CPU waiting for the
+ * SPI mem transaction to finish, and this will make real-time maintainers
+ * unhappy and might make your system less responsive. Instead, drivers should
+ * use DMA to access this direct mapping.
*/
struct spi_controller_mem_ops {
int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
@@ -189,6 +256,12 @@ struct spi_controller_mem_ops {
int (*exec_op)(struct spi_mem *mem,
const struct spi_mem_op *op);
const char *(*get_name)(struct spi_mem *mem);
+ int (*dirmap_create)(struct spi_mem_dirmap_desc *desc);
+ void (*dirmap_destroy)(struct spi_mem_dirmap_desc *desc);
+ ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf);
+ ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf);
};
/**
@@ -249,6 +322,15 @@ int spi_mem_exec_op(struct spi_mem *mem,
const char *spi_mem_get_name(struct spi_mem *mem);
+struct spi_mem_dirmap_desc *
+spi_mem_dirmap_create(struct spi_mem *mem,
+ const struct spi_mem_dirmap_info *info);
+void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc);
+ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf);
+ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf);
+
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
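Putting the new dirmap API together, a hedged sketch of a spi_mem driver creating a read mapping and draining it while coping with short reads; the op template, assumed device size, and the ERR_PTR return convention for spi_mem_dirmap_create() are illustrative, not taken from this patch:

	/* Sketch: map a (hypothetical) 1-1-4 read op and loop over short reads. */
	static ssize_t my_flash_read(struct spi_mem *mem, u64 from, size_t len,
				     void *buf)
	{
		struct spi_mem_dirmap_info info = {
			.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
					      SPI_MEM_OP_ADDR(3, 0, 1),
					      SPI_MEM_OP_DUMMY(8, 1),
					      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
			.offset = 0,
			.length = SZ_16M,	/* assumed device size */
		};
		struct spi_mem_dirmap_desc *desc;
		size_t done = 0;

		desc = spi_mem_dirmap_create(mem, &info);	/* assumed ERR_PTR on failure */
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		/* spi_mem_dirmap_read() may return less than requested, e.g. when
		 * the request crosses the mapped window, so keep going until done. */
		while (done < len) {
			ssize_t ret = spi_mem_dirmap_read(desc, from + done,
							  len - done, buf + done);

			if (ret <= 0) {
				spi_mem_dirmap_destroy(desc);
				return ret ? ret : -EIO;	/* don't spin on 0 */
			}
			done += ret;
		}

		spi_mem_dirmap_destroy(desc);
		return done;
	}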
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 6be77fa5ab90..314d922ca607 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -154,7 +154,10 @@ struct spi_device {
#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
-#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
+#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
+#define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */
+#define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */
+#define SPI_3WIRE_HIZ 0x8000 /* high impedance turnaround */
int irq;
void *controller_state;
void *controller_data;
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 67135d4a8a30..c614375cd264 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -38,20 +38,20 @@ struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key);
-#define init_srcu_struct(sp) \
+#define init_srcu_struct(ssp) \
({ \
static struct lock_class_key __srcu_key; \
\
- __init_srcu_struct((sp), #sp, &__srcu_key); \
+ __init_srcu_struct((ssp), #ssp, &__srcu_key); \
})
#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-int init_srcu_struct(struct srcu_struct *sp);
+int init_srcu_struct(struct srcu_struct *ssp);
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -67,28 +67,28 @@ int init_srcu_struct(struct srcu_struct *sp);
struct srcu_struct { };
#endif
-void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void (*func)(struct rcu_head *head));
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced);
-int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
-void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
-void synchronize_srcu(struct srcu_struct *sp);
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced);
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+void synchronize_srcu(struct srcu_struct *ssp);
/**
* cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory.
*/
-static inline void cleanup_srcu_struct(struct srcu_struct *sp)
+static inline void cleanup_srcu_struct(struct srcu_struct *ssp)
{
- _cleanup_srcu_struct(sp, false);
+ _cleanup_srcu_struct(ssp, false);
}
/**
* cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory. Also,
@@ -103,16 +103,16 @@ static inline void cleanup_srcu_struct(struct srcu_struct *sp)
* (with high probability, anyway), and will also cause the srcu_struct
* to be leaked.
*/
-static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
+static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp)
{
- _cleanup_srcu_struct(sp, true);
+ _cleanup_srcu_struct(ssp, true);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
* srcu_read_lock_held - might we be in SRCU read-side critical section?
- * @sp: The srcu_struct structure to check
+ * @ssp: The srcu_struct structure to check
*
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
* read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
@@ -126,16 +126,16 @@ static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
* relies on normal RCU, it can be called from the CPU which
* is in the idle loop from an RCU point of view or offline.
*/
-static inline int srcu_read_lock_held(const struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
if (!debug_lockdep_rcu_enabled())
return 1;
- return lock_is_held(&sp->dep_map);
+ return lock_is_held(&ssp->dep_map);
}
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-static inline int srcu_read_lock_held(const struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
return 1;
}
@@ -145,7 +145,7 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section.
* @c: condition to check for update-side use
*
@@ -154,29 +154,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
* to 1. The @c argument will normally be a logical expression containing
* lockdep_is_held() calls.
*/
-#define srcu_dereference_check(p, sp, c) \
- __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)
+#define srcu_dereference_check(p, ssp, c) \
+ __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section.
*
* Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
* is enabled, invoking this outside of an RCU read-side critical
* section will result in an RCU-lockdep splat.
*/
-#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
+#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)
/**
* srcu_dereference_notrace - no tracing and no lockdep calls from here
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @ssp: pointer to the srcu_struct, which is used to check that we
+ * really are in an SRCU read-side critical section.
*/
-#define srcu_dereference_notrace(p, sp) srcu_dereference_check((p), (sp), 1)
+#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
/**
* srcu_read_lock - register a new reader for an SRCU-protected structure.
- * @sp: srcu_struct in which to register the new reader.
+ * @ssp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section. Note that SRCU read-side
* critical sections may be nested. However, it is illegal to
@@ -191,44 +194,44 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
* srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
* was invoked in process context.
*/
-static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- retval = __srcu_read_lock(sp);
- rcu_lock_acquire(&(sp)->dep_map);
+ retval = __srcu_read_lock(ssp);
+ rcu_lock_acquire(&(ssp)->dep_map);
return retval;
}
/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
-srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- retval = __srcu_read_lock(sp);
+ retval = __srcu_read_lock(ssp);
return retval;
}
/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
- * @sp: srcu_struct in which to unregister the old reader.
+ * @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock().
*
* Exit an SRCU read-side critical section.
*/
-static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
- __releases(sp)
+static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
+ __releases(ssp)
{
- rcu_lock_release(&(sp)->dep_map);
- __srcu_read_unlock(sp, idx);
+ rcu_lock_release(&(ssp)->dep_map);
+ __srcu_read_unlock(ssp, idx);
}
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
-srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
- __srcu_read_unlock(sp, idx);
+ __srcu_read_unlock(ssp, idx);
}
/**
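The srcu.h changes above are a mechanical sp -> ssp rename, so the documented read-side pattern is unchanged; a brief hedged reminder of how the pieces combine (the struct, field, and srcu_struct names are made up):

	/* Sketch: SRCU read side around a pointer published by an updater. */
	struct my_cfg { int value; };

	DEFINE_SRCU(my_srcu);
	static struct my_cfg __rcu *cur_cfg;

	static int read_cfg_value(void)
	{
		struct my_cfg *cfg;
		int idx, val = -ENOENT;

		idx = srcu_read_lock(&my_srcu);			/* readers may sleep */
		cfg = srcu_dereference(cur_cfg, &my_srcu);	/* lockdep-checked fetch */
		if (cfg)
			val = cfg->value;
		srcu_read_unlock(&my_srcu, idx);		/* pass back the same idx */

		return val;
	}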
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index f41d2fb09f87..b19216aaaef2 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -60,7 +60,7 @@ void srcu_drive_gp(struct work_struct *wp);
#define DEFINE_STATIC_SRCU(name) \
static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
-void synchronize_srcu(struct srcu_struct *sp);
+void synchronize_srcu(struct srcu_struct *ssp);
/*
* Counts the new reader in the appropriate per-CPU element of the
@@ -68,36 +68,36 @@ void synchronize_srcu(struct srcu_struct *sp);
* __srcu_read_unlock() must be in the same handler instance. Returns an
* index that must be passed to the matching srcu_read_unlock().
*/
-static inline int __srcu_read_lock(struct srcu_struct *sp)
+static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- idx = READ_ONCE(sp->srcu_idx);
- WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
+ idx = READ_ONCE(ssp->srcu_idx);
+ WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
return idx;
}
-static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
+static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
- synchronize_srcu(sp);
+ synchronize_srcu(ssp);
}
-static inline void srcu_barrier(struct srcu_struct *sp)
+static inline void srcu_barrier(struct srcu_struct *ssp)
{
- synchronize_srcu(sp);
+ synchronize_srcu(ssp);
}
/* Defined here to avoid size increase for non-torture kernels. */
-static inline void srcu_torture_stats_print(struct srcu_struct *sp,
+static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
char *tt, char *tf)
{
int idx;
- idx = READ_ONCE(sp->srcu_idx) & 0x1;
+ idx = READ_ONCE(ssp->srcu_idx) & 0x1;
pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
tt, tf, idx,
- READ_ONCE(sp->srcu_lock_nesting[!idx]),
- READ_ONCE(sp->srcu_lock_nesting[idx]));
+ READ_ONCE(ssp->srcu_lock_nesting[!idx]),
+ READ_ONCE(ssp->srcu_lock_nesting[idx]));
}
#endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 0ae91b3a7406..6f292bd3e7db 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -51,7 +51,7 @@ struct srcu_data {
unsigned long grpmask; /* Mask for leaf srcu_node */
/* ->srcu_data_have_cbs[]. */
int cpu;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
};
/*
@@ -138,8 +138,8 @@ struct srcu_struct {
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
-void synchronize_srcu_expedited(struct srcu_struct *sp);
-void srcu_barrier(struct srcu_struct *sp);
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf);
+void synchronize_srcu_expedited(struct srcu_struct *ssp);
+void srcu_barrier(struct srcu_struct *ssp);
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
#endif
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 43106ffa6788..2ec128060239 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
buf->head[0].iov_base = start;
buf->head[0].iov_len = len;
buf->tail[0].iov_len = 0;
- buf->bvec = NULL;
buf->pages = NULL;
buf->page_len = 0;
buf->flags = 0;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d8a07a4f171d..a8f6d5d89524 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -18,6 +18,8 @@ struct notifier_block;
struct bio;
+struct pagevec;
+
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
@@ -369,7 +371,7 @@ static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
#endif
extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct page **, int nr_pages);
+extern void check_move_unevictable_pages(struct pagevec *pvec);
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index a387b59640a4..7c007ed7505f 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -16,8 +16,6 @@ enum swiotlb_force {
SWIOTLB_NO_FORCE, /* swiotlb=noforce */
};
-extern enum swiotlb_force swiotlb_force;
-
/*
* Maximum allowable number of contiguous slabs to map,
* must be a power of 2. What is the appropriate value ?
@@ -46,9 +44,6 @@ enum dma_sync_target {
SYNC_FOR_DEVICE = 1,
};
-/* define the last possible byte of physical address space as a mapping error */
-#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
-
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_addr,
phys_addr_t phys, size_t size,
@@ -65,56 +60,44 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target);
-/* Accessory functions. */
-
-extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs);
-extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-
-extern int
-swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- unsigned long attrs);
-
-extern void
-swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs);
-
-extern void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir);
-
extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
-extern void __init swiotlb_exit(void);
+extern enum swiotlb_force swiotlb_force;
+extern phys_addr_t io_tlb_start, io_tlb_end;
+
+static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+{
+ return paddr >= io_tlb_start && paddr < io_tlb_end;
+}
+
+bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
#else
-static inline void swiotlb_exit(void) { }
-static inline unsigned int swiotlb_max_segment(void) { return 0; }
-#endif
+#define swiotlb_force SWIOTLB_NO_FORCE
+static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+{
+ return false;
+}
+static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
+ dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return false;
+}
+static inline void swiotlb_exit(void)
+{
+}
+static inline unsigned int swiotlb_max_segment(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
extern void swiotlb_set_max_segment(unsigned int);
-extern const struct dma_map_ops swiotlb_dma_ops;
-
#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2ac3d13a915b..251979d2e709 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -296,12 +296,18 @@ asmlinkage long sys_io_getevents(aio_context_t ctx_id,
long min_nr,
long nr,
struct io_event __user *events,
- struct timespec __user *timeout);
+ struct __kernel_timespec __user *timeout);
asmlinkage long sys_io_pgetevents(aio_context_t ctx_id,
long min_nr,
long nr,
struct io_event __user *events,
- struct timespec __user *timeout,
+ struct __kernel_timespec __user *timeout,
+ const struct __aio_sigset *sig);
+asmlinkage long sys_io_pgetevents_time32(aio_context_t ctx_id,
+ long min_nr,
+ long nr,
+ struct io_event __user *events,
+ struct old_timespec32 __user *timeout,
const struct __aio_sigset *sig);
/* fs/xattr.c */
@@ -466,10 +472,16 @@ asmlinkage long sys_sendfile64(int out_fd, int in_fd,
/* fs/select.c */
asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
- fd_set __user *, struct timespec __user *,
+ fd_set __user *, struct __kernel_timespec __user *,
+ void __user *);
+asmlinkage long sys_pselect6_time32(int, fd_set __user *, fd_set __user *,
+ fd_set __user *, struct old_timespec32 __user *,
void __user *);
asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
- struct timespec __user *, const sigset_t __user *,
+ struct __kernel_timespec __user *, const sigset_t __user *,
+ size_t);
+asmlinkage long sys_ppoll_time32(struct pollfd __user *, unsigned int,
+ struct old_timespec32 __user *, const sigset_t __user *,
size_t);
/* fs/signalfd.c */
@@ -541,7 +553,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags);
/* kernel/futex.c */
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
- struct timespec __user *utime, u32 __user *uaddr2,
+ struct __kernel_timespec __user *utime, u32 __user *uaddr2,
u32 val3);
asmlinkage long sys_get_robust_list(int pid,
struct robust_list_head __user * __user *head_ptr,
@@ -637,6 +649,10 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
siginfo_t __user *uinfo,
const struct __kernel_timespec __user *uts,
size_t sigsetsize);
+asmlinkage long sys_rt_sigtimedwait_time32(const sigset_t __user *uthese,
+ siginfo_t __user *uinfo,
+ const struct old_timespec32 __user *uts,
+ size_t sigsetsize);
asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
/* kernel/sys.c */
@@ -831,6 +847,9 @@ asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags,
struct __kernel_timespec __user *timeout);
+asmlinkage long sys_recvmmsg_time32(int fd, struct mmsghdr __user *msg,
+ unsigned int vlen, unsigned flags,
+ struct old_timespec32 __user *timeout);
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
int options, struct rusage __user *ru);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 987cefa337de..786816cf4aa5 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -234,7 +234,7 @@ int __must_check sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns);
int __must_check sysfs_create_files(struct kobject *kobj,
- const struct attribute **attr);
+ const struct attribute * const *attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
@@ -243,7 +243,7 @@ void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr);
int __must_check sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr);
@@ -342,7 +342,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj,
}
static inline int sysfs_create_files(struct kobject *kobj,
- const struct attribute **attr)
+ const struct attribute * const *attr)
{
return 0;
}
@@ -377,7 +377,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj,
}
static inline void sysfs_remove_files(struct kobject *kobj,
- const struct attribute **attr)
+ const struct attribute * const *attr)
{
}
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index b9626aa7e90c..3e2a80cc7b56 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -39,12 +39,13 @@ struct t10_pi_tuple {
static inline u32 t10_pi_ref_tag(struct request *rq)
{
+ unsigned int shift = ilog2(queue_logical_block_size(rq->q));
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
- return blk_rq_pos(rq) >>
- (rq->q->integrity.interval_exp - 9) & 0xffffffff;
-#else
- return -1U;
+ if (rq->q->integrity.interval_exp)
+ shift = rq->q->integrity.interval_exp;
#endif
+ return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
}
extern const struct blk_integrity_profile t10_pi_type1_crc;
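The rewritten t10_pi_ref_tag() derives the shift from the logical block size and only overrides it when the queue's integrity profile sets interval_exp. Two worked examples with assumed values:

	/*
	 * Assumed values, for illustration only:
	 *
	 *   512-byte logical blocks, interval_exp unset:
	 *     shift = ilog2(512) = 9
	 *     ref tag = blk_rq_pos(rq) >> (9 - SECTOR_SHIFT) = the 512-byte sector
	 *
	 *   4096-byte protection interval (interval_exp = 12):
	 *     shift = 12
	 *     ref tag = blk_rq_pos(rq) >> 3, i.e. one tag per 4k of data
	 */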
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 8ed77bb4ed86..a9b0280687d5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -196,6 +196,7 @@ struct tcp_sock {
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
+ u32 compressed_ack_rcv_nxt;
u32 tsoffset; /* timestamp offset */
diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h
deleted file mode 100644
index 9fb317970c01..000000000000
--- a/include/linux/thinkpad_acpi.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __THINKPAD_ACPI_H__
-#define __THINKPAD_ACPI_H__
-
-/* These two functions return 0 if success, or negative error code
- (e g -ENODEV if no led present) */
-
-enum {
- TPACPI_LED_MUTE,
- TPACPI_LED_MICMUTE,
- TPACPI_LED_MAX,
-};
-
-int tpacpi_led_set(int whichled, bool on);
-
-#endif
diff --git a/include/linux/time32.h b/include/linux/time32.h
index 61904a6c098f..118b9977080c 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -96,31 +96,6 @@ static inline int timespec_compare(const struct timespec *lhs, const struct time
return lhs->tv_nsec - rhs->tv_nsec;
}
-extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
-
-static inline struct timespec timespec_add(struct timespec lhs,
- struct timespec rhs)
-{
- struct timespec ts_delta;
-
- set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
- lhs.tv_nsec + rhs.tv_nsec);
- return ts_delta;
-}
-
-/*
- * sub = lhs - rhs, in normalized form
- */
-static inline struct timespec timespec_sub(struct timespec lhs,
- struct timespec rhs)
-{
- struct timespec ts_delta;
-
- set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
- lhs.tv_nsec - rhs.tv_nsec);
- return ts_delta;
-}
-
/*
* Returns true if the timespec is norm, false if denorm:
*/
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 29975e93fcb8..a8ab0f143ac4 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -262,18 +262,4 @@ void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
struct timespec64 *boot_offset);
extern int update_persistent_clock64(struct timespec64 now);
-/*
- * deprecated aliases, don't use in new code
- */
-#define getnstimeofday64(ts) ktime_get_real_ts64(ts)
-
-static inline struct timespec64 current_kernel_time64(void)
-{
- struct timespec64 ts;
-
- ktime_get_coarse_real_ts64(&ts);
-
- return ts;
-}
-
#endif
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
index a502616f7e1c..cc59cc9e0e84 100644
--- a/include/linux/timekeeping32.h
+++ b/include/linux/timekeeping32.h
@@ -6,15 +6,6 @@
* over time so we can remove the file here.
*/
-static inline void do_gettimeofday(struct timeval *tv)
-{
- struct timespec64 now;
-
- ktime_get_real_ts64(&now);
- tv->tv_sec = now.tv_sec;
- tv->tv_usec = now.tv_nsec/1000;
-}
-
static inline unsigned long get_seconds(void)
{
return ktime_get_real_seconds();
@@ -52,10 +43,4 @@ static inline void getboottime(struct timespec *ts)
*ts = timespec64_to_timespec(ts64);
}
-/*
- * Persistent clock related interfaces
- */
-extern void read_persistent_clock(struct timespec *ts);
-extern int update_persistent_clock(struct timespec now);
-
#endif
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 4130a5497d40..8a62731673f7 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -471,7 +471,8 @@ void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
-struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
+struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
+void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
u64 *probe_offset, u64 *probe_addr);
@@ -502,10 +503,13 @@ static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf
{
return -EOPNOTSUPP;
}
-static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
+static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
return NULL;
}
+static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
+{
+}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
u32 *prog_id, u32 *fd_type,
const char **buf, u64 *probe_offset,
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 40b0b4c1bf7b..df20f8bdbfa3 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -83,8 +83,8 @@ static inline int ptrace_report_syscall(struct pt_regs *regs)
* tracehook_report_syscall_entry - task is about to attempt a system call
* @regs: user register state of current task
*
- * This will be called if %TIF_SYSCALL_TRACE has been set, when the
- * current task has just entered the kernel for a system call.
+ * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set,
+ * when the current task has just entered the kernel for a system call.
* Full user register state is available here. Changing the values
* in @regs can affect the system call number and arguments to be tried.
* It is safe to block here, preventing the system call from beginning.
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 538ba1a58f5b..9c3186578ce0 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -82,7 +82,7 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
static inline void tracepoint_synchronize_unregister(void)
{
synchronize_srcu(&tracepoint_srcu);
- synchronize_sched();
+ synchronize_rcu();
}
#else
static inline void tracepoint_synchronize_unregister(void)
@@ -166,7 +166,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
struct tracepoint_func *it_func_ptr; \
void *it_func; \
void *__data; \
- int __maybe_unused idx = 0; \
+ int __maybe_unused __idx = 0; \
\
if (!(cond)) \
return; \
@@ -182,7 +182,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* doesn't work from the idle path. \
*/ \
if (rcuidle) { \
- idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+ __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
rcu_irq_enter_irqson(); \
} \
\
@@ -198,7 +198,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
\
if (rcuidle) { \
rcu_irq_exit_irqson(); \
- srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+ srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
} \
\
preempt_enable_notrace(); \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 414db2bce715..392138fe59b6 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -556,6 +556,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
extern void tty_release_struct(struct tty_struct *tty, int idx);
extern int tty_release(struct inode *inode, struct file *filp);
extern void tty_init_termios(struct tty_struct *tty);
+extern void tty_save_termios(struct tty_struct *tty);
extern int tty_standard_install(struct tty_driver *driver,
struct tty_struct *tty);
diff --git a/include/linux/types.h b/include/linux/types.h
index 9834e90aa010..c2615d6a019e 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -212,8 +212,8 @@ struct ustat {
* weird ABI and we need to ask it explicitly.
*
* The alignment is required to guarantee that bit 0 of @next will be
- * clear under normal conditions -- as long as we use call_rcu(),
- * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
+ * clear under normal conditions -- as long as we use call_rcu() or
+ * call_srcu() to queue the callback.
*
 * This guarantee is important for a few reasons:
* - future call_rcu_lazy() will make use of lower bits in the pointer;
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 320d49d85484..2725c83395bf 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,7 +49,13 @@ struct udp_sock {
unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
+ no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
+ encap_enabled:1, /* This socket enabled encap
+ * processing; UDP tunnels and
+ * different encapsulation layers set
+ * this
+ */
+ gro_enabled:1; /* Can accept GRO packets */
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -71,6 +77,7 @@ struct udp_sock {
* For encapsulation sockets.
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);
/* GRO functions for UDP socket */
@@ -115,6 +122,23 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
return udp_sk(sk)->no_check6_rx;
}
+static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int gso_size;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ gso_size = skb_shinfo(skb)->gso_size;
+ put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size);
+ }
+}
+
+static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+{
+ return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
+ skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
+}
+
#define udp_portaddr_for_each_entry(__sk, list) \
hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 55ce99ddb912..ecf584f6b82d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/thread_info.h>
+#include <crypto/hash.h>
#include <uapi/linux/uio.h>
struct page;
@@ -266,9 +267,11 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+ struct iov_iter *i);
int import_iovec(int type, const struct iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 4cdd515a4385..5e49e82c4368 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -407,11 +407,11 @@ struct usb_host_bos {
};
int __usb_get_extra_descriptor(char *buffer, unsigned size,
- unsigned char type, void **ptr);
+ unsigned char type, void **ptr, size_t min);
#define usb_get_extra_descriptor(ifpoint, type, ptr) \
__usb_get_extra_descriptor((ifpoint)->extra, \
(ifpoint)->extralen, \
- type, (void **)ptr)
+ type, (void **)ptr, sizeof(**(ptr)))
/* ----------------------------------------------------------------------- */
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index b7a99ce56bc9..a1be64c9940f 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -66,4 +66,7 @@
/* Device needs a pause after every control message. */
#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
+/* Hub needs extra delay after resetting its port. */
+#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fdfd04e348f6..738a0c24874f 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -246,7 +246,8 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
*
* @bio is a part of the writeback in progress controlled by @wbc. Perform
* writeback specific initialization. This is used to apply the cgroup
- * writeback context.
+ * writeback context. Must be called after the bio has been associated with
+ * a device.
*/
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
@@ -257,7 +258,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
* regular writeback instead of writing things out itself.
*/
if (wbc->wb)
- bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+ bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}
#else /* CONFIG_CGROUP_WRITEBACK */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index d9514928ddac..f492e21c4aa2 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -289,9 +289,7 @@ struct xarray {
void xa_init_flags(struct xarray *, gfp_t flags);
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
-void *xa_cmpxchg(struct xarray *, unsigned long index,
- void *old, void *entry, gfp_t);
-int xa_reserve(struct xarray *, unsigned long index, gfp_t);
+void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -344,65 +342,6 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
}
/**
- * xa_erase() - Erase this entry from the XArray.
- * @xa: XArray.
- * @index: Index of entry.
- *
- * This function is the equivalent of calling xa_store() with %NULL as
- * the third argument. The XArray does not need to allocate memory, so
- * the user does not need to provide GFP flags.
- *
- * Context: Process context. Takes and releases the xa_lock.
- * Return: The entry which used to be at this index.
- */
-static inline void *xa_erase(struct xarray *xa, unsigned long index)
-{
- return xa_store(xa, index, NULL, 0);
-}
-
-/**
- * xa_insert() - Store this entry in the XArray unless another entry is
- * already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Process context. Takes and releases the xa_lock.
- * May sleep if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int xa_insert(struct xarray *xa, unsigned long index,
- void *entry, gfp_t gfp)
-{
- void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
- if (!curr)
- return 0;
- if (xa_is_err(curr))
- return xa_err(curr);
- return -EEXIST;
-}
-
-/**
- * xa_release() - Release a reserved entry.
- * @xa: XArray.
- * @index: Index of entry.
- *
- * After calling xa_reserve(), you can call this function to release the
- * reservation. If the entry at @index has been stored to, this function
- * will do nothing.
- */
-static inline void xa_release(struct xarray *xa, unsigned long index)
-{
- xa_cmpxchg(xa, index, NULL, NULL, 0);
-}
-
-/**
* xa_for_each() - Iterate over a portion of an XArray.
* @xa: XArray.
* @entry: Entry retrieved from array.
@@ -455,6 +394,7 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
void *entry, gfp_t);
int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
+int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -487,6 +427,58 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
}
/**
+ * xa_store_bh() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_bh(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock_bh(xa);
+
+ return curr;
+}
+
+/**
+ * xa_store_irq() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_irq(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock_irq(xa);
+
+ return curr;
+}
+
+/**
* xa_erase_bh() - Erase this entry from the XArray.
* @xa: XArray.
* @index: Index of entry.
@@ -495,7 +487,7 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
* the third argument. The XArray does not need to allocate memory, so
* the user does not need to provide GFP flags.
*
- * Context: Process context. Takes and releases the xa_lock while
+ * Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
* Return: The entry which used to be at this index.
*/
@@ -535,6 +527,115 @@ static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
}
/**
+ * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * Context: Any context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock(xa);
+ curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+ xa_unlock(xa);
+
+ return curr;
+}
+
+/**
+ * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_cmpxchg() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_bh(xa);
+ curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+ xa_unlock_bh(xa);
+
+ return curr;
+}
+
+/**
+ * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_cmpxchg() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_irq(xa);
+ curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+ xa_unlock_irq(xa);
+
+ return curr;
+}
+
+/**
+ * xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
+ if (!curr)
+ return 0;
+ if (xa_is_err(curr))
+ return xa_err(curr);
+ return -EEXIST;
+}
+
+/**
* xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
@@ -575,7 +676,7 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
* Updates the @id pointer with the index, then stores the entry at that
* index. A concurrent lookup will not see an uninitialised @id.
*
- * Context: Process context. Takes and releases the xa_lock while
+ * Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
* there is no more space in the XArray.
@@ -621,6 +722,98 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
return err;
}
+/**
+ * xa_reserve() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * Ensures there is somewhere to store an entry at @index in the array.
+ * If there is already something stored at @index, this function does
+ * nothing. If there was nothing there, the entry is marked as reserved.
+ * Loading from a reserved entry returns a %NULL pointer.
+ *
+ * If you do not use the entry that you have reserved, call xa_release()
+ * or xa_erase() to free any unnecessary memory.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock(xa);
+
+ return ret;
+}
+
+/**
+ * xa_reserve_bh() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * A softirq-disabling version of xa_reserve().
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock_bh(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock_bh(xa);
+
+ return ret;
+}
+
+/**
+ * xa_reserve_irq() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * An interrupt-disabling version of xa_reserve().
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock_irq(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock_irq(xa);
+
+ return ret;
+}
+
+/**
+ * xa_release() - Release a reserved entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * After calling xa_reserve(), you can call this function to release the
+ * reservation. If the entry at @index has been stored to, this function
+ * will do nothing.
+ */
+static inline void xa_release(struct xarray *xa, unsigned long index)
+{
+ xa_cmpxchg(xa, index, NULL, NULL, 0);
+}
+
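A short sketch of the reserve/release pattern documented above, assuming the caller decides later whether to publish an entry; everything other than the xa_reserve()/xa_store()/xa_release() calls is illustrative.

#include <linux/xarray.h>

static DEFINE_XARRAY(slots);

/* Pre-allocate room at @index while failure is still cheap to handle. */
static int prepare_slot(unsigned long index)
{
	return xa_reserve(&slots, index, GFP_KERNEL);
}

/* Either publish the real entry or give the reserved memory back. */
static void publish_or_abort(unsigned long index, void *entry)
{
	if (entry)
		xa_store(&slots, index, entry, GFP_KERNEL);
	else
		xa_release(&slots, index);
}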
/* Everything below here is the Advanced API. Proceed with caution. */
/*
diff --git a/include/media/cec.h b/include/media/cec.h
index 3fe5e5d2bb7e..707411ef8ba2 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -155,6 +155,7 @@ struct cec_adapter {
unsigned int transmit_queue_sz;
struct list_head wait_queue;
struct cec_data *transmitting;
+ bool transmit_in_progress;
struct task_struct *kthread_config;
struct completion config_completion;
diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h
index 79a566d7defd..5c31a7682492 100644
--- a/include/media/davinci/vpbe.h
+++ b/include/media/davinci/vpbe.h
@@ -100,10 +100,6 @@ struct vpbe_config {
struct vpbe_device;
struct vpbe_device_ops {
- /* crop cap for the display */
- int (*g_cropcap)(struct vpbe_device *vpbe_dev,
- struct v4l2_cropcap *cropcap);
-
/* Enumerate the outputs */
int (*enum_outputs)(struct vpbe_device *vpbe_dev,
struct v4l2_output *output);
diff --git a/include/media/media-request.h b/include/media/media-request.h
index 0ce75c35131f..bd36d7431698 100644
--- a/include/media/media-request.h
+++ b/include/media/media-request.h
@@ -68,7 +68,7 @@ struct media_request {
unsigned int access_count;
struct list_head objects;
unsigned int num_incomplete_objects;
- struct wait_queue_head poll_wait;
+ wait_queue_head_t poll_wait;
spinlock_t lock;
};
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h
new file mode 100644
index 000000000000..d21f40edc09e
--- /dev/null
+++ b/include/media/mpeg2-ctrls.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * These are the MPEG2 state controls for use with stateless MPEG-2
+ * codec drivers.
+ *
+ * It turns out that these structs are not stable yet and will undergo
+ * more changes. So keep them private until they are stable and ready to
+ * become part of the official public API.
+ */
+
+#ifndef _MPEG2_CTRLS_H_
+#define _MPEG2_CTRLS_H_
+
+#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251)
+
+/* enum v4l2_ctrl_type type values */
+#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103
+#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104
+
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
+
+struct v4l2_mpeg2_sequence {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
+ __u16 horizontal_size;
+ __u16 vertical_size;
+ __u32 vbv_buffer_size;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
+ __u8 profile_and_level_indication;
+ __u8 progressive_sequence;
+ __u8 chroma_format;
+ __u8 pad;
+};
+
+struct v4l2_mpeg2_picture {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
+ __u8 picture_coding_type;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
+ __u8 f_code[2][2];
+ __u8 intra_dc_precision;
+ __u8 picture_structure;
+ __u8 top_field_first;
+ __u8 frame_pred_frame_dct;
+ __u8 concealment_motion_vectors;
+ __u8 q_scale_type;
+ __u8 intra_vlc_format;
+ __u8 alternate_scan;
+ __u8 repeat_first_field;
+ __u8 progressive_frame;
+ __u8 pad;
+};
+
+struct v4l2_ctrl_mpeg2_slice_params {
+ __u32 bit_size;
+ __u32 data_bit_offset;
+
+ struct v4l2_mpeg2_sequence sequence;
+ struct v4l2_mpeg2_picture picture;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
+ __u8 quantiser_scale_code;
+
+ __u8 backward_ref_index;
+ __u8 forward_ref_index;
+ __u8 pad;
+};
+
+struct v4l2_ctrl_mpeg2_quantization {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
+ __u8 load_intra_quantiser_matrix;
+ __u8 load_non_intra_quantiser_matrix;
+ __u8 load_chroma_intra_quantiser_matrix;
+ __u8 load_chroma_non_intra_quantiser_matrix;
+
+ __u8 intra_quantiser_matrix[64];
+ __u8 non_intra_quantiser_matrix[64];
+ __u8 chroma_intra_quantiser_matrix[64];
+ __u8 chroma_non_intra_quantiser_matrix[64];
+};
+
+#endif
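As an illustration of how these compound controls fit together, a hypothetical stateless-decoder client might populate the slice parameters roughly as follows; the field values are invented and not taken from a real bitstream.

#include <linux/string.h>
#include <media/mpeg2-ctrls.h>

static void fill_example_slice_params(struct v4l2_ctrl_mpeg2_slice_params *p)
{
	memset(p, 0, sizeof(*p));

	/* Sequence header/extension */
	p->sequence.horizontal_size = 1920;
	p->sequence.vertical_size = 1080;
	p->sequence.chroma_format = 1;		/* 4:2:0 */

	/* Picture header/coding extension */
	p->picture.picture_coding_type = V4L2_MPEG2_PICTURE_CODING_TYPE_I;
	p->picture.intra_dc_precision = 0;	/* 8-bit DC precision */
	p->picture.picture_structure = 3;	/* frame picture */

	/* Slice */
	p->quantiser_scale_code = 1;
}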
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index bfa3017cecba..d621acadfbf3 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -277,6 +277,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_WINFAST "rc-winfast"
#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe"
#define RC_MAP_SU3000 "rc-su3000"
+#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_ZX_IRDEC "rc-zx-irdec"
/*
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 82715645617b..0c511ed8ffb0 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -396,4 +396,9 @@ int v4l2_g_parm_cap(struct video_device *vdev,
int v4l2_s_parm_cap(struct video_device *vdev,
struct v4l2_subdev *sd, struct v4l2_streamparm *a);
+/* Compare two v4l2_fract structs */
+#define V4L2_FRACT_COMPARE(a, OP, b) \
+ ((u64)(a).numerator * (b).denominator OP \
+ (u64)(b).numerator * (a).denominator)
+
#endif /* V4L2_COMMON_H_ */
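A small usage sketch of the new V4L2_FRACT_COMPARE() macro: the cross-multiplication is done in 64 bits, so frame intervals can be compared without rounding. The interval values below are only illustrative.

#include <linux/videodev2.h>
#include <media/v4l2-common.h>

static bool first_interval_is_longer(void)
{
	struct v4l2_fract a = { .numerator = 1, .denominator = 25 }; /* 25 fps */
	struct v4l2_fract b = { .numerator = 1, .denominator = 30 }; /* 30 fps */

	/* 1/25 > 1/30, so this evaluates to true. */
	return V4L2_FRACT_COMPARE(a, >, b);
}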
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 83ce0593b275..d63cf227b0ab 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -22,6 +22,12 @@
#include <linux/videodev2.h>
#include <media/media-request.h>
+/*
+ * Include the mpeg2 stateless codec compound control definitions.
+ * This will move to the public headers once this API is fully stable.
+ */
+#include <media/mpeg2-ctrls.h>
+
/* forward references */
struct file;
struct v4l2_ctrl_handler;
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 456ac13eca1d..48531e57cc5a 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -74,10 +74,19 @@ struct v4l2_ctrl_handler;
* indicates that file->private_data points to &struct v4l2_fh.
* This flag is set by the core when v4l2_fh_init() is called.
* All new drivers should use it.
+ * @V4L2_FL_QUIRK_INVERTED_CROP:
+ * some old M2M drivers use g/s_crop/cropcap incorrectly: crop and
+ * compose are swapped. If this flag is set, then the selection
+ * targets are swapped in the g/s_crop/cropcap functions in v4l2-ioctl.c.
+ * This allows those drivers to correctly implement the selection API,
+ * but the old crop API will still work as expected in order to preserve
+ * backwards compatibility.
+ * Never set this flag for new drivers.
*/
enum v4l2_video_device_flags {
- V4L2_FL_REGISTERED = 0,
- V4L2_FL_USES_V4L2_FH = 1,
+ V4L2_FL_REGISTERED = 0,
+ V4L2_FL_USES_V4L2_FH = 1,
+ V4L2_FL_QUIRK_INVERTED_CROP = 2,
};
/* Priority helper functions */
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 5848d92c30da..8533ece5026e 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -48,6 +48,9 @@ struct v4l2_fh;
* @vidioc_enum_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic
* for metadata capture
+ * @vidioc_enum_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic
+ * for metadata output
* @vidioc_g_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -80,6 +83,8 @@ struct v4l2_fh;
* Radio output
* @vidioc_g_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
+ * @vidioc_g_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for metadata output
* @vidioc_s_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -112,6 +117,8 @@ struct v4l2_fh;
* Radio output
* @vidioc_s_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
+ * @vidioc_s_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for metadata output
* @vidioc_try_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -146,6 +153,8 @@ struct v4l2_fh;
* Radio output
* @vidioc_try_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
+ * @vidioc_try_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for metadata output
* @vidioc_reqbufs: pointer to the function that implements
* :ref:`VIDIOC_REQBUFS <vidioc_reqbufs>` ioctl
* @vidioc_querybuf: pointer to the function that implements
@@ -220,12 +229,8 @@ struct v4l2_fh;
* :ref:`VIDIOC_G_MODULATOR <vidioc_g_modulator>` ioctl
* @vidioc_s_modulator: pointer to the function that implements
* :ref:`VIDIOC_S_MODULATOR <vidioc_g_modulator>` ioctl
- * @vidioc_cropcap: pointer to the function that implements
- * :ref:`VIDIOC_CROPCAP <vidioc_cropcap>` ioctl
- * @vidioc_g_crop: pointer to the function that implements
- * :ref:`VIDIOC_G_CROP <vidioc_g_crop>` ioctl
- * @vidioc_s_crop: pointer to the function that implements
- * :ref:`VIDIOC_S_CROP <vidioc_g_crop>` ioctl
+ * @vidioc_g_pixelaspect: pointer to the function that implements
+ * the pixelaspect part of the :ref:`VIDIOC_CROPCAP <vidioc_cropcap>` ioctl
* @vidioc_g_selection: pointer to the function that implements
* :ref:`VIDIOC_G_SELECTION <vidioc_g_selection>` ioctl
* @vidioc_s_selection: pointer to the function that implements
@@ -318,6 +323,8 @@ struct v4l2_ioctl_ops {
struct v4l2_fmtdesc *f);
int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
/* VIDIOC_G_FMT handlers */
int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh,
@@ -346,6 +353,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_g_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_g_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_S_FMT handlers */
int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh,
@@ -374,6 +383,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_s_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_s_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_TRY_FMT handlers */
int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh,
@@ -402,6 +413,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_try_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_try_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* Buffer handlers */
int (*vidioc_reqbufs)(struct file *file, void *fh,
@@ -491,12 +504,8 @@ struct v4l2_ioctl_ops {
int (*vidioc_s_modulator)(struct file *file, void *fh,
const struct v4l2_modulator *a);
/* Crop ioctls */
- int (*vidioc_cropcap)(struct file *file, void *fh,
- struct v4l2_cropcap *a);
- int (*vidioc_g_crop)(struct file *file, void *fh,
- struct v4l2_crop *a);
- int (*vidioc_s_crop)(struct file *file, void *fh,
- const struct v4l2_crop *a);
+ int (*vidioc_g_pixelaspect)(struct file *file, void *fh,
+ int buf_type, struct v4l2_fract *aspect);
int (*vidioc_g_selection)(struct file *file, void *fh,
struct v4l2_selection *s);
int (*vidioc_s_selection)(struct file *file, void *fh,
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 58c1ecf3d648..5467264771ec 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -624,7 +624,7 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
/* v4l2 request helper */
-void vb2_m2m_request_queue(struct media_request *req);
+void v4l2_m2m_request_queue(struct media_request *req);
/* v4l2 ioctl helpers */
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 9102d6ca566e..47af609dc8f1 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -776,7 +776,11 @@ struct v4l2_subdev_internal_ops {
#define V4L2_SUBDEV_FL_IS_SPI (1U << 1)
/* Set this flag if this subdev needs a device node. */
#define V4L2_SUBDEV_FL_HAS_DEVNODE (1U << 2)
-/* Set this flag if this subdev generates events. */
+/*
+ * Set this flag if this subdev generates events.
+ * Note controls can send events, thus drivers exposing controls
+ * should set this flag.
+ */
#define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3)
struct regulator_bulk_data;
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index e86981d615ae..4a737b2c610b 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -239,6 +239,7 @@ struct vb2_queue;
* @num_planes: number of planes in the buffer
* on an internal driver queue.
* @timestamp: frame timestamp in ns.
+ * @request: the request this buffer is associated with.
* @req_obj: used to bind this buffer to a request. This
* request object has a refcount.
*/
@@ -249,6 +250,7 @@ struct vb2_buffer {
unsigned int memory;
unsigned int num_planes;
u64 timestamp;
+ struct media_request *request;
struct media_request_object req_obj;
/* private: internal use only
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 05c7df41d737..dbc795ec659e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -194,35 +194,5 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
#endif
}
-#ifdef CONFIG_NET_CLS_ACT
-int tc_setup_cb_egdev_register(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv);
-void tc_setup_cb_egdev_unregister(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv);
-int tc_setup_cb_egdev_call(const struct net_device *dev,
- enum tc_setup_type type, void *type_data,
- bool err_stop);
-#else
-static inline
-int tc_setup_cb_egdev_register(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
- return 0;
-}
-
-static inline
-void tc_setup_cb_egdev_unregister(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
-}
-
-static inline
-int tc_setup_cb_egdev_call(const struct net_device *dev,
- enum tc_setup_type type, void *type_data,
- bool err_stop)
-{
- return 0;
-}
-#endif
#endif
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 14b789a123e7..1656c5978498 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -317,6 +317,8 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
+int ipv6_anycast_init(void);
+void ipv6_anycast_cleanup(void);
/* Device notifier */
int register_inet6addr_notifier(struct notifier_block *nb);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index de587948042a..1adefe42c0a6 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -77,7 +77,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *, struct key *);
int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
enum rxrpc_call_completion *, u32 *);
-u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
ktime_t *);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1fa41b7a1be3..e0c41eb1c860 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -777,8 +777,10 @@ struct cfg80211_crypto_settings {
* @probe_resp: probe response template (AP mode only)
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
- * @lci: LCI subelement content
- * @civicloc: Civic location subelement content
+ * @lci: Measurement Report element content, starting with Measurement Token
+ * (measurement type 8)
+ * @civicloc: Measurement Report element content, starting with Measurement
+ * Token (measurement type 11)
* @lci_len: LCI data length
* @civicloc_len: Civic location data length
*/
@@ -1296,6 +1298,7 @@ struct cfg80211_tid_stats {
* @rx_beacon: number of beacons received from this peer
* @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
* from this peer
+ * @connected_to_gate: true if mesh STA has a path to mesh gate
* @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
* @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
* (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
@@ -1350,6 +1353,8 @@ struct station_info {
u64 rx_beacon;
u64 rx_duration;
u8 rx_beacon_signal_avg;
+ u8 connected_to_gate;
+
struct cfg80211_tid_stats *pertid;
s8 ack_signal;
s8 avg_ack_signal;
@@ -1559,6 +1564,10 @@ struct bss_parameters {
* @plink_timeout: If no tx activity is seen from a STA we've established
* peering with for longer than this time (in seconds), then remove it
* from the STA's list of peers. Default is 30 minutes.
+ * @dot11MeshConnectedToMeshGate: if set to true, advertise that this STA is
+ * connected to a mesh gate in mesh formation info. If false, the
+ * value in mesh formation is determined by the presence of root paths
+ * in the mesh path table
*/
struct mesh_config {
u16 dot11MeshRetryTimeout;
@@ -1578,6 +1587,7 @@ struct mesh_config {
u16 dot11MeshHWMPperrMinInterval;
u16 dot11MeshHWMPnetDiameterTraversalTime;
u8 dot11MeshHWMPRootMode;
+ bool dot11MeshConnectedToMeshGate;
u16 dot11MeshHWMPRannInterval;
bool dot11MeshGateAnnouncementProtocol;
bool dot11MeshForwarding;
@@ -2815,7 +2825,7 @@ struct cfg80211_external_auth_params {
};
/**
- * cfg80211_ftm_responder_stats - FTM responder statistics
+ * struct cfg80211_ftm_responder_stats - FTM responder statistics
*
* @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to
* indicate the relevant values in this struct for them
@@ -2849,6 +2859,190 @@ struct cfg80211_ftm_responder_stats {
};
/**
+ * struct cfg80211_pmsr_ftm_result - FTM result
+ * @failure_reason: if this measurement failed (PMSR status is
+ * %NL80211_PMSR_STATUS_FAILURE), this gives a more precise
+ * reason than just "failure"
+ * @burst_index: if reporting partial results, this is the index
+ * in [0 .. num_bursts-1] of the burst that's being reported
+ * @num_ftmr_attempts: number of FTM request frames transmitted
+ * @num_ftmr_successes: number of FTM request frames acked
+ * @busy_retry_time: if failure_reason is %NL80211_PMSR_FTM_FAILURE_PEER_BUSY,
+ * fill this to indicate in how many seconds a retry is deemed possible
+ * by the responder
+ * @num_bursts_exp: actual number of bursts exponent negotiated
+ * @burst_duration: actual burst duration negotiated
+ * @ftms_per_burst: actual FTMs per burst negotiated
+ * @lci_len: length of LCI information (if present)
+ * @civicloc_len: length of civic location information (if present)
+ * @lci: LCI data (may be %NULL)
+ * @civicloc: civic location data (may be %NULL)
+ * @rssi_avg: average RSSI over FTM action frames reported
+ * @rssi_spread: spread of the RSSI over FTM action frames reported
+ * @tx_rate: bitrate for transmitted FTM action frame response
+ * @rx_rate: bitrate of received FTM action frame
+ * @rtt_avg: average of RTTs measured (must have either this or @dist_avg)
+ * @rtt_variance: variance of RTTs measured (note that standard deviation is
+ * the square root of the variance)
+ * @rtt_spread: spread of the RTTs measured
+ * @dist_avg: average of distances (mm) measured
+ * (must have either this or @rtt_avg)
+ * @dist_variance: variance of distances measured (see also @rtt_variance)
+ * @dist_spread: spread of distances measured (see also @rtt_spread)
+ * @num_ftmr_attempts_valid: @num_ftmr_attempts is valid
+ * @num_ftmr_successes_valid: @num_ftmr_successes is valid
+ * @rssi_avg_valid: @rssi_avg is valid
+ * @rssi_spread_valid: @rssi_spread is valid
+ * @tx_rate_valid: @tx_rate is valid
+ * @rx_rate_valid: @rx_rate is valid
+ * @rtt_avg_valid: @rtt_avg is valid
+ * @rtt_variance_valid: @rtt_variance is valid
+ * @rtt_spread_valid: @rtt_spread is valid
+ * @dist_avg_valid: @dist_avg is valid
+ * @dist_variance_valid: @dist_variance is valid
+ * @dist_spread_valid: @dist_spread is valid
+ */
+struct cfg80211_pmsr_ftm_result {
+ const u8 *lci;
+ const u8 *civicloc;
+ unsigned int lci_len;
+ unsigned int civicloc_len;
+ enum nl80211_peer_measurement_ftm_failure_reasons failure_reason;
+ u32 num_ftmr_attempts, num_ftmr_successes;
+ s16 burst_index;
+ u8 busy_retry_time;
+ u8 num_bursts_exp;
+ u8 burst_duration;
+ u8 ftms_per_burst;
+ s32 rssi_avg;
+ s32 rssi_spread;
+ struct rate_info tx_rate, rx_rate;
+ s64 rtt_avg;
+ s64 rtt_variance;
+ s64 rtt_spread;
+ s64 dist_avg;
+ s64 dist_variance;
+ s64 dist_spread;
+
+ u16 num_ftmr_attempts_valid:1,
+ num_ftmr_successes_valid:1,
+ rssi_avg_valid:1,
+ rssi_spread_valid:1,
+ tx_rate_valid:1,
+ rx_rate_valid:1,
+ rtt_avg_valid:1,
+ rtt_variance_valid:1,
+ rtt_spread_valid:1,
+ dist_avg_valid:1,
+ dist_variance_valid:1,
+ dist_spread_valid:1;
+};
+
+/**
+ * struct cfg80211_pmsr_result - peer measurement result
+ * @addr: address of the peer
+ * @host_time: host time (use ktime_get_boottime(), adjusted to the time when the
+ * measurement was made)
+ * @ap_tsf: AP's TSF at measurement time
+ * @status: status of the measurement
+ * @final: if reporting partial results, mark this as the last one; if not
+ * reporting partial results, always set this flag
+ * @ap_tsf_valid: indicates the @ap_tsf value is valid
+ * @type: type of the measurement reported; note that we only support reporting
+ * one type at a time, but you can report multiple results separately and
+ * they're all aggregated for userspace.
+ */
+struct cfg80211_pmsr_result {
+ u64 host_time, ap_tsf;
+ enum nl80211_peer_measurement_status status;
+
+ u8 addr[ETH_ALEN];
+
+ u8 final:1,
+ ap_tsf_valid:1;
+
+ enum nl80211_peer_measurement_type type;
+
+ union {
+ struct cfg80211_pmsr_ftm_result ftm;
+ };
+};
+
+/**
+ * struct cfg80211_pmsr_ftm_request_peer - FTM request data
+ * @requested: indicates FTM is requested
+ * @preamble: frame preamble to use
+ * @burst_period: burst period to use
+ * @asap: indicates to use ASAP mode
+ * @num_bursts_exp: number of bursts exponent
+ * @burst_duration: burst duration
+ * @ftms_per_burst: number of FTMs per burst
+ * @ftmr_retries: number of retries for FTM request
+ * @request_lci: request LCI information
+ * @request_civicloc: request civic location information
+ *
+ * See also nl80211 for the respective attribute documentation.
+ */
+struct cfg80211_pmsr_ftm_request_peer {
+ enum nl80211_preamble preamble;
+ u16 burst_period;
+ u8 requested:1,
+ asap:1,
+ request_lci:1,
+ request_civicloc:1;
+ u8 num_bursts_exp;
+ u8 burst_duration;
+ u8 ftms_per_burst;
+ u8 ftmr_retries;
+};
+
+/**
+ * struct cfg80211_pmsr_request_peer - peer data for a peer measurement request
+ * @addr: MAC address
+ * @chandef: channel to use
+ * @report_ap_tsf: report the associated AP's TSF
+ * @ftm: FTM data, see &struct cfg80211_pmsr_ftm_request_peer
+ */
+struct cfg80211_pmsr_request_peer {
+ u8 addr[ETH_ALEN];
+ struct cfg80211_chan_def chandef;
+ u8 report_ap_tsf:1;
+ struct cfg80211_pmsr_ftm_request_peer ftm;
+};
+
+/**
+ * struct cfg80211_pmsr_request - peer measurement request
+ * @cookie: cookie, set by cfg80211
+ * @nl_portid: netlink portid - used by cfg80211
+ * @drv_data: driver data for this request, if required for aborting,
+ * not otherwise used or freed by cfg80211
+ * @mac_addr: MAC address used for (randomised) request
+ * @mac_addr_mask: MAC address mask used for randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
+ * @list: used by cfg80211 to hold on to the request
+ * @timeout: timeout (in milliseconds) for the whole operation, if
+ * zero it means there's no timeout
+ * @n_peers: number of peers to do measurements with
+ * @peers: per-peer measurement request data
+ */
+struct cfg80211_pmsr_request {
+ u64 cookie;
+ void *drv_data;
+ u32 n_peers;
+ u32 nl_portid;
+
+ u32 timeout;
+
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
+ struct list_head list;
+
+ struct cfg80211_pmsr_request_peer peers[];
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -3183,6 +3377,8 @@ struct cfg80211_ftm_responder_stats {
*
* @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
* Statistics should be cumulative, currently no way to reset is provided.
+ * @start_pmsr: start peer measurement (e.g. FTM)
+ * @abort_pmsr: abort peer measurement
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3492,6 +3688,11 @@ struct cfg80211_ops {
int (*get_ftm_responder_stats)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_ftm_responder_stats *ftm_stats);
+
+ int (*start_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *request);
+ void (*abort_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *request);
};
/*
@@ -3864,6 +4065,42 @@ struct wiphy_iftype_ext_capab {
};
/**
+ * struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities
+ * @max_peers: maximum number of peers in a single measurement
+ * @report_ap_tsf: can report assoc AP's TSF for radio resource measurement
+ * @randomize_mac_addr: can randomize MAC address for measurement
+ * @ftm.supported: FTM measurement is supported
+ * @ftm.asap: ASAP-mode is supported
+ * @ftm.non_asap: non-ASAP-mode is supported
+ * @ftm.request_lci: can request LCI data
+ * @ftm.request_civicloc: can request civic location data
+ * @ftm.preambles: bitmap of preambles supported (&enum nl80211_preamble)
+ * @ftm.bandwidths: bitmap of bandwidths supported (&enum nl80211_chan_width)
+ * @ftm.max_bursts_exponent: maximum burst exponent supported
+ * (set to -1 if not limited; note that setting this will necessarily
+ * forbid using the value 15 to let the responder pick)
+ * @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if
+ * not limited)
+ */
+struct cfg80211_pmsr_capabilities {
+ unsigned int max_peers;
+ u8 report_ap_tsf:1,
+ randomize_mac_addr:1;
+
+ struct {
+ u32 preambles;
+ u32 bandwidths;
+ s8 max_bursts_exponent;
+ u8 max_ftms_per_burst;
+ u8 supported:1,
+ asap:1,
+ non_asap:1,
+ request_lci:1,
+ request_civicloc:1;
+ } ftm;
+};
+
+/**
* struct wiphy - wireless hardware description
* @reg_notifier: the driver's regulatory notification callback,
* note that if your driver uses wiphy_apply_custom_regulatory()
@@ -4027,6 +4264,8 @@ struct wiphy_iftype_ext_capab {
* @txq_limit: configuration of internal TX queue frame limit
* @txq_memory_limit: configuration internal TX queue memory limit
* @txq_quantum: configuration of internal TX queue scheduler quantum
+ *
+ * @pmsr_capa: peer measurement capabilities
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -4163,6 +4402,8 @@ struct wiphy {
u32 txq_memory_limit;
u32 txq_quantum;
+ const struct cfg80211_pmsr_capabilities *pmsr_capa;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -4365,6 +4606,9 @@ struct cfg80211_cqm_config;
* @owner_nlportid: (private) owner socket port ID
* @nl_owner_dead: (private) owner socket went away
* @cqm_config: (private) nl80211 RSSI monitor state
+ * @pmsr_list: (private) peer measurement requests
+ * @pmsr_lock: (private) peer measurements requests/results lock
+ * @pmsr_free_wk: (private) peer measurements cleanup work
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -4436,6 +4680,10 @@ struct wireless_dev {
#endif
struct cfg80211_cqm_config *cqm_config;
+
+ struct list_head pmsr_list;
+ spinlock_t pmsr_lock;
+ struct work_struct pmsr_free_wk;
};
static inline u8 *wdev_address(struct wireless_dev *wdev)
@@ -5328,7 +5576,8 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
* cfg80211 then sends a notification to userspace.
*/
void cfg80211_notify_new_peer_candidate(struct net_device *dev,
- const u8 *macaddr, const u8 *ie, u8 ie_len, gfp_t gfp);
+ const u8 *macaddr, const u8 *ie, u8 ie_len,
+ int sig_dbm, gfp_t gfp);
/**
* DOC: RFkill integration
@@ -6630,6 +6879,31 @@ int cfg80211_external_auth_request(struct net_device *netdev,
struct cfg80211_external_auth_params *params,
gfp_t gfp);
+/**
+ * cfg80211_pmsr_report - report peer measurement result data
+ * @wdev: the wireless device reporting the measurement
+ * @req: the original measurement request
+ * @result: the result data
+ * @gfp: allocation flags
+ */
+void cfg80211_pmsr_report(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ struct cfg80211_pmsr_result *result,
+ gfp_t gfp);
+
+/**
+ * cfg80211_pmsr_complete - report peer measurement completed
+ * @wdev: the wireless device reporting the measurement
+ * @req: the original measurement request
+ * @gfp: allocation flags
+ *
+ * Report that the entire measurement completed, after this
+ * the request pointer will no longer be valid.
+ */
+void cfg80211_pmsr_complete(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ gfp_t gfp);
+
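To show how the two helpers are meant to be paired, here is a hedged driver-side sketch: one result is reported per peer and the request is then marked complete, after which the request pointer must not be used again. The surrounding driver context and the firmware data are invented for the example.

#include <net/cfg80211.h>

static void example_pmsr_done(struct wireless_dev *wdev,
			      struct cfg80211_pmsr_request *req)
{
	struct cfg80211_pmsr_result res = {};
	u32 i;

	for (i = 0; i < req->n_peers; i++) {
		memcpy(res.addr, req->peers[i].addr, ETH_ALEN);
		res.type = NL80211_PMSR_TYPE_FTM;
		res.status = NL80211_PMSR_STATUS_SUCCESS;
		res.final = 1;
		/* res.ftm would be filled from the firmware response here */
		cfg80211_pmsr_report(wdev, req, &res, GFP_KERNEL);
	}

	cfg80211_pmsr_complete(wdev, req, GFP_KERNEL);
}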
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 45db0c79462d..67f4293bc970 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -365,6 +365,7 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -392,6 +393,9 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME "msix_vec_per_pf_min"
#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE DEVLINK_PARAM_TYPE_U32
+#define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME "fw_load_policy"
+#define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE DEVLINK_PARAM_TYPE_U8
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 23690c44e167..b3eefe8e18fd 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -36,7 +36,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_DSA,
DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_GSWIP,
- DSA_TAG_PROTO_KSZ,
+ DSA_TAG_PROTO_KSZ9477,
DSA_TAG_PROTO_LAN9303,
DSA_TAG_PROTO_MTK,
DSA_TAG_PROTO_QCA,
@@ -113,6 +113,7 @@ struct dsa_device_ops {
struct packet_type *pt);
int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
int *offset);
+ unsigned int overhead;
};
struct dsa_switch_tree {
diff --git a/include/net/flow.h b/include/net/flow.h
index 8ce21793094e..93f2c9a0f098 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -38,8 +38,8 @@ struct flowi_common {
#define FLOWI_FLAG_KNOWN_NH 0x02
#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
- struct flowi_tunnel flowic_tun_key;
kuid_t flowic_uid;
+ struct flowi_tunnel flowic_tun_key;
};
union flowi_uli {
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 6a4586dcdede..2b26979efb48 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -209,8 +209,8 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */
FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */
- FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
- FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
+ FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_vlan */
+ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */
FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_ENC_KEYID, /* struct flow_dissector_key_keyid */
@@ -221,7 +221,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
- FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */
+ FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */
FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */
FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 946bd53a9f81..ca23860adbb9 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -10,7 +10,7 @@
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
struct u64_stats_sync syncp;
-};
+} __aligned(2 * sizeof(u64));
struct net_rate_estimator;
diff --git a/include/net/geneve.h b/include/net/geneve.h
index a7600ed55ea3..fc6a7e0a874a 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -60,6 +60,12 @@ struct genevehdr {
struct geneve_opt options[];
};
+static inline bool netif_is_geneve(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "geneve");
+}
+
#ifdef CONFIG_INET
struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
u8 name_assign_type, u16 dst_port);
diff --git a/include/net/gre.h b/include/net/gre.h
index 797142eee9cd..b60f212c16c6 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -37,8 +37,17 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, __be16 proto, int nhs);
-bool is_gretap_dev(const struct net_device *dev);
-bool is_ip6gretap_dev(const struct net_device *dev);
+static inline bool netif_is_gretap(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "gretap");
+}
+
+static inline bool netif_is_ip6gretap(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "ip6gretap");
+}
static inline int gre_calc_hlen(__be16 o_flags)
{
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 3ef2743a8eec..6ac3a5bd0117 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -41,7 +41,7 @@ struct net;
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
int icmp_rcv(struct sk_buff *skb);
-void icmp_err(struct sk_buff *skb, u32 info);
+int icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
void icmp_out_count(struct net *net, unsigned char type);
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d7578cf49c3a..c9c78c15bce0 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -146,10 +146,12 @@ struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
struct ifacaddr6 *aca_next;
+ struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
unsigned long aca_cstamp;
unsigned long aca_tstamp;
+ struct rcu_head rcu;
};
#define IFA_HOST IPV6_ADDR_LOOPBACK
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 6e91e38a31da..9db98af46985 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -115,9 +115,8 @@ int inet6_hash(struct sock *sk);
((__sk)->sk_family == AF_INET6) && \
ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
+ (((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 3ca969cbd161..975901a95c0f 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -2,6 +2,8 @@
#ifndef _INET_COMMON_H
#define _INET_COMMON_H
+#include <linux/indirect_call_wrapper.h>
+
extern const struct proto_ops inet_stream_ops;
extern const struct proto_ops inet_dgram_ops;
@@ -54,4 +56,11 @@ static inline void inet_ctl_sock_destroy(struct sock *sk)
sock_release(sk->sk_socket);
}
+#define indirect_call_gro_receive(f2, f1, cb, head, skb) \
+({ \
+ unlikely(gro_recursion_inc_test(skb)) ? \
+ NAPI_GRO_CB(skb)->flush |= 1, NULL : \
+ INDIRECT_CALL_2(cb, f2, f1, head, skb); \
+})
+
#endif
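A sketch of how a caller might use the new indirect_call_gro_receive() helper: the two most likely concrete handlers are named so that, with retpolines enabled, the indirect call through ops->callbacks.gro_receive can usually be avoided. The handler names and their prototypes below are assumptions standing in for whatever the real call site expects most often.

#include <net/inet_common.h>
#include <net/protocol.h>

/* Assumed visible at the call site; declared here only for the sketch. */
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb);
struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb);

static struct sk_buff *example_gro_receive(const struct net_offload *ops,
					   struct list_head *head,
					   struct sk_buff *skb)
{
	/* Try the named handlers directly before the indirect pointer. */
	return indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
					 ops->callbacks.gro_receive,
					 head, skb);
}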
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 9141e95529e7..babb14136705 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -79,6 +79,7 @@ struct inet_ehash_bucket {
struct inet_bind_bucket {
possible_net_t ib_net;
+ int l3mdev;
unsigned short port;
signed char fastreuse;
signed char fastreuseport;
@@ -188,10 +189,21 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
hashinfo->ehash_locks = NULL;
}
+static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
struct inet_bind_hashbucket *head,
- const unsigned short snum);
+ const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
@@ -225,6 +237,7 @@ void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
unsigned long numentries, int scale,
unsigned long low_limit,
unsigned long high_limit);
+int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
@@ -282,9 +295,8 @@ static inline struct sock *inet_lookup_listener(struct net *net,
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
(((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_addrpair == (__cookie)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
+ (((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
@@ -294,9 +306,8 @@ static inline struct sock *inet_lookup_listener(struct net *net,
(((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_daddr == (__saddr)) && \
((__sk)->sk_rcv_saddr == (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
+ (((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a80fd0ac4563..e8eef85006aa 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -130,6 +130,27 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}
+static inline int inet_sk_bound_l3mdev(const struct sock *sk)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ struct net *net = sock_net(sk);
+
+ if (!net->ipv4.sysctl_tcp_l3mdev_accept)
+ return l3mdev_master_ifindex_by_index(net,
+ sk->sk_bound_dev_if);
+#endif
+
+ return 0;
+}
+
+static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
+ int dif, int sdif)
+{
+ if (!bound_dev_if)
+ return !sdif || l3mdev_accept;
+ return bound_dev_if == dif || bound_dev_if == sdif;
+}
+
struct inet_cork {
unsigned int flags;
__be32 addr;
diff --git a/include/net/ip.h b/include/net/ip.h
index 72593e171d14..8866bfce6121 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -155,6 +155,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
+void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -421,7 +422,8 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
}
struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
- int fc_mx_len);
+ int fc_mx_len,
+ struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
if (fib_metrics != &dst_default_metrics &&
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 236e40ba06bf..69b4bcf880c9 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -69,6 +69,8 @@ struct ip6_tnl_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, struct flowi6 *fl6);
+ int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info);
};
#ifdef CONFIG_INET
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index b0d022ff6ea1..cbcf35ce1b14 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -144,25 +144,6 @@ struct ip_tunnel {
bool ignore_df;
};
-#define TUNNEL_CSUM __cpu_to_be16(0x01)
-#define TUNNEL_ROUTING __cpu_to_be16(0x02)
-#define TUNNEL_KEY __cpu_to_be16(0x04)
-#define TUNNEL_SEQ __cpu_to_be16(0x08)
-#define TUNNEL_STRICT __cpu_to_be16(0x10)
-#define TUNNEL_REC __cpu_to_be16(0x20)
-#define TUNNEL_VERSION __cpu_to_be16(0x40)
-#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
-#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
-#define TUNNEL_OAM __cpu_to_be16(0x0200)
-#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
-#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
-#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
-#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
-#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
-
-#define TUNNEL_OPTIONS_PRESENT \
- (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
-
struct tnl_ptk_info {
__be16 flags;
__be16 proto;
@@ -311,6 +292,7 @@ struct ip_tunnel_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, struct flowi4 *fl4);
+ int (*err_handler)(struct sk_buff *skb, u32 info);
};
#define MAX_IPTUN_ENCAP_OPS 8
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 829650540780..daf80863d3a5 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -975,6 +975,8 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_forward(struct sk_buff *skb);
int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);
+void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
+ bool have_final);
int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 3832099289c5..78fa0ac4613c 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -101,6 +101,17 @@ struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
return master;
}
+int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex);
+static inline
+int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex)
+{
+ rcu_read_lock();
+ ifindex = l3mdev_master_upper_ifindex_by_index_rcu(net, ifindex);
+ rcu_read_unlock();
+
+ return ifindex;
+}
+
u32 l3mdev_fib_table_rcu(const struct net_device *dev);
u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
static inline u32 l3mdev_fib_table(const struct net_device *dev)
@@ -208,6 +219,17 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
+int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
+{
+ return 0;
+}
+static inline
+int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex)
+{
+ return 0;
+}
+
+static inline
struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
{
return NULL;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 71985e95d2d9..88219cc137c3 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -467,7 +467,7 @@ struct ieee80211_mu_group_data {
};
/**
- * ieee80211_ftm_responder_params - FTM responder parameters
+ * struct ieee80211_ftm_responder_params - FTM responder parameters
*
* @lci: LCI subelement content
* @civicloc: CIVIC location subelement content
@@ -496,6 +496,8 @@ struct ieee80211_ftm_responder_params {
* @uora_ocw_range: UORA element's OCW Range field
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @he_support: does this BSS support HE
+ * @twt_requester: does this BSS support TWT requester (relevant for managed
+ * mode only, set if the AP advertises TWT responder role)
* @assoc: association status
* @ibss_joined: indicates whether this station is part of an IBSS
* or not
@@ -594,6 +596,7 @@ struct ieee80211_bss_conf {
u8 uora_ocw_range;
u16 frame_time_rts_th;
bool he_support;
+ bool twt_requester;
/* association related data */
bool assoc, ibss_joined;
bool ibss_creator;
@@ -3239,6 +3242,11 @@ enum ieee80211_reconfig_type {
* When the scan finishes, ieee80211_scan_completed() must be called;
* note that it also must be called when the scan cannot finish due to
* any error unless this callback returned a negative error code.
+ * This callback is also allowed to return the special return value 1,
+ * which indicates that hardware scan isn't desirable right now and a
+ * software scan should be done instead. A driver wishing to use this
+ * capability must ensure its (hardware) scan capabilities aren't
+ * advertised as more capable than mac80211's software scan is.
* The callback can sleep.
*
* @cancel_hw_scan: Ask the low-level to cancel the active hw scan.
@@ -3623,6 +3631,9 @@ enum ieee80211_reconfig_type {
* skb is always a real frame, head may or may not be an A-MSDU.
* @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
* Statistics should be cumulative, currently no way to reset is provided.
+ *
+ * @start_pmsr: start peer measurement (e.g. FTM) (this call can sleep)
+ * @abort_pmsr: abort peer measurement (this call can sleep)
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3911,6 +3922,10 @@ struct ieee80211_ops {
int (*get_ftm_responder_stats)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_ftm_responder_stats *ftm_stats);
+ int (*start_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
+ void (*abort_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
};
/**
@@ -6091,6 +6106,14 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
* @txq: pointer obtained from station or virtual interface
*
* Returns the skb if successful, %NULL if no frame was available.
+ *
+ * Note that this must be called in an rcu_read_lock() critical section,
+ * which can only be released after the SKB was handled. Some pointers in
+ * skb->cb, e.g. the key pointer, are protected by RCU and thus the
+ * critical section must persist not just for the duration of this call
+ * but for the duration of the frame handling.
+ * However, also note that while in the wake_tx_queue() method,
+ * rcu_read_lock() is already held.
*/
struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index f58b384aa6c9..7c1ab9edba03 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -140,8 +140,8 @@ struct neighbour {
unsigned long updated;
rwlock_t lock;
refcount_t refcnt;
- struct sk_buff_head arp_queue;
unsigned int arp_queue_len_bytes;
+ struct sk_buff_head arp_queue;
struct timer_list timer;
unsigned long used;
atomic_t probes;
@@ -149,11 +149,13 @@ struct neighbour {
__u8 nud_state;
__u8 type;
__u8 dead;
+ u8 protocol;
seqlock_t ha_lock;
- unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+ unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
struct hh_cache hh;
int (*output)(struct neighbour *, struct sk_buff *);
const struct neigh_ops *ops;
+ struct list_head gc_list;
struct rcu_head rcu;
struct net_device *dev;
u8 primary_key[0];
@@ -172,6 +174,7 @@ struct pneigh_entry {
possible_net_t net;
struct net_device *dev;
u8 flags;
+ u8 protocol;
u8 key[0];
};
@@ -214,6 +217,8 @@ struct neigh_table {
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
atomic_t entries;
+ atomic_t gc_entries;
+ struct list_head gc_list;
rwlock_t lock;
unsigned long last_rand;
struct neigh_statistics __percpu *stats;
@@ -250,6 +255,7 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
+extern const struct nla_policy nda_policy[];
static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
{
@@ -454,6 +460,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
+ unsigned int hh_alen = 0;
unsigned int seq;
unsigned int hh_len;
@@ -461,16 +468,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
seq = read_seqbegin(&hh->hh_lock);
hh_len = hh->hh_len;
if (likely(hh_len <= HH_DATA_MOD)) {
- /* this is inlined by gcc */
- memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+ hh_alen = HH_DATA_MOD;
+
+ /* skb_push() would proceed silently if we have room for
+ * the unaligned size but not for the aligned size:
+ * check headroom explicitly.
+ */
+ if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+ /* this is inlined by gcc */
+ memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+ HH_DATA_MOD);
+ }
} else {
- unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
+ hh_alen = HH_DATA_ALIGN(hh_len);
- memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+ if (likely(skb_headroom(skb) >= hh_alen)) {
+ memcpy(skb->data - hh_alen, hh->hh_data,
+ hh_alen);
+ }
}
} while (read_seqretry(&hh->hh_lock, seq));
- skb_push(skb, hh_len);
+ if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ __skb_push(skb, hh_len);
return dev_queue_xmit(skb);
}
@@ -528,24 +552,6 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
} while (read_seqretry(&n->ha_lock, seq));
}
-static inline void neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
- int *notify)
-{
- u8 ndm_flags = 0;
-
- if (!(flags & NEIGH_UPDATE_F_ADMIN))
- return;
-
- ndm_flags |= (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
- if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
- if (ndm_flags & NTF_EXT_LEARNED)
- neigh->flags |= NTF_EXT_LEARNED;
- else
- neigh->flags &= ~NTF_EXT_LEARNED;
- *notify = 1;
- }
-}
-
static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags,
int *notify)
{
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 74af19c3a8f7..4cd56808ac4e 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -6,12 +6,12 @@
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
- skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
+ struct nf_bridge_info *b = skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
- if (likely(skb->nf_bridge))
- refcount_set(&(skb->nf_bridge->use), 1);
+ if (b)
+ memset(b, 0, sizeof(*b));
- return skb->nf_bridge;
+ return b;
}
void nf_bridge_update_protocol(struct sk_buff *skb);
@@ -22,12 +22,6 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk,
int (*okfn)(struct net *, struct sock *,
struct sk_buff *));
-static inline struct nf_bridge_info *
-nf_bridge_info_get(const struct sk_buff *skb)
-{
- return skb->nf_bridge;
-}
-
unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb);
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
index cd24be4c4a99..13d55206bb9f 100644
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
const struct nf_nat_range2 *range,
const struct net_device *out);
-void nf_nat_masquerade_ipv4_register_notifier(void);
+int nf_nat_masquerade_ipv4_register_notifier(void);
void nf_nat_masquerade_ipv4_unregister_notifier(void);
#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
index 0c3b5ebf0bb8..2917bf95c437 100644
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -5,7 +5,7 @@
unsigned int
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
const struct net_device *out);
-void nf_nat_masquerade_ipv6_register_notifier(void);
+int nf_nat_masquerade_ipv6_register_notifier(void);
void nf_nat_masquerade_ipv6_unregister_notifier(void);
#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 7e012312cd61..249d0a5b12b8 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -27,12 +27,17 @@
#include <net/netfilter/nf_conntrack_tuple.h>
+struct nf_ct_udp {
+ unsigned long stream_ts;
+};
+
/* per conntrack: protocol private data */
union nf_conntrack_proto {
/* insert conntrack proto private data here */
struct nf_ct_dccp dccp;
struct ip_ct_sctp sctp;
struct ip_ct_tcp tcp;
+ struct nf_ct_udp udp;
struct nf_ct_gre gre;
unsigned int tmpl_padto;
};
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 79d8d16732b4..bc6745d3010e 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -46,9 +46,6 @@ struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
return acct;
};
-unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct,
- int dir);
-
/* Check if connection tracking accounting is enabled */
static inline bool nf_ct_acct_enabled(struct net *net)
{
@@ -61,8 +58,7 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
net->ct.sysctl_acct = enable;
}
-int nf_conntrack_acct_pernet_init(struct net *net);
-void nf_conntrack_acct_pernet_fini(struct net *net);
+void nf_conntrack_acct_pernet_init(struct net *net);
int nf_conntrack_acct_init(void);
void nf_conntrack_acct_fini(void);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 3f1ce9a8776e..52b44192b43f 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -142,7 +142,7 @@ void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
struct nf_conntrack_expect *exp,
u32 portid, int report);
-int nf_conntrack_ecache_pernet_init(struct net *net);
+void nf_conntrack_ecache_pernet_init(struct net *net);
void nf_conntrack_ecache_pernet_fini(struct net *net);
int nf_conntrack_ecache_init(void);
@@ -182,10 +182,7 @@ static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
u32 portid,
int report) {}
-static inline int nf_conntrack_ecache_pernet_init(struct net *net)
-{
- return 0;
-}
+static inline void nf_conntrack_ecache_pernet_init(struct net *net) {}
static inline void nf_conntrack_ecache_pernet_fini(struct net *net)
{
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 2492120b8097..ec52a8dc32fd 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -124,8 +124,7 @@ static inline void *nfct_help_data(const struct nf_conn *ct)
return (void *)help->data;
}
-int nf_conntrack_helper_pernet_init(struct net *net);
-void nf_conntrack_helper_pernet_fini(struct net *net);
+void nf_conntrack_helper_pernet_init(struct net *net);
int nf_conntrack_helper_init(void);
void nf_conntrack_helper_fini(void);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index eed04af9b75e..ae7b86f587f2 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -153,4 +153,43 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
+static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic;
+}
+
+static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp;
+}
+
+static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp;
+}
+
+static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp;
+}
+
+static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6;
+}
+
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.dccp;
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.sctp;
+}
+#endif
+
#endif /*_NF_CONNTRACK_PROTOCOL_H*/
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
index 3b661986be8f..0ed617bf0a3d 100644
--- a/include/net/netfilter/nf_conntrack_timestamp.h
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -49,21 +49,12 @@ static inline void nf_ct_set_tstamp(struct net *net, bool enable)
}
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
-int nf_conntrack_tstamp_pernet_init(struct net *net);
-void nf_conntrack_tstamp_pernet_fini(struct net *net);
+void nf_conntrack_tstamp_pernet_init(struct net *net);
int nf_conntrack_tstamp_init(void);
void nf_conntrack_tstamp_fini(void);
#else
-static inline int nf_conntrack_tstamp_pernet_init(struct net *net)
-{
- return 0;
-}
-
-static inline void nf_conntrack_tstamp_pernet_fini(struct net *net)
-{
- return;
-}
+static inline void nf_conntrack_tstamp_pernet_init(struct net *net) {}
static inline int nf_conntrack_tstamp_init(void)
{
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 77e2761d4f2f..7d5cda7ce32a 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -95,10 +95,6 @@ void flow_offload_free(struct flow_offload *flow);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
-int nf_flow_table_iterate(struct nf_flowtable *flow_table,
- void (*iter)(struct flow_offload *flow, void *data),
- void *data);
-
void nf_flow_table_cleanup(struct net_device *dev);
int nf_flow_table_init(struct nf_flowtable *flow_table);
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index d300b8f03972..d774ca0c4c5e 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -2,18 +2,11 @@
#ifndef _NF_NAT_L3PROTO_H
#define _NF_NAT_L3PROTO_H
-struct nf_nat_l4proto;
struct nf_nat_l3proto {
u8 l3proto;
- bool (*in_range)(const struct nf_conntrack_tuple *t,
- const struct nf_nat_range2 *range);
-
- u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16);
-
bool (*manip_pkt)(struct sk_buff *skb,
unsigned int iphdroff,
- const struct nf_nat_l4proto *l4proto,
const struct nf_conntrack_tuple *target,
enum nf_nat_manip_type maniptype);
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
index b4d6b29bca62..95a4655bd1ad 100644
--- a/include/net/netfilter/nf_nat_l4proto.h
+++ b/include/net/netfilter/nf_nat_l4proto.h
@@ -5,78 +5,12 @@
#include <net/netfilter/nf_nat.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
-struct nf_nat_range;
struct nf_nat_l3proto;
-struct nf_nat_l4proto {
- /* Protocol number. */
- u8 l4proto;
-
- /* Translate a packet to the target according to manip type.
- * Return true if succeeded.
- */
- bool (*manip_pkt)(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype);
-
- /* Is the manipable part of the tuple between min and max incl? */
- bool (*in_range)(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max);
-
- /* Alter the per-proto part of the tuple (depending on
- * maniptype), to give a unique tuple in the given range if
- * possible. Per-protocol part of tuple is initialized to the
- * incoming packet.
- */
- void (*unique_tuple)(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct);
-
- int (*nlattr_to_range)(struct nlattr *tb[],
- struct nf_nat_range2 *range);
-};
-
-/* Protocol registration. */
-int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
-void nf_nat_l4proto_unregister(u8 l3proto,
- const struct nf_nat_l4proto *l4proto);
-
-const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
-
-/* Built-in protocols. */
-extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
-extern const struct nf_nat_l4proto nf_nat_l4proto_udp;
-extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
-extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
-extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
-#ifdef CONFIG_NF_NAT_PROTO_DCCP
-extern const struct nf_nat_l4proto nf_nat_l4proto_dccp;
-#endif
-#ifdef CONFIG_NF_NAT_PROTO_SCTP
-extern const struct nf_nat_l4proto nf_nat_l4proto_sctp;
-#endif
-#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
-extern const struct nf_nat_l4proto nf_nat_l4proto_udplite;
-#endif
-
-bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max);
-
-void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct, u16 *rover);
-
-int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range2 *range);
-
+/* Translate a packet to the target according to manip type. Return true on success. */
+bool nf_nat_l4proto_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype);
#endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 9795d628a127..51cba0b8adf5 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -97,18 +97,14 @@ struct netns_ct {
struct delayed_work ecache_dwork;
bool ecache_dwork_pending;
#endif
+ bool auto_assign_helper_warned;
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_header;
- struct ctl_table_header *acct_sysctl_header;
- struct ctl_table_header *tstamp_sysctl_header;
- struct ctl_table_header *event_sysctl_header;
- struct ctl_table_header *helper_sysctl_header;
#endif
unsigned int sysctl_log_invalid; /* Log invalid packets */
int sysctl_events;
int sysctl_acct;
int sysctl_auto_assign_helper;
- bool auto_assign_helper_warned;
int sysctl_tstamp;
int sysctl_checksum;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index e47503b4e4d1..104a6669e344 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -103,6 +103,9 @@ struct netns_ipv4 {
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;
int sysctl_ip_early_demux;
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ int sysctl_raw_l3mdev_accept;
+#endif
int sysctl_tcp_early_demux;
int sysctl_udp_early_demux;
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 9991e5ef52cc..59f45b1e9dac 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/rhashtable-types.h>
#include <linux/xfrm.h>
#include <net/dst_ops.h>
@@ -53,6 +54,7 @@ struct netns_xfrm {
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
struct xfrm_policy_hthresh policy_hthresh;
+ struct list_head inexact_bins;
struct sock *nlsk;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 72ffb3120ced..40965fbbcd31 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -81,6 +81,14 @@ void __tcf_block_cb_unregister(struct tcf_block *block,
struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident);
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
+void tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode);
@@ -183,6 +191,32 @@ void tcf_block_cb_unregister(struct tcf_block *block,
{
}
+static inline
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ return 0;
+}
+
+static inline
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ return 0;
+}
+
+static inline
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+}
+
+static inline
+void tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+}
+
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
@@ -585,8 +619,8 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
#endif /* CONFIG_NET_CLS_IND */
-int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
- enum tc_setup_type type, void *type_data, bool err_stop);
+int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
+ void *type_data, bool err_stop);
enum tc_block_command {
TC_BLOCK_BIND,
@@ -609,6 +643,7 @@ struct tc_cls_common_offload {
struct tc_cls_u32_knode {
struct tcf_exts *exts;
+ struct tcf_result *res;
struct tc_u32_sel *sel;
u32 handle;
u32 val;
@@ -787,12 +822,21 @@ enum tc_mq_command {
TC_MQ_CREATE,
TC_MQ_DESTROY,
TC_MQ_STATS,
+ TC_MQ_GRAFT,
+};
+
+struct tc_mq_opt_offload_graft_params {
+ unsigned long queue;
+ u32 child_handle;
};
struct tc_mq_qopt_offload {
enum tc_mq_command command;
u32 handle;
- struct tc_qopt_offload_stats stats;
+ union {
+ struct tc_qopt_offload_stats stats;
+ struct tc_mq_opt_offload_graft_params graft_params;
+ };
};
enum tc_red_command {
@@ -800,13 +844,16 @@ enum tc_red_command {
TC_RED_DESTROY,
TC_RED_STATS,
TC_RED_XSTATS,
+ TC_RED_GRAFT,
};
struct tc_red_qopt_offload_params {
u32 min;
u32 max;
u32 probability;
+ u32 limit;
bool is_ecn;
+ bool is_harddrop;
struct gnet_stats_queue *qstats;
};
@@ -818,6 +865,51 @@ struct tc_red_qopt_offload {
struct tc_red_qopt_offload_params set;
struct tc_qopt_offload_stats stats;
struct red_stats *xstats;
+ u32 child_handle;
+ };
+};
+
+enum tc_gred_command {
+ TC_GRED_REPLACE,
+ TC_GRED_DESTROY,
+ TC_GRED_STATS,
+};
+
+struct tc_gred_vq_qopt_offload_params {
+ bool present;
+ u32 limit;
+ u32 prio;
+ u32 min;
+ u32 max;
+ bool is_ecn;
+ bool is_harddrop;
+ u32 probability;
+ /* Only need backlog, see struct tc_prio_qopt_offload_params */
+ u32 *backlog;
+};
+
+struct tc_gred_qopt_offload_params {
+ bool grio_on;
+ bool wred_on;
+ unsigned int dp_cnt;
+ unsigned int dp_def;
+ struct gnet_stats_queue *qstats;
+ struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
+};
+
+struct tc_gred_qopt_offload_stats {
+ struct gnet_stats_basic_packed bstats[MAX_DPs];
+ struct gnet_stats_queue qstats[MAX_DPs];
+ struct red_stats *xstats[MAX_DPs];
+};
+
+struct tc_gred_qopt_offload {
+ enum tc_gred_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_gred_qopt_offload_params set;
+ struct tc_gred_qopt_offload_stats stats;
};
};
@@ -854,4 +946,14 @@ struct tc_prio_qopt_offload {
};
};
+enum tc_root_command {
+ TC_ROOT_GRAFT,
+};
+
+struct tc_root_qopt_offload {
+ enum tc_root_command command;
+ u32 handle;
+ bool ingress;
+};
+
#endif
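
The new tc_indr_block_cb_register()/tc_indr_block_cb_unregister() pair above (with no-op stubs when classification offload is compiled out) lets a driver receive block-bind callbacks for a net_device it does not own, such as a tunnel device. A hedged sketch of how a hypothetical offload driver might hook a foreign device; struct foo_priv, foo_setup_tc_block() and the foo_* wrappers are illustrative assumptions, only the registration calls and the callback signature come from the declarations in this patch:

/* Hypothetical driver-side sketch. */
static int foo_indr_setup_cb(struct net_device *dev, void *cb_priv,
                             enum tc_setup_type type, void *type_data)
{
    struct foo_priv *priv = cb_priv;

    switch (type) {
    case TC_SETUP_BLOCK:
        /* bind/unbind the shared block and install filter callbacks here */
        return foo_setup_tc_block(priv, dev, type_data);
    default:
        return -EOPNOTSUPP;
    }
}

static int foo_watch_tunnel_dev(struct foo_priv *priv, struct net_device *dev)
{
    /* cb_ident distinguishes registrations; reusing priv is a common choice */
    return tc_indr_block_cb_register(dev, priv, foo_indr_setup_cb, priv);
}

static void foo_unwatch_tunnel_dev(struct foo_priv *priv, struct net_device *dev)
{
    tc_indr_block_cb_unregister(dev, foo_indr_setup_cb, priv);
}
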
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 4fc75f7ae23b..92b3eaad6088 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -42,7 +42,10 @@ struct net_protocol {
int (*early_demux)(struct sk_buff *skb);
int (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
- void (*err_handler)(struct sk_buff *skb, u32 info);
+
+ /* This returns an error if we weren't able to handle the error. */
+ int (*err_handler)(struct sk_buff *skb, u32 info);
+
unsigned int no_policy:1,
netns_ok:1,
/* does the protocol do more stringent
@@ -58,10 +61,12 @@ struct inet6_protocol {
void (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
- void (*err_handler)(struct sk_buff *skb,
+ /* This returns an error if we weren't able to handle the error. */
+ int (*err_handler)(struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset,
__be32 info);
+
unsigned int flags; /* INET6_PROTO_xxx */
};
diff --git a/include/net/raw.h b/include/net/raw.h
index 9c9fa98a91a4..821ff4887f77 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -17,7 +17,7 @@
#ifndef _RAW_H
#define _RAW_H
-
+#include <net/inet_sock.h>
#include <net/protocol.h>
#include <linux/icmp.h>
@@ -61,6 +61,7 @@ void raw_seq_stop(struct seq_file *seq, void *v);
int raw_hash_sk(struct sock *sk);
void raw_unhash_sk(struct sock *sk);
+void raw_init(void);
struct raw_sock {
/* inet_sock has to be the first member */
@@ -74,4 +75,15 @@ static inline struct raw_sock *raw_sk(const struct sock *sk)
return (struct raw_sock *)sk;
}
+static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept,
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
#endif /* _RAW_H */
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index cf26e5aacac4..e2091bb2b3a8 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -159,7 +159,8 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
struct net_device *rtnl_create_link(struct net *net, const char *ifname,
unsigned char name_assign_type,
const struct rtnl_link_ops *ops,
- struct nlattr *tb[]);
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
int rtnl_delete_link(struct net_device *dev);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4d736427a4cb..9481f2c142e2 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -24,6 +24,9 @@ struct bpf_flow_keys;
typedef int tc_setup_cb_t(enum tc_setup_type type,
void *type_data, void *cb_priv);
+typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
+ enum tc_setup_type type, void *type_data);
+
struct qdisc_rate_table {
struct tc_ratespec rate;
u32 data[256];
@@ -579,6 +582,30 @@ void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
unsigned int len);
+#ifdef CONFIG_NET_SCHED
+int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data);
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack);
+#else
+static inline int
+qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data)
+{
+ q->flags &= ~TCQ_F_OFFLOADED;
+ return 0;
+}
+
+static inline void
+qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack)
+{
+}
+#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 8dadc74c22e7..4588bdc2b8f0 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -71,7 +71,7 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
SCTP_NUM_AUTH_CHUNK_TYPES)
/* These are the different flavours of event. */
-enum sctp_event {
+enum sctp_event_type {
SCTP_EVENT_T_CHUNK = 1,
SCTP_EVENT_T_TIMEOUT,
SCTP_EVENT_T_OTHER,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 8c2caa370e0f..1d13ec3f2707 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -151,8 +151,8 @@ int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc,
* sctp/input.c
*/
int sctp_rcv(struct sk_buff *skb);
-void sctp_v4_err(struct sk_buff *skb, u32 info);
-void sctp_hash_endpoint(struct sctp_endpoint *);
+int sctp_v4_err(struct sk_buff *skb, u32 info);
+int sctp_hash_endpoint(struct sctp_endpoint *ep);
void sctp_unhash_endpoint(struct sctp_endpoint *);
struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
struct sctphdr *, struct sctp_association **,
@@ -608,4 +608,21 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
SCTP_DEFAULT_MINSEGMENT));
}
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+ __u32 pmtu = sctp_dst_mtu(t->dst);
+
+ if (t->pathmtu == pmtu)
+ return true;
+
+ t->pathmtu = pmtu;
+
+ return false;
+}
+
+static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
+{
+ return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
+}
+
#endif /* __net_sctp_h__ */
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 9e3d32746430..24825a81829e 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -173,7 +173,7 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire;
__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
const struct sctp_sm_table_entry *sctp_sm_lookup_event(
struct net *net,
- enum sctp_event event_type,
+ enum sctp_event_type event_type,
enum sctp_state state,
union sctp_subtype event_subtype);
int sctp_chunk_iif(const struct sctp_chunk *);
@@ -313,7 +313,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
/* Prototypes for statetable processing. */
-int sctp_do_sm(struct net *net, enum sctp_event event_type,
+int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
union sctp_subtype subtype, enum sctp_state state,
struct sctp_endpoint *ep, struct sctp_association *asoc,
void *event_arg, gfp_t gfp);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a11f93790476..003020eb6e66 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -96,7 +96,9 @@ struct sctp_stream;
struct sctp_bind_bucket {
unsigned short port;
- unsigned short fastreuse;
+ signed char fastreuse;
+ signed char fastreuseport;
+ kuid_t fastuid;
struct hlist_node node;
struct hlist_head owner;
struct net *net;
@@ -215,7 +217,7 @@ struct sctp_sock {
* These two structures must be grouped together for the usercopy
* whitelist region.
*/
- struct sctp_event_subscribe subscribe;
+ __u16 subscribe;
struct sctp_initmsg initmsg;
int user_frag;
@@ -1190,6 +1192,8 @@ int sctp_bind_addr_conflict(struct sctp_bind_addr *, const union sctp_addr *,
struct sctp_sock *, struct sctp_sock *);
int sctp_bind_addr_state(const struct sctp_bind_addr *bp,
const union sctp_addr *addr);
+int sctp_bind_addrs_check(struct sctp_sock *sp,
+ struct sctp_sock *sp2, int cnt2);
union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
const union sctp_addr *addrs,
int addrcnt,
@@ -2073,8 +2077,12 @@ struct sctp_association {
int sent_cnt_removable;
+ __u16 subscribe;
+
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+
+ struct rcu_head rcu;
};
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 51b4e0626c34..bd922a0fe914 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -164,30 +164,39 @@ void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+static inline void sctp_ulpevent_type_set(__u16 *subscribe,
+ __u16 sn_type, __u8 on)
+{
+ if (sn_type > SCTP_SN_TYPE_MAX)
+ return;
+
+ if (on)
+ *subscribe |= (1 << (sn_type - SCTP_SN_TYPE_BASE));
+ else
+ *subscribe &= ~(1 << (sn_type - SCTP_SN_TYPE_BASE));
+}
+
/* Is this event type enabled? */
-static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
- struct sctp_event_subscribe *mask)
+static inline bool sctp_ulpevent_type_enabled(__u16 subscribe, __u16 sn_type)
{
- int offset = sn_type - SCTP_SN_TYPE_BASE;
- char *amask = (char *) mask;
+ if (sn_type > SCTP_SN_TYPE_MAX)
+ return false;
- if (offset >= sizeof(struct sctp_event_subscribe))
- return 0;
- return amask[offset];
+ return subscribe & (1 << (sn_type - SCTP_SN_TYPE_BASE));
}
/* Given an event subscription, is this event enabled? */
-static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
- struct sctp_event_subscribe *mask)
+static inline bool sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
+ __u16 subscribe)
{
__u16 sn_type;
- int enabled = 1;
- if (sctp_ulpevent_is_notification(event)) {
- sn_type = sctp_ulpevent_get_notification_type(event);
- enabled = sctp_ulpevent_type_enabled(sn_type, mask);
- }
- return enabled;
+ if (!sctp_ulpevent_is_notification(event))
+ return true;
+
+ sn_type = sctp_ulpevent_get_notification_type(event);
+
+ return sctp_ulpevent_type_enabled(subscribe, sn_type);
}
#endif /* __sctp_ulpevent_h__ */
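
The event subscription moves from a struct of per-event bytes to a single __u16 bitmap, and the helpers above encode one bit per notification type. A small self-contained C demonstration of the same bit arithmetic; the base/max constants below are placeholders standing in for SCTP_SN_TYPE_BASE/SCTP_SN_TYPE_MAX from the UAPI headers, not values taken from this patch:

#include <stdio.h>
#include <stdint.h>

#define SN_TYPE_BASE 0x8000
#define SN_TYPE_MAX  (SN_TYPE_BASE + 15)

/* Same shape as sctp_ulpevent_type_set()/_enabled(): one bit per
 * notification type, indexed from the base value.
 */
static void type_set(uint16_t *subscribe, uint16_t sn_type, int on)
{
    if (sn_type > SN_TYPE_MAX)
        return;
    if (on)
        *subscribe |= 1u << (sn_type - SN_TYPE_BASE);
    else
        *subscribe &= ~(1u << (sn_type - SN_TYPE_BASE));
}

static int type_enabled(uint16_t subscribe, uint16_t sn_type)
{
    if (sn_type > SN_TYPE_MAX)
        return 0;
    return !!(subscribe & (1u << (sn_type - SN_TYPE_BASE)));
}

int main(void)
{
    uint16_t subscribe = 0;

    type_set(&subscribe, SN_TYPE_BASE + 3, 1);   /* subscribe to one event */
    printf("bitmap=0x%04x enabled=%d\n",
           subscribe, type_enabled(subscribe, SN_TYPE_BASE + 3));
    type_set(&subscribe, SN_TYPE_BASE + 3, 0);   /* unsubscribe again */
    printf("bitmap=0x%04x enabled=%d\n",
           subscribe, type_enabled(subscribe, SN_TYPE_BASE + 3));
    return 0;
}
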
diff --git a/include/net/seg6.h b/include/net/seg6.h
index 2567941a2f32..8b2dc6869fd1 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -16,7 +16,6 @@
#include <linux/net.h>
#include <linux/ipv6.h>
-#include <net/lwtunnel.h>
#include <linux/seg6.h>
#include <linux/rhashtable-types.h>
diff --git a/include/net/sock.h b/include/net/sock.h
index f665d74ae509..a6235c286ef9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1110,7 +1110,7 @@ struct proto {
unsigned int inuse_idx;
#endif
- bool (*stream_memory_free)(const struct sock *sk);
+ bool (*stream_memory_free)(const struct sock *sk, int wake);
bool (*stream_memory_read)(const struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
@@ -1192,19 +1192,29 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
-static inline bool sk_stream_memory_free(const struct sock *sk)
+static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (sk->sk_wmem_queued >= sk->sk_sndbuf)
return false;
return sk->sk_prot->stream_memory_free ?
- sk->sk_prot->stream_memory_free(sk) : true;
+ sk->sk_prot->stream_memory_free(sk, wake) : true;
}
-static inline bool sk_stream_is_writeable(const struct sock *sk)
+static inline bool sk_stream_memory_free(const struct sock *sk)
+{
+ return __sk_stream_memory_free(sk, 0);
+}
+
+static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
{
return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
- sk_stream_memory_free(sk);
+ __sk_stream_memory_free(sk, wake);
+}
+
+static inline bool sk_stream_is_writeable(const struct sock *sk)
+{
+ return __sk_stream_is_writeable(sk, 0);
}
static inline int sk_under_cgroup_hierarchy(struct sock *sk,
@@ -2340,22 +2350,39 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
/**
- * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
+ * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
* @tsflags: timestamping flags to use
* @tx_flags: completed with instructions for time stamping
+ * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
*
* Note: callers should take care of initial ``*tx_flags`` value (usually 0)
*/
-static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
- __u8 *tx_flags)
+static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+ __u8 *tx_flags, __u32 *tskey)
{
- if (unlikely(tsflags))
+ if (unlikely(tsflags)) {
__sock_tx_timestamp(tsflags, tx_flags);
+ if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
+ tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
+ *tskey = sk->sk_tskey++;
+ }
if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
*tx_flags |= SKBTX_WIFI_STATUS;
}
+static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+ __u8 *tx_flags)
+{
+ _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
+}
+
+static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+{
+ _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
+ &skb_shinfo(skb)->tskey);
+}
+
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
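
In the sock.h hunk above, _sock_tx_timestamp() now also hands back the next sk_tskey when the caller asked for SOF_TIMESTAMPING_OPT_ID together with a TX record flag, and skb_setup_tx_timestamp() wires that key straight into the skb's shared info. A tiny self-contained check of just that key-selection rule; the flag bits below are placeholders for the real UAPI constants:

#include <stdio.h>
#include <stdint.h>

#define OPT_ID        0x01   /* stand-in for SOF_TIMESTAMPING_OPT_ID */
#define TX_RECORD_ANY 0x0e   /* stand-in for SOF_TIMESTAMPING_TX_RECORD_MASK */

/* Mirrors the rule added in _sock_tx_timestamp(): the per-socket key only
 * advances when the caller both requested an ID and records a TX timestamp
 * for this packet.
 */
static int maybe_take_key(uint32_t *sk_tskey, uint16_t tsflags, uint32_t *tskey)
{
    if (!tsflags)
        return 0;
    if ((tsflags & OPT_ID) && tskey && (tsflags & TX_RECORD_ANY)) {
        *tskey = (*sk_tskey)++;
        return 1;
    }
    return 0;
}

int main(void)
{
    uint32_t sk_tskey = 100, key = 0;

    printf("ID only:     took=%d key=%u next=%u\n",
           maybe_take_key(&sk_tskey, OPT_ID, &key), key, sk_tskey);
    printf("ID + record: took=%d key=%u next=%u\n",
           maybe_take_key(&sk_tskey, OPT_ID | 0x02, &key), key, sk_tskey);
    return 0;
}
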
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 881ecb1555bf..a7fdab5ee6c3 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -95,8 +95,8 @@ struct switchdev_obj_port_vlan {
u16 vid_end;
};
-#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
- container_of(obj, struct switchdev_obj_port_vlan, obj)
+#define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \
+ container_of((OBJ), struct switchdev_obj_port_vlan, obj)
/* SWITCHDEV_OBJ_ID_PORT_MDB */
struct switchdev_obj_port_mdb {
@@ -105,8 +105,8 @@ struct switchdev_obj_port_mdb {
u16 vid;
};
-#define SWITCHDEV_OBJ_PORT_MDB(obj) \
- container_of(obj, struct switchdev_obj_port_mdb, obj)
+#define SWITCHDEV_OBJ_PORT_MDB(OBJ) \
+ container_of((OBJ), struct switchdev_obj_port_mdb, obj)
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
void *data, void (*destructor)(void const *),
@@ -121,10 +121,6 @@ typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
* @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
*
* @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
- *
- * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*).
- *
- * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*).
*/
struct switchdev_ops {
int (*switchdev_port_attr_get)(struct net_device *dev,
@@ -132,11 +128,6 @@ struct switchdev_ops {
int (*switchdev_port_attr_set)(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans);
- int (*switchdev_port_obj_add)(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans);
- int (*switchdev_port_obj_del)(struct net_device *dev,
- const struct switchdev_obj *obj);
};
enum switchdev_notifier_type {
@@ -146,6 +137,11 @@ enum switchdev_notifier_type {
SWITCHDEV_FDB_DEL_TO_DEVICE,
SWITCHDEV_FDB_OFFLOADED,
+ SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
+ SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
+
+ SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE,
+ SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE,
SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE,
SWITCHDEV_VXLAN_FDB_OFFLOADED,
@@ -153,6 +149,7 @@ enum switchdev_notifier_type {
struct switchdev_notifier_info {
struct net_device *dev;
+ struct netlink_ext_ack *extack;
};
struct switchdev_notifier_fdb_info {
@@ -163,12 +160,25 @@ struct switchdev_notifier_fdb_info {
offloaded:1;
};
+struct switchdev_notifier_port_obj_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const struct switchdev_obj *obj;
+ struct switchdev_trans *trans;
+ bool handled;
+};
+
static inline struct net_device *
switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
{
return info->dev;
}
+static inline struct netlink_ext_ack *
+switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info)
+{
+ return info->extack;
+}
+
#ifdef CONFIG_NET_SWITCHDEV
void switchdev_deferred_process(void);
@@ -177,13 +187,22 @@ int switchdev_port_attr_get(struct net_device *dev,
int switchdev_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr);
int switchdev_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj);
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack);
int switchdev_port_obj_del(struct net_device *dev,
const struct switchdev_obj *obj);
+
int register_switchdev_notifier(struct notifier_block *nb);
int unregister_switchdev_notifier(struct notifier_block *nb);
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info);
+
+int register_switchdev_blocking_notifier(struct notifier_block *nb);
+int unregister_switchdev_blocking_notifier(struct notifier_block *nb);
+int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack);
+
void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
@@ -191,6 +210,19 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
bool switchdev_port_same_parent_id(struct net_device *a,
struct net_device *b);
+int switchdev_handle_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*add_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack));
+int switchdev_handle_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*del_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj));
+
#define SWITCHDEV_SET_OPS(netdev, ops) ((netdev)->switchdev_ops = (ops))
#else
@@ -211,7 +243,8 @@ static inline int switchdev_port_attr_set(struct net_device *dev,
}
static inline int switchdev_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj)
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -239,12 +272,55 @@ static inline int call_switchdev_notifiers(unsigned long val,
return NOTIFY_DONE;
}
+static inline int
+register_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int
+unregister_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int
+call_switchdev_blocking_notifiers(unsigned long val,
+ struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack)
+{
+ return NOTIFY_DONE;
+}
+
static inline bool switchdev_port_same_parent_id(struct net_device *a,
struct net_device *b)
{
return false;
}
+static inline int
+switchdev_handle_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*add_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack))
+{
+ return 0;
+}
+
+static inline int
+switchdev_handle_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*del_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj))
+{
+ return 0;
+}
+
#define SWITCHDEV_SET_OPS(netdev, ops) do {} while (0)
#endif
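
Port object add/del moves from switchdev_ops to a blocking notifier chain, with switchdev_handle_port_obj_add()/_del() doing the dispatch on behalf of drivers. A hedged sketch of the consumer side, assuming a hypothetical driver whose foo_port_dev_check()/foo_port_obj_add()/foo_port_obj_del() helpers are not part of this patch; only the switchdev calls come from the declarations above:

static int foo_switchdev_blocking_event(struct notifier_block *nb,
                                        unsigned long event, void *ptr)
{
    struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
    int err;

    switch (event) {
    case SWITCHDEV_PORT_OBJ_ADD:
        err = switchdev_handle_port_obj_add(dev, ptr, foo_port_dev_check,
                                            foo_port_obj_add);
        return notifier_from_errno(err);
    case SWITCHDEV_PORT_OBJ_DEL:
        err = switchdev_handle_port_obj_del(dev, ptr, foo_port_dev_check,
                                            foo_port_obj_del);
        return notifier_from_errno(err);
    }
    return NOTIFY_DONE;
}

static struct notifier_block foo_switchdev_blocking_nb = {
    .notifier_call = foo_switchdev_blocking_event,
};

/* in the driver's init path (error handling omitted):
 *     err = register_switchdev_blocking_notifier(&foo_switchdev_blocking_nb);
 */
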
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a18914d20486..e0a65c067662 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -313,7 +313,7 @@ extern struct proto tcp_prot;
void tcp_tasklet_init(void);
-void tcp_v4_err(struct sk_buff *skb, u32);
+int tcp_v4_err(struct sk_buff *skb, u32);
void tcp_shutdown(struct sock *sk, int how);
@@ -1124,7 +1124,7 @@ void tcp_rate_check_app_limited(struct sock *sk);
*/
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
- return tp->rx_opt.sack_ok;
+ return likely(tp->rx_opt.sack_ok);
}
static inline bool tcp_is_reno(const struct tcp_sock *tp)
@@ -1315,33 +1315,16 @@ static inline __sum16 tcp_v4_check(int len, __be32 saddr,
return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}
-static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
-{
- return __skb_checksum_complete(skb);
-}
-
static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
- __tcp_checksum_complete(skb);
+ __skb_checksum_complete(skb);
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
-
-#undef STATE_TRACE
-
-#ifdef STATE_TRACE
-static const char *statename[]={
- "Unused","Established","Syn Sent","Syn Recv",
- "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
- "Close Wait","Last ACK","Listen","Closing"
-};
-#endif
void tcp_set_state(struct sock *sk, int state);
-
void tcp_done(struct sock *sk);
-
int tcp_abort(struct sock *sk, int err);
static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
@@ -1385,7 +1368,7 @@ static inline int tcp_win_from_space(const struct sock *sk, int space)
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
- return tcp_win_from_space(sk, sk->sk_rcvbuf -
+ return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len -
atomic_read(&sk->sk_rmem_alloc));
}
@@ -1572,9 +1555,21 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
-struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
- const union tcp_md5_addr *addr,
- int family);
+#include <linux/jump_label.h>
+extern struct static_key tcp_md5_needed;
+struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family);
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup(const struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family)
+{
+ if (!static_key_false(&tcp_md5_needed))
+ return NULL;
+ return __tcp_md5_do_lookup(sk, addr, family);
+}
+
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
@@ -1875,12 +1870,16 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
}
-static inline bool tcp_stream_memory_free(const struct sock *sk)
+/* @wake is one when sk_stream_write_space() calls us.
+ * This sends EPOLLOUT only if notsent_bytes is below half the limit.
+ * This mimics the strategy used in sock_def_write_space().
+ */
+static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
const struct tcp_sock *tp = tcp_sk(sk);
u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
- return notsent_bytes < tcp_notsent_lowat(tp);
+ return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
#ifdef CONFIG_PROC_FS
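
tcp_stream_memory_free() above now takes a wake argument so that the write_space wakeup path only signals writability once the not-yet-sent backlog has dropped below half of tcp_notsent_lowat(), while the ordinary poll/sendmsg path keeps the full limit. A tiny self-contained check of that shift-based comparison:

#include <stdio.h>
#include <stdint.h>

/* (notsent_bytes << wake) < lowat:
 *   wake == 0 -> writable once below the full limit (poll/sendmsg path)
 *   wake == 1 -> writable only once below half the limit (write_space wakeup)
 */
static int stream_memory_free(uint32_t notsent_bytes, uint32_t lowat, int wake)
{
    return (notsent_bytes << wake) < lowat;
}

int main(void)
{
    uint32_t lowat = 128 * 1024;

    printf("96KB queued, poll path:   %d\n", stream_memory_free(96 * 1024, lowat, 0));
    printf("96KB queued, wakeup path: %d\n", stream_memory_free(96 * 1024, lowat, 1));
    printf("48KB queued, wakeup path: %d\n", stream_memory_free(48 * 1024, lowat, 1));
    return 0;
}
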
diff --git a/include/net/tls.h b/include/net/tls.h
index bab5627ff5e3..2a6ac8d642af 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -76,6 +76,10 @@
*
* void (*unhash)(struct tls_device *device, struct sock *sk);
* This function cleans listen state set by Inline TLS driver
+ *
+ * void (*release)(struct kref *kref);
+ * Release the registered device and allocated resources
+ * @kref: Number of references to tls_device
*/
struct tls_device {
char name[TLS_DEVICE_NAME_MAX];
@@ -83,6 +87,8 @@ struct tls_device {
int (*feature)(struct tls_device *device);
int (*hash)(struct tls_device *device, struct sock *sk);
void (*unhash)(struct tls_device *device, struct sock *sk);
+ void (*release)(struct kref *kref);
+ struct kref kref;
};
enum {
@@ -454,6 +460,15 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx)
return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}
+static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (!ctx)
+ return false;
+ return !!tls_sw_ctx_tx(ctx);
+}
+
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
diff --git a/include/net/udp.h b/include/net/udp.h
index 9e82cb391dea..fd6d948755c8 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -252,6 +252,17 @@ static inline int udp_rqueue_get(struct sock *sk)
return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}
+static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
@@ -272,7 +283,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
-void udp_err(struct sk_buff *, u32);
+int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
@@ -406,17 +417,24 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
} while(0)
#if IS_ENABLED(CONFIG_IPV6)
-#define __UDPX_INC_STATS(sk, field) \
-do { \
- if ((sk)->sk_family == AF_INET) \
- __UDP_INC_STATS(sock_net(sk), field, 0); \
- else \
- __UDP6_INC_STATS(sock_net(sk), field, 0); \
-} while (0)
+#define __UDPX_MIB(sk, ipv4) \
+({ \
+ ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+ sock_net(sk)->mib.udp_statistics) : \
+ (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
+ sock_net(sk)->mib.udp_stats_in6); \
+})
#else
-#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
+#define __UDPX_MIB(sk, ipv4) \
+({ \
+ IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+ sock_net(sk)->mib.udp_statistics; \
+})
#endif
+#define __UDPX_INC_STATS(sk, field) \
+ __SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
+
#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
sa_family_t family;
@@ -450,4 +468,26 @@ DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif
+static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+ struct sk_buff *skb, bool ipv4)
+{
+ struct sk_buff *segs;
+
+	/* the GSO CB lies after the UDP one, no need to save and restore any
+ * CB fragment
+ */
+ segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+ if (unlikely(IS_ERR_OR_NULL(segs))) {
+ int segs_nr = skb_shinfo(skb)->gso_segs;
+
+ atomic_add(segs_nr, &sk->sk_drops);
+ SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ consume_skb(skb);
+ return segs;
+}
+
#endif /* _UDP_H */
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index fe680ab6b15a..b8137953fea3 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -30,6 +30,7 @@ struct udp_port_cfg {
__be16 local_udp_port;
__be16 peer_udp_port;
+ int bind_ifindex;
unsigned int use_udp_checksums:1,
use_udp6_tx_checksums:1,
use_udp6_rx_checksums:1,
@@ -64,6 +65,8 @@ static inline int udp_sock_create(struct net *net,
}
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
+ struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
struct list_head *head,
@@ -76,6 +79,7 @@ struct udp_tunnel_sock_cfg {
/* Used for setting up udp_sock fields, see udp.h for details */
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
+ udp_tunnel_encap_err_lookup_t encap_err_lookup;
udp_tunnel_encap_destroy_t encap_destroy;
udp_tunnel_gro_receive_t gro_receive;
udp_tunnel_gro_complete_t gro_complete;
@@ -165,6 +169,12 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
static inline void udp_tunnel_encap_enable(struct socket *sock)
{
+ struct udp_sock *up = udp_sk(sock->sk);
+
+ if (up->encap_enabled)
+ return;
+
+ up->encap_enabled = 1;
#if IS_ENABLED(CONFIG_IPV6)
if (sock->sk->sk_family == PF_INET6)
ipv6_stub->udpv6_encap_enable();
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 03431c148e16..236403eb5ba6 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -216,6 +216,7 @@ struct vxlan_config {
unsigned long age_interval;
unsigned int addrmax;
bool no_share;
+ enum ifla_vxlan_df df;
};
struct vxlan_dev_node {
@@ -420,11 +421,16 @@ struct switchdev_notifier_vxlan_fdb_info {
u8 eth_addr[ETH_ALEN];
__be32 vni;
bool offloaded;
+ bool added_by_user;
};
#if IS_ENABLED(CONFIG_VXLAN)
int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
struct switchdev_notifier_vxlan_fdb_info *fdb_info);
+int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
+ struct notifier_block *nb);
+void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni);
+
#else
static inline int
vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
@@ -432,6 +438,17 @@ vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
{
return -ENOENT;
}
+
+static inline int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
+ struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
+{
+}
#endif
#endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 0eb390c205af..7298a53b9702 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -577,6 +577,7 @@ struct xfrm_policy {
/* This lock only affects elements except for entry. */
rwlock_t lock;
refcount_t refcnt;
+ u32 pos;
struct timer_list timer;
atomic_t genid;
@@ -589,6 +590,7 @@ struct xfrm_policy {
struct xfrm_lifetime_cur curlft;
struct xfrm_policy_walk_entry walk;
struct xfrm_policy_queue polq;
+ bool bydst_reinsert;
u8 type;
u8 action;
u8 flags;
@@ -596,6 +598,7 @@ struct xfrm_policy {
u16 family;
struct xfrm_sec_ctx *security;
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
+ struct hlist_node bydst_inexact_list;
struct rcu_head rcu;
};
@@ -1093,7 +1096,6 @@ struct xfrm_offload {
};
struct sec_path {
- refcount_t refcnt;
int len;
int olen;
@@ -1101,41 +1103,13 @@ struct sec_path {
struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH];
};
-static inline int secpath_exists(struct sk_buff *skb)
-{
-#ifdef CONFIG_XFRM
- return skb->sp != NULL;
-#else
- return 0;
-#endif
-}
-
-static inline struct sec_path *
-secpath_get(struct sec_path *sp)
-{
- if (sp)
- refcount_inc(&sp->refcnt);
- return sp;
-}
-
-void __secpath_destroy(struct sec_path *sp);
-
-static inline void
-secpath_put(struct sec_path *sp)
-{
- if (sp && refcount_dec_and_test(&sp->refcnt))
- __secpath_destroy(sp);
-}
-
-struct sec_path *secpath_dup(struct sec_path *src);
-int secpath_set(struct sk_buff *skb);
+struct sec_path *secpath_set(struct sk_buff *skb);
static inline void
secpath_reset(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
- secpath_put(skb->sp);
- skb->sp = NULL;
+ skb_ext_del(skb, SKB_EXT_SEC_PATH);
#endif
}
@@ -1191,7 +1165,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
if (sk && sk->sk_policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, ndir, skb, family);
- return (!net->xfrm.policy_count[dir] && !skb->sp) ||
+ return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
(skb_dst(skb)->flags & DST_NOPOLICY) ||
__xfrm_policy_check(sk, ndir, skb, family);
}
@@ -1552,6 +1526,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
int (*func)(struct xfrm_state *, int, void*), void *);
void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
struct xfrm_state *xfrm_state_alloc(struct net *net);
+void xfrm_state_free(struct xfrm_state *x);
struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
const struct flowi *fl,
@@ -1902,14 +1877,16 @@ static inline void xfrm_states_delete(struct xfrm_state **states, int n)
#ifdef CONFIG_XFRM
static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
- return skb->sp->xvec[skb->sp->len - 1];
+ struct sec_path *sp = skb_sec_path(skb);
+
+ return sp->xvec[sp->len - 1];
}
#endif
static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
- struct sec_path *sp = skb->sp;
+ struct sec_path *sp = skb_sec_path(skb);
if (!sp || !sp->olen || sp->len != sp->olen)
return NULL;
@@ -1967,7 +1944,7 @@ static inline void xfrm_dev_state_delete(struct xfrm_state *x)
static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
struct xfrm_state_offload *xso = &x->xso;
- struct net_device *dev = xso->dev;
+ struct net_device *dev = xso->dev;
if (dev && dev->xfrmdev_ops) {
if (dev->xfrmdev_ops->xdo_dev_state_free)
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index c891ada3c5c2..d85e6befa26b 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -61,6 +61,9 @@ struct scsi_pointer {
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
+/* for scmd->state */
+#define SCMD_STATE_COMPLETE 0
+
struct scsi_cmnd {
struct scsi_request req;
struct scsi_device *device;
@@ -145,6 +148,7 @@ struct scsi_cmnd {
int result; /* Status code from lower level driver */
int flags; /* Command flags */
+ unsigned long state; /* Command completion state */
unsigned char tag; /* SCSI-II queued command tag */
};
@@ -171,7 +175,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt);
-extern int scsi_init_io(struct scsi_cmnd *cmd);
+extern blk_status_t scsi_init_io(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_DMA
extern int scsi_dma_map(struct scsi_cmnd *cmd);
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index c7bba2b24849..a862dc23c68d 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -69,7 +69,7 @@ struct scsi_device_handler {
int (*attach)(struct scsi_device *);
void (*detach)(struct scsi_device *);
int (*activate)(struct scsi_device *, activate_complete, void *);
- int (*prep_fn)(struct scsi_device *, struct request *);
+ blk_status_t (*prep_fn)(struct scsi_device *, struct request *);
int (*set_params)(struct scsi_device *, const char *);
void (*rescan)(struct scsi_device *);
};
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index fae8b465233e..6dffa8555a39 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -2,6 +2,7 @@
#ifndef _SCSI_SCSI_DRIVER_H
#define _SCSI_SCSI_DRIVER_H
+#include <linux/blk_types.h>
#include <linux/device.h>
struct module;
@@ -13,7 +14,7 @@ struct scsi_driver {
struct device_driver gendrv;
void (*rescan)(struct device *);
- int (*init_command)(struct scsi_cmnd *);
+ blk_status_t (*init_command)(struct scsi_cmnd *);
void (*uninit_command)(struct scsi_cmnd *);
int (*done)(struct scsi_cmnd *);
int (*eh_action)(struct scsi_cmnd *, int);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7ba34a0ca8bf..6ca954e9f752 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -11,7 +11,6 @@
#include <linux/blk-mq.h>
#include <scsi/scsi.h>
-struct request_queue;
struct block_device;
struct completion;
struct module;
@@ -22,7 +21,6 @@ struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;
-struct blk_queue_tags;
/*
@@ -539,14 +537,8 @@ struct Scsi_Host {
struct scsi_host_template *hostt;
struct scsi_transport_template *transportt;
- /*
- * Area to keep a shared tag map (if needed, will be
- * NULL if not).
- */
- union {
- struct blk_queue_tag *bqt;
- struct blk_mq_tag_set tag_set;
- };
+ /* Area to keep a shared tag map */
+ struct blk_mq_tag_set tag_set;
atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
@@ -640,7 +632,6 @@ struct Scsi_Host {
/* The controller does not support WRITE SAME */
unsigned no_write_same:1;
- unsigned use_blk_mq:1;
unsigned use_cmd_list:1;
/* Host responded with short (<36 bytes) INQUIRY result */
@@ -734,11 +725,6 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
shost->tmf_in_progress;
}
-static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
-{
- return shost->use_blk_mq;
-}
-
extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index e192a0caa850..6053d46e794e 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -23,19 +23,15 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
int tag)
{
struct request *req = NULL;
+ u16 hwq;
if (tag == SCSI_NO_TAG)
return NULL;
- if (shost_use_blk_mq(shost)) {
- u16 hwq = blk_mq_unique_tag_to_hwq(tag);
-
- if (hwq < shost->tag_set.nr_hw_queues) {
- req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
- blk_mq_unique_tag_to_tag(tag));
- }
- } else {
- req = blk_map_queue_find_tag(shost->bqt, tag);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ if (hwq < shost->tag_set.nr_hw_queues) {
+ req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
+ blk_mq_unique_tag_to_tag(tag));
}
if (!req)
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index 70997ab2146c..3fbd71c27ba3 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -116,4 +116,8 @@ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
+int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
+ u32 *fcnt, u32 *bcnt);
+int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid,
+ u32 *num);
#endif /* __FSL_DPAA2_IO_H */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 56877660d5ba..5cc7af06c1ba 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1205,8 +1205,10 @@ void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh);
* qman_dqrr_set_ithresh - Set coalesce interrupt threshold
* @portal: portal to set the new value on
* @ithresh: new threshold value
+ *
+ * Returns 0 on success, or a negative error code.
*/
-void qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);
+int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);
/**
* qman_dqrr_get_iperiod - Get coalesce interrupt period
@@ -1219,7 +1221,9 @@ void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod);
* qman_dqrr_set_iperiod - Set coalesce interrupt period
* @portal: portal to set the new value on
* @ithresh: new period value
+ *
+ * Returns 0 on success, or a negative error code.
*/
-void qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);
+int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);
#endif /* __FSL_QMAN_H */
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 562426812ab2..bf761e68a7c7 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -26,11 +26,9 @@
struct clk;
struct reset_control;
-#ifdef CONFIG_SMP
bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
int tegra_pmc_cpu_power_on(unsigned int cpuid);
int tegra_pmc_cpu_remove_clamping(unsigned int cpuid);
-#endif /* CONFIG_SMP */
/*
* powergate and I/O rail APIs
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index ea8c93bbb0e0..0cdc3999ecfa 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -23,6 +23,7 @@ struct snd_compr_ops;
* struct snd_compr_runtime: runtime stream description
* @state: stream state
* @ops: pointer to DSP callbacks
+ * @dma_buffer_p: runtime dma buffer pointer
* @buffer: pointer to kernel buffer, valid only when not in mmap mode or
* DSP doesn't implement copy
* @buffer_size: size of the above buffer
@@ -37,6 +38,7 @@ struct snd_compr_ops;
struct snd_compr_runtime {
snd_pcm_state_t state;
struct snd_compr_ops *ops;
+ struct snd_dma_buffer *dma_buffer_p;
void *buffer;
u64 buffer_size;
u32 fragment_size;
@@ -175,6 +177,23 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
wake_up(&stream->runtime->sleep);
}
+/**
+ * snd_compr_set_runtime_buffer - Set the Compress runtime buffer
+ * @substream: compress substream to set
+ * @bufp: the buffer information, NULL to clear
+ *
+ * Copy the buffer information to runtime buffer when @bufp is non-NULL.
+ * Otherwise it clears the current buffer information.
+ */
+static inline void snd_compr_set_runtime_buffer(
+ struct snd_compr_stream *substream,
+ struct snd_dma_buffer *bufp)
+{
+ struct snd_compr_runtime *runtime = substream->runtime;
+
+ runtime->dma_buffer_p = bufp;
+}
+
int snd_compr_stop_error(struct snd_compr_stream *stream,
snd_pcm_state_t state);
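
snd_compr_set_runtime_buffer() above simply records the DMA buffer information in the compress runtime (or clears it when passed NULL). A hedged sketch of how a hypothetical compress driver might attach a buffer in set_params and detach it on free; struct foo_compr, its dev/dma_buf members and the assumption that private_data was set in open are all illustrative, only snd_compr_set_runtime_buffer() is taken from this header:

static int foo_compr_set_params(struct snd_compr_stream *stream,
                                struct snd_compr_params *params)
{
    struct foo_compr *foo = stream->runtime->private_data;   /* set in open() */
    size_t size = params->buffer.fragments * params->buffer.fragment_size;
    int ret;

    ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, foo->dev, size, &foo->dma_buf);
    if (ret < 0)
        return ret;

    snd_compr_set_runtime_buffer(stream, &foo->dma_buf);     /* publish to the core */
    return 0;
}

static int foo_compr_free(struct snd_compr_stream *stream)
{
    struct foo_compr *foo = stream->runtime->private_data;

    snd_compr_set_runtime_buffer(stream, NULL);               /* clear before freeing */
    snd_dma_free_pages(&foo->dma_buf);
    return 0;
}
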
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 0d98bb9068b1..7fa48b100936 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -236,6 +236,7 @@ struct hda_codec {
/* misc flags */
unsigned int in_freeing:1; /* being released */
unsigned int registered:1; /* codec was registered */
+ unsigned int display_power_control:1; /* needs display power */
unsigned int spdif_status_reset :1; /* needs to toggle SPDIF for each
* status change
* (e.g. Realtek codecs)
diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h
index 78626cde7081..2ec31b358950 100644
--- a/include/sound/hda_component.h
+++ b/include/sound/hda_component.h
@@ -5,10 +5,15 @@
#define __SOUND_HDA_COMPONENT_H
#include <drm/drm_audio_component.h>
+#include <sound/hdaudio.h>
+
+/* virtual idx for controller */
+#define HDA_CODEC_IDX_CONTROLLER HDA_MAX_CODECS
#ifdef CONFIG_SND_HDA_COMPONENT
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
-int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
+void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx,
+ bool enable);
int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
int dev_id, int rate);
int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
@@ -25,9 +30,9 @@ static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
{
return 0;
}
-static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+static inline void snd_hdac_display_power(struct hdac_bus *bus,
+ unsigned int idx, bool enable)
{
- return 0;
}
static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
hda_nid_t nid, int dev_id, int rate)
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index cd1773d0e08f..b4fa1c775251 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -79,7 +79,6 @@ struct hdac_device {
/* misc flags */
atomic_t in_pm; /* suspend/resume being performed */
- bool link_power_control:1;
/* sysfs */
struct hdac_widget_tree *widgets;
@@ -99,6 +98,12 @@ enum {
HDA_DEV_ASOC,
};
+enum {
+ SND_SKL_PCI_BIND_AUTO, /* automatic selection based on pci class */
+ SND_SKL_PCI_BIND_LEGACY,/* bind only with legacy driver */
+ SND_SKL_PCI_BIND_ASOC /* bind only with ASoC driver */
+};
+
/* direction */
enum {
HDA_INPUT, HDA_OUTPUT
@@ -237,8 +242,6 @@ struct hdac_bus_ops {
/* get a response from the last command */
int (*get_response)(struct hdac_bus *bus, unsigned int addr,
unsigned int *res);
- /* control the link power */
- int (*link_power)(struct hdac_bus *bus, bool enable);
};
/*
@@ -363,7 +366,8 @@ struct hdac_bus {
/* DRM component interface */
struct drm_audio_component *audio_component;
- int drm_power_refcount;
+ long display_power_status;
+ bool display_power_active;
/* parameters required for enhanced capabilities */
int num_streams;
@@ -389,6 +393,7 @@ void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex);
int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec);
void snd_hdac_bus_remove_device(struct hdac_bus *bus,
struct hdac_device *codec);
+void snd_hdac_bus_process_unsol_events(struct work_struct *work);
static inline void snd_hdac_codec_link_up(struct hdac_device *codec)
{
@@ -404,7 +409,6 @@ int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val);
int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
unsigned int *res);
int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus);
-int snd_hdac_link_power(struct hdac_device *codec, bool enable);
bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset);
void snd_hdac_bus_stop_chip(struct hdac_bus *bus);
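snd_hdac_display_power() now identifies the requester by codec index and returns void; the bus records which indices requested power (display_power_status) and whether a request is currently active. A minimal sketch of a controller-side request using the new HDA_CODEC_IDX_CONTROLLER virtual index, with the bus pointer assumed to come from the surrounding driver:

#include <sound/hdaudio.h>
#include <sound/hda_component.h>

static void example_probe_with_display_power(struct hdac_bus *bus)
{
	/* request display power on behalf of the controller */
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);

	/* ... probe codecs, read ELD data, etc. ... */

	/* drop the controller's request again */
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
}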
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 2dd37cada7c0..888a833d3b00 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -254,11 +254,13 @@ static inline int snd_interval_empty(const struct snd_interval *i)
static inline int snd_interval_single(const struct snd_interval *i)
{
return (i->min == i->max ||
- (i->min + 1 == i->max && i->openmax));
+ (i->min + 1 == i->max && (i->openmin || i->openmax)));
}
static inline int snd_interval_value(const struct snd_interval *i)
{
+ if (i->openmin && !i->openmax)
+ return i->max;
return i->min;
}
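The two changes above make half-open single-value intervals behave consistently: such an interval is now recognised as single-valued, and the reported value is the admissible bound rather than the excluded one. A small worked example with hypothetical values:

#include <linux/printk.h>
#include <sound/pcm_params.h>

/* The half-open interval (43, 44] contains exactly one integer, 44. */
static void example_interval_semantics(void)
{
	struct snd_interval i = {
		.min = 43, .max = 44,
		.openmin = 1, .openmax = 0,
	};

	if (snd_interval_single(&i))
		pr_info("value = %d\n", snd_interval_value(&i)); /* prints 44 */
}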
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index fb0318f9b10f..6d69ed2bd7b1 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -116,12 +116,12 @@ int asoc_simple_card_clean_reference(struct snd_soc_card *card);
void asoc_simple_card_convert_fixup(struct asoc_simple_card_data *data,
struct snd_pcm_hw_params *params);
-void asoc_simple_card_parse_convert(struct device *dev, char *prefix,
+void asoc_simple_card_parse_convert(struct device *dev,
+ struct device_node *np, char *prefix,
struct asoc_simple_card_data *data);
int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
- char *prefix,
- int optional);
+ char *prefix);
int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
char *prefix);
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index f48f59e5b7b0..bb5e1e4ce8bf 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -24,6 +24,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index e45b2330d16a..266e64e3c24c 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -38,6 +38,20 @@ struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
/**
+ * snd_soc_acpi_mach_params: interface for machine driver configuration
+ *
+ * @acpi_ipc_irq_index: used for BYT-CR detection
+ * @platform: string used for HDaudio codec support
+ * @codec_mask: used for HDAudio support
+ * @dmic_num: number of digital microphones
+ */
+struct snd_soc_acpi_mach_params {
+ u32 acpi_ipc_irq_index;
+ const char *platform;
+ u32 codec_mask;
+ u32 dmic_num;
+};
+
+/**
* snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
* related to the hardware, except for the firmware and topology file names.
* A platform supported by legacy and Sound Open Firmware (SOF) would expose
@@ -68,6 +82,7 @@ struct snd_soc_acpi_mach {
struct snd_soc_acpi_mach * (*machine_quirk)(void *arg);
const void *quirk_data;
void *pdata;
+ struct snd_soc_acpi_mach_params mach_params;
const char *sof_fw_filename;
const char *sof_tplg_filename;
const char *asoc_plat_name;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f1dab1f4b194..8ec1de856ee7 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -553,12 +553,12 @@ static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
}
#endif
-#ifdef CONFIG_SND_SOC_AC97_BUS
struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
unsigned int id, unsigned int id_mask);
void snd_soc_free_ac97_component(struct snd_ac97 *ac97);
+#ifdef CONFIG_SND_SOC_AC97_BUS
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
struct platform_device *pdev);
@@ -1192,7 +1192,7 @@ struct snd_soc_pcm_runtime {
((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
(i)++)
#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
- for (; ((i--) >= 0) && ((dai) = rtd->codec_dais[i]);)
+ for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);)
/* mixer control */
@@ -1477,10 +1477,20 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np,
unsigned int *rx_mask,
unsigned int *slots,
unsigned int *slot_width);
-void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+void snd_soc_of_parse_node_prefix(struct device_node *np,
struct snd_soc_codec_conf *codec_conf,
struct device_node *of_node,
const char *propname);
+static inline
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+ struct snd_soc_codec_conf *codec_conf,
+ struct device_node *of_node,
+ const char *propname)
+{
+ snd_soc_of_parse_node_prefix(card->dev->of_node,
+ codec_conf, of_node, propname);
+}
+
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname);
unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 2cbd6e42ad83..e4526f85c19d 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -221,9 +221,30 @@ DEFINE_EVENT(cache_set, bcache_journal_entry_full,
TP_ARGS(c)
);
-DEFINE_EVENT(bcache_bio, bcache_journal_write,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
+TRACE_EVENT(bcache_journal_write,
+ TP_PROTO(struct bio *bio, u32 keys),
+ TP_ARGS(bio, keys),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(sector_t, sector )
+ __field(unsigned int, nr_sector )
+ __array(char, rwbs, 6 )
+ __field(u32, nr_keys )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio_dev(bio);
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sector = bio->bi_iter.bi_size >> 9;
+ __entry->nr_keys = keys;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u keys %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __entry->nr_keys)
);
/* Btree */
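With the journal-write tracepoint converted from the shared bcache_bio class to a dedicated event, the caller now passes the number of keys in the journal set alongside the bio. A hedged sketch of the updated call site (the real caller lives in the bcache journal write path):

#include <linux/bio.h>
#include <trace/events/bcache.h>

static void example_journal_write(struct bio *bio, u32 nr_keys)
{
	/* the key count now travels with the trace event */
	trace_bcache_journal_write(bio, nr_keys);

	/* ... submit the bio as before ... */
}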
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 8568946f491d..2887503e4d12 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -92,7 +92,7 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
#define TP_STRUCT__entry_fsid __array(u8, fsid, BTRFS_FSID_SIZE)
#define TP_fast_assign_fsid(fs_info) \
- memcpy(__entry->fsid, fs_info->fsid, BTRFS_FSID_SIZE)
+ memcpy(__entry->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE)
#define TP_STRUCT__entry_btrfs(args...) \
TP_STRUCT__entry( \
@@ -1048,6 +1048,8 @@ TRACE_EVENT(btrfs_trigger_flush,
{ FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS"}, \
{ FLUSH_DELALLOC, "FLUSH_DELALLOC"}, \
{ FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT"}, \
+ { FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR"}, \
+ { FLUSH_DELAYED_REFS, "FLUSH_DELAYED_REFS"}, \
{ ALLOC_CHUNK, "ALLOC_CHUNK"}, \
{ COMMIT_TRANS, "COMMIT_TRANS"})
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 698e0d8a5ca4..d68e9e536814 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -226,6 +226,26 @@ TRACE_EVENT(ext4_drop_inode,
(unsigned long) __entry->ino, __entry->drop)
);
+TRACE_EVENT(ext4_nfs_commit_metadata,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("dev %d,%d ino %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino)
+);
+
TRACE_EVENT(ext4_mark_inode_dirty,
TP_PROTO(struct inode *inode, unsigned long IP),
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index 68b17c116907..fad7befa612d 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, fl_next)
+ __field(struct file_lock *, fl_blocker)
__field(fl_owner_t, fl_owner)
__field(unsigned int, fl_pid)
__field(unsigned int, fl_flags)
@@ -82,7 +82,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
__entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_next = fl ? fl->fl_next : NULL;
+ __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
__entry->fl_owner = fl ? fl->fl_owner : NULL;
__entry->fl_pid = fl ? fl->fl_pid : 0;
__entry->fl_flags = fl ? fl->fl_flags : 0;
@@ -92,9 +92,9 @@ DECLARE_EVENT_CLASS(filelock_lock,
__entry->ret = ret;
),
- TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
+ TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
- __entry->i_ino, __entry->fl_next, __entry->fl_owner,
+ __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
__entry->fl_pid, show_fl_flags(__entry->fl_flags),
show_fl_type(__entry->fl_type),
__entry->fl_start, __entry->fl_end, __entry->ret)
@@ -125,7 +125,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, fl_next)
+ __field(struct file_lock *, fl_blocker)
__field(fl_owner_t, fl_owner)
__field(unsigned int, fl_flags)
__field(unsigned char, fl_type)
@@ -137,7 +137,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
__entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_next = fl ? fl->fl_next : NULL;
+ __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
__entry->fl_owner = fl ? fl->fl_owner : NULL;
__entry->fl_flags = fl ? fl->fl_flags : 0;
__entry->fl_type = fl ? fl->fl_type : 0;
@@ -145,9 +145,9 @@ DECLARE_EVENT_CLASS(filelock_lease,
__entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
- TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
+ TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
- __entry->i_ino, __entry->fl_next, __entry->fl_owner,
+ __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
show_fl_flags(__entry->fl_flags),
show_fl_type(__entry->fl_type),
__entry->fl_break_time, __entry->fl_downgrade_time)
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
index a9834c37ac40..c0e7d24ca256 100644
--- a/include/trace/events/kyber.h
+++ b/include/trace/events/kyber.h
@@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency,
TP_fast_assign(
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, DOMAIN_LEN);
- strlcpy(__entry->type, type, DOMAIN_LEN);
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ strlcpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
__entry->numerator = numerator;
__entry->denominator = denominator;
@@ -60,7 +60,7 @@ TRACE_EVENT(kyber_adjust,
TP_fast_assign(
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
@@ -82,7 +82,7 @@ TRACE_EVENT(kyber_throttled,
TP_fast_assign(
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
),
TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 00aa72ce0e7c..1efd7d9b25fe 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -244,6 +244,65 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
TP_ARGS(skb)
);
+DECLARE_EVENT_CLASS(net_dev_rx_exit_template,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret),
+
+ TP_STRUCT__entry(
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->ret = ret;
+ ),
+
+ TP_printk("ret=%d", __entry->ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_frags_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_receive_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
#endif /* _TRACE_NET_H */
/* This part must be outside protection */
diff --git a/include/trace/events/objagg.h b/include/trace/events/objagg.h
new file mode 100644
index 000000000000..fcec0fc9eb0c
--- /dev/null
+++ b/include/trace/events/objagg.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM objagg
+
+#if !defined(__TRACE_OBJAGG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_OBJAGG_H
+
+#include <linux/tracepoint.h>
+
+struct objagg;
+struct objagg_obj;
+
+TRACE_EVENT(objagg_create,
+ TP_PROTO(const struct objagg *objagg),
+
+ TP_ARGS(objagg),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ ),
+
+ TP_printk("objagg %p", __entry->objagg)
+);
+
+TRACE_EVENT(objagg_destroy,
+ TP_PROTO(const struct objagg *objagg),
+
+ TP_ARGS(objagg),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ ),
+
+ TP_printk("objagg %p", __entry->objagg)
+);
+
+TRACE_EVENT(objagg_obj_create,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_destroy,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_get,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ unsigned int refcount),
+
+ TP_ARGS(objagg, obj, refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(unsigned int, refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->refcount = refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, refcount %u",
+ __entry->objagg, __entry->obj, __entry->refcount)
+);
+
+TRACE_EVENT(objagg_obj_put,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ unsigned int refcount),
+
+ TP_ARGS(objagg, obj, refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(unsigned int, refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->refcount = refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, refcount %u",
+ __entry->objagg, __entry->obj, __entry->refcount)
+);
+
+TRACE_EVENT(objagg_obj_parent_assign,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ const struct objagg_obj *parent,
+ unsigned int parent_refcount),
+
+ TP_ARGS(objagg, obj, parent, parent_refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(const void *, parent)
+ __field(unsigned int, parent_refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->parent = parent;
+ __entry->parent_refcount = parent_refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u",
+ __entry->objagg, __entry->obj,
+ __entry->parent, __entry->parent_refcount)
+);
+
+TRACE_EVENT(objagg_obj_parent_unassign,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ const struct objagg_obj *parent,
+ unsigned int parent_refcount),
+
+ TP_ARGS(objagg, obj, parent, parent_refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(const void *, parent)
+ __field(unsigned int, parent_refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->parent = parent;
+ __entry->parent_refcount = parent_refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u",
+ __entry->objagg, __entry->obj,
+ __entry->parent, __entry->parent_refcount)
+);
+
+TRACE_EVENT(objagg_obj_root_create,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p",
+ __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_root_destroy,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p",
+ __entry->objagg, __entry->obj)
+);
+
+#endif /* __TRACE_OBJAGG_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 573d5b901fb1..5b50fe4906d2 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -181,6 +181,7 @@ enum rxrpc_timer_trace {
enum rxrpc_propose_ack_trace {
rxrpc_propose_ack_client_tx_end,
rxrpc_propose_ack_input_data,
+ rxrpc_propose_ack_ping_for_check_life,
rxrpc_propose_ack_ping_for_keepalive,
rxrpc_propose_ack_ping_for_lost_ack,
rxrpc_propose_ack_ping_for_lost_reply,
@@ -380,6 +381,7 @@ enum rxrpc_tx_point {
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
+ EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index f07b270d4fc4..9a4bdfadab07 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -107,6 +107,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
+ unsigned int state;
+
#ifdef CONFIG_SCHED_DEBUG
BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
@@ -118,7 +120,15 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
if (preempt)
return TASK_REPORT_MAX;
- return 1 << task_state_index(p);
+ /*
+ * task_state_index() uses fls() and returns a value in the 0-8 range.
+ * Decrement it by 1 (except for the TASK_RUNNING state, i.e. 0) before
+ * using it in the left-shift operation to get the correct task->state
+ * mapping.
+ */
+ state = task_state_index(p);
+
+ return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */
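A short worked example of the corrected mapping, using the task-state values from include/linux/sched.h (TASK_RUNNING = 0, TASK_INTERRUPTIBLE = 0x1, TASK_UNINTERRUPTIBLE = 0x2):

	task_state_index() == 0  (TASK_RUNNING)         ->  reported as 0
	task_state_index() == 1  (TASK_INTERRUPTIBLE)   ->  1 << 0 == 0x1
	task_state_index() == 2  (TASK_UNINTERRUPTIBLE) ->  1 << 1 == 0x2

whereas the previous "1 << task_state_index(p)" reported 0x2 and 0x4 for the last two cases, i.e. one bit too high.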
diff --git a/include/uapi/asm-generic/Kbuild.asm b/include/uapi/asm-generic/Kbuild.asm
index 21381449d98a..355c4ac2c0b0 100644
--- a/include/uapi/asm-generic/Kbuild.asm
+++ b/include/uapi/asm-generic/Kbuild.asm
@@ -3,6 +3,7 @@
#
mandatory-y += auxvec.h
mandatory-y += bitsperlong.h
+mandatory-y += bpf_perf_event.h
mandatory-y += byteorder.h
mandatory-y += errno.h
mandatory-y += fcntl.h
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 538546edbfbd..d90127298f12 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -738,9 +738,11 @@ __SYSCALL(__NR_statx, sys_statx)
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
+#define __NR_kexec_file_load 294
+__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
#undef __NR_syscalls
-#define __NR_syscalls 294
+#define __NR_syscalls 295
/*
* 32 bit systems traditionally used different
@@ -760,8 +762,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_ftruncate __NR3264_ftruncate
#define __NR_lseek __NR3264_lseek
#define __NR_sendfile __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_newfstatat __NR3264_fstatat
#define __NR_fstat __NR3264_fstat
+#endif
#define __NR_mmap __NR3264_mmap
#define __NR_fadvise64 __NR3264_fadvise64
#ifdef __NR3264_stat
@@ -776,8 +780,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_ftruncate64 __NR3264_ftruncate
#define __NR_llseek __NR3264_lseek
#define __NR_sendfile64 __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_fstatat64 __NR3264_fstatat
#define __NR_fstat64 __NR3264_fstat
+#endif
#define __NR_mmap2 __NR3264_mmap
#define __NR_fadvise64_64 __NR3264_fadvise64
#ifdef __NR3264_stat
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 370e9a5536ef..be84e43c1e19 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -326,6 +326,12 @@ struct drm_amdgpu_gem_userptr {
/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
+#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
+#define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
+#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
+#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 0cd40ebfa1b1..0b44260a5ee9 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -151,6 +151,21 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * packed YCbCr420 2x2 tiled formats
+ * first 64 bits will contain Y,Cb,Cr components for a 2x2 tile
+ */
+/* [63:0] A3:A2:Y3:0:Cr0:0:Y2:0:A1:A0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_Y0L0 fourcc_code('Y', '0', 'L', '0')
+/* [63:0] X3:X2:Y3:0:Cr0:0:Y2:0:X1:X0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_X0L0 fourcc_code('X', '0', 'L', '0')
+
+/* [63:0] A3:A2:Y3:Cr0:Y2:A1:A0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_Y0L2 fourcc_code('Y', '0', 'L', '2')
+/* [63:0] X3:X2:Y3:Cr0:Y2:X1:X0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_X0L2 fourcc_code('X', '0', 'L', '2')
/*
* 2 plane RGB + A
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index d3e0fe31efc5..a439c2e67896 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -888,6 +888,25 @@ struct drm_mode_revoke_lease {
__u32 lessee_id;
};
+/**
+ * struct drm_mode_rect - Two dimensional rectangle.
+ * @x1: Horizontal starting coordinate (inclusive).
+ * @y1: Vertical starting coordinate (inclusive).
+ * @x2: Horizontal ending coordinate (exclusive).
+ * @y2: Vertical ending coordinate (exclusive).
+ *
+ * The DRM subsystem uses struct drm_rect internally to manage rectangular
+ * areas; this structure exports that rectangle type to user-space.
+ *
+ * Currently used by drm_mode_atomic blob property FB_DAMAGE_CLIPS.
+ */
+struct drm_mode_rect {
+ __s32 x1;
+ __s32 y1;
+ __s32 x2;
+ __s32 y2;
+};
+
#if defined(__cplusplus)
}
#endif
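A hedged user-space sketch of feeding damage rectangles to the FB_DAMAGE_CLIPS plane property that consumes this structure: the clips are packed into a property blob and attached to an atomic request. The drm fd, the atomic request, the plane id and the property id are assumed to have been looked up already, and a libdrm whose headers already carry struct drm_mode_rect is assumed.

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int example_set_damage(int fd, drmModeAtomicReq *req,
			      uint32_t plane_id, uint32_t damage_prop_id)
{
	/* two hypothetical damaged regions, in framebuffer coordinates */
	struct drm_mode_rect clips[2] = {
		{ .x1 = 0,   .y1 = 0,  .x2 = 256, .y2 = 64  },
		{ .x1 = 300, .y1 = 40, .x2 = 420, .y2 = 200 },
	};
	uint32_t blob_id;
	int ret;

	ret = drmModeCreatePropertyBlob(fd, clips, sizeof(clips), &blob_id);
	if (ret)
		return ret;

	ret = drmModeAtomicAddProperty(req, plane_id, damage_prop_id, blob_id);
	return ret < 0 ? ret : 0;
}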
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index a4446f452040..298b2e197744 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait {
int irq_seq;
} drm_i915_irq_wait_t;
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE 0
+#define I915_GEM_PPGTT_ALIASING 1
+#define I915_GEM_PPGTT_FULL 2
+
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index c06d0a5bdd80..91a16b333c69 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -105,14 +105,24 @@ struct drm_msm_gem_new {
__u32 handle; /* out */
};
-#define MSM_INFO_IOVA 0x01
-
-#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
+/* Get or set GEM buffer info. The requested value can be passed
+ * directly in 'value', or, for data larger than 64 bits, 'value' is a
+ * pointer to a userspace buffer, with 'len' specifying the number of
+ * bytes copied into that buffer. For info returned by pointer,
+ * calling the GEM_INFO ioctl with a null 'value' will return the
+ * required buffer size in 'len'.
+ */
+#define MSM_INFO_GET_OFFSET 0x00 /* get mmap() offset, returned by value */
+#define MSM_INFO_GET_IOVA 0x01 /* get iova, returned by value */
+#define MSM_INFO_SET_NAME 0x02 /* set the debug name (by pointer) */
+#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
struct drm_msm_gem_info {
__u32 handle; /* in */
- __u32 flags; /* in - combination of MSM_INFO_* flags */
- __u64 offset; /* out, mmap() offset or iova */
+ __u32 info; /* in - one of MSM_INFO_* */
+ __u64 value; /* in or out */
+ __u32 len; /* in or out */
+ __u32 pad;
};
#define MSM_PREP_READ 0x01
@@ -188,8 +198,11 @@ struct drm_msm_gem_submit_cmd {
*/
#define MSM_SUBMIT_BO_READ 0x0001
#define MSM_SUBMIT_BO_WRITE 0x0002
+#define MSM_SUBMIT_BO_DUMP 0x0004
-#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | \
+ MSM_SUBMIT_BO_WRITE | \
+ MSM_SUBMIT_BO_DUMP)
struct drm_msm_gem_submit_bo {
__u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
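A hedged user-space sketch of the reworked GEM_INFO interface: a small datum such as the iova comes back directly in 'value', while larger data such as the debug name is passed by pointer together with an explicit length. The fd and GEM handle are assumptions, and the msm_drm.h include path depends on the libdrm installation.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "msm_drm.h"

static int example_gem_info(int fd, uint32_t handle, uint64_t *iova)
{
	const char *name = "example-bo";	/* hypothetical debug name */
	struct drm_msm_gem_info req = {
		.handle = handle,
		.info = MSM_INFO_GET_IOVA,	/* returned by value */
	};
	int ret;

	ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
	if (ret)
		return ret;
	*iova = req.value;

	/* larger data goes through a user pointer plus an explicit length */
	memset(&req, 0, sizeof(req));
	req.handle = handle;
	req.info = MSM_INFO_SET_NAME;
	req.value = (uint64_t)(uintptr_t)name;
	req.len = strlen(name);

	return drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
}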
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 7b6627783608..35c7d813c66e 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -36,6 +36,7 @@ extern "C" {
#define DRM_V3D_MMAP_BO 0x03
#define DRM_V3D_GET_PARAM 0x04
#define DRM_V3D_GET_BO_OFFSET 0x05
+#define DRM_V3D_SUBMIT_TFU 0x06
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -43,6 +44,7 @@ extern "C" {
#define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
+#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -58,10 +60,15 @@ struct drm_v3d_submit_cl {
* coordinate shader to determine where primitives land on the screen,
* then writes out the state updates and draw calls necessary per tile
* to the tile allocation BO.
+ *
+ * This BCL will block on any previous BCL submitted on the
+ * same FD, but not on any RCL or BCLs submitted by other
+ * clients -- that is left up to the submitter to control
+ * using in_sync_bcl if necessary.
*/
__u32 bcl_start;
- /** End address of the BCL (first byte after the BCL) */
+ /** End address of the BCL (first byte after the BCL) */
__u32 bcl_end;
/* Offset of the render command list.
@@ -69,10 +76,15 @@ struct drm_v3d_submit_cl {
* This is the second set of commands executed, which will either
* execute the tiles that have been set up by the BCL, or a fixed set
* of tiles (in the case of RCL-only blits).
+ *
+ * This RCL will block on this submit's BCL, and any previous
+ * RCL submitted on the same FD, but not on any RCL or BCLs
+ * submitted by other clients -- that is left up to the
+ * submitter to control using in_sync_rcl if necessary.
*/
__u32 rcl_start;
- /** End address of the RCL (first byte after the RCL) */
+ /** End address of the RCL (first byte after the RCL) */
__u32 rcl_end;
/** An optional sync object to wait on before starting the BCL. */
@@ -169,6 +181,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_V3D_CORE0_IDENT0,
DRM_V3D_PARAM_V3D_CORE0_IDENT1,
DRM_V3D_PARAM_V3D_CORE0_IDENT2,
+ DRM_V3D_PARAM_SUPPORTS_TFU,
};
struct drm_v3d_get_param {
@@ -187,6 +200,28 @@ struct drm_v3d_get_bo_offset {
__u32 offset;
};
+struct drm_v3d_submit_tfu {
+ __u32 icfg;
+ __u32 iia;
+ __u32 iis;
+ __u32 ica;
+ __u32 iua;
+ __u32 ioa;
+ __u32 ios;
+ __u32 coef[4];
+ /* First handle is the output BO, following are other inputs.
+ * 0 for unused.
+ */
+ __u32 bo_handles[4];
+ /* sync object to block on before running the TFU job. Each TFU
+ * job will execute in the order submitted to its FD. Synchronization
+ * against rendering jobs requires using sync objects.
+ */
+ __u32 in_sync;
+ /* Sync object to signal when the TFU job is done. */
+ __u32 out_sync;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 9a781f0611df..f06a789f34cd 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -47,6 +47,13 @@ extern "C" {
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
+#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_FLAGS (\
+ VIRTGPU_EXECBUF_FENCE_FD_IN |\
+ VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ 0)
+
struct drm_virtgpu_map {
__u64 offset; /* use for mmap system call */
__u32 handle;
@@ -54,12 +61,12 @@ struct drm_virtgpu_map {
};
struct drm_virtgpu_execbuffer {
- __u32 flags; /* for future use */
+ __u32 flags;
__u32 size;
__u64 command; /* void* */
__u64 bo_handles;
__u32 num_bo_handles;
- __u32 pad;
+ __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -137,7 +144,7 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
struct drm_virtgpu_execbuffer)
#define DRM_IOCTL_VIRTGPU_GETPARAM \
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index ce43d340f010..8387e0af0f76 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -50,6 +50,8 @@ enum {
*
* IOCB_FLAG_RESFD - Set if the "aio_resfd" member of the "struct iocb"
* is valid.
+ * IOCB_FLAG_IOPRIO - Set if the "aio_reqprio" member of the "struct iocb"
+ * is valid.
*/
#define IOCB_FLAG_RESFD (1 << 0)
#define IOCB_FLAG_IOPRIO (1 << 1)
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 8f08ff9bdea0..6fa38d001d84 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -141,7 +141,7 @@ struct blk_zone_range {
*/
#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
-#define BLKGETZONESZ _IOW(0x12, 132, __u32)
-#define BLKGETNRZONES _IOW(0x12, 133, __u32)
+#define BLKGETZONESZ _IOR(0x12, 132, __u32)
+#define BLKGETNRZONES _IOR(0x12, 133, __u32)
#endif /* _UAPI_BLKZONED_H */
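With the direction of these two ioctls corrected to _IOR (the kernel writes the result back to user space), a minimal user-space sketch of querying the zone geometry of a zoned block device; the device path is an assumption.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	__u32 zone_sectors = 0, nr_zones = 0;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* hypothetical device */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKGETZONESZ, &zone_sectors) ||
	    ioctl(fd, BLKGETNRZONES, &nr_zones)) {
		close(fd);
		return 1;
	}
	printf("zone size: %u sectors, zones: %u\n", zone_sectors, nr_zones);
	close(fd);
	return 0;
}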
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 852dc17ab47a..91c43884f295 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -133,6 +133,14 @@ enum bpf_map_type {
BPF_MAP_TYPE_STACK,
};
+/* Note that tracing related programs such as
+ * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
+ * are not subject to a stable API since kernel internal data
+ * structures can change from release to release and may
+ * therefore break existing tracing BPF programs. Tracing BPF
+ * programs correspond to /a/ specific kernel which is to be
+ * analyzed, and not /a/ specific kernel /and/ all future ones.
+ */
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
@@ -232,6 +240,20 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
+/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
+ * verifier will allow any alignment whatsoever. On platforms
+ * with strict alignment requirements for loads and stores (such
+ * as sparc and mips) the verifier validates that all loads and
+ * stores provably follow this requirement. This flag turns that
+ * checking and enforcement off.
+ *
+ * It is mostly used for testing when we want to validate the
+ * context and memory access aspects of the verifier, but because
+ * of an unaligned access the alignment check would trigger before
+ * the one we are interested in.
+ */
+#define BPF_F_ANY_ALIGNMENT (1U << 1)
+
/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
@@ -257,9 +279,6 @@ enum bpf_attach_type {
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE (1U << 2)
-/* flags for BPF_PROG_QUERY */
-#define BPF_F_QUERY_EFFECTIVE (1U << 0)
-
#define BPF_OBJ_NAME_LEN 16U
/* Flags for accessing BPF object */
@@ -269,6 +288,12 @@ enum bpf_attach_type {
/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID (1U << 5)
+/* Zero-initialize hash function seed. This should only be used for testing. */
+#define BPF_F_ZERO_SEED (1U << 6)
+
+/* flags for BPF_PROG_QUERY */
+#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+
enum bpf_stack_build_id_status {
/* user space need an empty entry to identify end of a trace */
BPF_STACK_BUILD_ID_EMPTY = 0,
@@ -326,7 +351,7 @@ union bpf_attr {
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
- __u32 kern_version; /* checked when prog_type=kprobe */
+ __u32 kern_version; /* not used */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_ifindex; /* ifindex of netdev to prep for */
@@ -335,6 +360,13 @@ union bpf_attr {
* (context accesses, allowed helpers, etc).
*/
__u32 expected_attach_type;
+ __u32 prog_btf_fd; /* fd pointing to BTF type data */
+ __u32 func_info_rec_size; /* userspace bpf_func_info size */
+ __aligned_u64 func_info; /* func info */
+ __u32 func_info_cnt; /* number of bpf_func_info records */
+ __u32 line_info_rec_size; /* userspace bpf_line_info size */
+ __aligned_u64 line_info; /* line info */
+ __u32 line_info_cnt; /* number of bpf_line_info records */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -353,8 +385,11 @@ union bpf_attr {
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
__u32 prog_fd;
__u32 retval;
- __u32 data_size_in;
- __u32 data_size_out;
+ __u32 data_size_in; /* input: len of data_in */
+ __u32 data_size_out; /* input/output: len of data_out;
+ * returns ENOSPC if data_out
+ * is too small.
+ */
__aligned_u64 data_in;
__aligned_u64 data_out;
__u32 repeat;
@@ -475,18 +510,6 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_pop_elem(struct bpf_map *map, void *value)
- * Description
- * Pop an element from *map*.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
- * int bpf_map_peek_elem(struct bpf_map *map, void *value)
- * Description
- * Get an element from *map* without removing it.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@@ -1910,9 +1933,9 @@ union bpf_attr {
* is set to metric from route (IPv4/IPv6 only), and ifindex
* is set to the device index of the nexthop from the FIB lookup.
*
- * *plen* argument is the size of the passed in struct.
- * *flags* argument can be a combination of one or more of the
- * following values:
+ * *plen* argument is the size of the passed in struct.
+ * *flags* argument can be a combination of one or more of the
+ * following values:
*
* **BPF_FIB_LOOKUP_DIRECT**
* Do a direct table lookup vs full lookup using FIB
@@ -1921,9 +1944,9 @@ union bpf_attr {
* Perform lookup from an egress perspective (default is
* ingress).
*
- * *ctx* is either **struct xdp_md** for XDP programs or
- * **struct sk_buff** tc cls_act programs.
- * Return
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** for tc cls_act programs.
+ * Return
* * < 0 if any input argument is invalid
* * 0 on success (packet is forwarded, nexthop neighbor exists)
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
@@ -2068,8 +2091,8 @@ union bpf_attr {
* translated to a keycode using the rc keymap, and reported as
* an input key down event. After a period a key up event is
* generated. This period can be extended by calling either
- * **bpf_rc_keydown** () again with the same values, or calling
- * **bpf_rc_repeat** ().
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
*
* Some protocols include a toggle bit, in case the button was
* released and pressed again between consecutive scancodes.
@@ -2152,29 +2175,30 @@ union bpf_attr {
* The *flags* meaning is specific for each map type,
* and has to be 0 for cgroup local storage.
*
- * Depending on the bpf program type, a local storage area
- * can be shared between multiple instances of the bpf program,
+ * Depending on the BPF program type, a local storage area
+ * can be shared between multiple instances of the BPF program,
* running simultaneously.
*
* A user should care about the synchronization by himself.
- * For example, by using the BPF_STX_XADD instruction to alter
+ * For example, by using the **BPF_STX_XADD** instruction to alter
* the shared data.
* Return
- * Pointer to the local storage area.
+ * A pointer to the local storage area.
*
* int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
* Description
- * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map
- * It checks the selected sk is matching the incoming
- * request in the skb.
+ * Select a **SO_REUSEPORT** socket from a
+ * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
+ * It checks the selected socket is matching the incoming
+ * request in the socket buffer.
* Return
* 0 on success, or a negative error in case of failure.
*
- * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
- * and if non-NULL, released via **bpf_sk_release**\ ().
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
*
* The *ctx* should point to the context of the program, such as
* the skb or socket (depending on the hook in use). This is used
@@ -2187,12 +2211,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
- * If the *netns* is zero, then the socket lookup table in the
- * netns associated with the *ctx* will be used. For the TC hooks,
- * this in the netns of the device in the skb. For socket hooks,
- * this in the netns of the socket. If *netns* is non-zero, then
- * it specifies the ID of the netns relative to the netns
- * associated with the *ctx*.
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2200,13 +2226,15 @@ union bpf_attr {
* This helper is available only if the kernel was compiled with
* **CONFIG_NET** configuration option.
* Return
- * Pointer to *struct bpf_sock*, or NULL in case of failure.
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from **reuse->socks**\ [] using the hash of the tuple.
*
- * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for UDP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
- * and if non-NULL, released via **bpf_sk_release**\ ().
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
*
* The *ctx* should point to the context of the program, such as
* the skb or socket (depending on the hook in use). This is used
@@ -2219,12 +2247,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
- * If the *netns* is zero, then the socket lookup table in the
- * netns associated with the *ctx* will be used. For the TC hooks,
- * this in the netns of the device in the skb. For socket hooks,
- * this in the netns of the socket. If *netns* is non-zero, then
- * it specifies the ID of the netns relative to the netns
- * associated with the *ctx*.
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2232,31 +2262,71 @@ union bpf_attr {
* This helper is available only if the kernel was compiled with
* **CONFIG_NET** configuration option.
* Return
- * Pointer to *struct bpf_sock*, or NULL in case of failure.
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from **reuse->socks**\ [] using the hash of the tuple.
*
- * int bpf_sk_release(struct bpf_sock *sk)
+ * int bpf_sk_release(struct bpf_sock *sock)
* Description
- * Release the reference held by *sock*. *sock* must be a non-NULL
- * pointer that was returned from bpf_sk_lookup_xxx\ ().
+ * Release the reference held by *sock*. *sock* must be a
+ * non-**NULL** pointer that was returned from
+ * **bpf_sk_lookup_xxx**\ ().
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * Description
+ * Pop an element from *map*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * Description
+ * Get an element from *map* without removing it.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags)
* Description
- * For socket policies, insert *len* bytes into msg at offset
+ * For socket policies, insert *len* bytes into *msg* at offset
* *start*.
*
* If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
- * *msg* it may want to insert metadata or options into the msg.
+ * *msg* it may want to insert metadata or options into the *msg*.
* This can later be read and used by any of the lower layer BPF
* hooks.
*
* This helper may fail if under memory pressure (a malloc
* fails) in these cases BPF programs will get an appropriate
* error and BPF programs will need to handle them.
+ * Return
+ * 0 on success, or a negative error in case of failure.
*
+ * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags)
+ * Description
+ * Will remove *pop* bytes from a *msg* starting at byte *start*.
+ * This may result in **ENOMEM** errors under certain situations if
+ * an allocation and copy are required due to a full ring buffer.
+ * However, the helper will try to avoid doing the allocation
+ * if possible. Other errors can occur if input parameters are
+ * invalid, either due to the *start* byte not being a valid part of the
+ * *msg* payload and/or the *pop* value being too large.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+ * Description
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded pointer movement.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ * Return
+ * 0
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2349,7 +2419,9 @@ union bpf_attr {
FN(map_push_elem), \
FN(map_pop_elem), \
FN(map_peek_elem), \
- FN(msg_push_data),
+ FN(msg_push_data), \
+ FN(msg_pop_data), \
+ FN(rc_pointer_rel),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2405,6 +2477,9 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
+/* Current network namespace */
+#define BPF_F_CURRENT_NETNS (-1L)
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
@@ -2422,6 +2497,12 @@ enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6_INLINE
};
+#define __bpf_md_ptr(type, name) \
+union { \
+ type name; \
+ __u64 :64; \
+} __attribute__((aligned(8)))
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -2456,7 +2537,9 @@ struct __sk_buff {
/* ... here. */
__u32 data_meta;
- struct bpf_flow_keys *flow_keys;
+ __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
+ __u64 tstamp;
+ __u32 wire_len;
};
struct bpf_tunnel_key {
@@ -2572,8 +2655,8 @@ enum sk_action {
* be added to the end of this structure
*/
struct sk_msg_md {
- void *data;
- void *data_end;
+ __bpf_md_ptr(void *, data);
+ __bpf_md_ptr(void *, data_end);
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -2582,6 +2665,7 @@ struct sk_msg_md {
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
+ __u32 size; /* Total size of sk_msg */
};
struct sk_reuseport_md {
@@ -2589,8 +2673,9 @@ struct sk_reuseport_md {
* Start of directly accessible data. It begins from
* the tcp/udp header.
*/
- void *data;
- void *data_end; /* End of directly accessible data */
+ __bpf_md_ptr(void *, data);
+ /* End of directly accessible data */
+ __bpf_md_ptr(void *, data_end);
/*
* Total length of packet (starting from the tcp/udp header).
* Note that the directly accessible bytes (data_end - data)
@@ -2631,6 +2716,18 @@ struct bpf_prog_info {
__u32 nr_jited_func_lens;
__aligned_u64 jited_ksyms;
__aligned_u64 jited_func_lens;
+ __u32 btf_id;
+ __u32 func_info_rec_size;
+ __aligned_u64 func_info;
+ __u32 nr_func_info;
+ __u32 nr_line_info;
+ __aligned_u64 line_info;
+ __aligned_u64 jited_line_info;
+ __u32 nr_jited_line_info;
+ __u32 line_info_rec_size;
+ __u32 jited_line_info_rec_size;
+ __u32 nr_prog_tags;
+ __aligned_u64 prog_tags;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -2942,4 +3039,19 @@ struct bpf_flow_keys {
};
};
+struct bpf_func_info {
+ __u32 insn_off;
+ __u32 type_id;
+};
+
+#define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
+#define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
+
+struct bpf_line_info {
+ __u32 insn_off;
+ __u32 file_name_off;
+ __u32 line_off;
+ __u32 line_col;
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
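A minimal sketch of splitting the packed line_col word of struct bpf_line_info with the new helper macros (line number in the upper bits, column in the low 10). The record value here is made up; real records would come from the func/line info exposed through BPF_OBJ_GET_INFO_BY_FD, and a uapi header new enough to carry these definitions is assumed.

#include <stdio.h>
#include <linux/bpf.h>

int main(void)
{
	struct bpf_line_info li = { .line_col = (42 << 10) | 7 };

	printf("line %u col %u\n",
	       BPF_LINE_INFO_LINE_NUM(li.line_col),
	       BPF_LINE_INFO_LINE_COL(li.line_col));
	return 0;	/* prints "line 42 col 7" */
}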
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 972265f32871..7b7475ef2f17 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -34,13 +34,16 @@ struct btf_type {
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
* bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-31: unused
+ * bits 28-30: unused
+ * bit 31: kind_flag, currently used by
+ * struct, union and fwd
*/
__u32 info;
/* "size" is used by INT, ENUM, STRUCT and UNION.
* "size" tells the size of the type it is describing.
*
- * "type" is used by PTR, TYPEDEF, VOLATILE, CONST and RESTRICT.
+ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+ * FUNC and FUNC_PROTO.
* "type" is a type_id referring to another type.
*/
union {
@@ -51,6 +54,7 @@ struct btf_type {
#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
+#define BTF_INFO_KFLAG(info) ((info) >> 31)
#define BTF_KIND_UNKN 0 /* Unknown */
#define BTF_KIND_INT 1 /* Integer */
@@ -64,8 +68,10 @@ struct btf_type {
#define BTF_KIND_VOLATILE 9 /* Volatile */
#define BTF_KIND_CONST 10 /* Const */
#define BTF_KIND_RESTRICT 11 /* Restrict */
-#define BTF_KIND_MAX 11
-#define NR_BTF_KINDS 12
+#define BTF_KIND_FUNC 12 /* Function */
+#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
+#define BTF_KIND_MAX 13
+#define NR_BTF_KINDS 14
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
@@ -107,7 +113,29 @@ struct btf_array {
struct btf_member {
__u32 name_off;
__u32 type;
- __u32 offset; /* offset in bits */
+ /* If the type info kind_flag is set, the btf_member offset
+ * contains both member bitfield size and bit offset. The
+ * bitfield size is set for bitfield members. If the type
+ * info kind_flag is not set, the offset contains only bit
+ * offset.
+ */
+ __u32 offset;
+};
+
+/* If the struct/union type info kind_flag is set, the
+ * following two macros are used to access bitfield_size
+ * and bit_offset from btf_member.offset.
+ */
+#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
+#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
+
+/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
+ * The exact number of btf_param is stored in the vlen (of the
+ * info in "struct btf_type").
+ */
+struct btf_param {
+ __u32 name_off;
+ __u32 type;
};
#endif /* _UAPI__LINUX_BTF_H__ */
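A minimal sketch of decoding the offset word of a btf_member when the enclosing struct or union has kind_flag set: the bitfield size sits in the top 8 bits and the bit offset in the low 24. The value is made up; real members would come from a parsed .BTF section, and a uapi header carrying these macros is assumed.

#include <stdio.h>
#include <linux/btf.h>

int main(void)
{
	struct btf_member m = { .offset = (3u << 24) | 65 };

	printf("bitfield size %u, bit offset %u\n",
	       BTF_MEMBER_BITFIELD_SIZE(m.offset),
	       BTF_MEMBER_BIT_OFFSET(m.offset));
	return 0;	/* prints "bitfield size 3, bit offset 65" */
}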
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 5ca1d21fc4a7..e0763bc4158e 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -269,6 +269,7 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7)
#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8)
#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9)
+#define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index aff1356c2bb8..e974f4bb5378 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -458,6 +458,7 @@ struct btrfs_free_space_header {
#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34)
#define BTRFS_SUPER_FLAG_CHANGING_FSID (1ULL << 35)
+#define BTRFS_SUPER_FLAG_CHANGING_FSID_V2 (1ULL << 36)
/*
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 6dafbc3e4414..4dc1603919ce 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -76,45 +76,69 @@ struct crypto_user_alg {
__u32 cru_flags;
};
-struct crypto_stat {
- char type[CRYPTO_MAX_NAME];
- union {
- __u32 stat_encrypt_cnt;
- __u32 stat_compress_cnt;
- __u32 stat_generate_cnt;
- __u32 stat_hash_cnt;
- __u32 stat_setsecret_cnt;
- };
- union {
- __u64 stat_encrypt_tlen;
- __u64 stat_compress_tlen;
- __u64 stat_generate_tlen;
- __u64 stat_hash_tlen;
- };
- union {
- __u32 stat_akcipher_err_cnt;
- __u32 stat_cipher_err_cnt;
- __u32 stat_compress_err_cnt;
- __u32 stat_aead_err_cnt;
- __u32 stat_hash_err_cnt;
- __u32 stat_rng_err_cnt;
- __u32 stat_kpp_err_cnt;
- };
- union {
- __u32 stat_decrypt_cnt;
- __u32 stat_decompress_cnt;
- __u32 stat_seed_cnt;
- __u32 stat_generate_public_key_cnt;
- };
- union {
- __u64 stat_decrypt_tlen;
- __u64 stat_decompress_tlen;
- };
- union {
- __u32 stat_verify_cnt;
- __u32 stat_compute_shared_secret_cnt;
- };
- __u32 stat_sign_cnt;
+struct crypto_stat_aead {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_encrypt_cnt;
+ __u64 stat_encrypt_tlen;
+ __u64 stat_decrypt_cnt;
+ __u64 stat_decrypt_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_akcipher {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_encrypt_cnt;
+ __u64 stat_encrypt_tlen;
+ __u64 stat_decrypt_cnt;
+ __u64 stat_decrypt_tlen;
+ __u64 stat_verify_cnt;
+ __u64 stat_sign_cnt;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_cipher {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_encrypt_cnt;
+ __u64 stat_encrypt_tlen;
+ __u64 stat_decrypt_cnt;
+ __u64 stat_decrypt_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_compress {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_compress_cnt;
+ __u64 stat_compress_tlen;
+ __u64 stat_decompress_cnt;
+ __u64 stat_decompress_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_hash {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_hash_cnt;
+ __u64 stat_hash_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_kpp {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_setsecret_cnt;
+ __u64 stat_generate_public_key_cnt;
+ __u64 stat_compute_shared_secret_cnt;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_rng {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_generate_cnt;
+ __u64 stat_generate_tlen;
+ __u64 stat_seed_cnt;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_larval {
+ char type[CRYPTO_MAX_NAME];
};
struct crypto_report_larval {
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 79407bbd296d..6e52d3660654 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -163,6 +163,11 @@ enum devlink_param_cmode {
DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1
};
+enum devlink_param_fw_load_policy_value {
+ DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER,
+ DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH,
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index c5358e0ae7c5..e4d6ddd93567 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -420,10 +420,12 @@ typedef struct elf64_shdr {
#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
+#define NT_ARM_PAC_MASK 0x406 /* ARM pointer authentication code masks */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
+#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index c8f8e2455bf3..17be76aeb468 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -882,7 +882,7 @@ struct ethtool_rx_flow_spec {
__u32 location;
};
-/* How rings are layed out when accessing virtual functions or
+/* How rings are laid out when accessing virtual functions or
* offloaded queues is device specific. To allow users to do flow
* steering and specify these queues the ring cookie is partitioned
* into a 32bit queue index with an 8 bit virtual function id.
@@ -891,7 +891,7 @@ struct ethtool_rx_flow_spec {
* devices start supporting PCIe w/ARI. However at the moment I
* do not know of any devices that support this so I do not reserve
* space for this at this time. If a future patch consumes the next
- * byte it should be aware of this possiblity.
+ * byte it should be aware of this possibility.
*/
#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL
#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index b86740d1c50a..909c98fcace2 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -10,11 +10,13 @@
#define FAN_CLOSE_WRITE 0x00000008 /* Writtable file closed */
#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
#define FAN_OPEN 0x00000020 /* File was opened */
+#define FAN_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
+#define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */
#define FAN_ONDIR 0x40000000 /* event occurred against dir */
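A hedged userspace sketch of the new exec-open event: it marks the mount containing "/" (an arbitrary choice for illustration) and asks only for FAN_OPEN_EXEC notifications; the event-reading loop is omitted:

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int main(void)
{
	int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}
	/* Report every open-for-exec on the mount containing "/" */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
			  FAN_OPEN_EXEC, AT_FDCWD, "/") < 0) {
		perror("fanotify_mark");
		return 1;
	}
	/* ... read(fd, ...) would now return fanotify_event_metadata records ... */
	return 0;
}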
diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h
index eea5d02c58de..74a8609fcb4d 100644
--- a/include/uapi/linux/hash_info.h
+++ b/include/uapi/linux/hash_info.h
@@ -33,6 +33,8 @@ enum hash_algo {
HASH_ALGO_TGR_160,
HASH_ALGO_TGR_192,
HASH_ALGO_SM3_256,
+ HASH_ALGO_STREEBOG_256,
+ HASH_ALGO_STREEBOG_512,
HASH_ALGO__LAST
};
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index e41eda3c71f1..773e476a8e54 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -292,4 +292,25 @@ struct br_mcast_stats {
__u64 mcast_bytes[BR_MCAST_DIR_SIZE];
__u64 mcast_packets[BR_MCAST_DIR_SIZE];
};
+
+/* bridge boolean options
+ * BR_BOOLOPT_NO_LL_LEARN - disable learning from link-local packets
+ *
+ * IMPORTANT: if adding a new option do not forget to handle
+ * it in br_boolopt_toggle/get and bridge sysfs
+ */
+enum br_boolopt_id {
+ BR_BOOLOPT_NO_LL_LEARN,
+ BR_BOOLOPT_MAX
+};
+
+/* struct br_boolopt_multi - change multiple bridge boolean options
+ *
+ * @optval: new option values (bit per option)
+ * @optmask: options to change (bit per option)
+ */
+struct br_boolopt_multi {
+ __u32 optval;
+ __u32 optmask;
+};
#endif /* _UAPI_LINUX_IF_BRIDGE_H */
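A small sketch of how the new multi-option structure is meant to be filled from userspace; the netlink plumbing (sending it as IFLA_BR_MULTI_BOOLOPT in an RTM_NEWLINK request, e.g. via libnl or iproute2) is assumed and omitted here:

#include <stdio.h>
#include <linux/if_bridge.h>

int main(void)
{
	/* optmask selects which options the kernel should touch and
	 * optval carries their new values; here we enable only
	 * BR_BOOLOPT_NO_LL_LEARN and leave everything else alone. */
	struct br_boolopt_multi bm = {
		.optval  = 1u << BR_BOOLOPT_NO_LL_LEARN,
		.optmask = 1u << BR_BOOLOPT_NO_LL_LEARN,
	};

	printf("optval=%#x optmask=%#x\n", bm.optval, bm.optmask);
	return 0;
}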
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 1debfa42cba1..d6533828123a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -288,6 +288,7 @@ enum {
IFLA_BR_MCAST_IGMP_VERSION,
IFLA_BR_MCAST_MLD_VERSION,
IFLA_BR_VLAN_STATS_PER_PORT,
+ IFLA_BR_MULTI_BOOLOPT,
__IFLA_BR_MAX,
};
@@ -533,6 +534,7 @@ enum {
IFLA_VXLAN_LABEL,
IFLA_VXLAN_GPE,
IFLA_VXLAN_TTL_INHERIT,
+ IFLA_VXLAN_DF,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -542,6 +544,14 @@ struct ifla_vxlan_port_range {
__be16 high;
};
+enum ifla_vxlan_df {
+ VXLAN_DF_UNSET = 0,
+ VXLAN_DF_SET,
+ VXLAN_DF_INHERIT,
+ __VXLAN_DF_END,
+ VXLAN_DF_MAX = __VXLAN_DF_END - 1,
+};
+
/* GENEVE section */
enum {
IFLA_GENEVE_UNSPEC,
@@ -557,10 +567,19 @@ enum {
IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
IFLA_GENEVE_LABEL,
IFLA_GENEVE_TTL_INHERIT,
+ IFLA_GENEVE_DF,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+enum ifla_geneve_df {
+ GENEVE_DF_UNSET = 0,
+ GENEVE_DF_SET,
+ GENEVE_DF_INHERIT,
+ __GENEVE_DF_END,
+ GENEVE_DF_MAX = __GENEVE_DF_END - 1,
+};
+
/* PPP section */
enum {
IFLA_PPP_UNSPEC,
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index ee432cd3018c..23a6753b37df 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -59,6 +59,7 @@
#define TUNGETVNETBE _IOR('T', 223, int)
#define TUNSETSTEERINGEBPF _IOR('T', 224, int)
#define TUNSETFILTEREBPF _IOR('T', 225, int)
+#define TUNSETCARRIER _IOW('T', 226, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
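A hedged sketch of the new carrier ioctl: it attaches to a hypothetical "tap0" device and reports "no carrier" to the stack; the device name is only an example and error handling is minimal:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	int carrier = 0;	/* 0 = no carrier, 1 = carrier on */
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0) {
		perror("open /dev/net/tun");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
	    ioctl(fd, TUNSETCARRIER, &carrier) < 0) {
		perror("ioctl");
		return 1;
	}
	close(fd);
	return 0;
}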
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 1b3d148c4560..7d9105533c7b 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -160,4 +160,24 @@ enum {
};
#define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1)
+
+#define TUNNEL_CSUM __cpu_to_be16(0x01)
+#define TUNNEL_ROUTING __cpu_to_be16(0x02)
+#define TUNNEL_KEY __cpu_to_be16(0x04)
+#define TUNNEL_SEQ __cpu_to_be16(0x08)
+#define TUNNEL_STRICT __cpu_to_be16(0x10)
+#define TUNNEL_REC __cpu_to_be16(0x20)
+#define TUNNEL_VERSION __cpu_to_be16(0x40)
+#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
+#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
+#define TUNNEL_OAM __cpu_to_be16(0x0200)
+#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
+#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
+#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
+#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
+#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
+
+#define TUNNEL_OPTIONS_PRESENT \
+ (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
+
#endif /* _UAPI_IF_TUNNEL_H_ */
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 48e8a225b985..f6052e70bf40 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -266,10 +266,14 @@ struct sockaddr_in {
#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
#define IN_MULTICAST(a) IN_CLASSD(a)
-#define IN_MULTICAST_NET 0xF0000000
+#define IN_MULTICAST_NET 0xe0000000
-#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
-#define IN_BADCLASS(a) IN_EXPERIMENTAL((a))
+#define IN_BADCLASS(a) (((long int) (a)) == 0xffffffff)
+#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
+
+#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+#define IN_CLASSE_NET 0xffffffff
+#define IN_CLASSE_NSHIFT 0
/* Address to accept any incoming messages. */
#define INADDR_ANY ((unsigned long int) 0x00000000)
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 6d180cc60a5d..ae366b87426a 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -716,7 +716,6 @@
* the situation described above.
*/
#define REL_RESERVED 0x0a
-#define REL_WHEEL_HI_RES 0x0b
#define REL_MAX 0x0f
#define REL_CNT (REL_MAX+1)
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index f5ff8a76e208..e622fd1fbd46 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -83,11 +83,11 @@ struct kfd_ioctl_set_cu_mask_args {
};
struct kfd_ioctl_get_queue_wave_state_args {
- uint64_t ctl_stack_address; /* to KFD */
- uint32_t ctl_stack_used_size; /* from KFD */
- uint32_t save_area_used_size; /* from KFD */
- uint32_t queue_id; /* to KFD */
- uint32_t pad;
+ __u64 ctl_stack_address; /* to KFD */
+ __u32 ctl_stack_used_size; /* from KFD */
+ __u32 save_area_used_size; /* from KFD */
+ __u32 queue_id; /* to KFD */
+ __u32 pad;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -255,10 +255,10 @@ struct kfd_hsa_memory_exception_data {
/* hw exception data */
struct kfd_hsa_hw_exception_data {
- uint32_t reset_type;
- uint32_t reset_cause;
- uint32_t memory_lost;
- uint32_t gpu_id;
+ __u32 reset_type;
+ __u32 reset_cause;
+ __u32 memory_lost;
+ __u32 gpu_id;
};
/* Event data */
@@ -398,6 +398,24 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
__u32 n_success; /* to/from KFD */
};
+struct kfd_ioctl_get_dmabuf_info_args {
+ __u64 size; /* from KFD */
+ __u64 metadata_ptr; /* to KFD */
+ __u32 metadata_size; /* to KFD (space allocated by user)
+ * from KFD (actual metadata size)
+ */
+ __u32 gpu_id; /* from KFD */
+ __u32 flags; /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
+ __u32 dmabuf_fd; /* to KFD */
+};
+
+struct kfd_ioctl_import_dmabuf_args {
+ __u64 va_addr; /* to KFD */
+ __u64 handle; /* from KFD */
+ __u32 gpu_id; /* to KFD */
+ __u32 dmabuf_fd; /* to KFD */
+};
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -486,7 +504,13 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
+#define AMDKFD_IOC_GET_DMABUF_INFO \
+ AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
+
+#define AMDKFD_IOC_IMPORT_DMABUF \
+ AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x1C
+#define AMDKFD_COMMAND_END 0x1E
#endif
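A hedged sketch of the new dma-buf query: the file descriptor used for dmabuf_fd is purely illustrative, and no metadata buffer is supplied so only the fixed-size fields come back:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_dmabuf_info_args args = {
		.dmabuf_fd = 3,		/* hypothetical dma-buf fd */
		.metadata_ptr = 0,	/* no metadata buffer supplied */
		.metadata_size = 0,
	};
	int kfd = open("/dev/kfd", O_RDWR);

	if (kfd < 0 || ioctl(kfd, AMDKFD_IOC_GET_DMABUF_INFO, &args) < 0) {
		perror("AMDKFD_IOC_GET_DMABUF_INFO");
		return 1;
	}
	printf("size=%llu gpu_id=%u flags=%#x\n",
	       (unsigned long long)args.size, args.gpu_id, args.flags);
	return 0;
}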
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 2b7a652c9fa4..6d4ea4b6c922 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -492,6 +492,17 @@ struct kvm_dirty_log {
};
};
+/* for KVM_CLEAR_DIRTY_LOG */
+struct kvm_clear_dirty_log {
+ __u32 slot;
+ __u32 num_pages;
+ __u64 first_page;
+ union {
+ void __user *dirty_bitmap; /* one bit per page */
+ __u64 padding2;
+ };
+};
+
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
@@ -975,6 +986,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
#define KVM_CAP_EXCEPTION_PAYLOAD 164
#define KVM_CAP_ARM_VM_IPA_SIZE 165
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166
+#define KVM_CAP_HYPERV_CPUID 167
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1421,6 +1434,12 @@ struct kvm_enc_region {
#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
+/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */
+#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
+
+/* Available with KVM_CAP_HYPERV_CPUID */
+#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
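A minimal sketch of how a VMM might use the new ioctl, assuming it already enabled KVM_CAP_MANUAL_DIRTY_LOG_PROTECT on the VM and fetched the slot's bitmap with KVM_GET_DIRTY_LOG; the slot number and page count are placeholders:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Re-protect the first 4096 pages of memslot 0 so they start logging
 * dirty state again; "bitmap" is the copy obtained from KVM_GET_DIRTY_LOG. */
static int clear_dirty(int vm_fd, void *bitmap)
{
	struct kvm_clear_dirty_log clr = {
		.slot = 0,
		.num_pages = 4096,
		.first_page = 0,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clr);
}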
diff --git a/include/uapi/linux/ncsi.h b/include/uapi/linux/ncsi.h
index 0a26a5576645..a3f87c54fdb3 100644
--- a/include/uapi/linux/ncsi.h
+++ b/include/uapi/linux/ncsi.h
@@ -26,6 +26,12 @@
* @NCSI_CMD_SEND_CMD: send NC-SI command to network card.
* Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID
* and NCSI_ATTR_CHANNEL_ID.
+ * @NCSI_CMD_SET_PACKAGE_MASK: set a whitelist of allowed packages.
+ * Requires NCSI_ATTR_IFINDEX and NCSI_ATTR_PACKAGE_MASK.
+ * @NCSI_CMD_SET_CHANNEL_MASK: set a whitelist of allowed channels.
+ * Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID, and
+ * NCSI_ATTR_CHANNEL_MASK. If NCSI_ATTR_CHANNEL_ID is present it sets
+ * the primary channel.
* @NCSI_CMD_MAX: highest command number
*/
enum ncsi_nl_commands {
@@ -34,6 +40,8 @@ enum ncsi_nl_commands {
NCSI_CMD_SET_INTERFACE,
NCSI_CMD_CLEAR_INTERFACE,
NCSI_CMD_SEND_CMD,
+ NCSI_CMD_SET_PACKAGE_MASK,
+ NCSI_CMD_SET_CHANNEL_MASK,
__NCSI_CMD_AFTER_LAST,
NCSI_CMD_MAX = __NCSI_CMD_AFTER_LAST - 1
@@ -48,6 +56,10 @@ enum ncsi_nl_commands {
* @NCSI_ATTR_PACKAGE_ID: package ID
* @NCSI_ATTR_CHANNEL_ID: channel ID
* @NCSI_ATTR_DATA: command payload
+ * @NCSI_ATTR_MULTI_FLAG: flag to signal that multi-mode should be enabled with
+ * NCSI_CMD_SET_PACKAGE_MASK or NCSI_CMD_SET_CHANNEL_MASK.
+ * @NCSI_ATTR_PACKAGE_MASK: 32-bit mask of allowed packages.
+ * @NCSI_ATTR_CHANNEL_MASK: 32-bit mask of allowed channels.
* @NCSI_ATTR_MAX: highest attribute number
*/
enum ncsi_nl_attrs {
@@ -57,6 +69,9 @@ enum ncsi_nl_attrs {
NCSI_ATTR_PACKAGE_ID,
NCSI_ATTR_CHANNEL_ID,
NCSI_ATTR_DATA,
+ NCSI_ATTR_MULTI_FLAG,
+ NCSI_ATTR_PACKAGE_MASK,
+ NCSI_ATTR_CHANNEL_MASK,
__NCSI_ATTR_AFTER_LAST,
NCSI_ATTR_MAX = __NCSI_ATTR_AFTER_LAST - 1
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 998155444e0d..cd144e3099a3 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -28,6 +28,7 @@ enum {
NDA_MASTER,
NDA_LINK_NETNSID,
NDA_SRC_VNI,
+ NDA_PROTOCOL, /* Originator of entry */
__NDA_MAX
};
diff --git a/include/uapi/linux/net_namespace.h b/include/uapi/linux/net_namespace.h
index 0187c74d8889..9f9956809565 100644
--- a/include/uapi/linux/net_namespace.h
+++ b/include/uapi/linux/net_namespace.h
@@ -16,6 +16,8 @@ enum {
NETNSA_NSID,
NETNSA_PID,
NETNSA_FD,
+ NETNSA_TARGET_NSID,
+ NETNSA_CURRENT_NSID,
__NETNSA_MAX,
};
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 97ff3c17ec4d..e5b39721c6e4 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -155,8 +155,8 @@ enum txtime_flags {
};
struct sock_txtime {
- clockid_t clockid; /* reference clockid */
- __u32 flags; /* as defined by enum txtime_flags */
+ __kernel_clockid_t clockid;/* reference clockid */
+ __u32 flags; /* as defined by enum txtime_flags */
};
#endif /* _NET_TIMESTAMPING_H */
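A hedged example of the corrected structure in use; it assumes a libc that already exposes SO_TXTIME and CLOCK_TAI, and only sets up the socket option (the per-packet SCM_TXTIME timestamps passed with sendmsg() are not shown):

#include <stdio.h>
#include <time.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct sock_txtime cfg = {
		.clockid = CLOCK_TAI,			/* reference clock */
		.flags   = SOF_TXTIME_REPORT_ERRORS,	/* report drops on the error queue */
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg)) < 0) {
		perror("SO_TXTIME");
		return 1;
	}
	return 0;
}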
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index cca10e767cd8..ca9e63d6e0e4 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -34,10 +34,6 @@
/* only for userspace compatibility */
#ifndef __KERNEL__
-/* Generic cache responses from hook functions.
- <= 0x2000 is used for protocol-flags. */
-#define NFC_UNKNOWN 0x4000
-#define NFC_ALTERED 0x8000
/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */
#define NF_VERDICT_BITS 16
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 60236f694143..ea69ca21ff23 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -13,8 +13,9 @@
#include <linux/types.h>
-/* The protocol version */
-#define IPSET_PROTOCOL 6
+/* The protocol versions */
+#define IPSET_PROTOCOL 7
+#define IPSET_PROTOCOL_MIN 6
/* The max length of strings including NUL: set and type identifiers */
#define IPSET_MAXNAMELEN 32
@@ -38,17 +39,19 @@ enum ipset_cmd {
IPSET_CMD_TEST, /* 11: Test an element in a set */
IPSET_CMD_HEADER, /* 12: Get set header data only */
IPSET_CMD_TYPE, /* 13: Get set type */
+ IPSET_CMD_GET_BYNAME, /* 14: Get set index by name */
+ IPSET_CMD_GET_BYINDEX, /* 15: Get set name by index */
IPSET_MSG_MAX, /* Netlink message commands */
/* Commands in userspace: */
- IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */
- IPSET_CMD_HELP, /* 15: Get help */
- IPSET_CMD_VERSION, /* 16: Get program version */
- IPSET_CMD_QUIT, /* 17: Quit from interactive mode */
+ IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 16: Enter restore mode */
+ IPSET_CMD_HELP, /* 17: Get help */
+ IPSET_CMD_VERSION, /* 18: Get program version */
+ IPSET_CMD_QUIT, /* 19: Quit from interactive mode */
IPSET_CMD_MAX,
- IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */
+ IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 20: Commit buffered commands */
};
/* Attributes at command level */
@@ -66,6 +69,7 @@ enum {
IPSET_ATTR_LINENO, /* 9: Restore lineno */
IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */
IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */
+ IPSET_ATTR_INDEX, /* 11: Kernel index of set */
__IPSET_ATTR_CMD_MAX,
};
#define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1)
@@ -223,6 +227,7 @@ enum ipset_adt {
/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
* and IPSET_INVALID_ID if you want to increase the max number of sets.
+ * Also, IPSET_ATTR_INDEX must be changed.
*/
typedef __u16 ip_set_id_t;
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 579974b0bf0d..7de4f1bdaf06 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1635,8 +1635,8 @@ enum nft_ng_attributes {
NFTA_NG_MODULUS,
NFTA_NG_TYPE,
NFTA_NG_OFFSET,
- NFTA_NG_SET_NAME,
- NFTA_NG_SET_ID,
+ NFTA_NG_SET_NAME, /* deprecated */
+ NFTA_NG_SET_ID, /* deprecated */
__NFTA_NG_MAX
};
#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 156ccd089df1..1610fdbab98d 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -11,6 +11,10 @@
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
+#ifndef __KERNEL__
+#include <limits.h> /* for INT_MIN, INT_MAX */
+#endif
+
/* Bridge Hooks */
/* After promisc drops, checksum checks. */
#define NF_BR_PRE_ROUTING 0
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
index 61f1c7dfd033..3c77f54560f2 100644
--- a/include/uapi/linux/netfilter_decnet.h
+++ b/include/uapi/linux/netfilter_decnet.h
@@ -15,16 +15,6 @@
#include <limits.h> /* for INT_MIN, INT_MAX */
-/* IP Cache bits. */
-/* Src IP address. */
-#define NFC_DN_SRC 0x0001
-/* Dest IP address. */
-#define NFC_DN_DST 0x0002
-/* Input device. */
-#define NFC_DN_IF_IN 0x0004
-/* Output device. */
-#define NFC_DN_IF_OUT 0x0008
-
/* kernel define is in netfilter_defs.h */
#define NF_DN_NUMHOOKS 7
#endif /* ! __KERNEL__ */
diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h
index c3b060775e13..155e77d6a42d 100644
--- a/include/uapi/linux/netfilter_ipv4.h
+++ b/include/uapi/linux/netfilter_ipv4.h
@@ -13,34 +13,6 @@
#include <limits.h> /* for INT_MIN, INT_MAX */
-/* IP Cache bits. */
-/* Src IP address. */
-#define NFC_IP_SRC 0x0001
-/* Dest IP address. */
-#define NFC_IP_DST 0x0002
-/* Input device. */
-#define NFC_IP_IF_IN 0x0004
-/* Output device. */
-#define NFC_IP_IF_OUT 0x0008
-/* TOS. */
-#define NFC_IP_TOS 0x0010
-/* Protocol. */
-#define NFC_IP_PROTO 0x0020
-/* IP options. */
-#define NFC_IP_OPTIONS 0x0040
-/* Frag & flags. */
-#define NFC_IP_FRAG 0x0080
-
-/* Per-protocol information: only matters if proto match. */
-/* TCP flags. */
-#define NFC_IP_TCPFLAGS 0x0100
-/* Source port. */
-#define NFC_IP_SRC_PT 0x0200
-/* Dest port. */
-#define NFC_IP_DST_PT 0x0400
-/* Something else about the proto */
-#define NFC_IP_PROTO_UNKNOWN 0x2000
-
/* IP Hooks */
/* After promisc drops, checksum checks. */
#define NF_IP_PRE_ROUTING 0
diff --git a/include/uapi/linux/netfilter_ipv6.h b/include/uapi/linux/netfilter_ipv6.h
index dc624fd24d25..80aa9b0799af 100644
--- a/include/uapi/linux/netfilter_ipv6.h
+++ b/include/uapi/linux/netfilter_ipv6.h
@@ -16,35 +16,6 @@
#include <limits.h> /* for INT_MIN, INT_MAX */
-/* IP Cache bits. */
-/* Src IP address. */
-#define NFC_IP6_SRC 0x0001
-/* Dest IP address. */
-#define NFC_IP6_DST 0x0002
-/* Input device. */
-#define NFC_IP6_IF_IN 0x0004
-/* Output device. */
-#define NFC_IP6_IF_OUT 0x0008
-/* TOS. */
-#define NFC_IP6_TOS 0x0010
-/* Protocol. */
-#define NFC_IP6_PROTO 0x0020
-/* IP options. */
-#define NFC_IP6_OPTIONS 0x0040
-/* Frag & flags. */
-#define NFC_IP6_FRAG 0x0080
-
-
-/* Per-protocol information: only matters if proto match. */
-/* TCP flags. */
-#define NFC_IP6_TCPFLAGS 0x0100
-/* Source port. */
-#define NFC_IP6_SRC_PT 0x0200
-/* Dest port. */
-#define NFC_IP6_DST_PT 0x0400
-/* Something else about the proto */
-#define NFC_IP6_PROTO_UNKNOWN 0x2000
-
/* IP6 Hooks */
/* After promisc drops, checksum checks. */
#define NF_IP6_PRE_ROUTING 0
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 486ed1f0c0bc..0a4d73317759 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -155,7 +155,7 @@ enum nlmsgerr_attrs {
#define NETLINK_LIST_MEMBERSHIPS 9
#define NETLINK_CAP_ACK 10
#define NETLINK_EXT_ACK 11
-#define NETLINK_DUMP_STRICT_CHK 12
+#define NETLINK_GET_STRICT_CHK 12
struct nl_pktinfo {
__u32 group;
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 6d610bae30a9..31ae5c7f10e3 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1036,6 +1036,35 @@
* @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in
* the %NL80211_ATTR_FTM_RESPONDER_STATS attribute.
*
+ * @NL80211_CMD_PEER_MEASUREMENT_START: start a (set of) peer measurement(s)
+ * with the given parameters, which are encapsulated in the nested
+ * %NL80211_ATTR_PEER_MEASUREMENTS attribute. Optionally, MAC address
+ * randomization may be enabled and configured by specifying the
+ * %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes.
+ * If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute.
+ * A u64 cookie for further %NL80211_ATTR_COOKIE use is returned in
+ * the netlink extended ack message.
+ *
+ * To cancel a measurement, close the socket that requested it.
+ *
+ * Measurement results are reported to the socket that requested the
+ * measurement using @NL80211_CMD_PEER_MEASUREMENT_RESULT when they
+ * become available, so applications must ensure a large enough socket
+ * buffer size.
+ *
+ * Depending on driver support it may or may not be possible to start
+ * multiple concurrent measurements.
+ * @NL80211_CMD_PEER_MEASUREMENT_RESULT: This command number is used for the
+ * result notification from the driver to the requesting socket.
+ * @NL80211_CMD_PEER_MEASUREMENT_COMPLETE: Notification only, indicating that
+ * the measurement completed, using the measurement cookie
+ * (%NL80211_ATTR_COOKIE).
+ *
+ * @NL80211_CMD_NOTIFY_RADAR: Notify the kernel that a radar signal was
+ * detected and reported by a neighboring device on the channel
+ * indicated by %NL80211_ATTR_WIPHY_FREQ and other attributes
+ * determining the width and type.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1250,6 +1279,12 @@ enum nl80211_commands {
NL80211_CMD_GET_FTM_RESPONDER_STATS,
+ NL80211_CMD_PEER_MEASUREMENT_START,
+ NL80211_CMD_PEER_MEASUREMENT_RESULT,
+ NL80211_CMD_PEER_MEASUREMENT_COMPLETE,
+
+ NL80211_CMD_NOTIFY_RADAR,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1706,7 +1741,7 @@ enum nl80211_commands {
* the values passed in @NL80211_ATTR_SCAN_SSIDS (eg. if an SSID
* is included in the probe request, but the match attributes
* will never let it go through), -EINVAL may be returned.
- * If ommited, no filtering is done.
+ * If omitted, no filtering is done.
*
* @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported
* interface combinations. In each nested item, it contains attributes
@@ -1811,7 +1846,7 @@ enum nl80211_commands {
*
* @NL80211_ATTR_INACTIVITY_TIMEOUT: timeout value in seconds, this can be
* used by the drivers which has MLME in firmware and does not have support
- * to report per station tx/rx activity to free up the staion entry from
+ * to report per station tx/rx activity to free up the station entry from
* the list. This needs to be used when the driver advertises the
* capability to timeout the stations.
*
@@ -2172,7 +2207,7 @@ enum nl80211_commands {
*
* @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present the RSSI level for BSSs in
* the specified band is to be adjusted before doing
- * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparision to figure out
+ * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out
* better BSSs. The attribute value is a packed structure
* value as specified by &struct nl80211_bss_select_rssi_adjust.
*
@@ -2254,6 +2289,16 @@ enum nl80211_commands {
* @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder
* statistics, see &enum nl80211_ftm_responder_stats.
*
+ * @NL80211_ATTR_TIMEOUT: Timeout for the given operation in milliseconds (u32),
+ * if the attribute is not given no timeout is requested. Note that 0 is an
+ * invalid value.
+ *
+ * @NL80211_ATTR_PEER_MEASUREMENTS: peer measurements request (and result)
+ * data, uses nested attributes specified in
+ * &enum nl80211_peer_measurement_attrs.
+ * This is also used for capability advertisement in the wiphy information,
+ * with the appropriate sub-attributes.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2699,6 +2744,10 @@ enum nl80211_attrs {
NL80211_ATTR_FTM_RESPONDER_STATS,
+ NL80211_ATTR_TIMEOUT,
+
+ NL80211_ATTR_PEER_MEASUREMENTS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3074,6 +3123,8 @@ enum nl80211_sta_bss_param {
* with an FCS error (u32, from this station). This count may not include
* some packets with an FCS error due to TA corruption. Hence this counter
* might not be fully accurate.
+ * @NL80211_STA_INFO_CONNECTED_TO_GATE: set to true if STA has a path to a
+ * mesh gate (u8, 0 or 1)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -3116,6 +3167,7 @@ enum nl80211_sta_info {
NL80211_STA_INFO_ACK_SIGNAL_AVG,
NL80211_STA_INFO_RX_MPDUS,
NL80211_STA_INFO_FCS_ERROR_COUNT,
+ NL80211_STA_INFO_CONNECTED_TO_GATE,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -3895,6 +3947,11 @@ enum nl80211_mesh_power_mode {
* remove it from the STA's list of peers. You may set this to 0 to disable
* the removal of the STA. Default is 30 minutes.
*
+ * @NL80211_MESHCONF_CONNECTED_TO_GATE: If set to true then this mesh STA
+ * will advertise that it is connected to a gate in the mesh formation
+ * field. If left unset then the mesh formation field will only
+ * advertise such if there is an active root mesh path.
+ *
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
*/
enum nl80211_meshconf_params {
@@ -3927,6 +3984,7 @@ enum nl80211_meshconf_params {
NL80211_MESHCONF_POWER_MODE,
NL80211_MESHCONF_AWAKE_WINDOW,
NL80211_MESHCONF_PLINK_TIMEOUT,
+ NL80211_MESHCONF_CONNECTED_TO_GATE,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -4859,7 +4917,7 @@ enum nl80211_iface_limit_attrs {
* numbers = [ #{STA} <= 1, #{P2P-client,P2P-GO} <= 3 ], max = 4
* => allows a STA plus three P2P interfaces
*
- * The list of these four possiblities could completely be contained
+ * The list of these four possibilities could completely be contained
* within the %NL80211_ATTR_INTERFACE_COMBINATIONS attribute to indicate
* that any of these groups must match.
*
@@ -4889,7 +4947,7 @@ enum nl80211_if_combination_attrs {
* enum nl80211_plink_state - state of a mesh peer link finite state machine
*
* @NL80211_PLINK_LISTEN: initial state, considered the implicit
- * state of non existant mesh peer links
+ * state of non existent mesh peer links
* @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to
* this mesh peer
* @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received
@@ -5381,7 +5439,7 @@ enum nl80211_timeout_reason {
* request parameters IE in the probe request
* @NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe responses
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE: send probe request frames at
- * rate of at least 5.5M. In case non OCE AP is dicovered in the channel,
+ * rate of at least 5.5M. In case non OCE AP is discovered in the channel,
* only the first probe req in the channel will be sent in high rate.
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: allow probe request
* tx deferral (dot11FILSProbeDelay shall be set to 15ms)
@@ -5842,9 +5900,11 @@ enum nl80211_external_auth_action {
* @__NL80211_FTM_RESP_ATTR_INVALID: Invalid
* @NL80211_FTM_RESP_ATTR_ENABLED: FTM responder is enabled
* @NL80211_FTM_RESP_ATTR_LCI: The content of Measurement Report Element
- * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10)
+ * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10),
+ * i.e. starting with the measurement token
* @NL80211_FTM_RESP_ATTR_CIVIC: The content of Measurement Report Element
- * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13)
+ * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13),
+ * i.e. starting with the measurement token
* @__NL80211_FTM_RESP_ATTR_LAST: Internal
* @NL80211_FTM_RESP_ATTR_MAX: highest FTM responder attribute.
*/
@@ -5906,4 +5966,386 @@ enum nl80211_ftm_responder_stats {
NL80211_FTM_STATS_MAX = __NL80211_FTM_STATS_AFTER_LAST - 1
};
+/**
+ * enum nl80211_preamble - frame preamble types
+ * @NL80211_PREAMBLE_LEGACY: legacy (HR/DSSS, OFDM, ERP PHY) preamble
+ * @NL80211_PREAMBLE_HT: HT preamble
+ * @NL80211_PREAMBLE_VHT: VHT preamble
+ * @NL80211_PREAMBLE_DMG: DMG preamble
+ */
+enum nl80211_preamble {
+ NL80211_PREAMBLE_LEGACY,
+ NL80211_PREAMBLE_HT,
+ NL80211_PREAMBLE_VHT,
+ NL80211_PREAMBLE_DMG,
+};
+
+/**
+ * enum nl80211_peer_measurement_type - peer measurement types
+ * @NL80211_PMSR_TYPE_INVALID: invalid/unused, needed as we use
+ * these numbers also for attributes
+ *
+ * @NL80211_PMSR_TYPE_FTM: flight time measurement
+ *
+ * @NUM_NL80211_PMSR_TYPES: internal
+ * @NL80211_PMSR_TYPE_MAX: highest type number
+ */
+enum nl80211_peer_measurement_type {
+ NL80211_PMSR_TYPE_INVALID,
+
+ NL80211_PMSR_TYPE_FTM,
+
+ NUM_NL80211_PMSR_TYPES,
+ NL80211_PMSR_TYPE_MAX = NUM_NL80211_PMSR_TYPES - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_status - peer measurement status
+ * @NL80211_PMSR_STATUS_SUCCESS: measurement completed successfully
+ * @NL80211_PMSR_STATUS_REFUSED: measurement was locally refused
+ * @NL80211_PMSR_STATUS_TIMEOUT: measurement timed out
+ * @NL80211_PMSR_STATUS_FAILURE: measurement failed, a type-dependent
+ * reason may be available in the response data
+ */
+enum nl80211_peer_measurement_status {
+ NL80211_PMSR_STATUS_SUCCESS,
+ NL80211_PMSR_STATUS_REFUSED,
+ NL80211_PMSR_STATUS_TIMEOUT,
+ NL80211_PMSR_STATUS_FAILURE,
+};
+
+/**
+ * enum nl80211_peer_measurement_req - peer measurement request attributes
+ * @__NL80211_PMSR_REQ_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_REQ_ATTR_DATA: This is a nested attribute with measurement
+ * type-specific request data inside. The attributes used are from the
+ * enums named nl80211_peer_measurement_<type>_req.
+ * @NL80211_PMSR_REQ_ATTR_GET_AP_TSF: include AP TSF timestamp, if supported
+ * (flag attribute)
+ *
+ * @NUM_NL80211_PMSR_REQ_ATTRS: internal
+ * @NL80211_PMSR_REQ_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_req {
+ __NL80211_PMSR_REQ_ATTR_INVALID,
+
+ NL80211_PMSR_REQ_ATTR_DATA,
+ NL80211_PMSR_REQ_ATTR_GET_AP_TSF,
+
+ /* keep last */
+ NUM_NL80211_PMSR_REQ_ATTRS,
+ NL80211_PMSR_REQ_ATTR_MAX = NUM_NL80211_PMSR_REQ_ATTRS - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_resp - peer measurement response attributes
+ * @__NL80211_PMSR_RESP_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_RESP_ATTR_DATA: This is a nested attribute with measurement
+ * type-specific results inside. The attributes used are from the enums
+ * named nl80211_peer_measurement_<type>_resp.
+ * @NL80211_PMSR_RESP_ATTR_STATUS: u32 value with the measurement status
+ * (using values from &enum nl80211_peer_measurement_status.)
+ * @NL80211_PMSR_RESP_ATTR_HOST_TIME: host time (%CLOCK_BOOTTIME) when the
+ * result was measured; this value is not expected to be accurate to
+ * more than 20ms. (u64, nanoseconds)
+ * @NL80211_PMSR_RESP_ATTR_AP_TSF: TSF of the AP that the interface
+ * doing the measurement is connected to when the result was measured.
+ * This shall be accurately reported if supported and requested
+ * (u64, usec)
+ * @NL80211_PMSR_RESP_ATTR_FINAL: If results are sent to the host partially
+ * (*e.g. with FTM per-burst data) this flag will be cleared on all but
+ * the last result; if all results are combined it's set on the single
+ * result.
+ * @NL80211_PMSR_RESP_ATTR_PAD: padding for 64-bit attributes, ignore
+ *
+ * @NUM_NL80211_PMSR_RESP_ATTRS: internal
+ * @NL80211_PMSR_RESP_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_resp {
+ __NL80211_PMSR_RESP_ATTR_INVALID,
+
+ NL80211_PMSR_RESP_ATTR_DATA,
+ NL80211_PMSR_RESP_ATTR_STATUS,
+ NL80211_PMSR_RESP_ATTR_HOST_TIME,
+ NL80211_PMSR_RESP_ATTR_AP_TSF,
+ NL80211_PMSR_RESP_ATTR_FINAL,
+ NL80211_PMSR_RESP_ATTR_PAD,
+
+ /* keep last */
+ NUM_NL80211_PMSR_RESP_ATTRS,
+ NL80211_PMSR_RESP_ATTR_MAX = NUM_NL80211_PMSR_RESP_ATTRS - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_peer_attrs - peer attributes for measurement
+ * @__NL80211_PMSR_PEER_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_PEER_ATTR_ADDR: peer's MAC address
+ * @NL80211_PMSR_PEER_ATTR_CHAN: channel definition, nested, using top-level
+ * attributes like %NL80211_ATTR_WIPHY_FREQ etc.
+ * @NL80211_PMSR_PEER_ATTR_REQ: This is a nested attribute indexed by
+ * measurement type, with attributes from the
+ * &enum nl80211_peer_measurement_req inside.
+ * @NL80211_PMSR_PEER_ATTR_RESP: This is a nested attribute indexed by
+ * measurement type, with attributes from the
+ * &enum nl80211_peer_measurement_resp inside.
+ *
+ * @NUM_NL80211_PMSR_PEER_ATTRS: internal
+ * @NL80211_PMSR_PEER_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_peer_attrs {
+ __NL80211_PMSR_PEER_ATTR_INVALID,
+
+ NL80211_PMSR_PEER_ATTR_ADDR,
+ NL80211_PMSR_PEER_ATTR_CHAN,
+ NL80211_PMSR_PEER_ATTR_REQ,
+ NL80211_PMSR_PEER_ATTR_RESP,
+
+ /* keep last */
+ NUM_NL80211_PMSR_PEER_ATTRS,
+ NL80211_PMSR_PEER_ATTR_MAX = NUM_NL80211_PMSR_PEER_ATTRS - 1,
+};
+
+/**
+ * enum nl80211_peer_measurement_attrs - peer measurement attributes
+ * @__NL80211_PMSR_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_ATTR_MAX_PEERS: u32 attribute used for capability
+ * advertisement only, indicates the maximum number of peers
+ * measurements can be done with in a single request
+ * @NL80211_PMSR_ATTR_REPORT_AP_TSF: flag attribute in capability
+ * indicating that the connected AP's TSF can be reported in
+ * measurement results
+ * @NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR: flag attribute in capability
+ * indicating that MAC address randomization is supported.
+ * @NL80211_PMSR_ATTR_TYPE_CAPA: capabilities reported by the device,
+ * this contains a nesting indexed by measurement type, and
+ * type-specific capabilities inside, which are from the enums
+ * named nl80211_peer_measurement_<type>_capa.
+ * @NL80211_PMSR_ATTR_PEERS: nested attribute, the nesting index is
+ * meaningless, just a list of peers to measure with, with the
+ * sub-attributes taken from
+ * &enum nl80211_peer_measurement_peer_attrs.
+ *
+ * @NUM_NL80211_PMSR_ATTR: internal
+ * @NL80211_PMSR_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_attrs {
+ __NL80211_PMSR_ATTR_INVALID,
+
+ NL80211_PMSR_ATTR_MAX_PEERS,
+ NL80211_PMSR_ATTR_REPORT_AP_TSF,
+ NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR,
+ NL80211_PMSR_ATTR_TYPE_CAPA,
+ NL80211_PMSR_ATTR_PEERS,
+
+ /* keep last */
+ NUM_NL80211_PMSR_ATTR,
+ NL80211_PMSR_ATTR_MAX = NUM_NL80211_PMSR_ATTR - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_capa - FTM capabilities
+ * @__NL80211_PMSR_FTM_CAPA_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_FTM_CAPA_ATTR_ASAP: flag attribute indicating ASAP mode
+ * is supported
+ * @NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP: flag attribute indicating non-ASAP
+ * mode is supported
+ * @NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI: flag attribute indicating if LCI
+ * data can be requested during the measurement
+ * @NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC: flag attribute indicating if civic
+ * location data can be requested during the measurement
+ * @NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES: u32 bitmap attribute of bits
+ * from &enum nl80211_preamble.
+ * @NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS: bitmap of values from
+ * &enum nl80211_chan_width indicating the supported channel
+ * bandwidths for FTM. Note that a higher channel bandwidth may be
+ * configured to allow for other measurements types with different
+ * bandwidth requirement in the same measurement.
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT: u32 attribute indicating
+ * the maximum bursts exponent that can be used (if not present anything
+ * is valid)
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST: u32 attribute indicating
+ * the maximum FTMs per burst (if not present anything is valid)
+ *
+ * @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_ftm_capa {
+ __NL80211_PMSR_FTM_CAPA_ATTR_INVALID,
+
+ NL80211_PMSR_FTM_CAPA_ATTR_ASAP,
+ NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP,
+ NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI,
+ NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC,
+ NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES,
+ NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST,
+
+ /* keep last */
+ NUM_NL80211_PMSR_FTM_CAPA_ATTR,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX = NUM_NL80211_PMSR_FTM_CAPA_ATTR - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_req - FTM request attributes
+ * @__NL80211_PMSR_FTM_REQ_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_FTM_REQ_ATTR_ASAP: ASAP mode requested (flag)
+ * @NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE: preamble type (see
+ * &enum nl80211_preamble), optional for DMG (u32)
+ * @NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP: number of bursts exponent as in
+ * 802.11-2016 9.4.2.168 "Fine Timing Measurement Parameters element"
+ * (u8, 0-15, optional with default 15 i.e. "no preference")
+ * @NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD: interval between bursts in units
+ * of 100ms (u16, optional with default 0)
+ * @NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION: burst duration, as in 802.11-2016
+ * Table 9-257 "Burst Duration field encoding" (u8, 0-15, optional with
+ * default 15 i.e. "no preference")
+ * @NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST: number of successful FTM frames
+ * requested per burst
+ * (u8, 0-31, optional with default 0 i.e. "no preference")
+ * @NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES: number of FTMR frame retries
+ * (u8, default 3)
+ * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI: request LCI data (flag)
+ * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC: request civic location data
+ * (flag)
+ *
+ * @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal
+ * @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_ftm_req {
+ __NL80211_PMSR_FTM_REQ_ATTR_INVALID,
+
+ NL80211_PMSR_FTM_REQ_ATTR_ASAP,
+ NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE,
+ NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP,
+ NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD,
+ NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION,
+ NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST,
+ NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES,
+ NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI,
+ NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC,
+
+ /* keep last */
+ NUM_NL80211_PMSR_FTM_REQ_ATTR,
+ NL80211_PMSR_FTM_REQ_ATTR_MAX = NUM_NL80211_PMSR_FTM_REQ_ATTR - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_failure_reasons - FTM failure reasons
+ * @NL80211_PMSR_FTM_FAILURE_UNSPECIFIED: unspecified failure, not used
+ * @NL80211_PMSR_FTM_FAILURE_NO_RESPONSE: no response from the FTM responder
+ * @NL80211_PMSR_FTM_FAILURE_REJECTED: FTM responder rejected measurement
+ * @NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL: we already know the peer is
+ * on a different channel, so can't measure (if we didn't know, we'd
+ * try and get no response)
+ * @NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE: peer can't actually do FTM
+ * @NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP: invalid T1/T4 timestamps
+ * received
+ * @NL80211_PMSR_FTM_FAILURE_PEER_BUSY: peer reports busy, you may retry
+ * later (see %NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME)
+ * @NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS: parameters were changed
+ * by the peer and are no longer supported
+ */
+enum nl80211_peer_measurement_ftm_failure_reasons {
+ NL80211_PMSR_FTM_FAILURE_UNSPECIFIED,
+ NL80211_PMSR_FTM_FAILURE_NO_RESPONSE,
+ NL80211_PMSR_FTM_FAILURE_REJECTED,
+ NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL,
+ NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE,
+ NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP,
+ NL80211_PMSR_FTM_FAILURE_PEER_BUSY,
+ NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS,
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_resp - FTM response attributes
+ * @__NL80211_PMSR_FTM_RESP_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON: FTM-specific failure reason
+ * (u32, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX: optional, if bursts are reported
+ * as separate results then it will be the burst index 0...(N-1) and
+ * the top level will indicate partial results (u32)
+ * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS: number of FTM Request frames
+ * transmitted (u32, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES: number of FTM Request frames
+ * that were acknowledged (u32, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME: retry time received from the
+ * busy peer (u32, seconds)
+ * @NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP: actual number of bursts exponent
+ * used by the responder (similar to request, u8)
+ * @NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION: actual burst duration used by
+ * the responder (similar to request, u8)
+ * @NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST: actual FTMs per burst used
+ * by the responder (similar to request, u8)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG: average RSSI across all FTM action
+ * frames (optional, s32, 1/2 dBm)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD: RSSI spread across all FTM action
+ * frames (optional, s32, 1/2 dBm)
+ * @NL80211_PMSR_FTM_RESP_ATTR_TX_RATE: bitrate we used for the response to the
+ * FTM action frame (optional, nested, using &enum nl80211_rate_info
+ * attributes)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RX_RATE: bitrate the responder used for the FTM
+ * action frame (optional, nested, using &enum nl80211_rate_info attrs)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG: average RTT (s64, picoseconds, optional
+ * but one of RTT/DIST must be present)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE: RTT variance (u64, ps^2, note that
+ * standard deviation is the square root of variance, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD: RTT spread (u64, picoseconds,
+ * optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG: average distance (s64, mm, optional
+ * but one of RTT/DIST must be present)
+ * @NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE: distance variance (u64, mm^2, note
+ * that standard deviation is the square root of variance, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD: distance spread (u64, mm, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_LCI: LCI data from peer (binary, optional);
+ * this is the contents of the Measurement Report Element (802.11-2016
+ * 9.4.2.22.1) starting with the Measurement Token, with Measurement
+ * Type 8.
+ * @NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC: civic location data from peer
+ * (binary, optional);
+ * this is the contents of the Measurement Report Element (802.11-2016
+ * 9.4.2.22.1) starting with the Measurement Token, with Measurement
+ * Type 11.
+ * @NL80211_PMSR_FTM_RESP_ATTR_PAD: ignore, for u64/s64 padding only
+ *
+ * @NUM_NL80211_PMSR_FTM_RESP_ATTR: internal
+ * @NL80211_PMSR_FTM_RESP_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_ftm_resp {
+ __NL80211_PMSR_FTM_RESP_ATTR_INVALID,
+
+ NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON,
+ NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX,
+ NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS,
+ NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES,
+ NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME,
+ NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP,
+ NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION,
+ NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST,
+ NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG,
+ NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD,
+ NL80211_PMSR_FTM_RESP_ATTR_TX_RATE,
+ NL80211_PMSR_FTM_RESP_ATTR_RX_RATE,
+ NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG,
+ NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE,
+ NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD,
+ NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG,
+ NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE,
+ NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD,
+ NL80211_PMSR_FTM_RESP_ATTR_LCI,
+ NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
+ NL80211_PMSR_FTM_RESP_ATTR_PAD,
+
+ /* keep last */
+ NUM_NL80211_PMSR_FTM_RESP_ATTR,
+ NL80211_PMSR_FTM_RESP_ATTR_MAX = NUM_NL80211_PMSR_FTM_RESP_ATTR - 1
+};
+
#endif /* __LINUX_NL80211_H */
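The FTM response attributes deliberately pair picoseconds for RTT with millimetres for distance; as a quick sanity check of those units, a rough conversion (ignoring clock error and processing delay) is distance ≈ c * RTT / 2, i.e. about 0.15 mm per picosecond of round-trip time. A small illustrative helper, not part of the header:

#include <stdio.h>

static long long rtt_ps_to_dist_mm(long long rtt_ps)
{
	/* light travels ~0.2998 mm per picosecond; halve for one way */
	return (long long)(rtt_ps * 0.299792458 / 2.0);
}

int main(void)
{
	/* a 33356 ps round trip corresponds to roughly 5 m of range */
	printf("%lld mm\n", rtt_ps_to_dist_mm(33356));
	return 0;
}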
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 401d0c1e612d..95d0db2a8350 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -485,6 +485,11 @@ enum {
TCA_FLOWER_IN_HW_COUNT,
+ TCA_FLOWER_KEY_PORT_SRC_MIN, /* be16 */
+ TCA_FLOWER_KEY_PORT_SRC_MAX, /* be16 */
+ TCA_FLOWER_KEY_PORT_DST_MIN, /* be16 */
+ TCA_FLOWER_KEY_PORT_DST_MAX, /* be16 */
+
__TCA_FLOWER_MAX,
};
@@ -518,6 +523,8 @@ enum {
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
+#define TCA_FLOWER_MASK_FLAGS_RANGE (1 << 0) /* Range-based match */
+
/* Match-all classifier */
enum {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 89ee47c2f17d..0d18b1d1fbbc 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -291,11 +291,38 @@ enum {
TCA_GRED_DPS,
TCA_GRED_MAX_P,
TCA_GRED_LIMIT,
+ TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
__TCA_GRED_MAX,
};
#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
+enum {
+ TCA_GRED_VQ_ENTRY_UNSPEC,
+ TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
+ __TCA_GRED_VQ_ENTRY_MAX,
+};
+#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
+
+enum {
+ TCA_GRED_VQ_UNSPEC,
+ TCA_GRED_VQ_PAD,
+ TCA_GRED_VQ_DP, /* u32 */
+ TCA_GRED_VQ_STAT_BYTES, /* u64 */
+ TCA_GRED_VQ_STAT_PACKETS, /* u32 */
+ TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_PDROP, /* u32 */
+ TCA_GRED_VQ_STAT_OTHER, /* u32 */
+ TCA_GRED_VQ_FLAGS, /* u32 */
+ __TCA_GRED_VQ_MAX
+};
+
+#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
+
struct tc_gred_qopt {
__u32 limit; /* HARD maximal queue length (bytes) */
__u32 qth_min; /* Min average length threshold (bytes) */
@@ -864,6 +891,8 @@ enum {
TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+ TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
+
__TCA_FQ_MAX
};
@@ -882,6 +911,7 @@ struct tc_fq_qd_stats {
__u32 inactive_flows;
__u32 throttled_flows;
__u32 unthrottle_latency_ns;
+ __u64 ce_mark; /* packets above ce_threshold */
};
/* Heavy-Hitter Filter */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c0d7ea0bf5b6..b4875a93363a 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -212,6 +212,7 @@ struct prctl_mm_map {
#define PR_SET_SPECULATION_CTRL 53
/* Speculation control variants */
# define PR_SPEC_STORE_BYPASS 0
+# define PR_SPEC_INDIRECT_BRANCH 1
/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
# define PR_SPEC_NOT_AFFECTED 0
# define PR_SPEC_PRCTL (1UL << 0)
@@ -219,4 +220,12 @@ struct prctl_mm_map {
# define PR_SPEC_DISABLE (1UL << 2)
# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+/* Reset arm64 pointer authentication keys */
+#define PR_PAC_RESET_KEYS 54
+# define PR_PAC_APIAKEY (1UL << 0)
+# define PR_PAC_APIBKEY (1UL << 1)
+# define PR_PAC_APDAKEY (1UL << 2)
+# define PR_PAC_APDBKEY (1UL << 3)
+# define PR_PAC_APGAKEY (1UL << 4)
+
#endif /* _LINUX_PRCTL_H */
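A hedged sketch of the new prctl on arm64; it regenerates only the two instruction keys and includes <linux/prctl.h> directly in case the libc copy predates the constants (the remaining arguments must be zero):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	if (prctl(PR_PAC_RESET_KEYS,
		  PR_PAC_APIAKEY | PR_PAC_APIBKEY, 0, 0, 0) < 0) {
		perror("PR_PAC_RESET_KEYS");
		return 1;
	}
	return 0;
}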
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 3039bf6a742e..d73d83950265 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -84,6 +84,16 @@ struct ptp_sys_offset {
struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
};
+struct ptp_sys_offset_extended {
+ unsigned int n_samples; /* Desired number of measurements. */
+ unsigned int rsv[3]; /* Reserved for future use. */
+ /*
+ * Array of [system, phc, system] time stamps. The kernel will provide
+ * 3*n_samples time stamps.
+ */
+ struct ptp_clock_time ts[PTP_MAX_SAMPLES][3];
+};
+
struct ptp_sys_offset_precise {
struct ptp_clock_time device;
struct ptp_clock_time sys_realtime;
@@ -136,6 +146,8 @@ struct ptp_pin_desc {
#define PTP_PIN_SETFUNC _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
#define PTP_SYS_OFFSET_PRECISE \
_IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
+#define PTP_SYS_OFFSET_EXTENDED \
+ _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
struct ptp_extts_event {
struct ptp_clock_time t; /* Time event occured. */
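A hedged sketch of the new extended-offset ioctl against a hypothetical /dev/ptp0: each sample is a [system, phc, system] triple, so the PHC-to-system offset can be estimated from the midpoint of the two system readings:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_extended req = { .n_samples = 5 };
	int fd = open("/dev/ptp0", O_RDWR);
	unsigned int i;

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_EXTENDED, &req) < 0) {
		perror("PTP_SYS_OFFSET_EXTENDED");
		return 1;
	}
	for (i = 0; i < req.n_samples; i++) {
		/* ts[i][0]/ts[i][2]: system time before/after the PHC read,
		 * ts[i][1]: the PHC time sampled in between. */
		long long t1 = req.ts[i][0].sec * 1000000000LL + req.ts[i][0].nsec;
		long long tp = req.ts[i][1].sec * 1000000000LL + req.ts[i][1].nsec;
		long long t4 = req.ts[i][2].sec * 1000000000LL + req.ts[i][2].nsec;

		printf("sys - phc ~ %lld ns (window %lld ns)\n",
		       (t1 + t4) / 2 - tp, t4 - t1);
	}
	return 0;
}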
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 34dd3d497f2c..d584073532b8 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -129,6 +129,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_STREAM_SCHEDULER_VALUE 124
#define SCTP_INTERLEAVING_SUPPORTED 125
#define SCTP_SENDMSG_CONNECT 126
+#define SCTP_EVENT 127
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -568,6 +569,8 @@ struct sctp_assoc_reset_event {
#define SCTP_ASSOC_CHANGE_DENIED 0x0004
#define SCTP_ASSOC_CHANGE_FAILED 0x0008
+#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
+#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
struct sctp_stream_change_event {
__u16 strchange_type;
__u16 strchange_flags;
@@ -630,7 +633,9 @@ union sctp_notification {
*/
enum sctp_sn_type {
- SCTP_SN_TYPE_BASE = (1<<15),
+ SCTP_SN_TYPE_BASE = (1<<15),
+ SCTP_DATA_IO_EVENT = SCTP_SN_TYPE_BASE,
+#define SCTP_DATA_IO_EVENT SCTP_DATA_IO_EVENT
SCTP_ASSOC_CHANGE,
#define SCTP_ASSOC_CHANGE SCTP_ASSOC_CHANGE
SCTP_PEER_ADDR_CHANGE,
@@ -655,6 +660,8 @@ enum sctp_sn_type {
#define SCTP_ASSOC_RESET_EVENT SCTP_ASSOC_RESET_EVENT
SCTP_STREAM_CHANGE_EVENT,
#define SCTP_STREAM_CHANGE_EVENT SCTP_STREAM_CHANGE_EVENT
+ SCTP_SN_TYPE_MAX = SCTP_STREAM_CHANGE_EVENT,
+#define SCTP_SN_TYPE_MAX SCTP_SN_TYPE_MAX
};
/* Notification error codes used to fill up the error fields in some
@@ -1148,9 +1155,16 @@ struct sctp_add_streams {
uint16_t sas_outstrms;
};
+struct sctp_event {
+ sctp_assoc_t se_assoc_id;
+ uint16_t se_type;
+ uint8_t se_on;
+};
+
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
+ SCTP_SS_DEFAULT = SCTP_SS_FCFS,
SCTP_SS_PRIO,
SCTP_SS_RR,
SCTP_SS_MAX = SCTP_SS_RR
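A hedged sketch of the new per-event socket option; it subscribes the socket to association-change notifications only, using the uapi header directly and assuming an SCTP-capable kernel:

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>

int main(void)
{
	struct sctp_event ev = {
		.se_assoc_id = 0,		/* one-to-one socket: id unused */
		.se_type = SCTP_ASSOC_CHANGE,	/* notification to toggle */
		.se_on = 1,			/* enable it */
	};
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	if (fd < 0 || setsockopt(fd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev)) < 0) {
		perror("SCTP_EVENT");
		return 1;
	}
	return 0;
}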
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index f80135e5feaa..86dc24a96c90 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -243,6 +243,7 @@ enum
LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */
LINUX_MIB_TCPRETRANSFAIL, /* TCPRetransFail */
LINUX_MIB_TCPRCVCOALESCE, /* TCPRcvCoalesce */
+ LINUX_MIB_TCPBACKLOGCOALESCE, /* TCPBacklogCoalesce */
LINUX_MIB_TCPOFOQUEUE, /* TCPOFOQueue */
LINUX_MIB_TCPOFODROP, /* TCPOFODrop */
LINUX_MIB_TCPOFOMERGE, /* TCPOFOMerge */
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index e02d31986ff9..8bb6cc5f3235 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -266,6 +266,7 @@ enum {
TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */
TCP_NLA_DSACK_DUPS, /* DSACK blocks received */
TCP_NLA_REORD_SEEN, /* reordering events seen */
+ TCP_NLA_SRTT, /* smoothed RTT in usecs */
};
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 09502de447f5..30baccb6c9c4 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -33,6 +33,7 @@ struct udphdr {
#define UDP_NO_CHECK6_TX 101 /* Disable sending checksum for UDP6X */
#define UDP_NO_CHECK6_RX 102 /* Disable accpeting checksum for UDP6 */
#define UDP_SEGMENT 103 /* Set GSO segmentation size */
+#define UDP_GRO 104 /* This socket can receive UDP GRO packets */
/* UDP encapsulation types */
#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
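A hedged sketch of the new receive-side option: once UDP_GRO is enabled the kernel may deliver coalesced datagrams, with the original segment size reported via a cmsg at recvmsg() time (the receive path itself is not shown):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/udp.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || setsockopt(fd, IPPROTO_UDP, UDP_GRO, &one, sizeof(one)) < 0) {
		perror("UDP_GRO");
		return 1;
	}
	return 0;
}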
diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h
index 4f7b892377cd..7d21c1634b4d 100644
--- a/include/uapi/linux/v4l2-common.h
+++ b/include/uapi/linux/v4l2-common.h
@@ -79,24 +79,11 @@
/* Current composing area plus all padding pixels */
#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103
-/* Backward compatibility target definitions --- to be removed. */
-#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
-#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
-#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
-#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
-#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
-#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
-
/* Selection flags */
#define V4L2_SEL_FLAG_GE (1 << 0)
#define V4L2_SEL_FLAG_LE (1 << 1)
#define V4L2_SEL_FLAG_KEEP_CONFIG (1 << 2)
-/* Backward compatibility flag definitions --- to be removed. */
-#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
-#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
-#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
-
struct v4l2_edid {
__u32 pad;
__u32 start_block;
@@ -105,4 +92,19 @@ struct v4l2_edid {
__u8 *edid;
};
+#ifndef __KERNEL__
+/* Backward compatibility target definitions --- to be removed. */
+#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
+#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
+
+/* Backward compatibility flag definitions --- to be removed. */
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
+#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
+#endif
+
#endif /* __V4L2_COMMON__ */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 51b095898f4b..3dcfc6148f99 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -50,6 +50,8 @@
#ifndef __LINUX_V4L2_CONTROLS_H
#define __LINUX_V4L2_CONTROLS_H
+#include <linux/types.h>
+
/* Control classes */
#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */
@@ -402,9 +404,6 @@ enum v4l2_mpeg_video_multi_slice_mode {
#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228)
#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229)
-#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250)
-#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251)
-
#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300)
#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301)
#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302)
@@ -1095,66 +1094,4 @@ enum v4l2_detect_md_mode {
#define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3)
#define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4)
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
-
-struct v4l2_mpeg2_sequence {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
- __u16 horizontal_size;
- __u16 vertical_size;
- __u32 vbv_buffer_size;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
- __u8 profile_and_level_indication;
- __u8 progressive_sequence;
- __u8 chroma_format;
-};
-
-struct v4l2_mpeg2_picture {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
- __u8 picture_coding_type;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
- __u8 f_code[2][2];
- __u8 intra_dc_precision;
- __u8 picture_structure;
- __u8 top_field_first;
- __u8 frame_pred_frame_dct;
- __u8 concealment_motion_vectors;
- __u8 q_scale_type;
- __u8 intra_vlc_format;
- __u8 alternate_scan;
- __u8 repeat_first_field;
- __u8 progressive_frame;
-};
-
-struct v4l2_ctrl_mpeg2_slice_params {
- __u32 bit_size;
- __u32 data_bit_offset;
-
- struct v4l2_mpeg2_sequence sequence;
- struct v4l2_mpeg2_picture picture;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
- __u8 quantiser_scale_code;
-
- __u8 backward_ref_index;
- __u8 forward_ref_index;
-};
-
-struct v4l2_ctrl_mpeg2_quantization {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
- __u8 load_intra_quantiser_matrix;
- __u8 load_non_intra_quantiser_matrix;
- __u8 load_chroma_intra_quantiser_matrix;
- __u8 load_chroma_non_intra_quantiser_matrix;
-
- __u8 intra_quantiser_matrix[64];
- __u8 non_intra_quantiser_matrix[64];
- __u8 chroma_intra_quantiser_matrix[64];
- __u8 chroma_non_intra_quantiser_matrix[64];
-};
-
#endif
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 813102810f53..02bb7ad6e986 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -354,6 +354,21 @@ struct vfio_region_gfx_edid {
};
/*
+ * 10de vendor sub-type
+ *
+ * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
+ */
+#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
+
+/*
+ * 1014 vendor sub-type
+ *
+ * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
+ * to do TLB invalidation on a GPU.
+ */
+#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
+
+/*
* The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
* which allows direct access to non-MSIX registers which happened to be within
* the same system page.
@@ -363,6 +378,33 @@ struct vfio_region_gfx_edid {
*/
#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE 3
+/*
+ * Capability with compressed real address (aka SSA - small system address)
+ * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
+ * and by userspace to associate an NVLink bridge with a GPU.
+ */
+#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4
+
+struct vfio_region_info_cap_nvlink2_ssatgt {
+ struct vfio_info_cap_header header;
+ __u64 tgt;
+};
+
+/*
+ * Capability with an NVLink link speed. The value is read by
+ * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
+ * property in the device tree. The value is fixed in the hardware
+ * and failing to provide the correct value results in the link
+ * not working, with no indication from the driver as to why.
+ */
+#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5
+
+struct vfio_region_info_cap_nvlink2_lnkspd {
+ struct vfio_info_cap_header header;
+ __u32 link_speed;
+ __u32 __pad;
+};
+
/**
* VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
* struct vfio_irq_info)
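
Both NVLink2 capabilities are discovered the same way as other region capabilities: userspace issues VFIO_DEVICE_GET_REGION_INFO with a buffer large enough for the capability chain and then follows the next offsets in each vfio_info_cap_header. A rough sketch of that walk for the SSA target capability; the function is illustrative and assumes info already points at a reply with VFIO_REGION_INFO_FLAG_CAPS set:

#include <linux/vfio.h>
#include <stdint.h>
#include <stdio.h>

/* Walk the capability chain appended to a vfio_region_info reply and
 * print the NVLink2 target address, if the region advertises one. */
static void show_nvlink2_tgt(struct vfio_region_info *info)
{
	struct vfio_info_cap_header *hdr;
	uint32_t off = info->cap_offset;

	if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS) || !off)
		return;

	while (off) {
		hdr = (void *)((char *)info + off);
		if (hdr->id == VFIO_REGION_INFO_CAP_NVLINK2_SSATGT) {
			struct vfio_region_info_cap_nvlink2_ssatgt *cap =
				(void *)hdr;
			printf("NVLink2 target: 0x%llx\n",
			       (unsigned long long)cap->tgt);
			return;
		}
		off = hdr->next;	/* offset from the start of info */
	}
}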
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index c8e8ff810190..b5671ce2724f 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -145,6 +145,7 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
V4L2_BUF_TYPE_SDR_OUTPUT = 12,
V4L2_BUF_TYPE_META_CAPTURE = 13,
+ V4L2_BUF_TYPE_META_OUTPUT = 14,
/* Deprecated, do not use */
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
@@ -469,6 +470,7 @@ struct v4l2_capability {
#define V4L2_CAP_READWRITE 0x01000000 /* read/write system calls */
#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
+#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
#define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */
@@ -689,6 +691,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
#define V4L2_PIX_FMT_SUNXI_TILED_NV12 v4l2_fourcc('S', 'T', '1', '2') /* Sunxi Tiled NV12 Format */
+#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
/* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
@@ -879,6 +882,7 @@ struct v4l2_requestbuffers {
#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
+#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
/**
* struct v4l2_plane - plane info for multi-planar buffers
@@ -1622,8 +1626,6 @@ struct v4l2_ext_control {
__u8 __user *p_u8;
__u16 __user *p_u16;
__u32 __user *p_u32;
- struct v4l2_ctrl_mpeg2_slice_params __user *p_mpeg2_slice_params;
- struct v4l2_ctrl_mpeg2_quantization __user *p_mpeg2_quantization;
void __user *ptr;
};
} __attribute__ ((packed));
@@ -1669,8 +1671,6 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_U8 = 0x0100,
V4L2_CTRL_TYPE_U16 = 0x0101,
V4L2_CTRL_TYPE_U32 = 0x0102,
- V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS = 0x0103,
- V4L2_CTRL_TYPE_MPEG2_QUANTIZATION = 0x0104,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 449132c76b1c..1196e1c1d4f6 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -75,6 +75,9 @@
*/
#define VIRTIO_F_IOMMU_PLATFORM 33
+/* This feature indicates support for the packed virtqueue layout. */
+#define VIRTIO_F_RING_PACKED 34
+
/*
* Does the device support Single Root I/O Virtualization?
*/
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index f43c3c6171ff..8e88eba1fa7a 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -41,6 +41,7 @@
#include <linux/types.h>
#define VIRTIO_GPU_F_VIRGL 0
+#define VIRTIO_GPU_F_EDID 1
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -56,6 +57,7 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
+ VIRTIO_GPU_CMD_GET_EDID,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -76,6 +78,7 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
+ VIRTIO_GPU_RESP_OK_EDID,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -291,6 +294,21 @@ struct virtio_gpu_resp_capset {
__u8 capset_data[];
};
+/* VIRTIO_GPU_CMD_GET_EDID */
+struct virtio_gpu_cmd_get_edid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 scanout;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_EDID */
+struct virtio_gpu_resp_edid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 size;
+ __le32 padding;
+ __u8 edid[1024];
+};
+
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
struct virtio_gpu_config {
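
The EDID exchange added here is a simple command/response pair on the control queue: when VIRTIO_GPU_F_EDID has been negotiated, the driver sends virtio_gpu_cmd_get_edid for one scanout and the device answers with VIRTIO_GPU_RESP_OK_EDID carrying up to 1024 bytes of EDID data. A sketch of the driver-side logic, with ctrlq_roundtrip() standing in for whatever virtqueue plumbing the real driver uses, and little-endian conversion of the fields elided:

#include <linux/virtio_gpu.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Placeholder transport hook: queue a request/response pair on the
 * control queue and wait for completion.  Not part of the UAPI above. */
int ctrlq_roundtrip(const void *cmd, size_t cmd_len, void *resp, size_t resp_len);

/* Fetch the EDID blob for one scanout; returns valid bytes or -1. */
static int get_scanout_edid(uint32_t scanout, uint8_t *buf, size_t buf_len)
{
	struct virtio_gpu_cmd_get_edid cmd;
	struct virtio_gpu_resp_edid resp;
	uint32_t size;

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.type = VIRTIO_GPU_CMD_GET_EDID;	/* __le32 on the wire */
	cmd.scanout = scanout;

	if (ctrlq_roundtrip(&cmd, sizeof(cmd), &resp, sizeof(resp)) < 0)
		return -1;
	if (resp.hdr.type != VIRTIO_GPU_RESP_OK_EDID)
		return -1;

	size = resp.size;			/* le32_to_cpu() omitted */
	if (size > sizeof(resp.edid) || size > buf_len)
		return -1;
	memcpy(buf, resp.edid, size);
	return (int)size;
}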
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 6d5d5faa989b..2414f8af26b3 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -44,6 +44,13 @@
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
+/*
+ * Mark a descriptor as available or used in packed ring.
+ * Notice: they are defined as shifts instead of shifted values.
+ */
+#define VRING_PACKED_DESC_F_AVAIL 7
+#define VRING_PACKED_DESC_F_USED 15
+
/* The Host uses this in used->flags to advise the Guest: don't kick me when
* you add a buffer. It's unreliable, so it's simply an optimization. Guest
* will still kick if it's out of buffers. */
@@ -53,6 +60,23 @@
* optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
+/* Enable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
+/* Disable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor in packed ring.
+ * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
+ * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
+ */
+#define VRING_PACKED_EVENT_FLAG_DESC 0x2
+
+/*
+ * Wrap counter bit shift in event suppression structure
+ * of packed ring.
+ */
+#define VRING_PACKED_EVENT_F_WRAP_CTR 15
+
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28
@@ -171,4 +195,32 @@ static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
+struct vring_packed_desc_event {
+ /* Descriptor Ring Change Event Offset/Wrap Counter. */
+ __le16 off_wrap;
+ /* Descriptor Ring Change Event Flags. */
+ __le16 flags;
+};
+
+struct vring_packed_desc {
+ /* Buffer Address. */
+ __le64 addr;
+ /* Buffer Length. */
+ __le32 len;
+ /* Buffer ID. */
+ __le16 id;
+ /* The flags depending on descriptor type. */
+ __le16 flags;
+};
+
+struct vring_packed {
+ unsigned int num;
+
+ struct vring_packed_desc *desc;
+
+ struct vring_packed_desc_event *driver;
+
+ struct vring_packed_desc_event *device;
+};
+
#endif /* _UAPI_LINUX_VIRTIO_RING_H */
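
Because VRING_PACKED_DESC_F_AVAIL and VRING_PACKED_DESC_F_USED are bit positions rather than masks, ownership of a packed descriptor is decided by comparing those two bits against a wrap counter that flips on every ring wrap-around: a descriptor is available when AVAIL matches the writer's wrap counter and USED does not, and it becomes used once both bits agree. A small sketch of those tests, assuming flags has already been converted from little endian; the helper names are illustrative:

#include <stdbool.h>
#include <stdint.h>

#define VRING_PACKED_DESC_F_AVAIL	7	/* bit positions, per the header above */
#define VRING_PACKED_DESC_F_USED	15

/* A descriptor is available to the consumer when its AVAIL bit matches
 * the expected wrap counter and its USED bit does not. */
static bool desc_is_avail(uint16_t flags, bool wrap_counter)
{
	bool avail = !!(flags & (1u << VRING_PACKED_DESC_F_AVAIL));
	bool used  = !!(flags & (1u << VRING_PACKED_DESC_F_USED));

	return avail == wrap_counter && used != wrap_counter;
}

/* The driver marks a descriptor available by writing AVAIL from its wrap
 * counter and USED from the inverse, before publishing the descriptor. */
static uint16_t desc_avail_flags(uint16_t flags, bool wrap_counter)
{
	flags &= ~((1u << VRING_PACKED_DESC_F_AVAIL) |
		   (1u << VRING_PACKED_DESC_F_USED));
	flags |= (uint16_t)wrap_counter << VRING_PACKED_DESC_F_AVAIL;
	flags |= (uint16_t)!wrap_counter << VRING_PACKED_DESC_F_USED;
	return flags;
}

The device side marks a descriptor used by setting both bits to its own wrap counter, which is why "used" is detected as the two bits agreeing.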
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
index f0a547d86679..ae12826ed641 100644
--- a/include/uapi/sound/firewire.h
+++ b/include/uapi/sound/firewire.h
@@ -12,6 +12,7 @@
#define SNDRV_FIREWIRE_EVENT_EFW_RESPONSE 0x4e617475
#define SNDRV_FIREWIRE_EVENT_DIGI00X_MESSAGE 0x746e736c
#define SNDRV_FIREWIRE_EVENT_MOTU_NOTIFICATION 0x64776479
+#define SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL 0x7473636d
struct snd_firewire_event_common {
unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */
@@ -53,12 +54,24 @@ struct snd_firewire_event_motu_notification {
__u32 message; /* MOTU-specific bits. */
};
+struct snd_firewire_tascam_change {
+ unsigned int index;
+ __be32 before;
+ __be32 after;
+};
+
+struct snd_firewire_event_tascam_control {
+ unsigned int type;
+ struct snd_firewire_tascam_change changes[0];
+};
+
union snd_firewire_event {
struct snd_firewire_event_common common;
struct snd_firewire_event_lock_status lock_status;
struct snd_firewire_event_dice_notification dice_notification;
struct snd_firewire_event_efw_response efw_response;
struct snd_firewire_event_digi00x_message digi00x_message;
+ struct snd_firewire_event_tascam_control tascam_control;
struct snd_firewire_event_motu_notification motu_notification;
};
@@ -66,6 +79,7 @@ union snd_firewire_event {
#define SNDRV_FIREWIRE_IOCTL_GET_INFO _IOR('H', 0xf8, struct snd_firewire_get_info)
#define SNDRV_FIREWIRE_IOCTL_LOCK _IO('H', 0xf9)
#define SNDRV_FIREWIRE_IOCTL_UNLOCK _IO('H', 0xfa)
+#define SNDRV_FIREWIRE_IOCTL_TASCAM_STATE _IOR('H', 0xfb, struct snd_firewire_tascam_state)
#define SNDRV_FIREWIRE_TYPE_DICE 1
#define SNDRV_FIREWIRE_TYPE_FIREWORKS 2
@@ -88,4 +102,10 @@ struct snd_firewire_get_info {
* Returns -EBUSY if the driver is already streaming.
*/
+#define SNDRV_FIREWIRE_TASCAM_STATE_COUNT 64
+
+struct snd_firewire_tascam_state {
+ __be32 data[SNDRV_FIREWIRE_TASCAM_STATE_COUNT];
+};
+
#endif /* _UAPI_SOUND_FIREWIRE_H_INCLUDED */
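
The TASCAM additions give userspace two views of the console surface: a bulk snapshot of 64 big-endian status quadlets via SNDRV_FIREWIRE_IOCTL_TASCAM_STATE, and incremental before/after change records delivered through the new tascam_control event. A minimal sketch of reading the snapshot from the card's hwdep node; the device path is only an example:

#include <sound/firewire.h>
#include <arpa/inet.h>	/* ntohl() for the big-endian quadlets */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct snd_firewire_tascam_state state;
	/* Example path: the hwdep node of the first sound card. */
	int fd = open("/dev/snd/hwC0D0", O_RDONLY);
	int i;

	if (fd < 0 || ioctl(fd, SNDRV_FIREWIRE_IOCTL_TASCAM_STATE, &state) < 0) {
		perror("tascam state");
		return 1;
	}

	for (i = 0; i < SNDRV_FIREWIRE_TASCAM_STATE_COUNT; i++)
		printf("%02d: 0x%08x\n", i, ntohl(state.data[i]));

	close(fd);
	return 0;
}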
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index abbad94e14a1..e582e8e7527a 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -246,6 +246,9 @@ struct ipu_image {
struct v4l2_rect rect;
dma_addr_t phys0;
dma_addr_t phys1;
+ /* chroma plane offset overrides */
+ u32 u_offset;
+ u32 v_offset;
};
void ipu_cpmem_zero(struct ipuv3_channel *ch);
@@ -387,6 +390,12 @@ int ipu_ic_task_init(struct ipu_ic *ic,
int out_width, int out_height,
enum ipu_color_space in_cs,
enum ipu_color_space out_cs);
+int ipu_ic_task_init_rsc(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs,
+ u32 rsc);
int ipu_ic_task_graphics_init(struct ipu_ic *ic,
enum ipu_color_space in_g_cs,
bool galpha_en, u32 galpha,
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 61f410fd74e4..4914b93a23f2 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
{
}
#endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-struct resource;
-void arch_xen_balloon_init(struct resource *hostmem_resource);
-#endif
diff --git a/include/xen/interface/hvm/start_info.h b/include/xen/interface/hvm/start_info.h
index 648415976ead..50af9ea2ff1e 100644
--- a/include/xen/interface/hvm/start_info.h
+++ b/include/xen/interface/hvm/start_info.h
@@ -33,7 +33,7 @@
* | magic | Contains the magic value XEN_HVM_START_MAGIC_VALUE
* | | ("xEn3" with the 0x80 bit of the "E" set).
* 4 +----------------+
- * | version | Version of this structure. Current version is 0. New
+ * | version | Version of this structure. Current version is 1. New
* | | versions are guaranteed to be backwards-compatible.
* 8 +----------------+
* | flags | SIF_xxx flags.
@@ -48,6 +48,15 @@
* 32 +----------------+
* | rsdp_paddr | Physical address of the RSDP ACPI data structure.
* 40 +----------------+
+ * | memmap_paddr | Physical address of the (optional) memory map. Only
+ * | | present in version 1 and newer of the structure.
+ * 48 +----------------+
+ * | memmap_entries | Number of entries in the memory map table. Zero
+ * | | if there is no memory map being provided. Only
+ * | | present in version 1 and newer of the structure.
+ * 52 +----------------+
+ * | reserved | Version 1 and newer only.
+ * 56 +----------------+
*
* The layout of each entry in the module structure is the following:
*
@@ -62,14 +71,52 @@
* | reserved |
* 32 +----------------+
*
+ * The layout of each entry in the memory map table is as follows:
+ *
+ * 0 +----------------+
+ * | addr | Base address
+ * 8 +----------------+
+ * | size | Size of mapping in bytes
+ * 16 +----------------+
+ * | type | Type of mapping as defined between the hypervisor
+ * | | and guest. See XEN_HVM_MEMMAP_TYPE_* values below.
+ * 20 +----------------+
+ * | reserved |
+ * 24 +----------------+
+ *
* The address and sizes are always a 64bit little endian unsigned integer.
*
* NB: Xen on x86 will always try to place all the data below the 4GiB
* boundary.
+ *
+ * Version numbers of the hvm_start_info structure have evolved like this:
+ *
+ * Version 0: Initial implementation.
+ *
+ * Version 1: Added the memmap_paddr/memmap_entries fields (plus 4 bytes of
+ * padding) to the end of the hvm_start_info struct. These new
+ * fields can be used to pass a memory map to the guest. The
+ * memory map is optional and so guests that understand version 1
+ * of the structure must check that memmap_entries is non-zero
+ * before trying to read the memory map.
*/
#define XEN_HVM_START_MAGIC_VALUE 0x336ec578
/*
+ * The values used in the type field of the memory map table entries are
+ * defined below and match the Address Range Types as defined in the "System
+ * Address Map Interfaces" section of the ACPI Specification. Please refer to
+ * section 15 in version 6.2 of the ACPI spec: http://uefi.org/specifications
+ */
+#define XEN_HVM_MEMMAP_TYPE_RAM 1
+#define XEN_HVM_MEMMAP_TYPE_RESERVED 2
+#define XEN_HVM_MEMMAP_TYPE_ACPI 3
+#define XEN_HVM_MEMMAP_TYPE_NVS 4
+#define XEN_HVM_MEMMAP_TYPE_UNUSABLE 5
+#define XEN_HVM_MEMMAP_TYPE_DISABLED 6
+#define XEN_HVM_MEMMAP_TYPE_PMEM 7
+
+/*
* C representation of the x86/HVM start info layout.
*
* The canonical definition of this layout is above, this is just a way to
@@ -86,6 +133,13 @@ struct hvm_start_info {
uint64_t cmdline_paddr; /* Physical address of the command line. */
uint64_t rsdp_paddr; /* Physical address of the RSDP ACPI data */
/* structure. */
+ /* All following fields only present in version 1 and newer */
+ uint64_t memmap_paddr; /* Physical address of an array of */
+ /* hvm_memmap_table_entry. */
+ uint32_t memmap_entries; /* Number of entries in the memmap table. */
+ /* Value will be zero if there is no memory */
+ /* map being provided. */
+ uint32_t reserved; /* Must be zero. */
};
struct hvm_modlist_entry {
@@ -95,4 +149,11 @@ struct hvm_modlist_entry {
uint64_t reserved;
};
+struct hvm_memmap_table_entry {
+ uint64_t addr; /* Base address of the memory region */
+ uint64_t size; /* Size of the memory region in bytes */
+ uint32_t type; /* Mapping type */
+ uint32_t reserved; /* Must be zero for Version 1. */
+};
+
#endif /* __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__ */
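
A guest that understands version 1 therefore checks the structure version and memmap_entries before dereferencing memmap_paddr, which points at an array of hvm_memmap_table_entry. A sketch of that walk, assuming the start-info structures above are in scope and boot_phys_to_virt() stands in for whatever early mapping the boot environment provides:

#include <stdint.h>

/* Placeholder for the boot environment's early physical-to-virtual mapping. */
void *boot_phys_to_virt(uint64_t paddr);

/* Count the usable RAM advertised by a version 1 (or newer) start info. */
static uint64_t pvh_usable_ram_bytes(const struct hvm_start_info *si)
{
	const struct hvm_memmap_table_entry *e;
	uint64_t total = 0;
	uint32_t i;

	if (si->version < 1 || si->memmap_entries == 0)
		return 0;	/* no memory map provided */

	e = boot_phys_to_virt(si->memmap_paddr);
	for (i = 0; i < si->memmap_entries; i++)
		if (e[i].type == XEN_HVM_MEMMAP_TYPE_RAM)
			total += e[i].size;

	return total;
}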
diff --git a/include/xen/xen-front-pgdir-shbuf.h b/include/xen/xen-front-pgdir-shbuf.h
new file mode 100644
index 000000000000..150ef7ec51ec
--- /dev/null
+++ b/include/xen/xen-front-pgdir-shbuf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Xen frontend/backend page directory based shared buffer
+ * helper module.
+ *
+ * Copyright (C) 2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_FRONT_PGDIR_SHBUF_H_
+#define __XEN_FRONT_PGDIR_SHBUF_H_
+
+#include <linux/kernel.h>
+
+#include <xen/grant_table.h>
+
+struct xen_front_pgdir_shbuf_ops;
+
+struct xen_front_pgdir_shbuf {
+ /*
+ * Number of references granted for the backend use:
+ *
+ * - for frontend allocated/imported buffers this holds the number
+ * of grant references for the page directory and the pages
+ * of the buffer
+ *
+ * - for the buffer provided by the backend this only holds the number
+ * of grant references for the page directory itself as grant
+ * references for the buffer will be provided by the backend.
+ */
+ int num_grefs;
+ grant_ref_t *grefs;
+ /* Page directory backing storage. */
+ u8 *directory;
+
+ /*
+ * Number of pages for the shared buffer itself (excluding the page
+ * directory).
+ */
+ int num_pages;
+ /*
+ * Backing storage of the shared buffer: these are the pages being
+ * shared.
+ */
+ struct page **pages;
+
+ struct xenbus_device *xb_dev;
+
+ /* These are the ops used internally depending on be_alloc mode. */
+ const struct xen_front_pgdir_shbuf_ops *ops;
+
+ /* Xen map handles for the buffer allocated by the backend. */
+ grant_handle_t *backend_map_handles;
+};
+
+struct xen_front_pgdir_shbuf_cfg {
+ struct xenbus_device *xb_dev;
+
+ /* Number of pages of the buffer backing storage. */
+ int num_pages;
+ /* Pages of the buffer to be shared. */
+ struct page **pages;
+
+ /*
+ * This is allocated outside because there are use-cases when
+ * the buffer structure is allocated as a part of a bigger one.
+ */
+ struct xen_front_pgdir_shbuf *pgdir;
+ /*
+ * Mode of grant reference sharing: if set then backend will share
+ * grant references to the buffer with the frontend.
+ */
+ int be_alloc;
+};
+
+int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg);
+
+grant_ref_t
+xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf);
+
+int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf);
+
+int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf);
+
+void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf);
+
+#endif /* __XEN_FRONT_PGDIR_SHBUF_H_ */
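
This helper is used by Xen para-virtual frontends (display, sound, camera) that need to expose a multi-page buffer to the backend through a single page-directory grant reference. A condensed sketch of the frontend-allocated flow, with error handling trimmed and the wrapper name illustrative; in the be_alloc case the frontend would additionally call xen_front_pgdir_shbuf_map() once the backend has published its grants:

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/xenbus.h>

/* Share @num_pages frontend-allocated pages with the backend and return
 * the grant reference of the page directory, which is what gets written
 * to XenStore for the backend to pick up. */
static int share_buffer(struct xenbus_device *xb_dev,
			struct page **pages, int num_pages,
			struct xen_front_pgdir_shbuf *shbuf,
			grant_ref_t *dir_gref)
{
	struct xen_front_pgdir_shbuf_cfg cfg = {
		.xb_dev = xb_dev,
		.pages = pages,
		.num_pages = num_pages,
		.pgdir = shbuf,
		.be_alloc = 0,	/* frontend owns the pages */
	};
	int ret;

	ret = xen_front_pgdir_shbuf_alloc(&cfg);
	if (ret < 0)
		return ret;

	*dir_gref = xen_front_pgdir_shbuf_get_dir_start(shbuf);
	return 0;
}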
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 18803ff76e27..4969817124a8 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -42,16 +42,12 @@ int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-
-int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
- xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages);
#else
static inline int xen_create_contiguous_region(phys_addr_t pstart,
unsigned int order,
@@ -63,7 +59,13 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
unsigned int order) { }
+#endif
+#if defined(CONFIG_XEN_PV)
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+ unsigned int domid, bool no_translate, struct page **pages);
+#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr,
pgprot_t prot, unsigned int domid,
diff --git a/include/xen/xen.h b/include/xen/xen.h
index d7a2678da77f..0e2156786ad2 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -29,6 +29,9 @@ extern bool xen_pvh;
extern uint32_t xen_start_flags;
+#include <xen/interface/hvm/start_info.h>
+extern struct hvm_start_info pvh_start_info;
+
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
#include <asm/xen/hypervisor.h>