author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2016-10-30 06:12:50 +0100
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2016-10-30 06:12:50 +0100
commit     fe0f59c41255d339f4f059be62c350c3c48a3f95 (patch)
tree       fde2381bf94a14ea1553f88f9e9e37b45ed280dc /include
parent     cpufreq: intel_pstate: Always set max P-state in performance mode (diff)
parent     Documentation: intel_pstate: PID tuning is not always available (diff)
download   linux-fe0f59c41255d339f4f059be62c350c3c48a3f95.tar.xz
           linux-fe0f59c41255d339f4f059be62c350c3c48a3f95.zip
Merge back earlier cpufreq material for v4.10.
Diffstat (limited to 'include')
-rw-r--r--include/acpi/acconfig.h4
-rw-r--r--include/acpi/acoutput.h6
-rw-r--r--include/acpi/acpiosxf.h43
-rw-r--r--include/acpi/acpixf.h44
-rw-r--r--include/acpi/actbl.h164
-rw-r--r--include/acpi/actypes.h64
-rw-r--r--include/acpi/cppc_acpi.h18
-rw-r--r--include/acpi/platform/acenv.h90
-rw-r--r--include/acpi/platform/acenvex.h21
-rw-r--r--include/acpi/platform/acgcc.h17
-rw-r--r--include/acpi/platform/acgccex.h58
-rw-r--r--include/acpi/platform/aclinux.h22
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/acpi/processor.h4
-rw-r--r--include/asm-generic/export.h94
-rw-r--r--include/asm-generic/libata-portmap.h7
-rw-r--r--include/asm-generic/percpu.h53
-rw-r--r--include/asm-generic/pgtable.h3
-rw-r--r--include/asm-generic/uaccess.h24
-rw-r--r--include/asm-generic/vmlinux.lds.h63
-rw-r--r--include/crypto/algapi.h70
-rw-r--r--include/crypto/engine.h107
-rw-r--r--include/crypto/ghash.h23
-rw-r--r--include/drm/bridge/analogix_dp.h4
-rw-r--r--include/drm/drmP.h177
-rw-r--r--include/drm/drm_atomic.h154
-rw-r--r--include/drm/drm_atomic_helper.h18
-rw-r--r--include/drm/drm_blend.h62
-rw-r--r--include/drm/drm_bridge.h218
-rw-r--r--include/drm/drm_color_mgmt.h61
-rw-r--r--include/drm/drm_connector.h778
-rw-r--r--include/drm/drm_core.h34
-rw-r--r--include/drm/drm_crtc.h1939
-rw-r--r--include/drm/drm_crtc_helper.h6
-rw-r--r--include/drm/drm_dp_aux_dev.h62
-rw-r--r--include/drm/drm_dp_helper.h22
-rw-r--r--include/drm/drm_edid.h30
-rw-r--r--include/drm/drm_encoder.h249
-rw-r--r--include/drm/drm_fb_helper.h46
-rw-r--r--include/drm/drm_fourcc.h3
-rw-r--r--include/drm/drm_framebuffer.h267
-rw-r--r--include/drm/drm_gem.h4
-rw-r--r--include/drm/drm_mipi_dsi.h6
-rw-r--r--include/drm/drm_mm.h12
-rw-r--r--include/drm/drm_mode_object.h125
-rw-r--r--include/drm/drm_modes.h27
-rw-r--r--include/drm/drm_modeset_helper.h36
-rw-r--r--include/drm/drm_modeset_helper_vtables.h57
-rw-r--r--include/drm/drm_plane.h526
-rw-r--r--include/drm/drm_plane_helper.h9
-rw-r--r--include/drm/drm_property.h295
-rw-r--r--include/drm/drm_simple_kms_helper.h31
-rw-r--r--include/drm/drm_vma_manager.h22
-rw-r--r--include/drm/i2c/tda998x.h29
-rw-r--r--include/drm/i915_drm.h2
-rw-r--r--include/drm/i915_pciids.h38
-rw-r--r--include/drm/ttm/ttm_bo_api.h32
-rw-r--r--include/drm/ttm/ttm_bo_driver.h9
-rw-r--r--include/drm/ttm/ttm_memory.h1
-rw-r--r--include/drm/ttm/ttm_placement.h56
-rw-r--r--include/dt-bindings/clock/exynos5410.h3
-rw-r--r--include/dt-bindings/clock/exynos5420.h11
-rw-r--r--include/dt-bindings/clock/exynos5440.h2
-rw-r--r--include/dt-bindings/clock/gxbb-aoclkc.h66
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h16
-rw-r--r--include/dt-bindings/clock/imx5-clock.h15
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h4
-rw-r--r--include/dt-bindings/clock/maxim,max77620.h21
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h2
-rw-r--r--include/dt-bindings/clock/mt2701-clk.h486
-rw-r--r--include/dt-bindings/clock/qcom,gcc-mdm9615.h327
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8996.h5
-rw-r--r--include/dt-bindings/clock/qcom,lcc-mdm9615.h52
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8996.h1
-rw-r--r--include/dt-bindings/clock/r7s72100-clock.h3
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h25
-rw-r--r--include/dt-bindings/clock/rk3399-cru.h3
-rw-r--r--include/dt-bindings/clock/sun6i-a31-ccu.h187
-rw-r--r--include/dt-bindings/clock/sun8i-a23-a33-ccu.h127
-rw-r--r--include/dt-bindings/clock/zx296718-clock.h163
-rw-r--r--include/dt-bindings/display/tda998x.h7
-rw-r--r--include/dt-bindings/memory/mt2701-larb-port.h2
-rw-r--r--include/dt-bindings/mfd/qcom-rpm.h22
-rw-r--r--include/dt-bindings/mfd/stm32f4-rcc.h98
-rw-r--r--include/dt-bindings/net/mscc-phy-vsc8531.h21
-rw-r--r--include/dt-bindings/pinctrl/samsung.h57
-rw-r--r--include/dt-bindings/reset/gxbb-aoclkc.h66
-rw-r--r--include/dt-bindings/reset/mt2701-resets.h83
-rw-r--r--include/dt-bindings/reset/qcom,gcc-mdm9615.h136
-rw-r--r--include/dt-bindings/reset/sun6i-a31-ccu.h106
-rw-r--r--include/dt-bindings/reset/sun8i-a23-a33-ccu.h87
-rw-r--r--include/dt-bindings/soc/rockchip,boot-mode.h15
-rw-r--r--include/dt-bindings/thermal/tegra124-soctherm.h5
-rw-r--r--include/kvm/arm_vgic.h18
-rw-r--r--include/linux/acpi.h53
-rw-r--r--include/linux/acpi_iort.h42
-rw-r--r--include/linux/aer.h2
-rw-r--r--include/linux/amba/bus.h6
-rw-r--r--include/linux/amba/clcd.h63
-rw-r--r--include/linux/amba/serial.h9
-rw-r--r--include/linux/amd-iommu.h43
-rw-r--r--include/linux/ata.h69
-rw-r--r--include/linux/atmel_serial.h2
-rw-r--r--include/linux/auto_dev-ioctl.h211
-rw-r--r--include/linux/auto_fs.h1
-rw-r--r--include/linux/bcma/bcma.h3
-rw-r--r--include/linux/bcma/bcma_regs.h2
-rw-r--r--include/linux/bio.h3
-rw-r--r--include/linux/bitfield.h93
-rw-r--r--include/linux/bitmap.h18
-rw-r--r--include/linux/bitops.h36
-rw-r--r--include/linux/blk-cgroup.h13
-rw-r--r--include/linux/blk-mq-pci.h9
-rw-r--r--include/linux/blk-mq.h48
-rw-r--r--include/linux/blk_types.h21
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/blockgroup_lock.h28
-rw-r--r--include/linux/bootmem.h9
-rw-r--r--include/linux/bpf.h15
-rw-r--r--include/linux/bpf_verifier.h102
-rw-r--r--include/linux/bug.h3
-rw-r--r--include/linux/can/dev.h3
-rw-r--r--include/linux/ccp.h3
-rw-r--r--include/linux/cec-funcs.h78
-rw-r--r--include/linux/cec.h5
-rw-r--r--include/linux/ceph/auth.h2
-rw-r--r--include/linux/ceph/ceph_fs.h12
-rw-r--r--include/linux/ceph/cls_lock_client.h49
-rw-r--r--include/linux/ceph/libceph.h3
-rw-r--r--include/linux/ceph/mon_client.h3
-rw-r--r--include/linux/ceph/osd_client.h23
-rw-r--r--include/linux/cgroup.h33
-rw-r--r--include/linux/clk-provider.h16
-rw-r--r--include/linux/compaction.h40
-rw-r--r--include/linux/compat.h1
-rw-r--r--include/linux/compiler-gcc.h7
-rw-r--r--include/linux/compiler.h34
-rw-r--r--include/linux/console.h6
-rw-r--r--include/linux/coresight.h5
-rw-r--r--include/linux/cpu.h23
-rw-r--r--include/linux/cpuhotplug.h143
-rw-r--r--include/linux/cred.h11
-rw-r--r--include/linux/ctype.h5
-rw-r--r--include/linux/dax.h6
-rw-r--r--include/linux/dcache.h5
-rw-r--r--include/linux/debugfs.h17
-rw-r--r--include/linux/devfreq-event.h5
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/device.h3
-rw-r--r--include/linux/dma-debug.h19
-rw-r--r--include/linux/dma-iommu.h12
-rw-r--r--include/linux/dma-mapping.h48
-rw-r--r--include/linux/dma/dw.h5
-rw-r--r--include/linux/dma/hsu.h9
-rw-r--r--include/linux/dmaengine.h16
-rw-r--r--include/linux/efi.h74
-rw-r--r--include/linux/export.h31
-rw-r--r--include/linux/exportfs.h13
-rw-r--r--include/linux/extcon.h191
-rw-r--r--include/linux/extcon/extcon-adc-jack.h4
-rw-r--r--include/linux/f2fs_fs.h1
-rw-r--r--include/linux/falloc.h3
-rw-r--r--include/linux/fdtable.h8
-rw-r--r--include/linux/fence-array.h10
-rw-r--r--include/linux/fence.h2
-rw-r--r--include/linux/filter.h64
-rw-r--r--include/linux/firmware/meson/meson_sm.h31
-rw-r--r--include/linux/fs.h57
-rw-r--r--include/linux/fscrypto.h29
-rw-r--r--include/linux/fsnotify.h14
-rw-r--r--include/linux/fsnotify_backend.h9
-rw-r--r--include/linux/ftrace.h19
-rw-r--r--include/linux/ftrace_irq.h31
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/gpio/driver.h9
-rw-r--r--include/linux/hid-sensor-hub.h1
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/huge_mm.h15
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/hw_random.h4
-rw-r--r--include/linux/hwmon.h342
-rw-r--r--include/linux/hyperv.h101
-rw-r--r--include/linux/hypervisor.h17
-rw-r--r--include/linux/i2c-mux.h8
-rw-r--r--include/linux/i2c.h47
-rw-r--r--include/linux/if_bridge.h1
-rw-r--r--include/linux/if_link.h1
-rw-r--r--include/linux/if_team.h2
-rw-r--r--include/linux/if_vlan.h34
-rw-r--r--include/linux/iio/consumer.h12
-rw-r--r--include/linux/iio/iio.h3
-rw-r--r--include/linux/iio/trigger.h26
-rw-r--r--include/linux/iio/triggered_buffer.h8
-rw-r--r--include/linux/inet_diag.h4
-rw-r--r--include/linux/init.h62
-rw-r--r--include/linux/init_task.h11
-rw-r--r--include/linux/interrupt.h14
-rw-r--r--include/linux/io-mapping.h98
-rw-r--r--include/linux/iomap.h4
-rw-r--r--include/linux/iommu.h39
-rw-r--r--include/linux/ioprio.h1
-rw-r--r--include/linux/ipc_namespace.h1
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irq.h28
-rw-r--r--include/linux/irqchip/arm-gic-v3.h4
-rw-r--r--include/linux/irqdesc.h3
-rw-r--r--include/linux/jiffies.h4
-rw-r--r--include/linux/jump_label.h18
-rw-r--r--include/linux/kern_levels.h2
-rw-r--r--include/linux/kernel.h57
-rw-r--r--include/linux/kernfs.h28
-rw-r--r--include/linux/kexec.h6
-rw-r--r--include/linux/kmemleak.h18
-rw-r--r--include/linux/kthread.h88
-rw-r--r--include/linux/ktime.h12
-rw-r--r--include/linux/kvm_host.h4
-rw-r--r--include/linux/leds.h9
-rw-r--r--include/linux/lglock.h81
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/libnvdimm.h28
-rw-r--r--include/linux/lightnvm.h18
-rw-r--r--include/linux/list.h7
-rw-r--r--include/linux/livepatch.h3
-rw-r--r--include/linux/lockdep.h4
-rw-r--r--include/linux/lsm_audit.h2
-rw-r--r--include/linux/lsm_hooks.h37
-rw-r--r--include/linux/mbus.h18
-rw-r--r--include/linux/mcb.h23
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/memcontrol.h21
-rw-r--r--include/linux/mfd/88pm80x.h4
-rw-r--r--include/linux/mfd/abx500/ab8500.h2
-rw-r--r--include/linux/mfd/ac100.h178
-rw-r--r--include/linux/mfd/arizona/core.h12
-rw-r--r--include/linux/mfd/axp20x.h60
-rw-r--r--include/linux/mfd/cros_ec.h18
-rw-r--r--include/linux/mfd/cros_ec_commands.h34
-rw-r--r--include/linux/mfd/da9063/core.h4
-rw-r--r--include/linux/mfd/da9063/pdata.h4
-rw-r--r--include/linux/mfd/da9063/registers.h4
-rw-r--r--include/linux/mfd/db8500-prcmu.h6
-rw-r--r--include/linux/mfd/dbx500-prcmu.h9
-rw-r--r--include/linux/mfd/lp873x.h268
-rw-r--r--include/linux/mfd/max14577-private.h2
-rw-r--r--include/linux/mfd/max14577.h2
-rw-r--r--include/linux/mfd/rk808.h154
-rw-r--r--include/linux/mfd/stmpe.h21
-rw-r--r--include/linux/mfd/syscon/exynos5-pmu.h4
-rw-r--r--include/linux/mfd/tps65217.h12
-rw-r--r--include/linux/mfd/tps65218.h6
-rw-r--r--include/linux/mfd/twl6040.h2
-rw-r--r--include/linux/miscdevice.h8
-rw-r--r--include/linux/mlx4/cmd.h3
-rw-r--r--include/linux/mlx4/device.h16
-rw-r--r--include/linux/mlx4/qp.h2
-rw-r--r--include/linux/mlx5/cq.h6
-rw-r--r--include/linux/mlx5/device.h454
-rw-r--r--include/linux/mlx5/driver.h35
-rw-r--r--include/linux/mlx5/fs.h6
-rw-r--r--include/linux/mlx5/mlx5_ifc.h302
-rw-r--r--include/linux/mlx5/port.h40
-rw-r--r--include/linux/mlx5/qp.h128
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h46
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/mmc/core.h10
-rw-r--r--include/linux/mmc/dw_mmc.h2
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/mroute.h2
-rw-r--r--include/linux/mroute6.h2
-rw-r--r--include/linux/msi.h5
-rw-r--r--include/linux/mtd/mtd.h107
-rw-r--r--include/linux/mtd/nand.h234
-rw-r--r--include/linux/nd.h8
-rw-r--r--include/linux/net.h6
-rw-r--r--include/linux/netdevice.h36
-rw-r--r--include/linux/netfilter.h63
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h64
-rw-r--r--include/linux/netfilter_ingress.h18
-rw-r--r--include/linux/nfs4.h1
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/nfs_xdr.h8
-rw-r--r--include/linux/nmi.h31
-rw-r--r--include/linux/of.h144
-rw-r--r--include/linux/of_fdt.h3
-rw-r--r--include/linux/of_gpio.h5
-rw-r--r--include/linux/of_pci.h10
-rw-r--r--include/linux/omap-dma.h19
-rw-r--r--include/linux/omap-gpmc.h4
-rw-r--r--include/linux/oom.h46
-rw-r--r--include/linux/padata.h2
-rw-r--r--include/linux/page_ext.h8
-rw-r--r--include/linux/page_owner.h2
-rw-r--r--include/linux/pagemap.h125
-rw-r--r--include/linux/pci.h36
-rw-r--r--include/linux/percpu-rwsem.h108
-rw-r--r--include/linux/perf/arm_pmu.h15
-rw-r--r--include/linux/perf_event.h33
-rw-r--r--include/linux/phy.h3
-rw-r--r--include/linux/phy/phy.h3
-rw-r--r--include/linux/pid_namespace.h1
-rw-r--r--include/linux/pipe_fs_i.h59
-rw-r--r--include/linux/pkeys.h40
-rw-r--r--include/linux/platform_data/dma-dw.h4
-rw-r--r--include/linux/platform_data/dma-mmp_tdma.h2
-rw-r--r--include/linux/platform_data/dma-s3c24xx.h6
-rw-r--r--include/linux/platform_data/gpio-htc-egpio.h (renamed from include/linux/mfd/htc-egpio.h)0
-rw-r--r--include/linux/platform_data/gpio-lpc32xx.h50
-rw-r--r--include/linux/platform_data/media/camera-pxa.h2
-rw-r--r--include/linux/platform_data/remoteproc-omap.h6
-rw-r--r--include/linux/pm_domain.h74
-rw-r--r--include/linux/posix_acl.h23
-rw-r--r--include/linux/posix_acl_xattr.h31
-rw-r--r--include/linux/power/bq24735-charger.h4
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/power/sbs-battery.h8
-rw-r--r--include/linux/printk.h1
-rw-r--r--include/linux/proc_ns.h2
-rw-r--r--include/linux/property.h2
-rw-r--r--include/linux/pstore.h17
-rw-r--r--include/linux/pstore_ram.h7
-rw-r--r--include/linux/ptp_clock_kernel.h5
-rw-r--r--include/linux/pwm.h5
-rw-r--r--include/linux/pxa2xx_ssp.h20
-rw-r--r--include/linux/qed/common_hsi.h359
-rw-r--r--include/linux/qed/eth_common.h155
-rw-r--r--include/linux/qed/iscsi_common.h28
-rw-r--r--include/linux/qed/qed_chain.h13
-rw-r--r--include/linux/qed/qed_eth_if.h3
-rw-r--r--include/linux/qed/qed_if.h36
-rw-r--r--include/linux/qed/qed_ll2_if.h139
-rw-r--r--include/linux/qed/qed_roce_if.h604
-rw-r--r--include/linux/qed/qede_roce.h88
-rw-r--r--include/linux/qed/rdma_common.h1
-rw-r--r--include/linux/qed/tcp_common.h16
-rw-r--r--include/linux/radix-tree.h14
-rw-r--r--include/linux/raid/pq.h6
-rw-r--r--include/linux/random.h17
-rw-r--r--include/linux/rcu_sync.h1
-rw-r--r--include/linux/rcupdate.h1
-rw-r--r--include/linux/regmap.h8
-rw-r--r--include/linux/regulator/consumer.h3
-rw-r--r--include/linux/regulator/driver.h10
-rw-r--r--include/linux/relay.h26
-rw-r--r--include/linux/remoteproc.h16
-rw-r--r--include/linux/rhashtable.h543
-rw-r--r--include/linux/rpmsg.h248
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/sbitmap.h373
-rw-r--r--include/linux/sched.h133
-rw-r--r--include/linux/security.h25
-rw-r--r--include/linux/sem.h1
-rw-r--r--include/linux/seq_file.h4
-rw-r--r--include/linux/serial_core.h13
-rw-r--r--include/linux/skbuff.h81
-rw-r--r--include/linux/slab.h8
-rw-r--r--include/linux/smp.h3
-rw-r--r--include/linux/soc/qcom/smd.h29
-rw-r--r--include/linux/spi/spi.h83
-rw-r--r--include/linux/splice.h3
-rw-r--r--include/linux/sunrpc/auth.h1
-rw-r--r--include/linux/sunrpc/clnt.h17
-rw-r--r--include/linux/sunrpc/rpc_rdma.h39
-rw-r--r--include/linux/sunrpc/sched.h4
-rw-r--r--include/linux/sunrpc/svc_rdma.h10
-rw-r--r--include/linux/sunrpc/xdr.h12
-rw-r--r--include/linux/sunrpc/xprt.h12
-rw-r--r--include/linux/sunrpc/xprtmultipath.h2
-rw-r--r--include/linux/sunrpc/xprtrdma.h4
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h13
-rw-r--r--include/linux/sync_file.h20
-rw-r--r--include/linux/syscalls.h8
-rw-r--r--include/linux/sysctl.h7
-rw-r--r--include/linux/t10-pi.h20
-rw-r--r--include/linux/tcp.h21
-rw-r--r--include/linux/thermal.h43
-rw-r--r--include/linux/thread_info.h22
-rw-r--r--include/linux/time64.h1
-rw-r--r--include/linux/timekeeping.h2
-rw-r--r--include/linux/torture.h2
-rw-r--r--include/linux/u64_stats_sync.h45
-rw-r--r--include/linux/uio.h19
-rw-r--r--include/linux/ulpi/driver.h8
-rw-r--r--include/linux/ulpi/interface.h9
-rw-r--r--include/linux/usb/composite.h3
-rw-r--r--include/linux/usb/gadget.h30
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/linux/user_namespace.h44
-rw-r--r--include/linux/utsname.h1
-rw-r--r--include/linux/vgaarb.h128
-rw-r--r--include/linux/vme.h2
-rw-r--r--include/linux/wait.h17
-rw-r--r--include/linux/watchdog.h24
-rw-r--r--include/linux/win_minmax.h37
-rw-r--r--include/linux/workqueue.h1
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/linux/xattr.h7
-rw-r--r--include/media/cec.h2
-rw-r--r--include/media/drv-intf/sh_mobile_ceu.h1
-rw-r--r--include/media/drv-intf/sh_mobile_csi2.h48
-rw-r--r--include/media/i2c/smiapp.h7
-rw-r--r--include/media/media-device.h131
-rw-r--r--include/media/media-devnode.h5
-rw-r--r--include/media/media-entity.h248
-rw-r--r--include/media/rc-core.h2
-rw-r--r--include/media/rc-map.h105
-rw-r--r--include/media/rcar-fcp.h2
-rw-r--r--include/media/soc_camera.h7
-rw-r--r--include/media/v4l2-ctrls.h62
-rw-r--r--include/media/v4l2-dev.h18
-rw-r--r--include/media/v4l2-device.h68
-rw-r--r--include/media/v4l2-dv-timings.h4
-rw-r--r--include/media/v4l2-event.h3
-rw-r--r--include/media/v4l2-flash-led-class.h15
-rw-r--r--include/media/v4l2-ioctl.h510
-rw-r--r--include/media/v4l2-mc.h4
-rw-r--r--include/media/v4l2-mem2mem.h264
-rw-r--r--include/media/v4l2-subdev.h32
-rw-r--r--include/media/videobuf2-core.h380
-rw-r--r--include/media/videobuf2-v4l2.h182
-rw-r--r--include/media/vsp1.h2
-rw-r--r--include/net/addrconf.h3
-rw-r--r--include/net/af_rxrpc.h53
-rw-r--r--include/net/af_unix.h2
-rw-r--r--include/net/bluetooth/bluetooth.h4
-rw-r--r--include/net/bluetooth/hci.h7
-rw-r--r--include/net/bluetooth/hci_core.h11
-rw-r--r--include/net/bluetooth/hci_mon.h4
-rw-r--r--include/net/bluetooth/mgmt.h24
-rw-r--r--include/net/bonding.h12
-rw-r--r--include/net/cfg80211.h343
-rw-r--r--include/net/devlink.h1
-rw-r--r--include/net/dsa.h53
-rw-r--r--include/net/dst_metadata.h52
-rw-r--r--include/net/flow.h3
-rw-r--r--include/net/flow_dissector.h14
-rw-r--r--include/net/fq.h3
-rw-r--r--include/net/fq_impl.h7
-rw-r--r--include/net/gre.h10
-rw-r--r--include/net/ieee80211_radiotap.h21
-rw-r--r--include/net/if_inet6.h1
-rw-r--r--include/net/inet_connection_sock.h4
-rw-r--r--include/net/ip.h23
-rw-r--r--include/net/ip6_route.h3
-rw-r--r--include/net/ip6_tunnel.h1
-rw-r--r--include/net/ip_fib.h52
-rw-r--r--include/net/ip_tunnels.h21
-rw-r--r--include/net/kcm.h37
-rw-r--r--include/net/l3mdev.h131
-rw-r--r--include/net/lwtunnel.h44
-rw-r--r--include/net/mac80211.h108
-rw-r--r--include/net/mpls.h15
-rw-r--r--include/net/ncsi.h5
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netfilter/br_netfilter.h6
-rw-r--r--include/net/netfilter/nf_conntrack.h56
-rw-r--r--include/net/netfilter/nf_conntrack_core.h3
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h17
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h4
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h8
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h14
-rw-r--r--include/net/netfilter/nf_log.h14
-rw-r--r--include/net/netfilter/nf_queue.h69
-rw-r--r--include/net/netfilter/nf_tables.h22
-rw-r--r--include/net/netfilter/nf_tables_bridge.h7
-rw-r--r--include/net/netfilter/nf_tables_core.h3
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h42
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h53
-rw-r--r--include/net/netfilter/nft_meta.h4
-rw-r--r--include/net/netfilter/nft_reject.h4
-rw-r--r--include/net/netns/conntrack.h8
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/netfilter.h2
-rw-r--r--include/net/netns/xfrm.h12
-rw-r--r--include/net/pkt_cls.h24
-rw-r--r--include/net/pkt_sched.h4
-rw-r--r--include/net/pptp.h23
-rw-r--r--include/net/route.h10
-rw-r--r--include/net/sch_generic.h76
-rw-r--r--include/net/sctp/sctp.h10
-rw-r--r--include/net/sctp/sm.h94
-rw-r--r--include/net/sctp/structs.h18
-rw-r--r--include/net/sock.h23
-rw-r--r--include/net/strparser.h142
-rw-r--r--include/net/switchdev.h52
-rw-r--r--include/net/tc_act/tc_ife.h2
-rw-r--r--include/net/tc_act/tc_skbmod.h30
-rw-r--r--include/net/tc_act/tc_tunnel_key.h30
-rw-r--r--include/net/tc_act/tc_vlan.h26
-rw-r--r--include/net/tcp.h63
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/vxlan.h18
-rw-r--r--include/net/xfrm.h6
-rw-r--r--include/rdma/ib_hdrs.h178
-rw-r--r--include/rdma/ib_verbs.h104
-rw-r--r--include/rdma/rdmavt_qp.h19
-rw-r--r--include/rxrpc/packet.h17
-rw-r--r--include/scsi/scsi_host.h5
-rw-r--r--include/scsi/scsi_transport_sas.h5
-rw-r--r--include/soc/at91/atmel-sfr.h14
-rw-r--r--include/soc/fsl/bman.h129
-rw-r--r--include/soc/fsl/qman.h1074
-rw-r--r--include/soc/rockchip/rockchip_sip.h27
-rw-r--r--include/sound/da7219.h2
-rw-r--r--include/sound/hda_register.h36
-rw-r--r--include/sound/hdaudio.h13
-rw-r--r--include/sound/hdaudio_ext.h12
-rw-r--r--include/sound/l3.h15
-rw-r--r--include/sound/rt5660.h31
-rw-r--r--include/sound/s3c24xx_uda134x.h1
-rw-r--r--include/sound/simple_card_utils.h35
-rw-r--r--include/sound/soc.h19
-rw-r--r--include/sound/tlv.h78
-rw-r--r--include/trace/events/cgroup.h163
-rw-r--r--include/trace/events/compaction.h2
-rw-r--r--include/trace/events/cpuhp.h28
-rw-r--r--include/trace/events/f2fs.h18
-rw-r--r--include/trace/events/intel_ish.h30
-rw-r--r--include/trace/events/irq.h2
-rw-r--r--include/trace/events/mce.h9
-rw-r--r--include/trace/events/rxrpc.h625
-rw-r--r--include/uapi/asm-generic/mman-common.h5
-rw-r--r--include/uapi/asm-generic/unistd.h12
-rw-r--r--include/uapi/drm/amdgpu_drm.h7
-rw-r--r--include/uapi/drm/drm.h1
-rw-r--r--include/uapi/drm/drm_mode.h39
-rw-r--r--include/uapi/drm/i915_drm.h59
-rw-r--r--include/uapi/drm/msm_drm.h22
-rw-r--r--include/uapi/linux/Kbuild4
-rw-r--r--include/uapi/linux/audit.h4
-rw-r--r--include/uapi/linux/auto_dev-ioctl.h221
-rw-r--r--include/uapi/linux/auto_fs.h1
-rw-r--r--include/uapi/linux/batman_adv.h94
-rw-r--r--include/uapi/linux/bpf.h51
-rw-r--r--include/uapi/linux/bpf_perf_event.h18
-rw-r--r--include/uapi/linux/btrfs.h12
-rw-r--r--include/uapi/linux/dvb/video.h3
-rw-r--r--include/uapi/linux/ethtool.h11
-rw-r--r--include/uapi/linux/falloc.h18
-rw-r--r--include/uapi/linux/fs.h5
-rw-r--r--include/uapi/linux/fuse.h10
-rw-r--r--include/uapi/linux/hsi/hsi_char.h17
-rw-r--r--include/uapi/linux/if_bridge.h2
-rw-r--r--include/uapi/linux/if_link.h30
-rw-r--r--include/uapi/linux/if_tunnel.h17
-rw-r--r--include/uapi/linux/inet_diag.h20
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/media-bus-format.h10
-rw-r--r--include/uapi/linux/media.h1
-rw-r--r--include/uapi/linux/mii.h1
-rw-r--r--include/uapi/linux/ndctl.h30
-rw-r--r--include/uapi/linux/netfilter/nf_log.h12
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h106
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h8
-rw-r--r--include/uapi/linux/netfilter/xt_hashlimit.h23
-rw-r--r--include/uapi/linux/nfs4.h5
-rw-r--r--include/uapi/linux/nl80211.h270
-rw-r--r--include/uapi/linux/nsfs.h13
-rw-r--r--include/uapi/linux/openvswitch.h17
-rw-r--r--include/uapi/linux/pci_regs.h15
-rw-r--r--include/uapi/linux/pkt_cls.h19
-rw-r--r--include/uapi/linux/pkt_sched.h4
-rw-r--r--include/uapi/linux/posix_acl.h39
-rw-r--r--include/uapi/linux/posix_acl_xattr.h38
-rw-r--r--include/uapi/linux/serial_reg.h8
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/sync_file.h13
-rw-r--r--include/uapi/linux/tc_act/tc_ife.h3
-rw-r--r--include/uapi/linux/tc_act/tc_skbmod.h39
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h41
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h2
-rw-r--r--include/uapi/linux/tcp.h3
-rw-r--r--include/uapi/linux/tipc_netlink.h4
-rw-r--r--include/uapi/linux/usb/functionfs.h2
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h12
-rw-r--r--include/uapi/linux/videodev2.h38
-rw-r--r--include/uapi/linux/xfrm.h2
-rw-r--r--include/uapi/rdma/Kbuild7
-rw-r--r--include/uapi/rdma/cxgb3-abi.h76
-rw-r--r--include/uapi/rdma/cxgb4-abi.h81
-rw-r--r--include/uapi/rdma/ib_user_verbs.h27
-rw-r--r--include/uapi/rdma/mlx4-abi.h107
-rw-r--r--include/uapi/rdma/mlx5-abi.h249
-rw-r--r--include/uapi/rdma/mthca-abi.h111
-rw-r--r--include/uapi/rdma/nes-abi.h114
-rw-r--r--include/uapi/rdma/ocrdma-abi.h151
-rw-r--r--include/uapi/rdma/qedr-abi.h106
-rw-r--r--include/uapi/scsi/cxlflash_ioctl.h19
-rw-r--r--include/uapi/sound/Kbuild1
-rw-r--r--include/uapi/sound/asoc.h35
-rw-r--r--include/uapi/sound/asound.h3
-rw-r--r--include/uapi/sound/snd_sst_tokens.h214
-rw-r--r--include/uapi/sound/tlv.h69
-rw-r--r--include/video/exynos_mipi_dsim.h358
-rw-r--r--include/video/imx-ipu-image-convert.h207
-rw-r--r--include/video/imx-ipu-v3.h76
-rw-r--r--include/xen/interface/sched.h100
-rw-r--r--include/xen/xen.h3
605 files changed, 22716 insertions, 7115 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index fe2e3ac7b5e3..12c2882bf647 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -144,6 +144,10 @@
#define ACPI_ADDRESS_RANGE_MAX 2
+/* Maximum number of While() loops before abort */
+
+#define ACPI_MAX_LOOP_COUNT 0xFFFF
+
/******************************************************************************
*
* ACPI Specification constants (Do not change unless the specification changes)
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 34f601e7b88d..48eb4dd99bb1 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -366,7 +366,7 @@
ACPI_TRACE_ENTRY (name, acpi_ut_trace_u32, u32, value)
#define ACPI_FUNCTION_TRACE_STR(name, string) \
- ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, char *, string)
+ ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, const char *, string)
#define ACPI_FUNCTION_ENTRY() \
acpi_ut_track_stack_ptr()
@@ -425,6 +425,9 @@
#define return_PTR(pointer) \
ACPI_TRACE_EXIT (acpi_ut_ptr_exit, void *, pointer)
+#define return_STR(string) \
+ ACPI_TRACE_EXIT (acpi_ut_str_exit, const char *, string)
+
#define return_VALUE(value) \
ACPI_TRACE_EXIT (acpi_ut_value_exit, u64, value)
@@ -478,6 +481,7 @@
#define return_VOID return
#define return_ACPI_STATUS(s) return(s)
#define return_PTR(s) return(s)
+#define return_STR(s) return(s)
#define return_VALUE(s) return(s)
#define return_UINT8(s) return(s)
#define return_UINT32(s) return(s)
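The new return_STR() macro gives string-returning functions the same traced exit path that return_PTR() and return_VALUE() already provide, and it pairs with the now const-correct ACPI_FUNCTION_TRACE_STR() entry macro. A minimal sketch of that pairing, with a purely illustrative function name and return value:

static const char *acpi_example_lookup_name(const char *pathname)
{
	ACPI_FUNCTION_TRACE_STR(example_lookup_name, pathname);

	/* ... resolve the pathname ... */

	return_STR("\\_SB");	/* traced via acpi_ut_str_exit on debug builds, plain return otherwise */
}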
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 562603d7aabe..f3414c83abb1 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -371,6 +371,12 @@ acpi_status acpi_os_wait_command_ready(void);
acpi_status acpi_os_notify_command_complete(void);
#endif
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point
+void
+acpi_os_trace_point(acpi_trace_event_type type,
+ u8 begin, u8 *aml, char *pathname);
+#endif
+
/*
* Obtain ACPI table(s)
*/
@@ -416,41 +422,4 @@ char *acpi_os_get_next_filename(void *dir_handle);
void acpi_os_close_directory(void *dir_handle);
#endif
-/*
- * File I/O and related support
- */
-#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_file
-ACPI_FILE acpi_os_open_file(const char *path, u8 modes);
-#endif
-
-#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_file
-void acpi_os_close_file(ACPI_FILE file);
-#endif
-
-#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_file
-int
-acpi_os_read_file(ACPI_FILE file,
- void *buffer, acpi_size size, acpi_size count);
-#endif
-
-#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_file
-int
-acpi_os_write_file(ACPI_FILE file,
- void *buffer, acpi_size size, acpi_size count);
-#endif
-
-#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_file_offset
-long acpi_os_get_file_offset(ACPI_FILE file);
-#endif
-
-#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_set_file_offset
-acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from);
-#endif
-
-#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point
-void
-acpi_os_trace_point(acpi_trace_event_type type,
- u8 begin, u8 *aml, char *pathname);
-#endif
-
#endif /* __ACPIOSXF_H__ */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 1ff3a76c265d..c7b3a132dbe7 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20160422
+#define ACPI_CA_VERSION 0x20160831
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -195,6 +195,13 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
/*
+ * Optionally support module level code by parsing the entire table as
+ * a term_list. Default is FALSE, do not execute entire table until some
+ * lock order issues are fixed.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_parse_table_as_term_list, FALSE);
+
+/*
* Optionally use 32-bit FADT addresses if and when there is a conflict
* (address mismatch) between the 32-bit and 64-bit versions of the
* address. Although ACPICA adheres to the ACPI specification which
@@ -416,18 +423,19 @@ ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running);
/*
* Initialization
*/
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
acpi_initialize_tables(struct acpi_table_desc
*initial_storage,
u32 initial_table_count,
u8 allow_resize))
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_initialize_subsystem(void))
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_enable_subsystem(u32 flags))
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
- acpi_initialize_objects(u32 flags))
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_terminate(void))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
+ acpi_initialize_subsystem(void))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
+ acpi_enable_subsystem(u32 flags))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
+ acpi_initialize_objects(u32 flags))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
+ acpi_terminate(void))
/*
* Miscellaneous global interfaces
@@ -467,7 +475,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
/*
* ACPI table load/unload interfaces
*/
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
acpi_install_table(acpi_physical_address address,
u8 physical))
@@ -476,14 +484,17 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_unload_parent_table(acpi_handle object))
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void))
+
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
+ acpi_load_tables(void))
/*
* ACPI table manipulation interfaces
*/
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
+ acpi_reallocate_root_table(void))
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
acpi_find_root_pointer(acpi_physical_address
*rsdp_address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
@@ -732,6 +743,10 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
u32 gpe_number))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_mask_gpe(acpi_handle gpe_device,
+ u32 gpe_number, u8 is_masked))
+
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_mark_gpe_for_wake(acpi_handle gpe_device,
u32 gpe_number))
@@ -935,9 +950,6 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(void
acpi_trace_point(acpi_trace_event_type type,
u8 begin,
u8 *aml, char *pathname))
-ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
- void ACPI_INTERNAL_VAR_XFACE
- acpi_log_error(const char *format, ...))
acpi_status acpi_initialize_debugger(void);
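Because acpi_gbl_parse_table_as_term_list is consulted when tables are loaded, a host that wants whole-table term_list parsing has to set the global before the load sequence. A hedged sketch of that ordering (initial_tables and its size are host-supplied and purely illustrative; the intermediate init calls are omitted for brevity):

acpi_gbl_parse_table_as_term_list = TRUE;	/* opt in before any table is loaded */

status = acpi_initialize_tables(initial_tables, 128, TRUE);
/* ... acpi_initialize_subsystem() and related init calls omitted ... */
if (ACPI_SUCCESS(status))
	status = acpi_load_tables();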
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index c19700e2a2fe..1b949e08015c 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -230,62 +230,72 @@ struct acpi_table_facs {
/* Fields common to all versions of the FADT */
struct acpi_table_fadt {
- struct acpi_table_header header; /* Common ACPI table header */
- u32 facs; /* 32-bit physical address of FACS */
- u32 dsdt; /* 32-bit physical address of DSDT */
- u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */
- u8 preferred_profile; /* Conveys preferred power management profile to OSPM. */
- u16 sci_interrupt; /* System vector of SCI interrupt */
- u32 smi_command; /* 32-bit Port address of SMI command port */
- u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */
- u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */
- u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */
- u8 pstate_control; /* Processor performance state control */
- u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */
- u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */
- u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */
- u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */
- u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */
- u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */
- u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */
- u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */
- u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */
- u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */
- u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */
- u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */
- u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */
- u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */
- u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */
- u8 cst_control; /* Support for the _CST object and C-States change notification */
- u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */
- u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */
- u16 flush_size; /* Processor memory cache line width, in bytes */
- u16 flush_stride; /* Number of flush strides that need to be read */
- u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */
- u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */
- u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */
- u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */
- u8 century; /* Index to century in RTC CMOS RAM */
- u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */
- u8 reserved; /* Reserved, must be zero */
- u32 flags; /* Miscellaneous flag bits (see below for individual flags) */
- struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */
- u8 reset_value; /* Value to write to the reset_register port to reset the system */
- u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */
- u8 minor_revision; /* FADT Minor Revision (ACPI 5.1) */
- u64 Xfacs; /* 64-bit physical address of FACS */
- u64 Xdsdt; /* 64-bit physical address of DSDT */
- struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */
- struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */
- struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */
- struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */
- struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */
- struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
- struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */
- struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
- struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */
- struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */
- u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */
+ struct acpi_table_header header; /* [V1] Common ACPI table header */
+ u32 facs; /* [V1] 32-bit physical address of FACS */
+ u32 dsdt; /* [V1] 32-bit physical address of DSDT */
+ u8 model; /* [V1] System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */
+ u8 preferred_profile; /* [V1] Conveys preferred power management profile to OSPM. */
+ u16 sci_interrupt; /* [V1] System vector of SCI interrupt */
+ u32 smi_command; /* [V1] 32-bit Port address of SMI command port */
+ u8 acpi_enable; /* [V1] Value to write to SMI_CMD to enable ACPI */
+ u8 acpi_disable; /* [V1] Value to write to SMI_CMD to disable ACPI */
+ u8 s4_bios_request; /* [V1] Value to write to SMI_CMD to enter S4BIOS state */
+ u8 pstate_control; /* [V1] Processor performance state control */
+ u32 pm1a_event_block; /* [V1] 32-bit port address of Power Mgt 1a Event Reg Blk */
+ u32 pm1b_event_block; /* [V1] 32-bit port address of Power Mgt 1b Event Reg Blk */
+ u32 pm1a_control_block; /* [V1] 32-bit port address of Power Mgt 1a Control Reg Blk */
+ u32 pm1b_control_block; /* [V1] 32-bit port address of Power Mgt 1b Control Reg Blk */
+ u32 pm2_control_block; /* [V1] 32-bit port address of Power Mgt 2 Control Reg Blk */
+ u32 pm_timer_block; /* [V1] 32-bit port address of Power Mgt Timer Ctrl Reg Blk */
+ u32 gpe0_block; /* [V1] 32-bit port address of General Purpose Event 0 Reg Blk */
+ u32 gpe1_block; /* [V1] 32-bit port address of General Purpose Event 1 Reg Blk */
+ u8 pm1_event_length; /* [V1] Byte Length of ports at pm1x_event_block */
+ u8 pm1_control_length; /* [V1] Byte Length of ports at pm1x_control_block */
+ u8 pm2_control_length; /* [V1] Byte Length of ports at pm2_control_block */
+ u8 pm_timer_length; /* [V1] Byte Length of ports at pm_timer_block */
+ u8 gpe0_block_length; /* [V1] Byte Length of ports at gpe0_block */
+ u8 gpe1_block_length; /* [V1] Byte Length of ports at gpe1_block */
+ u8 gpe1_base; /* [V1] Offset in GPE number space where GPE1 events start */
+ u8 cst_control; /* [V1] Support for the _CST object and C-States change notification */
+ u16 c2_latency; /* [V1] Worst case HW latency to enter/exit C2 state */
+ u16 c3_latency; /* [V1] Worst case HW latency to enter/exit C3 state */
+ u16 flush_size; /* [V1] Processor memory cache line width, in bytes */
+ u16 flush_stride; /* [V1] Number of flush strides that need to be read */
+ u8 duty_offset; /* [V1] Processor duty cycle index in processor P_CNT reg */
+ u8 duty_width; /* [V1] Processor duty cycle value bit width in P_CNT register */
+ u8 day_alarm; /* [V1] Index to day-of-month alarm in RTC CMOS RAM */
+ u8 month_alarm; /* [V1] Index to month-of-year alarm in RTC CMOS RAM */
+ u8 century; /* [V1] Index to century in RTC CMOS RAM */
+ u16 boot_flags; /* [V3] IA-PC Boot Architecture Flags (see below for individual flags) */
+ u8 reserved; /* [V1] Reserved, must be zero */
+ u32 flags; /* [V1] Miscellaneous flag bits (see below for individual flags) */
+ /* End of Version 1 FADT fields (ACPI 1.0) */
+
+ struct acpi_generic_address reset_register; /* [V3] 64-bit address of the Reset register */
+ u8 reset_value; /* [V3] Value to write to the reset_register port to reset the system */
+ u16 arm_boot_flags; /* [V5] ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */
+ u8 minor_revision; /* [V5] FADT Minor Revision (ACPI 5.1) */
+ u64 Xfacs; /* [V3] 64-bit physical address of FACS */
+ u64 Xdsdt; /* [V3] 64-bit physical address of DSDT */
+ struct acpi_generic_address xpm1a_event_block; /* [V3] 64-bit Extended Power Mgt 1a Event Reg Blk address */
+ struct acpi_generic_address xpm1b_event_block; /* [V3] 64-bit Extended Power Mgt 1b Event Reg Blk address */
+ struct acpi_generic_address xpm1a_control_block; /* [V3] 64-bit Extended Power Mgt 1a Control Reg Blk address */
+ struct acpi_generic_address xpm1b_control_block; /* [V3] 64-bit Extended Power Mgt 1b Control Reg Blk address */
+ struct acpi_generic_address xpm2_control_block; /* [V3] 64-bit Extended Power Mgt 2 Control Reg Blk address */
+ struct acpi_generic_address xpm_timer_block; /* [V3] 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
+ struct acpi_generic_address xgpe0_block; /* [V3] 64-bit Extended General Purpose Event 0 Reg Blk address */
+ struct acpi_generic_address xgpe1_block; /* [V3] 64-bit Extended General Purpose Event 1 Reg Blk address */
+ /* End of Version 3 FADT fields (ACPI 2.0) */
+
+ struct acpi_generic_address sleep_control; /* [V4] 64-bit Sleep Control register (ACPI 5.0) */
+ /* End of Version 4 FADT fields (ACPI 3.0 and ACPI 4.0) (Field was originally reserved in ACPI 3.0) */
+
+ struct acpi_generic_address sleep_status; /* [V5] 64-bit Sleep Status register (ACPI 5.0) */
+ /* End of Version 5 FADT fields (ACPI 5.0) */
+
+ u64 hypervisor_id; /* [V6] Hypervisor Vendor ID (ACPI 6.0) */
+ /* End of Version 6 FADT fields (ACPI 6.0) */
+
};
/* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */
@@ -301,8 +311,8 @@ struct acpi_table_fadt {
/* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */
-#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */
-#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */
+#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5] PSCI 0.2+ is implemented */
+#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5] HVC must be used instead of SMC as the PSCI conduit */
/* Masks for FADT flags */
@@ -399,20 +409,34 @@ struct acpi_table_desc {
* match the expected length. In other words, the length of the
* FADT is the bottom line as to what the version really is.
*
- * For reference, the values below are as follows:
- * FADT V1 size: 0x074
- * FADT V2 size: 0x084
- * FADT V3 size: 0x0F4
- * FADT V4 size: 0x0F4
- * FADT V5 size: 0x10C
- * FADT V6 size: 0x114
+ * NOTE: There is no officially released V2 of the FADT. This
+ * version was used only for prototyping and testing during the
+ * 32-bit to 64-bit transition. V3 was the first official 64-bit
+ * version of the FADT.
+ *
+ * Update this list of defines when a new version of the FADT is
+ * added to the ACPI specification. Note that the FADT version is
+ * only incremented when new fields are appended to the existing
+ * version. Therefore, the FADT version is completely independent
+ * from the version of the ACPI specification where it is
+ * defined.
+ *
+ * For reference, the various FADT lengths are as follows:
+ * FADT V1 size: 0x074 ACPI 1.0
+ * FADT V3 size: 0x0F4 ACPI 2.0
+ * FADT V4 size: 0x100 ACPI 3.0 and ACPI 4.0
+ * FADT V5 size: 0x10C ACPI 5.0
+ * FADT V6 size: 0x114 ACPI 6.0
*/
-#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4)
-#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1)
-#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control))
-#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id))
-#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt))
+#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) /* ACPI 1.0 */
+#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) /* ACPI 2.0 */
+#define ACPI_FADT_V4_SIZE (u32) (ACPI_FADT_OFFSET (sleep_status)) /* ACPI 3.0 and ACPI 4.0 */
+#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) /* ACPI 5.0 */
+#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) /* ACPI 6.0 */
+
+/* Update these when new FADT versions are added */
+#define ACPI_FADT_MAX_VERSION 6
#define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)"
#endif /* __ACTBL_H__ */
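As the comment above stresses, the reported table length, not the Revision field, is what really identifies the FADT version, so the size macros can be turned into a version lookup. A minimal sketch (the helper name is illustrative, not part of the patch):

static u8 acpi_example_fadt_version(u32 length)
{
	if (length >= ACPI_FADT_V6_SIZE)
		return (6);
	if (length >= ACPI_FADT_V5_SIZE)
		return (5);
	if (length >= ACPI_FADT_V4_SIZE)
		return (4);
	if (length >= ACPI_FADT_V3_SIZE)
		return (3);
	return (1);		/* no official V2 was ever released */
}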
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index cb389efd321c..1d798abae710 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -732,16 +732,17 @@ typedef u32 acpi_event_type;
* The encoding of acpi_event_status is illustrated below.
* Note that a set bit (1) indicates the property is TRUE
* (e.g. if bit 0 is set then the event is enabled).
- * +-------------+-+-+-+-+-+
- * | Bits 31:5 |4|3|2|1|0|
- * +-------------+-+-+-+-+-+
- * | | | | | |
- * | | | | | +- Enabled?
- * | | | | +--- Enabled for wake?
- * | | | +----- Status bit set?
- * | | +------- Enable bit set?
- * | +--------- Has a handler?
- * +--------------- <Reserved>
+ * +-------------+-+-+-+-+-+-+
+ * | Bits 31:6 |5|4|3|2|1|0|
+ * +-------------+-+-+-+-+-+-+
+ * | | | | | | |
+ * | | | | | | +- Enabled?
+ * | | | | | +--- Enabled for wake?
+ * | | | | +----- Status bit set?
+ * | | | +------- Enable bit set?
+ * | | +--------- Has a handler?
+ * | +----------- Masked?
+ * +----------------- <Reserved>
*/
typedef u32 acpi_event_status;
@@ -751,6 +752,7 @@ typedef u32 acpi_event_status;
#define ACPI_EVENT_FLAG_STATUS_SET (acpi_event_status) 0x04
#define ACPI_EVENT_FLAG_ENABLE_SET (acpi_event_status) 0x08
#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x10
+#define ACPI_EVENT_FLAG_MASKED (acpi_event_status) 0x20
#define ACPI_EVENT_FLAG_SET ACPI_EVENT_FLAG_STATUS_SET
/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */
@@ -761,14 +763,15 @@ typedef u32 acpi_event_status;
/*
* GPE info flags - Per GPE
- * +-------+-+-+---+
- * | 7:5 |4|3|2:0|
- * +-------+-+-+---+
- * | | | |
- * | | | +-- Type of dispatch:to method, handler, notify, or none
- * | | +----- Interrupt type: edge or level triggered
- * | +------- Is a Wake GPE
- * +------------ <Reserved>
+ * +---+-+-+-+---+
+ * |7:6|5|4|3|2:0|
+ * +---+-+-+-+---+
+ * | | | | |
+ * | | | | +-- Type of dispatch:to method, handler, notify, or none
+ * | | | +----- Interrupt type: edge or level triggered
+ * | | +------- Is a Wake GPE
+ * | +--------- Is GPE masked by the software GPE masking mechanism
+ * +------------ <Reserved>
*/
#define ACPI_GPE_DISPATCH_NONE (u8) 0x00
#define ACPI_GPE_DISPATCH_METHOD (u8) 0x01
@@ -1031,12 +1034,6 @@ struct acpi_statistics {
u32 method_count;
};
-/* Table Event Types */
-
-#define ACPI_TABLE_EVENT_LOAD 0x0
-#define ACPI_TABLE_EVENT_UNLOAD 0x1
-#define ACPI_NUM_TABLE_EVENTS 2
-
/*
* Types specific to the OS service interfaces
*/
@@ -1088,9 +1085,13 @@ acpi_status (*acpi_exception_handler) (acpi_status aml_status,
typedef
acpi_status (*acpi_table_handler) (u32 event, void *table, void *context);
-#define ACPI_TABLE_LOAD 0x0
-#define ACPI_TABLE_UNLOAD 0x1
-#define ACPI_NUM_TABLE_EVENTS 2
+/* Table Event Types */
+
+#define ACPI_TABLE_EVENT_LOAD 0x0
+#define ACPI_TABLE_EVENT_UNLOAD 0x1
+#define ACPI_TABLE_EVENT_INSTALL 0x2
+#define ACPI_TABLE_EVENT_UNINSTALL 0x3
+#define ACPI_NUM_TABLE_EVENTS 4
/* Address Spaces (For Operation Regions) */
@@ -1285,15 +1286,6 @@ typedef enum {
#define ACPI_OSI_WIN_8 0x0C
#define ACPI_OSI_WIN_10 0x0D
-/* Definitions of file IO */
-
-#define ACPI_FILE_READING 0x01
-#define ACPI_FILE_WRITING 0x02
-#define ACPI_FILE_BINARY 0x04
-
-#define ACPI_FILE_BEGIN 0x01
-#define ACPI_FILE_END 0x02
-
/* Definitions of getopt */
#define ACPI_OPT_END -1
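The new ACPI_EVENT_FLAG_MASKED bit reports the software GPE masking controlled by the acpi_mask_gpe() interface added in acpixf.h above. A hedged sketch of masking a GPE and then checking the flag in the status word returned by acpi_get_gpe_status() (gpe_number is an assumed caller-supplied value):

acpi_event_status event_status;

(void)acpi_mask_gpe(NULL, gpe_number, TRUE);	/* NULL device = FADT-defined GPE block */

if (ACPI_SUCCESS(acpi_get_gpe_status(NULL, gpe_number, &event_status)) &&
    (event_status & ACPI_EVENT_FLAG_MASKED)) {
	/* GPE is now masked by the software masking mechanism */
}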
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 284965cbc9af..427a7c3e6c75 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -24,7 +24,9 @@
#define CPPC_NUM_ENT 21
#define CPPC_REV 2
-#define PCC_CMD_COMPLETE 1
+#define PCC_CMD_COMPLETE_MASK (1 << 0)
+#define PCC_ERROR_MASK (1 << 2)
+
#define MAX_CPC_REG_ENT 19
/* CPPC specific PCC commands. */
@@ -49,6 +51,7 @@ struct cpc_reg {
*/
struct cpc_register_resource {
acpi_object_type type;
+ u64 __iomem *sys_mem_vaddr;
union {
struct cpc_reg reg;
u64 int_value;
@@ -60,8 +63,11 @@ struct cpc_desc {
int num_entries;
int version;
int cpu_id;
+ int write_cmd_status;
+ int write_cmd_id;
struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT];
struct acpi_psd_package domain_info;
+ struct kobject kobj;
};
/* These are indexes into the per-cpu cpc_regs[]. Order is important. */
@@ -96,7 +102,6 @@ enum cppc_regs {
struct cppc_perf_caps {
u32 highest_perf;
u32 nominal_perf;
- u32 reference_perf;
u32 lowest_perf;
};
@@ -108,13 +113,13 @@ struct cppc_perf_ctrls {
struct cppc_perf_fb_ctrs {
u64 reference;
- u64 prev_reference;
u64 delivered;
- u64 prev_delivered;
+ u64 reference_perf;
+ u64 ctr_wrap_time;
};
/* Per CPU container for runtime CPPC management. */
-struct cpudata {
+struct cppc_cpudata {
int cpu;
struct cppc_perf_caps perf_caps;
struct cppc_perf_ctrls perf_ctrls;
@@ -127,6 +132,7 @@ struct cpudata {
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
-extern int acpi_get_psd_map(struct cpudata **);
+extern int acpi_get_psd_map(struct cppc_cpudata **);
+extern unsigned int cppc_get_transition_latency(int cpu);
#endif /* _CPPC_ACPI_H*/
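With prev_reference/prev_delivered dropped from struct cppc_perf_fb_ctrs, the caller now keeps its own earlier snapshot and derives delivered performance from the counter deltas. A hedged sketch of that computation (variable names are illustrative; real code must also guard against a zero reference delta and counter wrap, which ctr_wrap_time exists to bound):

struct cppc_perf_fb_ctrs fb0, fb1;
u64 delivered_perf;

cppc_get_perf_ctrs(cpu, &fb0);
/* ... let the CPU run for a sampling interval ... */
cppc_get_perf_ctrs(cpu, &fb1);

delivered_perf = fb1.reference_perf * (fb1.delivered - fb0.delivered) /
		 (fb1.reference - fb0.reference);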
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 86b5a8447606..34cce729109c 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -78,6 +78,7 @@
(defined ACPI_EXAMPLE_APP)
#define ACPI_APPLICATION
#define ACPI_SINGLE_THREADED
+#define USE_NATIVE_ALLOCATE_ZEROED
#endif
/* iASL configuration */
@@ -124,7 +125,6 @@
#ifdef ACPI_DUMP_APP
#define ACPI_USE_NATIVE_MEMORY_MAPPING
-#define USE_NATIVE_ALLOCATE_ZEROED
#endif
/* acpi_names/Example configuration. Hardware disabled */
@@ -149,7 +149,6 @@
/* Common for all ACPICA applications */
#ifdef ACPI_APPLICATION
-#define ACPI_USE_SYSTEM_CLIBRARY
#define ACPI_USE_LOCAL_CACHE
#endif
@@ -167,10 +166,21 @@
/******************************************************************************
*
* Host configuration files. The compiler configuration files are included
- * by the host files.
+ * first.
*
*****************************************************************************/
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#include <acpi/platform/acgcc.h>
+
+#elif defined(_MSC_VER)
+#include "acmsvc.h"
+
+#elif defined(__INTEL_COMPILER)
+#include "acintel.h"
+
+#endif
+
#if defined(_LINUX) || defined(__linux__)
#include <acpi/platform/aclinux.h>
@@ -210,18 +220,20 @@
#elif defined(__OS2__)
#include "acos2.h"
-#elif defined(_AED_EFI)
-#include "acefi.h"
-
-#elif defined(_GNU_EFI)
-#include "acefi.h"
-
#elif defined(__HAIKU__)
#include "achaiku.h"
#elif defined(__QNX__)
#include "acqnx.h"
+/*
+ * EFI applications can be built with -nostdlib; in that case, acefi.h must
+ * be included after all other host environment definitions, in order to
+ * override them.
+ */
+#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI)
+#include "acefi.h"
+
#else
/* Unknown environment */
@@ -326,7 +338,8 @@
* ACPI_USE_SYSTEM_CLIBRARY - Define this if linking to an actual C library.
* Otherwise, local versions of string/memory functions will be used.
* ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and
- * the standard header files may be used.
+ * the standard header files may be used. Defining this implies that
+ * ACPI_USE_SYSTEM_CLIBRARY has been defined.
*
* The ACPICA subsystem only uses low level C library functions that do not
* call operating system services and may therefore be inlined in the code.
@@ -334,7 +347,6 @@
* It may be necessary to tailor these include files to the target
* generation environment.
*/
-#ifdef ACPI_USE_SYSTEM_CLIBRARY
/* Use the standard C library headers. We want to keep these to a minimum. */
@@ -342,57 +354,20 @@
/* Use the standard headers from the standard locations */
-#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
+#ifdef ACPI_APPLICATION
+#include <stdio.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <time.h>
+#include <signal.h>
+#endif
#endif /* ACPI_USE_STANDARD_HEADERS */
-/* We will be linking to the standard Clib functions */
-
-#else
-
-/******************************************************************************
- *
- * Not using native C library, use local implementations
- *
- *****************************************************************************/
-
-/*
- * Use local definitions of C library macros and functions. These function
- * implementations may not be as efficient as an inline or assembly code
- * implementation provided by a native C library, but they are functionally
- * equivalent.
- */
-#ifndef va_arg
-
-#ifndef _VALIST
-#define _VALIST
-typedef char *va_list;
-#endif /* _VALIST */
-
-/* Storage alignment properties */
-
-#define _AUPBND (sizeof (acpi_native_int) - 1)
-#define _ADNBND (sizeof (acpi_native_int) - 1)
-
-/* Variable argument list macro definitions */
-
-#define _bnd(X, bnd) (((sizeof (X)) + (bnd)) & (~(bnd)))
-#define va_arg(ap, T) (*(T *)(((ap) += (_bnd (T, _AUPBND))) - (_bnd (T,_ADNBND))))
-#define va_end(ap) (ap = (va_list) NULL)
-#define va_start(ap, A) (void) ((ap) = (((char *) &(A)) + (_bnd (A,_AUPBND))))
-
-#endif /* va_arg */
-
-/* Use the local (ACPICA) definitions of the clib functions */
-
-#endif /* ACPI_USE_SYSTEM_CLIBRARY */
-
-#ifndef ACPI_FILE
#ifdef ACPI_APPLICATION
-#include <stdio.h>
#define ACPI_FILE FILE *
#define ACPI_FILE_OUT stdout
#define ACPI_FILE_ERR stderr
@@ -401,6 +376,9 @@ typedef char *va_list;
#define ACPI_FILE_OUT NULL
#define ACPI_FILE_ERR NULL
#endif /* ACPI_APPLICATION */
-#endif /* ACPI_FILE */
+
+#ifndef ACPI_INIT_FUNCTION
+#define ACPI_INIT_FUNCTION
+#endif
#endif /* __ACENV_H__ */
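The clarified comment above states that defining ACPI_USE_STANDARD_HEADERS implies ACPI_USE_SYSTEM_CLIBRARY, so a host configuration file only needs to define the former. The guard it relies on is roughly of this shape (a sketch of the intent, not the exact upstream wording):

#ifdef ACPI_USE_STANDARD_HEADERS
#ifndef ACPI_USE_SYSTEM_CLIBRARY
#define ACPI_USE_SYSTEM_CLIBRARY
#endif
#endif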
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 4f15c1d10b61..b3171b9d6974 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -56,17 +56,24 @@
#if defined(_LINUX) || defined(__linux__)
#include <acpi/platform/aclinuxex.h>
-#elif defined(WIN32)
-#include "acwinex.h"
+#elif defined(__DragonFly__)
+#include "acdragonflyex.h"
-#elif defined(_AED_EFI)
+/*
+ * EFI applications can be built with -nostdlib; in that case, acefiex.h must
+ * be included after all other host environment definitions, in order to
+ * override them.
+ */
+#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI)
#include "acefiex.h"
-#elif defined(_GNU_EFI)
-#include "acefiex.h"
+#endif
-#elif defined(__DragonFly__)
-#include "acdragonflyex.h"
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#include "acgccex.h"
+
+#elif defined(_MSC_VER)
+#include "acmsvcex.h"
#endif
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index c5a216c976fa..8f66aaabadf7 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -44,6 +44,12 @@
#ifndef __ACGCC_H__
#define __ACGCC_H__
+/*
+ * Using the compiler-specific <stdarg.h> is good practice even when
+ * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS is undefined).
+ */
+#include <stdarg.h>
+
#define ACPI_INLINE __inline__
/* Function name is used for debug output. Non-ANSI, compiler-dependent */
@@ -64,17 +70,6 @@
*/
#define ACPI_UNUSED_VAR __attribute__ ((unused))
-/*
- * Some versions of gcc implement strchr() with a buggy macro. So,
- * undef it here. Prevents error messages of this form (usually from the
- * file getopt.c):
- *
- * error: logical '&&' with non-zero constant will always evaluate as true
- */
-#ifdef strchr
-#undef strchr
-#endif
-
/* GCC supports __VA_ARGS__ in macros */
#define COMPILER_VA_MACRO 1
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
new file mode 100644
index 000000000000..46ead2caada4
--- /dev/null
+++ b/include/acpi/platform/acgccex.h
@@ -0,0 +1,58 @@
+/******************************************************************************
+ *
+ * Name: acgccex.h - Extra GCC specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2016, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACGCCEX_H__
+#define __ACGCCEX_H__
+
+/*
+ * Some versions of gcc implement strchr() with a buggy macro. So,
+ * undef it here. Prevents error messages of this form (usually from the
+ * file getopt.c):
+ *
+ * error: logical '&&' with non-zero constant will always evaluate as true
+ */
+#ifdef strchr
+#undef strchr
+#endif
+
+#endif /* __ACGCCEX_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 93b61b1f2beb..a5d98d171866 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -92,6 +92,8 @@
#include <asm/acenv.h>
#endif
+#define ACPI_INIT_FUNCTION __init
+
#ifndef CONFIG_ACPI
/* External globals for __KERNEL__, stubs is needed */
@@ -168,13 +170,21 @@
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory
+#define ACPI_MSG_ERROR KERN_ERR "ACPI Error: "
+#define ACPI_MSG_EXCEPTION KERN_ERR "ACPI Exception: "
+#define ACPI_MSG_WARNING KERN_WARNING "ACPI Warning: "
+#define ACPI_MSG_INFO KERN_INFO "ACPI: "
+
+#define ACPI_MSG_BIOS_ERROR KERN_ERR "ACPI BIOS Error (bug): "
+#define ACPI_MSG_BIOS_WARNING KERN_WARNING "ACPI BIOS Warning (bug): "
+
#else /* !__KERNEL__ */
-#include <stdarg.h>
-#include <string.h>
-#include <stdlib.h>
-#include <ctype.h>
+#define ACPI_USE_STANDARD_HEADERS
+
+#ifdef ACPI_USE_STANDARD_HEADERS
#include <unistd.h>
+#endif
/* Define/disable kernel-specific declarators */
@@ -205,8 +215,4 @@
#endif /* __KERNEL__ */
-/* Linux uses GCC */
-
-#include <acpi/platform/acgcc.h>
-
#endif /* __ACLINUX_H__ */
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index f8bb0d857bff..a5509d87230a 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -71,7 +71,7 @@
/*
* Overrides for in-kernel ACPICA
*/
-acpi_status __init acpi_os_initialize(void);
+acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void);
acpi_status acpi_os_terminate(void);
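
As a rough illustration of what the new ACPI_INIT_FUNCTION hook buys: acenv.h defaults it to empty, aclinux.h defines it to __init for kernel builds, and annotated ACPICA entry points such as acpi_os_initialize() can then be discarded after boot. A minimal sketch, assuming a hypothetical early-setup caller:

acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void);	/* as declared above */

/* Hypothetical caller: because ACPI_INIT_FUNCTION expands to __init under
 * aclinux.h, the initialization path lives in .init.text and is freed once
 * boot completes; on non-Linux hosts the annotation expands to nothing. */
static int __init acpi_early_setup(void)
{
	acpi_status status = acpi_os_initialize();

	return ACPI_FAILURE(status) ? -ENODEV : 0;
}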
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index bfe6b2e10f3a..f3db11c24654 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -359,7 +359,7 @@ extern int acpi_processor_set_throttling(struct acpi_processor *pr,
* onlined/offlined. In such case the flags.throttling will be updated.
*/
extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
- unsigned long action);
+ bool is_dead);
extern const struct file_operations acpi_processor_throttling_fops;
extern void acpi_processor_throttling_init(void);
#else
@@ -380,7 +380,7 @@ static inline int acpi_processor_set_throttling(struct acpi_processor *pr,
}
static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
- unsigned long action) {}
+ bool is_dead) {}
static inline void acpi_processor_throttling_init(void) {}
#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
new file mode 100644
index 000000000000..43199a049da5
--- /dev/null
+++ b/include/asm-generic/export.h
@@ -0,0 +1,94 @@
+#ifndef __ASM_GENERIC_EXPORT_H
+#define __ASM_GENERIC_EXPORT_H
+
+#ifndef KSYM_FUNC
+#define KSYM_FUNC(x) x
+#endif
+#ifdef CONFIG_64BIT
+#define __put .quad
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 8
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 8
+#endif
+#else
+#define __put .long
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 4
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 4
+#endif
+#endif
+
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+#define KSYM(name) _##name
+#else
+#define KSYM(name) name
+#endif
+
+/*
+ * note on .section use: @progbits vs %progbits nastiness doesn't matter,
+ * since we immediately emit into those sections anyway.
+ */
+.macro ___EXPORT_SYMBOL name,val,sec
+#ifdef CONFIG_MODULES
+ .globl KSYM(__ksymtab_\name)
+ .section ___ksymtab\sec+\name,"a"
+ .balign KSYM_ALIGN
+KSYM(__ksymtab_\name):
+ __put \val, KSYM(__kstrtab_\name)
+ .previous
+ .section __ksymtab_strings,"a"
+KSYM(__kstrtab_\name):
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+ .asciz "_\name"
+#else
+ .asciz "\name"
+#endif
+ .previous
+#ifdef CONFIG_MODVERSIONS
+ .section ___kcrctab\sec+\name,"a"
+ .balign KCRC_ALIGN
+KSYM(__kcrctab_\name):
+ __put KSYM(__crc_\name)
+ .weak KSYM(__crc_\name)
+ .previous
+#endif
+#endif
+.endm
+#undef __put
+
+#if defined(__KSYM_DEPS__)
+
+#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym ===
+
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+#define __EXPORT_SYMBOL(sym, val, sec) \
+ __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
+#define __cond_export_sym(sym, val, sec, conf) \
+ ___cond_export_sym(sym, val, sec, conf)
+#define ___cond_export_sym(sym, val, sec, enabled) \
+ __cond_export_sym_##enabled(sym, val, sec)
+#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#define __cond_export_sym_0(sym, val, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#endif
+
+#define EXPORT_SYMBOL(name) \
+ __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
+#define EXPORT_SYMBOL_GPL(name) \
+ __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
+#define EXPORT_DATA_SYMBOL(name) \
+ __EXPORT_SYMBOL(name, KSYM(name),)
+#define EXPORT_DATA_SYMBOL_GPL(name) \
+ __EXPORT_SYMBOL(name, KSYM(name),_gpl)
+
+#endif
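
The CONFIG_TRIM_UNUSED_KSYMS branch above leans on the config_enabled() indirection from <linux/kconfig.h>: __KSYM_<sym> is either defined to 1 by <generated/autoksyms.h> or not defined at all, and the macro chain turns that into the _1/_0 suffix that decides whether ___EXPORT_SYMBOL is emitted. A standalone, simplified sketch of that trick (not kernel code; the macro bodies mirror what kconfig.h is believed to do):

#include <stdio.h>

/* When the token expands to 1, __ARG_PLACEHOLDER_1 injects an extra
 * argument and the selector macro picks 1; any other or undefined token
 * falls through to 0. */
#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg)            _config_enabled(cfg)
#define _config_enabled(value)         __config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val

#define __KSYM_memset 1		/* as if <generated/autoksyms.h> listed memset */

int main(void)
{
	printf("export memset? %d\n", config_enabled(__KSYM_memset));	/* 1 */
	printf("export memcpy? %d\n", config_enabled(__KSYM_memcpy));	/* 0 */
	return 0;
}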
diff --git a/include/asm-generic/libata-portmap.h b/include/asm-generic/libata-portmap.h
deleted file mode 100644
index cf14f2ff40b6..000000000000
--- a/include/asm-generic/libata-portmap.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
-#define __ASM_GENERIC_LIBATA_PORTMAP_H
-
-#define ATA_PRIMARY_IRQ(dev) 14
-#define ATA_SECONDARY_IRQ(dev) 15
-
-#endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 4d9f233c4ba8..40e887068da2 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -65,6 +65,11 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_DEF_ATTRIBUTES
#endif
+#define raw_cpu_generic_read(pcp) \
+({ \
+ *raw_cpu_ptr(&(pcp)); \
+})
+
#define raw_cpu_generic_to_op(pcp, val, op) \
do { \
*raw_cpu_ptr(&(pcp)) op val; \
@@ -72,34 +77,39 @@ do { \
#define raw_cpu_generic_add_return(pcp, val) \
({ \
- raw_cpu_add(pcp, val); \
- raw_cpu_read(pcp); \
+ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ \
+ *__p += val; \
+ *__p; \
})
#define raw_cpu_generic_xchg(pcp, nval) \
({ \
+ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
typeof(pcp) __ret; \
- __ret = raw_cpu_read(pcp); \
- raw_cpu_write(pcp, nval); \
+ __ret = *__p; \
+ *__p = nval; \
__ret; \
})
#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
+ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
typeof(pcp) __ret; \
- __ret = raw_cpu_read(pcp); \
+ __ret = *__p; \
if (__ret == (oval)) \
- raw_cpu_write(pcp, nval); \
+ *__p = nval; \
__ret; \
})
#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
+ typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1)); \
+ typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2)); \
int __ret = 0; \
- if (raw_cpu_read(pcp1) == (oval1) && \
- raw_cpu_read(pcp2) == (oval2)) { \
- raw_cpu_write(pcp1, nval1); \
- raw_cpu_write(pcp2, nval2); \
+ if (*__p1 == (oval1) && *__p2 == (oval2)) { \
+ *__p1 = nval1; \
+ *__p2 = nval2; \
__ret = 1; \
} \
(__ret); \
@@ -109,7 +119,7 @@ do { \
({ \
typeof(pcp) __ret; \
preempt_disable(); \
- __ret = *this_cpu_ptr(&(pcp)); \
+ __ret = raw_cpu_generic_read(pcp); \
preempt_enable(); \
__ret; \
})
@@ -118,17 +128,17 @@ do { \
do { \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- *raw_cpu_ptr(&(pcp)) op val; \
+ raw_cpu_generic_to_op(pcp, val, op); \
raw_local_irq_restore(__flags); \
} while (0)
+
#define this_cpu_generic_add_return(pcp, val) \
({ \
typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- raw_cpu_add(pcp, val); \
- __ret = raw_cpu_read(pcp); \
+ __ret = raw_cpu_generic_add_return(pcp, val); \
raw_local_irq_restore(__flags); \
__ret; \
})
@@ -138,8 +148,7 @@ do { \
typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- __ret = raw_cpu_read(pcp); \
- raw_cpu_write(pcp, nval); \
+ __ret = raw_cpu_generic_xchg(pcp, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})
@@ -149,9 +158,7 @@ do { \
typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- __ret = raw_cpu_read(pcp); \
- if (__ret == (oval)) \
- raw_cpu_write(pcp, nval); \
+ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})
@@ -168,16 +175,16 @@ do { \
})
#ifndef raw_cpu_read_1
-#define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_1(pcp) raw_cpu_generic_read(pcp)
#endif
#ifndef raw_cpu_read_2
-#define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_2(pcp) raw_cpu_generic_read(pcp)
#endif
#ifndef raw_cpu_read_4
-#define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_4(pcp) raw_cpu_generic_read(pcp)
#endif
#ifndef raw_cpu_read_8
-#define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_8(pcp) raw_cpu_generic_read(pcp)
#endif
#ifndef raw_cpu_write_1
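
For context, a hedged sketch of how these generic fallbacks get exercised: DEFINE_PER_CPU() plus the this_cpu_*() wrappers are the normal entry points, and on architectures without their own implementations they bottom out in the raw_cpu_generic_*() macros above. Names below are illustrative only:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, pkt_count);

static void account_packet(void)
{
	/* backed by raw_cpu_generic_add_return(): one raw_cpu_ptr() lookup,
	 * then the add and the readback go through that same pointer */
	unsigned long now = this_cpu_add_return(pkt_count, 1);

	/* backed by raw_cpu_generic_cmpxchg(): reset every 1024 packets */
	if (now >= 1024)
		this_cpu_cmpxchg(pkt_count, now, 0);
}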
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index d4458b6dbfb4..c4f8fd2fd384 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -800,6 +800,9 @@ static inline int pmd_clear_huge(pmd_t *pmd)
#endif
#endif
+struct file;
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t *vma_prot);
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 5dea1fb6979c..cc6bb319e464 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -69,10 +69,6 @@ struct exception_table_entry
unsigned long insn, fixup;
};
-/* Returns 0 if exception not found and fixup otherwise. */
-extern unsigned long search_exception_table(unsigned long);
-
-
/*
* architectures with an MMU should override these two
*/
@@ -231,14 +227,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
might_fault(); \
access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
__get_user((x), (__typeof__(*(ptr)) *)__p) : \
- -EFAULT; \
+ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \
})
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size = __copy_from_user(x, ptr, size);
- return size ? -EFAULT : size;
+ size_t n = __copy_from_user(x, ptr, size);
+ if (unlikely(n)) {
+ memset(x + (size - n), 0, n);
+ return -EFAULT;
+ }
+ return 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
@@ -258,11 +258,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
+ unsigned long res = n;
might_fault();
- if (access_ok(VERIFY_READ, from, n))
- return __copy_from_user(to, from, n);
- else
- return n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
static inline long copy_to_user(void __user *to,
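
The zeroing added to copy_from_user() (and the matching memset() in __get_user_fn() plus the new error branch in __get_user()) guarantees that a partially failed copy never leaves uninitialized kernel memory behind. A hedged sketch of the calling pattern that benefits; struct my_req and my_ioctl_copy() are made-up names:

#include <linux/types.h>
#include <linux/uaccess.h>

struct my_req {				/* hypothetical ioctl argument */
	u32 cmd;
	u32 flags;
	u8  payload[56];
};

static long my_ioctl_copy(void __user *uarg)
{
	struct my_req req;

	/* If this faults partway through, the uncopied tail of 'req' is now
	 * zeroed rather than left holding stale kernel stack contents, so a
	 * sloppy error path further down cannot leak kernel memory. */
	if (copy_from_user(&req, uarg, sizeof(req)))
		return -EFAULT;

	return req.cmd;
}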
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 24563970ff7b..30747960bc54 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -196,9 +196,14 @@
*(.dtb.init.rodata) \
VMLINUX_SYMBOL(__dtb_end) = .;
-/* .data section */
+/*
+ * .data section
+ * LD_DEAD_CODE_DATA_ELIMINATION enables -fdata-sections, which generates
+ * .data.identifier sections that must be pulled in with .data, but we don't
+ * want to pull in .data..stuff, which has its own requirements. Same for bss.
+ */
#define DATA_DATA \
- *(.data) \
+ *(.data .data.[0-9a-zA-Z_]*) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data) \
@@ -320,76 +325,76 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(SORT(___ksymtab+*)) \
+ KEEP(*(SORT(___ksymtab+*))) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(SORT(___ksymtab_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(SORT(___ksymtab_unused+*)) \
+ KEEP(*(SORT(___ksymtab_unused+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(SORT(___ksymtab_unused_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(SORT(___ksymtab_gpl_future+*)) \
+ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(SORT(___kcrctab+*)) \
+ KEEP(*(SORT(___kcrctab+*))) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(SORT(___kcrctab_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
- *(SORT(___kcrctab_unused+*)) \
+ KEEP(*(SORT(___kcrctab_unused+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(SORT(___kcrctab_unused_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(SORT(___kcrctab_gpl_future+*)) \
+ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
- *(__ksymtab_strings) \
+ KEEP(*(__ksymtab_strings)) \
} \
\
/* __*init sections */ \
@@ -424,12 +429,17 @@
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .; \
}
/* .text section. Map to function alignment to avoid address changes
- * during second ld run in second ld pass when generating System.map */
+ * during the second ld run when generating System.map.
+ * LD_DEAD_CODE_DATA_ELIMINATION enables -ffunction-sections, which generates
+ * .text.identifier sections that must be pulled in with .text, but some
+ * architectures define .text.foo sections that are not intended to be pulled
+ * in here. Architectures enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure
+ * they don't have conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
*(.text.hot .text .text.fixup .text.unlikely) \
@@ -454,6 +464,12 @@
*(.spinlock.text) \
VMLINUX_SYMBOL(__lock_text_end) = .;
+#define CPUIDLE_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__cpuidle_text_start) = .; \
+ *(.cpuidle.text) \
+ VMLINUX_SYMBOL(__cpuidle_text_end) = .;
+
#define KPROBES_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__kprobes_text_start) = .; \
@@ -527,6 +543,7 @@
/* init and exit section handling */
#define INIT_DATA \
+ KEEP(*(SORT(___kentry+*))) \
*(.init.data) \
MEM_DISCARD(init.data) \
KERNEL_CTORS() \
@@ -593,7 +610,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
- *(.bss) \
+ *(.bss .bss.[0-9a-zA-Z_]*) \
*(COMMON) \
}
@@ -676,12 +693,12 @@
#define INIT_CALLS_LEVEL(level) \
VMLINUX_SYMBOL(__initcall##level##_start) = .; \
- *(.initcall##level##.init) \
- *(.initcall##level##s.init) \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
- *(.initcallearly.init) \
+ KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
INIT_CALLS_LEVEL(2) \
@@ -695,21 +712,21 @@
#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
- *(.con_initcall.init) \
+ KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;
#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
- *(.init.ramfs) \
+ KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
- *(.init.ramfs.info)
+ KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
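
The KEEP() annotations matter once LD_DEAD_CODE_DATA_ELIMINATION passes --gc-sections to the linker: ksymtab/kcrctab entries, initcall pointers and the initramfs blob are only reachable through their section names, so without KEEP() the garbage collector would discard them. A hedged sketch of the kind of object that depends on this (the function is made up):

#include <linux/init.h>
#include <linux/printk.h>

/* Nothing references this function by name; it is only reachable through
 * the .initcall* pointer gathered by INIT_CALLS above, so with
 * --gc-sections it survives only because the script wraps those input
 * sections in KEEP(). */
static int __init keepalive_example_init(void)
{
	pr_info("kept alive by KEEP(*(.initcall*.init))\n");
	return 0;
}
device_initcall(keepalive_example_init);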
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 8637cdfe382a..404e9558e879 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -15,7 +15,6 @@
#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
#include <linux/skbuff.h>
struct crypto_aead;
@@ -129,75 +128,6 @@ struct ablkcipher_walk {
unsigned int blocksize;
};
-#define ENGINE_NAME_LEN 30
-/*
- * struct crypto_engine - crypto hardware engine
- * @name: the engine name
- * @idling: the engine is entering idle state
- * @busy: request pump is busy
- * @running: the engine is on working
- * @cur_req_prepared: current request is prepared
- * @list: link with the global crypto engine list
- * @queue_lock: spinlock to syncronise access to request queue
- * @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @prepare_request: do some prepare if need before handle the current request
- * @unprepare_request: undo any work done by prepare_message()
- * @crypt_one_request: do encryption for current request
- * @kworker: thread struct for request pump
- * @kworker_task: pointer to task for request pump kworker thread
- * @pump_requests: work struct for scheduling work to the request pump
- * @priv_data: the engine private data
- * @cur_req: the current request which is on processing
- */
-struct crypto_engine {
- char name[ENGINE_NAME_LEN];
- bool idling;
- bool busy;
- bool running;
- bool cur_req_prepared;
-
- struct list_head list;
- spinlock_t queue_lock;
- struct crypto_queue queue;
-
- bool rt;
-
- int (*prepare_crypt_hardware)(struct crypto_engine *engine);
- int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
-
- int (*prepare_request)(struct crypto_engine *engine,
- struct ablkcipher_request *req);
- int (*unprepare_request)(struct crypto_engine *engine,
- struct ablkcipher_request *req);
- int (*crypt_one_request)(struct crypto_engine *engine,
- struct ablkcipher_request *req);
-
- struct kthread_worker kworker;
- struct task_struct *kworker_task;
- struct kthread_work pump_requests;
-
- void *priv_data;
- struct ablkcipher_request *cur_req;
-};
-
-int crypto_transfer_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, bool need_pump);
-int crypto_transfer_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req);
-void crypto_finalize_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err);
-int crypto_engine_start(struct crypto_engine *engine);
-int crypto_engine_stop(struct crypto_engine *engine);
-struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
-int crypto_engine_exit(struct crypto_engine *engine);
-
extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
new file mode 100644
index 000000000000..04eb5c77addd
--- /dev/null
+++ b/include/crypto/engine.h
@@ -0,0 +1,107 @@
+/*
+ * Crypto engine API
+ *
+ * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ENGINE_H
+#define _CRYPTO_ENGINE_H
+
+#include <linux/crypto.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+
+#define ENGINE_NAME_LEN 30
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is running
+ * @cur_req_prepared: current request is prepared
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to the request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @prepare_cipher_request: do any preparation, if needed, before handling the current request
+ * @unprepare_cipher_request: undo any work done by prepare_cipher_request()
+ * @cipher_one_request: do encryption for the current request
+ * @prepare_hash_request: do any preparation, if needed, before handling the current request
+ * @unprepare_hash_request: undo any work done by prepare_hash_request()
+ * @hash_one_request: do the hash for the current request
+ * @kworker: thread struct for request pump
+ * @kworker_task: pointer to task for request pump kworker thread
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the current request being processed
+ */
+struct crypto_engine {
+ char name[ENGINE_NAME_LEN];
+ bool idling;
+ bool busy;
+ bool running;
+ bool cur_req_prepared;
+
+ struct list_head list;
+ spinlock_t queue_lock;
+ struct crypto_queue queue;
+
+ bool rt;
+
+ int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+
+ int (*prepare_cipher_request)(struct crypto_engine *engine,
+ struct ablkcipher_request *req);
+ int (*unprepare_cipher_request)(struct crypto_engine *engine,
+ struct ablkcipher_request *req);
+ int (*prepare_hash_request)(struct crypto_engine *engine,
+ struct ahash_request *req);
+ int (*unprepare_hash_request)(struct crypto_engine *engine,
+ struct ahash_request *req);
+ int (*cipher_one_request)(struct crypto_engine *engine,
+ struct ablkcipher_request *req);
+ int (*hash_one_request)(struct crypto_engine *engine,
+ struct ahash_request *req);
+
+ struct kthread_worker kworker;
+ struct task_struct *kworker_task;
+ struct kthread_work pump_requests;
+
+ void *priv_data;
+ struct crypto_async_request *cur_req;
+};
+
+int crypto_transfer_cipher_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req,
+ bool need_pump);
+int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
+ struct ablkcipher_request *req);
+int crypto_transfer_hash_request(struct crypto_engine *engine,
+ struct ahash_request *req, bool need_pump);
+int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
+ struct ahash_request *req);
+void crypto_finalize_cipher_request(struct crypto_engine *engine,
+ struct ablkcipher_request *req, int err);
+void crypto_finalize_hash_request(struct crypto_engine *engine,
+ struct ahash_request *req, int err);
+int crypto_engine_start(struct crypto_engine *engine);
+int crypto_engine_stop(struct crypto_engine *engine);
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
+int crypto_engine_exit(struct crypto_engine *engine);
+
+#endif /* _CRYPTO_ENGINE_H */
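
A hedged driver-side sketch of the split cipher/hash engine API declared above; my_dev, the callback body and the error handling are placeholders rather than a real driver, and crypto_engine_alloc_init() is assumed to return NULL on failure:

#include <linux/device.h>
#include <crypto/engine.h>

struct my_dev {				/* hypothetical driver state */
	struct crypto_engine *engine;
};

/* Called by the engine's request pump for each queued hash request; a real
 * driver would start the hardware here and call
 * crypto_finalize_hash_request() from its completion interrupt instead. */
static int my_hash_one_request(struct crypto_engine *engine,
			       struct ahash_request *req)
{
	crypto_finalize_hash_request(engine, req, 0);
	return 0;
}

static int my_engine_setup(struct device *dev, struct my_dev *md)
{
	md->engine = crypto_engine_alloc_init(dev, true);
	if (!md->engine)
		return -ENOMEM;

	md->engine->hash_one_request = my_hash_one_request;
	return crypto_engine_start(md->engine);
}

The driver's ahash entry points then simply queue work with crypto_transfer_hash_request_to_engine() and let the pump call back into my_hash_one_request().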
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
new file mode 100644
index 000000000000..2a61c9bbab8f
--- /dev/null
+++ b/include/crypto/ghash.h
@@ -0,0 +1,23 @@
+/*
+ * Common values for GHASH algorithms
+ */
+
+#ifndef __CRYPTO_GHASH_H__
+#define __CRYPTO_GHASH_H__
+
+#include <linux/types.h>
+#include <crypto/gf128mul.h>
+
+#define GHASH_BLOCK_SIZE 16
+#define GHASH_DIGEST_SIZE 16
+
+struct ghash_ctx {
+ struct gf128mul_4k *gf128;
+};
+
+struct ghash_desc_ctx {
+ u8 buffer[GHASH_BLOCK_SIZE];
+ u32 bytes;
+};
+
+#endif
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index 261b86d20e77..f6f0c062205c 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -38,6 +38,10 @@ struct analogix_dp_plat_data {
struct drm_connector *);
};
+int analogix_dp_psr_supported(struct device *dev);
+int analogix_dp_enable_psr(struct device *dev);
+int analogix_dp_disable_psr(struct device *dev);
+
int analogix_dp_resume(struct device *dev);
int analogix_dp_suspend(struct device *dev);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d3778652e462..672644031bd5 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -51,6 +51,7 @@
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/ratelimit.h>
+#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -127,6 +128,7 @@ struct dma_buf_attachment;
* run-time by echoing the debug value in its sysfs node:
* # echo 0xf > /sys/module/drm/parameters/debug
*/
+#define DRM_UT_NONE 0x00
#define DRM_UT_CORE 0x01
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
@@ -134,11 +136,14 @@ struct dma_buf_attachment;
#define DRM_UT_ATOMIC 0x10
#define DRM_UT_VBL 0x20
-extern __printf(2, 3)
-void drm_ut_debug_printk(const char *function_name,
- const char *format, ...);
-extern __printf(1, 2)
-void drm_err(const char *format, ...);
+extern __printf(6, 7)
+void drm_dev_printk(const struct device *dev, const char *level,
+ unsigned int category, const char *function_name,
+ const char *prefix, const char *format, ...);
+
+extern __printf(3, 4)
+void drm_printk(const char *level, unsigned int category,
+ const char *format, ...);
/***********************************************************************/
/** \name DRM template customization defaults */
@@ -146,6 +151,7 @@ void drm_err(const char *format, ...);
/* driver capabilities and requirements mask */
#define DRIVER_USE_AGP 0x1
+#define DRIVER_LEGACY 0x2
#define DRIVER_PCI_DMA 0x8
#define DRIVER_SG 0x10
#define DRIVER_HAVE_DMA 0x20
@@ -162,14 +168,37 @@ void drm_err(const char *format, ...);
/** \name Macros to make printk easier */
/*@{*/
+#define _DRM_PRINTK(once, level, fmt, ...) \
+ do { \
+ printk##once(KERN_##level "[" DRM_NAME "] " fmt, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define DRM_INFO(fmt, ...) \
+ _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE(fmt, ...) \
+ _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN(fmt, ...) \
+ _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+
+#define DRM_INFO_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+
/**
* Error output.
*
* \param fmt printf() like format string.
* \param arg arguments
*/
-#define DRM_ERROR(fmt, ...) \
- drm_err(fmt, ##__VA_ARGS__)
+#define DRM_DEV_ERROR(dev, fmt, ...) \
+ drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\
+ fmt, ##__VA_ARGS__)
+#define DRM_ERROR(fmt, ...) \
+ drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__)
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
@@ -177,21 +206,30 @@ void drm_err(const char *format, ...);
* \param fmt printf() like format string.
* \param arg arguments
*/
-#define DRM_ERROR_RATELIMITED(fmt, ...) \
+#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
\
if (__ratelimit(&_rs)) \
- drm_err(fmt, ##__VA_ARGS__); \
+ DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
})
+#define DRM_ERROR_RATELIMITED(fmt, ...) \
+ DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-#define DRM_INFO(fmt, ...) \
- printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+#define DRM_DEV_INFO(dev, fmt, ...) \
+ drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \
+ ##__VA_ARGS__)
-#define DRM_INFO_ONCE(fmt, ...) \
- printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
+({ \
+ static bool __print_once __read_mostly; \
+ if (!__print_once) { \
+ __print_once = true; \
+ DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \
+ } \
+})
/**
* Debug output.
@@ -199,37 +237,74 @@ void drm_err(const char *format, ...);
* \param fmt printf() like format string.
* \param arg arguments
*/
-#define DRM_DEBUG(fmt, args...) \
- do { \
- if (unlikely(drm_debug & DRM_UT_CORE)) \
- drm_ut_debug_printk(__func__, fmt, ##args); \
- } while (0)
+#define DRM_DEV_DEBUG(dev, fmt, args...) \
+ drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \
+ ##args)
+#define DRM_DEBUG(fmt, ...) \
+ drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \
+ drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \
+ fmt, ##args)
+#define DRM_DEBUG_DRIVER(fmt, ...) \
+ drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \
+ drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \
+ ##args)
+#define DRM_DEBUG_KMS(fmt, ...) \
+ drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \
+ drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \
+ fmt, ##args)
+#define DRM_DEBUG_PRIME(fmt, ...) \
+ drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \
+ drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \
+ fmt, ##args)
+#define DRM_DEBUG_ATOMIC(fmt, ...) \
+ drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \
+ drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \
+ ##args)
+#define DRM_DEBUG_VBL(fmt, ...) \
+ drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+
+#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \
+ __func__, "", fmt, ##args); \
+})
-#define DRM_DEBUG_DRIVER(fmt, args...) \
- do { \
- if (unlikely(drm_debug & DRM_UT_DRIVER)) \
- drm_ut_debug_printk(__func__, fmt, ##args); \
- } while (0)
-#define DRM_DEBUG_KMS(fmt, args...) \
- do { \
- if (unlikely(drm_debug & DRM_UT_KMS)) \
- drm_ut_debug_printk(__func__, fmt, ##args); \
- } while (0)
-#define DRM_DEBUG_PRIME(fmt, args...) \
- do { \
- if (unlikely(drm_debug & DRM_UT_PRIME)) \
- drm_ut_debug_printk(__func__, fmt, ##args); \
- } while (0)
-#define DRM_DEBUG_ATOMIC(fmt, args...) \
- do { \
- if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
- drm_ut_debug_printk(__func__, fmt, ##args); \
- } while (0)
-#define DRM_DEBUG_VBL(fmt, args...) \
- do { \
- if (unlikely(drm_debug & DRM_UT_VBL)) \
- drm_ut_debug_printk(__func__, fmt, ##args); \
- } while (0)
+/**
+ * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \
+	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
+#define DRM_DEBUG_RATELIMITED(fmt, args...) \
+ DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args)
+#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \
+ DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args)
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \
+ DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args)
+#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \
+ DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
/*@}*/
@@ -295,10 +370,10 @@ struct drm_pending_event {
we deliver the event, for tracing only */
};
-/* initial implementaton using a linked list - todo hashtab */
struct drm_prime_file_private {
- struct list_head head;
struct mutex lock;
+ struct rb_root dmabufs;
+ struct rb_root handles;
};
/** File private data */
@@ -320,7 +395,6 @@ struct drm_file {
unsigned is_master:1;
struct pid *pid;
- kuid_t uid;
drm_magic_t magic;
struct list_head lhead;
struct drm_minor *minor;
@@ -642,7 +716,7 @@ struct drm_driver {
};
enum drm_minor_type {
- DRM_MINOR_LEGACY,
+ DRM_MINOR_PRIMARY,
DRM_MINOR_CONTROL,
DRM_MINOR_RENDER,
DRM_MINOR_CNT,
@@ -856,7 +930,7 @@ static inline bool drm_is_control_client(const struct drm_file *file_priv)
static inline bool drm_is_primary_client(const struct drm_file *file_priv)
{
- return file_priv->minor->type == DRM_MINOR_LEGACY;
+ return file_priv->minor->type == DRM_MINOR_PRIMARY;
}
/******************************************************************/
@@ -937,8 +1011,11 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
}
#endif
+struct dma_buf_export_info;
+
extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
+ struct drm_gem_object *obj,
+ int flags);
extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
@@ -946,6 +1023,8 @@ extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
+ struct dma_buf_export_info *exp_info);
extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
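
A hedged usage sketch of the reworked logging macros: the DRM_DEV_* variants take a struct device so messages can be attributed to a particular device, while the short forms keep working and route through drm_printk() with the matching DRM_UT_* category. The function and messages below are illustrative only:

#include <drm/drmP.h>

static void report_link_state(struct drm_device *drm, bool trained)
{
	if (!trained)
		DRM_DEV_ERROR(drm->dev, "link training failed\n");
	else
		DRM_DEV_DEBUG_KMS(drm->dev, "link trained\n");

	/* call sites without a handy struct device keep the short forms */
	DRM_DEBUG_DRIVER("link state reported\n");
}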
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 856a9c85a838..9701f2dfb784 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -30,6 +30,160 @@
#include <drm/drm_crtc.h>
+/**
+ * struct drm_crtc_commit - track modeset commits on a CRTC
+ *
+ * This structure is used to track pending modeset changes and atomic commit on
+ * a per-CRTC basis. Since updating the list should never block this structure
+ * is reference counted to allow waiters to safely wait on an event to complete,
+ * without holding any locks.
+ *
+ * It has 3 different events in total to allow a fine-grained synchronization
+ * between outstanding updates::
+ *
+ * atomic commit thread hardware
+ *
+ * write new state into hardware ----> ...
+ * signal hw_done
+ * switch to new state on next
+ * ... v/hblank
+ *
+ * wait for buffers to show up ...
+ *
+ * ... send completion irq
+ * irq handler signals flip_done
+ * cleanup old buffers
+ *
+ * signal cleanup_done
+ *
+ * wait for flip_done <----
+ * clean up atomic state
+ *
+ * The important bit to know is that cleanup_done is the terminal event, but the
+ * ordering between flip_done and hw_done is entirely up to the specific driver
+ * and modeset state change.
+ *
+ * For an implementation of how to use this look at
+ * drm_atomic_helper_setup_commit() from the atomic helper library.
+ */
+struct drm_crtc_commit {
+ /**
+ * @crtc:
+ *
+ * DRM CRTC for this commit.
+ */
+ struct drm_crtc *crtc;
+
+ /**
+ * @ref:
+ *
+ * Reference count for this structure. Needed to allow blocking on
+ * completions without the risk of the completion disappearing
+ * meanwhile.
+ */
+ struct kref ref;
+
+ /**
+ * @flip_done:
+ *
+ * Will be signaled when the hardware has flipped to the new set of
+ * buffers. Signals at the same time as when the drm event for this
+	 * commit is sent to userspace, or when an out-fence is signalled. Note
+ * that for most hardware, in most cases this happens after @hw_done is
+ * signalled.
+ */
+ struct completion flip_done;
+
+ /**
+ * @hw_done:
+ *
+ * Will be signalled when all hw register changes for this commit have
+ * been written out. Especially when disabling a pipe this can be much
+	 * later than @flip_done, since that can signal already when the
+ * screen goes black, whereas to fully shut down a pipe more register
+ * I/O is required.
+ *
+ * Note that this does not need to include separately reference-counted
+ * resources like backing storage buffer pinning, or runtime pm
+ * management.
+ */
+ struct completion hw_done;
+
+ /**
+ * @cleanup_done:
+ *
+ * Will be signalled after old buffers have been cleaned up by calling
+ * drm_atomic_helper_cleanup_planes(). Since this can only happen after
+ * a vblank wait completed it might be a bit later. This completion is
+ * useful to throttle updates and avoid hardware updates getting ahead
+ * of the buffer cleanup too much.
+ */
+ struct completion cleanup_done;
+
+ /**
+ * @commit_entry:
+ *
+ * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
+ */
+ struct list_head commit_entry;
+
+ /**
+ * @event:
+ *
+ * &drm_pending_vblank_event pointer to clean up private events.
+ */
+ struct drm_pending_vblank_event *event;
+};
+
+struct __drm_planes_state {
+ struct drm_plane *ptr;
+ struct drm_plane_state *state;
+};
+
+struct __drm_crtcs_state {
+ struct drm_crtc *ptr;
+ struct drm_crtc_state *state;
+ struct drm_crtc_commit *commit;
+};
+
+struct __drm_connnectors_state {
+ struct drm_connector *ptr;
+ struct drm_connector_state *state;
+};
+
+/**
+ * struct drm_atomic_state - the global state object for atomic updates
+ * @dev: parent DRM device
+ * @allow_modeset: allow full modeset
+ * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
+ * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
+ * @planes: pointer to array of structures with per-plane data
+ * @crtcs: pointer to array of CRTC pointers
+ * @num_connector: size of the @connectors and @connector_states arrays
+ * @connectors: pointer to array of structures with per-connector data
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
+struct drm_atomic_state {
+ struct drm_device *dev;
+ bool allow_modeset : 1;
+ bool legacy_cursor_update : 1;
+ bool legacy_set_config : 1;
+ struct __drm_planes_state *planes;
+ struct __drm_crtcs_state *crtcs;
+ int num_connector;
+ struct __drm_connnectors_state *connectors;
+
+ struct drm_modeset_acquire_ctx *acquire_ctx;
+
+ /**
+ * @commit_work:
+ *
+ * Work item which can be used by the driver or helpers to execute the
+ * commit without blocking.
+ */
+ struct work_struct commit_work;
+};
+
void drm_crtc_commit_put(struct drm_crtc_commit *commit);
static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
{
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d86ae5dcd7b4..7ff92b09fd9c 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -29,6 +29,8 @@
#define DRM_ATOMIC_HELPER_H_
#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
struct drm_atomic_state;
@@ -43,8 +45,9 @@ int drm_atomic_helper_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock);
-void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
- struct drm_atomic_state *state);
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool pre_swap);
bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
struct drm_atomic_state *old_state,
struct drm_crtc *crtc);
@@ -63,14 +66,19 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
+
+#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0)
+#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1)
+
void drm_atomic_helper_commit_planes(struct drm_device *dev,
struct drm_atomic_state *state,
- bool active_only);
+ uint32_t flags);
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state);
void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
-void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
- bool atomic);
+void
+drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
+ bool atomic);
void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
bool stall);
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
new file mode 100644
index 000000000000..36baa175de99
--- /dev/null
+++ b/include/drm/drm_blend.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_BLEND_H__
+#define __DRM_BLEND_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+
+struct drm_device;
+struct drm_atomic_state;
+
+/*
+ * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the
+ * specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and
+ * DRM_REFLECT_Y reflects the image along the specified axis prior to rotation
+ *
+ * WARNING: These defines are UABI since they're exposed in the rotation
+ * property.
+ */
+#define DRM_ROTATE_0 BIT(0)
+#define DRM_ROTATE_90 BIT(1)
+#define DRM_ROTATE_180 BIT(2)
+#define DRM_ROTATE_270 BIT(3)
+#define DRM_ROTATE_MASK (DRM_ROTATE_0 | DRM_ROTATE_90 | \
+ DRM_ROTATE_180 | DRM_ROTATE_270)
+#define DRM_REFLECT_X BIT(4)
+#define DRM_REFLECT_Y BIT(5)
+#define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
+
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+ unsigned int supported_rotations);
+unsigned int drm_rotation_simplify(unsigned int rotation,
+ unsigned int supported_rotations);
+
+int drm_plane_create_zpos_property(struct drm_plane *plane,
+ unsigned int zpos,
+ unsigned int min, unsigned int max);
+int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
+ unsigned int zpos);
+int drm_atomic_normalize_zpos(struct drm_device *dev,
+ struct drm_atomic_state *state);
+#endif
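
A hedged sketch of wiring these properties into a plane at init time; my_plane_attach_props() is a made-up helper, drm_object_attach_property() comes from the mode-object code, and error handling is trimmed:

#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>

static void my_plane_attach_props(struct drm_plane *plane)
{
	struct drm_property *rotation;

	rotation = drm_mode_create_rotation_property(plane->dev,
						     DRM_ROTATE_0 |
						     DRM_ROTATE_180 |
						     DRM_REFLECT_X |
						     DRM_REFLECT_Y);
	if (rotation)
		drm_object_attach_property(&plane->base, rotation,
					   DRM_ROTATE_0);

	/* overlay planes stacked 0..3, zpos mutable from userspace */
	drm_plane_create_zpos_property(plane, 0, 0, 3);
}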
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
new file mode 100644
index 000000000000..530a1d6e8cde
--- /dev/null
+++ b/include/drm/drm_bridge.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_BRIDGE_H__
+#define __DRM_BRIDGE_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_modes.h>
+
+struct drm_bridge;
+
+/**
+ * struct drm_bridge_funcs - drm_bridge control functions
+ */
+struct drm_bridge_funcs {
+ /**
+ * @attach:
+ *
+ * This callback is invoked whenever our bridge is being attached to a
+ * &drm_encoder.
+ *
+ * The attach callback is optional.
+ *
+ * RETURNS:
+ *
+ * Zero on success, error code on failure.
+ */
+ int (*attach)(struct drm_bridge *bridge);
+
+ /**
+ * @detach:
+ *
+ * This callback is invoked whenever our bridge is being detached from a
+ * &drm_encoder.
+ *
+ * The detach callback is optional.
+ */
+ void (*detach)(struct drm_bridge *bridge);
+
+ /**
+ * @mode_fixup:
+ *
+	 * This callback is used to validate and adjust a mode. The parameter
+ * mode is the display mode that should be fed to the next element in
+ * the display chain, either the final &drm_connector or the next
+ * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
+ * requires. It can be modified by this callback and does not need to
+ * match mode.
+ *
+ * This is the only hook that allows a bridge to reject a modeset. If
+ * this function passes all other callbacks must succeed for this
+ * configuration.
+ *
+ * The mode_fixup callback is optional.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of atomic modesets, which
+ * can be aborted for any reason (including on userspace's request to
+ * just check whether a configuration would be possible). Drivers MUST
+ * NOT touch any persistent state (hardware or software) or data
+	 * structures except the passed in @adjusted_mode parameter.
+ *
+ * RETURNS:
+ *
+ * True if an acceptable configuration is possible, false if the modeset
+ * operation should be rejected.
+ */
+ bool (*mode_fixup)(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ /**
+ * @disable:
+ *
+ * This callback should disable the bridge. It is called right before
+ * the preceding element in the display pipe is disabled. If the
+ * preceding element is a bridge this means it's called before that
+ * bridge's ->disable() function. If the preceding element is a
+ * &drm_encoder it's called right before the encoder's ->disable(),
+ * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+ *
+ * The bridge can assume that the display pipe (i.e. clocks and timing
+ * signals) feeding it is still running when this callback is called.
+ *
+ * The disable callback is optional.
+ */
+ void (*disable)(struct drm_bridge *bridge);
+
+ /**
+ * @post_disable:
+ *
+ * This callback should disable the bridge. It is called right after
+ * the preceding element in the display pipe is disabled. If the
+ * preceding element is a bridge this means it's called after that
+ * bridge's ->post_disable() function. If the preceding element is a
+ * &drm_encoder it's called right after the encoder's ->disable(),
+ * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+ *
+ * The bridge must assume that the display pipe (i.e. clocks and timing
+	 * signals) feeding it is no longer running when this callback is
+ * called.
+ *
+ * The post_disable callback is optional.
+ */
+ void (*post_disable)(struct drm_bridge *bridge);
+
+ /**
+ * @mode_set:
+ *
+ * This callback should set the given mode on the bridge. It is called
+ * after the ->mode_set() callback for the preceding element in the
+ * display pipeline has been called already. The display pipe (i.e.
+ * clocks and timing signals) is off when this function is called.
+ */
+ void (*mode_set)(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ /**
+ * @pre_enable:
+ *
+ * This callback should enable the bridge. It is called right before
+ * the preceding element in the display pipe is enabled. If the
+ * preceding element is a bridge this means it's called before that
+ * bridge's ->pre_enable() function. If the preceding element is a
+ * &drm_encoder it's called right before the encoder's ->enable(),
+ * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+ *
+ * The display pipe (i.e. clocks and timing signals) feeding this bridge
+ * will not yet be running when this callback is called. The bridge must
+ * not enable the display link feeding the next bridge in the chain (if
+ * there is one) when this callback is called.
+ *
+ * The pre_enable callback is optional.
+ */
+ void (*pre_enable)(struct drm_bridge *bridge);
+
+ /**
+ * @enable:
+ *
+ * This callback should enable the bridge. It is called right after
+ * the preceding element in the display pipe is enabled. If the
+ * preceding element is a bridge this means it's called after that
+ * bridge's ->enable() function. If the preceding element is a
+ * &drm_encoder it's called right after the encoder's ->enable(),
+ * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
+ *
+ * The bridge can assume that the display pipe (i.e. clocks and timing
+ * signals) feeding it is running when this callback is called. This
+ * callback must enable the display link feeding the next bridge in the
+ * chain if there is one.
+ *
+ * The enable callback is optional.
+ */
+ void (*enable)(struct drm_bridge *bridge);
+};
+
+/**
+ * struct drm_bridge - central DRM bridge control structure
+ * @dev: DRM device this bridge belongs to
+ * @encoder: encoder to which this bridge is connected
+ * @next: the next bridge in the encoder chain
+ * @of_node: device node pointer to the bridge
+ * @list: to keep track of all added bridges
+ * @funcs: control functions
+ * @driver_private: pointer to the bridge driver's internal context
+ */
+struct drm_bridge {
+ struct drm_device *dev;
+ struct drm_encoder *encoder;
+ struct drm_bridge *next;
+#ifdef CONFIG_OF
+ struct device_node *of_node;
+#endif
+ struct list_head list;
+
+ const struct drm_bridge_funcs *funcs;
+ void *driver_private;
+};
+
+int drm_bridge_add(struct drm_bridge *bridge);
+void drm_bridge_remove(struct drm_bridge *bridge);
+struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
+void drm_bridge_detach(struct drm_bridge *bridge);
+
+bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void drm_bridge_disable(struct drm_bridge *bridge);
+void drm_bridge_post_disable(struct drm_bridge *bridge);
+void drm_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void drm_bridge_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_enable(struct drm_bridge *bridge);
+
+#endif
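
A hedged sketch of the two halves of the API above: the bridge driver publishes itself with drm_bridge_add(), and the KMS driver later looks the bridge up by OF node and chains it behind an encoder. The encoder-side field assignments follow the pattern used by existing drivers and are an assumption here; error handling is trimmed:

#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>

static const struct drm_bridge_funcs my_bridge_funcs = {
	/* every hook in drm_bridge_funcs is optional; a pass-through
	 * bridge can leave most of them unset */
};

/* bridge driver side: register on the global bridge list */
static int my_bridge_register(struct drm_bridge *bridge,
			      struct device_node *np)
{
	bridge->funcs = &my_bridge_funcs;
	bridge->of_node = np;		/* only present with CONFIG_OF */
	return drm_bridge_add(bridge);
}

/* KMS driver side: find the bridge by OF node and hook it up */
static int my_encoder_bind_bridge(struct drm_device *drm,
				  struct drm_encoder *encoder,
				  struct device_node *np)
{
	struct drm_bridge *bridge = of_drm_find_bridge(np);

	if (!bridge)
		return -EPROBE_DEFER;

	bridge->encoder = encoder;
	encoder->bridge = bridge;
	return drm_bridge_attach(drm, bridge);
}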
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
new file mode 100644
index 000000000000..c767238ac9d5
--- /dev/null
+++ b/include/drm/drm_color_mgmt.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_COLOR_MGMT_H__
+#define __DRM_COLOR_MGMT_H__
+
+#include <linux/ctype.h>
+
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+ uint degamma_lut_size,
+ bool has_ctm,
+ uint gamma_lut_size);
+
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size);
+
+/**
+ * drm_color_lut_extract - clamp&round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a degamma/gamma LUT value provided by user (in the form of
+ * &drm_color_lut entries) and round it to the precision supported by the
+ * hardware.
+ */
+static inline uint32_t drm_color_lut_extract(uint32_t user_input,
+ uint32_t bit_precision)
+{
+ uint32_t val = user_input;
+ uint32_t max = 0xffff >> (16 - bit_precision);
+
+ /* Round only if we're not using full precision. */
+ if (bit_precision < 16) {
+ val += 1UL << (16 - bit_precision - 1);
+ val >>= 16 - bit_precision;
+ }
+
+ return clamp_val(val, 0, max);
+}
+
+
+#endif
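
A hedged sketch of drm_color_lut_extract() in use while programming a gamma LUT into hardware with, say, 10-bit entries; the write callback, the bit width, the non-NULL gamma_lut blob and the red-channel-only loop are simplifications for illustration:

#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>

static void my_load_gamma_lut(struct drm_crtc_state *state,
			      void (*write_lut_entry)(int idx, u32 val))
{
	struct drm_color_lut *lut = state->gamma_lut->data;
	int i, entries = state->gamma_lut->length / sizeof(*lut);

	for (i = 0; i < entries; i++)
		/* clamp and round the 16-bit userspace value to 10 bits */
		write_lut_entry(i, drm_color_lut_extract(lut[i].red, 10));
}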
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
new file mode 100644
index 000000000000..ac9d7d8e0e43
--- /dev/null
+++ b/include/drm/drm_connector.h
@@ -0,0 +1,778 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_CONNECTOR_H__
+#define __DRM_CONNECTOR_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+#include <uapi/drm/drm_mode.h>
+
+struct drm_device;
+
+struct drm_connector_helper_funcs;
+struct drm_device;
+struct drm_crtc;
+struct drm_encoder;
+struct drm_property;
+struct drm_property_blob;
+struct edid;
+
+enum drm_connector_force {
+ DRM_FORCE_UNSPECIFIED,
+ DRM_FORCE_OFF,
+ DRM_FORCE_ON, /* force on analog part normally */
+ DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
+/**
+ * enum drm_connector_status - status for a &drm_connector
+ *
+ * This enum is used to track the connector status. There are no separate
+ * #defines for the uapi!
+ */
+enum drm_connector_status {
+ /**
+ * @connector_status_connected: The connector is definitely connected to
+ * a sink device, and can be enabled.
+ */
+ connector_status_connected = 1,
+ /**
+ * @connector_status_disconnected: The connector isn't connected to a
+ * sink device which can be autodetected. For digital outputs like DP or
+ * HDMI (which can be reliably probed) this means there's really
+ * nothing there. It is driver-dependent whether a connector with this
+ * status can be lit up or not.
+ */
+ connector_status_disconnected = 2,
+ /**
+ * @connector_status_unknown: The connector's status could not be
+ * reliably detected. This happens when probing would either cause
+ * flicker (like load-detection when the connector is in use), or when a
+ * hardware resource isn't available (like when load-detection needs a
+ * free CRTC). It should be possible to light up the connector with one
+ * of the listed fallback modes. For default configuration userspace
+ * should only try to light up connectors with unknown status when
+ * there's no connector with @connector_status_connected.
+ */
+ connector_status_unknown = 3,
+};
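+
+/*
+ * A sketch of how a driver's detect hook maps hardware state onto this enum;
+ * the foo_* names and the HPD register read are hypothetical:
+ *
+ *	static enum drm_connector_status
+ *	foo_connector_detect(struct drm_connector *connector, bool force)
+ *	{
+ *		struct foo_output *out = to_foo_output(connector);
+ *
+ *		// No way to probe without disturbing the screen: report unknown.
+ *		if (!out->hpd_available && !force)
+ *			return connector_status_unknown;
+ *
+ *		return foo_read_hpd(out) ? connector_status_connected :
+ *					   connector_status_disconnected;
+ *	}
+ */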
+
+enum subpixel_order {
+ SubPixelUnknown = 0,
+ SubPixelHorizontalRGB,
+ SubPixelHorizontalBGR,
+ SubPixelVerticalRGB,
+ SubPixelVerticalBGR,
+ SubPixelNone,
+};
+
+/**
+ * struct drm_display_info - runtime data about the connected sink
+ *
+ * Describes a given display (e.g. CRT or flat panel) and its limitations. For
+ * fixed display sinks like built-in panels there's not much difference between
+ * this and struct &drm_connector. But for sinks with a real cable this
+ * structure is meant to describe all the things at the other end of the cable.
+ *
+ * For sinks which provide an EDID this can be filled out by calling
+ * drm_add_edid_modes().
+ */
+struct drm_display_info {
+ /**
+ * @name: Name of the display.
+ */
+ char name[DRM_DISPLAY_INFO_LEN];
+
+ /**
+ * @width_mm: Physical width in mm.
+ */
+ unsigned int width_mm;
+ /**
+ * @height_mm: Physical height in mm.
+ */
+ unsigned int height_mm;
+
+ /**
+ * @pixel_clock: Maximum pixel clock supported by the sink, in units of
+ * 100Hz. This differs from the clock in &drm_display_mode (which is in
+ * kHz), because that's what the EDID uses as its base unit.
+ */
+ unsigned int pixel_clock;
+ /**
+ * @bpc: Maximum bits per color channel. Used by HDMI and DP outputs.
+ */
+ unsigned int bpc;
+
+ /**
+ * @subpixel_order: Subpixel order of LCD panels.
+ */
+ enum subpixel_order subpixel_order;
+
+#define DRM_COLOR_FORMAT_RGB444 (1<<0)
+#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
+#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
+
+ /**
+ * @color_formats: HDMI Color formats, selects between RGB and YCrCb
+ * modes. Uses DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones
+ * as used to describe the pixel format in framebuffers, and also don't
+ * match the formats in @bus_formats which are shared with v4l.
+ */
+ u32 color_formats;
+
+ /**
+ * @bus_formats: Pixel data format on the wire, somewhat redundant with
+ * @color_formats. Array of size @num_bus_formats encoded using
+ * MEDIA_BUS_FMT\_ defines shared with v4l and media drivers.
+ */
+ const u32 *bus_formats;
+ /**
+ * @num_bus_formats: Size of @bus_formats array.
+ */
+ unsigned int num_bus_formats;
+
+#define DRM_BUS_FLAG_DE_LOW (1<<0)
+#define DRM_BUS_FLAG_DE_HIGH (1<<1)
+/* drive data on pos. edge */
+#define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2)
+/* drive data on neg. edge */
+#define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3)
+
+ /**
+ * @bus_flags: Additional information (like pixel signal polarity) for
+ * the pixel data on the bus, using DRM_BUS_FLAG\_ defines.
+ */
+ u32 bus_flags;
+
+ /**
+ * @max_tmds_clock: Maximum TMDS clock rate supported by the
+ * sink in kHz. 0 means undefined.
+ */
+ int max_tmds_clock;
+
+ /**
+ * @dvi_dual: Dual-link DVI sink?
+ */
+ bool dvi_dual;
+
+ /**
+ * @edid_hdmi_dc_modes: Mask of supported HDMI deep color modes. Even
+ * more stuff redundant with @bus_formats.
+ */
+ u8 edid_hdmi_dc_modes;
+
+ /**
+ * @cea_rev: CEA revision of the HDMI sink.
+ */
+ u8 cea_rev;
+};
+
+int drm_display_info_set_bus_formats(struct drm_display_info *info,
+ const u32 *formats,
+ unsigned int num_formats);
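
For fixed panels without an EDID the driver fills struct drm_display_info itself, typically together with drm_display_info_set_bus_formats(); a sketch with made-up panel dimensions and a hypothetical foo_ prefix:

static const u32 foo_panel_bus_formats[] = { MEDIA_BUS_FMT_RGB888_1X24 };

static void foo_panel_fill_display_info(struct drm_connector *connector)
{
	struct drm_display_info *info = &connector->display_info;

	/* Physical size and depth of the (hypothetical) built-in panel. */
	info->width_mm = 217;
	info->height_mm = 136;
	info->bpc = 8;

	drm_display_info_set_bus_formats(info, foo_panel_bus_formats,
					 ARRAY_SIZE(foo_panel_bus_formats));
}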
+
+/**
+ * struct drm_connector_state - mutable connector state
+ * @connector: backpointer to the connector
+ * @best_encoder: can be used by helpers and drivers to select the encoder
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_connector_state {
+ struct drm_connector *connector;
+
+ /**
+ * @crtc: CRTC to connect connector to, NULL if disabled.
+ *
+ * Do not change this directly, use drm_atomic_set_crtc_for_connector()
+ * instead.
+ */
+ struct drm_crtc *crtc;
+
+ struct drm_encoder *best_encoder;
+
+ struct drm_atomic_state *state;
+};
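+
+/*
+ * As noted above, the @crtc link is only ever changed through the atomic
+ * helpers; a sketch of the usual pattern while building atomic state
+ * (conn_state and crtc are assumed to come from the caller):
+ *
+ *	ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ *	if (ret)
+ *		return ret;
+ */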
+
+/**
+ * struct drm_connector_funcs - control connectors on a given device
+ *
+ * Each CRTC may have one or more connectors attached to it. The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+ * etc.
+ */
+struct drm_connector_funcs {
+ /**
+ * @dpms:
+ *
+ * Legacy entry point to set the per-connector DPMS state. Legacy DPMS
+ * is exposed as a standard property on the connector, but diverted to
+ * this callback in the drm core. Note that atomic drivers don't
+ * implement the 4 level DPMS support on the connector any more, but
+ * instead only have an on/off "ACTIVE" property on the CRTC object.
+ *
+ * Drivers implementing atomic modeset should use
+ * drm_atomic_helper_connector_dpms() to implement this hook.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*dpms)(struct drm_connector *connector, int mode);
+
+ /**
+ * @reset:
+ *
+ * Reset connector hardware and software state to off. This function isn't
+ * called by the core directly, only through drm_mode_config_reset().
+ * It's not a helper hook purely for historical reasons.
+ *
+ * Atomic drivers can use drm_atomic_helper_connector_reset() to reset
+ * atomic state using this hook.
+ */
+ void (*reset)(struct drm_connector *connector);
+
+ /**
+ * @detect:
+ *
+ * Check to see if anything is attached to the connector. The parameter
+ * force is set to false whilst polling, true when checking the
+ * connector due to a user request. force can be used by the driver to
+ * avoid expensive, destructive operations during automated probing.
+ *
+ * FIXME:
+ *
+ * Note that this hook is only called by the probe helper. It's not in
+ * the helper library vtable purely for historical reasons. The only DRM
+ * core entry point to probe connector state is @fill_modes.
+ *
+ * RETURNS:
+ *
+ * drm_connector_status indicating the connector's status.
+ */
+ enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force);
+
+ /**
+ * @force:
+ *
+ * This function is called to update internal encoder state when the
+ * connector is forced to a certain state by userspace, either through
+ * the sysfs interfaces or on the kernel cmdline. In that case the
+ * @detect callback isn't called.
+ *
+ * FIXME:
+ *
+ * Note that this hook is only called by the probe helper. It's not in
+ * the helper library vtable purely for historical reasons. The only DRM
+ * core entry point to probe connector state is @fill_modes.
+ */
+ void (*force)(struct drm_connector *connector);
+
+ /**
+ * @fill_modes:
+ *
+ * Entry point for output detection and basic mode validation. The
+ * driver should reprobe the output if needed (e.g. when hotplug
+ * handling is unreliable), add all detected modes to connector->modes
+ * and filter out any the device can't support in any configuration. It
+ * also needs to filter out any modes wider or higher than the
+ * parameters max_width and max_height indicate.
+ *
+ * The drivers must also prune any modes no longer valid from
+ * connector->modes. Furthermore it must update connector->status and
+ * connector->edid. If no EDID has been received for this output
+ * connector->edid must be NULL.
+ *
+ * Drivers using the probe helpers should use
+ * drm_helper_probe_single_connector_modes() or
+ * drm_helper_probe_single_connector_modes_nomerge() to implement this
+ * function.
+ *
+ * RETURNS:
+ *
+ * The number of modes detected and filled into connector->modes.
+ */
+ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+
+ /**
+ * @set_property:
+ *
+ * This is the legacy entry point to update a property attached to the
+ * connector.
+ *
+ * Drivers implementing atomic modeset should use
+ * drm_atomic_helper_connector_set_property() to implement this hook.
+ *
+ * This callback is optional if the driver does not support any legacy
+ * driver-private properties.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*set_property)(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val);
+
+ /**
+ * @late_register:
+ *
+ * This optional hook can be used to register additional userspace
+ * interfaces attached to the connector, like backlight control, i2c,
+ * DP aux or similar interfaces. It is called late in the driver load
+ * sequence from drm_connector_register() when registering all the
+ * core drm connector interfaces. Everything added from this callback
+ * should be unregistered in the early_unregister callback.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code on failure.
+ */
+ int (*late_register)(struct drm_connector *connector);
+
+ /**
+ * @early_unregister:
+ *
+ * This optional hook should be used to unregister the additional
+ * userspace interfaces attached to the connector from
+ * late_register(). It is called from drm_connector_unregister(),
+ * early in the driver unload sequence to disable userspace access
+ * before data structures are torn down.
+ */
+ void (*early_unregister)(struct drm_connector *connector);
+
+ /**
+ * @destroy:
+ *
+ * Clean up connector resources. This is called at driver unload time
+ * through drm_mode_config_cleanup(). It can also be called at runtime
+ * when a connector is being hot-unplugged for drivers that support
+ * connector hotplugging (e.g. DisplayPort MST).
+ */
+ void (*destroy)(struct drm_connector *connector);
+
+ /**
+ * @atomic_duplicate_state:
+ *
+ * Duplicate the current atomic state for this connector and return it.
+ * The core and helpers guarantee that any atomic state duplicated with
+ * this hook and still owned by the caller (i.e. not transferred to the
+ * driver by calling ->atomic_commit() from struct
+ * &drm_mode_config_funcs) will be cleaned up by calling the
+ * @atomic_destroy_state hook in this structure.
+ *
+ * Atomic drivers which don't subclass struct &drm_connector_state should use
+ * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
+ * state structure to extend it with driver-private state should use
+ * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
+ * duplicated in a consistent fashion across drivers.
+ *
+ * It is an error to call this hook before connector->state has been
+ * initialized correctly.
+ *
+ * NOTE:
+ *
+ * If the duplicate state references refcounted resources this hook must
+ * acquire a reference for each of them. The driver must release these
+ * references again in @atomic_destroy_state.
+ *
+ * RETURNS:
+ *
+ * Duplicated atomic state or NULL when the allocation failed.
+ */
+ struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
+
+ /**
+ * @atomic_destroy_state:
+ *
+ * Destroy a state duplicated with @atomic_duplicate_state and release
+ * or unreference all resources it references.
+ */
+ void (*atomic_destroy_state)(struct drm_connector *connector,
+ struct drm_connector_state *state);
+
+ /**
+ * @atomic_set_property:
+ *
+ * Decode a driver-private property value and store the decoded value
+ * into the passed-in state structure. Since the atomic core decodes all
+ * standardized properties (even for extensions beyond the core set of
+ * properties which might not be implemented by all drivers) this
+ * requires drivers to subclass the state structure.
+ *
+ * Such driver-private properties should really only be implemented for
+ * truly hardware/vendor specific state. Instead it is preferred to
+ * standardize atomic extension and decode the properties used to expose
+ * such an extension in the core.
+ *
+ * Do not call this function directly, use
+ * drm_atomic_connector_set_property() instead.
+ *
+ * This callback is optional if the driver does not support any
+ * driver-private atomic properties.
+ *
+ * NOTE:
+ *
+ * This function is called in the state assembly phase of atomic
+ * modesets, which can be aborted for any reason (including on
+ * userspace's request to just check whether a configuration would be
+ * possible). Drivers MUST NOT touch any persistent state (hardware or
+ * software) or data structures except the passed in @state parameter.
+ *
+ * Also since userspace controls in which order properties are set this
+ * function must not do any input validation (since the state update is
+ * incomplete and hence likely inconsistent). Instead any such input
+ * validation must be done in the various atomic_check callbacks.
+ *
+ * RETURNS:
+ *
+ * 0 if the property has been found, -EINVAL if the property isn't
+ * implemented by the driver (which shouldn't ever happen, the core only
+ * asks for properties attached to this connector). No other validation
+ * is allowed by the driver. The core already checks that the property
+ * value is within the range (integer, valid enum value, ...) the driver
+ * set when registering the property.
+ */
+ int (*atomic_set_property)(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
+
+ /**
+ * @atomic_get_property:
+ *
+ * Reads out the decoded driver-private property. This is used to
+ * implement the GETCONNECTOR IOCTL.
+ *
+ * Do not call this function directly, use
+ * drm_atomic_connector_get_property() instead.
+ *
+ * This callback is optional if the driver does not support any
+ * driver-private atomic properties.
+ *
+ * RETURNS:
+ *
+ * 0 on success, -EINVAL if the property isn't implemented by the
+ * driver (which shouldn't ever happen, the core only asks for
+ * properties attached to this connector).
+ */
+ int (*atomic_get_property)(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+};
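+
+/*
+ * Putting the hooks together, a typical atomic driver wires most of them to
+ * the stock helpers referenced in the comments above; foo_connector_detect
+ * and foo_connector_destroy are placeholders:
+ *
+ *	static const struct drm_connector_funcs foo_connector_funcs = {
+ *		.dpms			= drm_atomic_helper_connector_dpms,
+ *		.detect			= foo_connector_detect,
+ *		.fill_modes		= drm_helper_probe_single_connector_modes,
+ *		.destroy		= foo_connector_destroy,
+ *		.reset			= drm_atomic_helper_connector_reset,
+ *		.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
+ *		.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
+ *	};
+ */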
+
+/* mode specified on the command line */
+struct drm_cmdline_mode {
+ bool specified;
+ bool refresh_specified;
+ bool bpp_specified;
+ int xres, yres;
+ int bpp;
+ int refresh;
+ bool rb;
+ bool interlace;
+ bool cvt;
+ bool margins;
+ enum drm_connector_force force;
+};
+
+/**
+ * struct drm_connector - central DRM connector control structure
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @name: human readable name, can be overwritten by the driver
+ * @connector_type: one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
+ * @stereo_allowed: can this connector handle stereo modes?
+ * @registered: is this connector exposed (registered) with userspace?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @funcs: connector control functions
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @properties: property tracking for this connector
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
+ * @force: a DRM_FORCE_<foo> state for forced mode sets
+ * @override_edid: has the EDID been overwritten through debugfs for testing?
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
+ * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @edid_corrupt: indicates whether the last read EDID was corrupt
+ * @debugfs_entry: debugfs directory for this connector
+ * @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the tile is one monitor housing
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC. Each connector also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_connector {
+ struct drm_device *dev;
+ struct device *kdev;
+ struct device_attribute *attr;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ char *name;
+
+ /**
+ * @index: Compacted connector index, which matches the position inside
+ * the mode_config.list for drivers not supporting hot-add/removing. Can
+ * be used as an array index. It is invariant over the lifetime of the
+ * connector.
+ */
+ unsigned index;
+
+ int connector_type;
+ int connector_type_id;
+ bool interlace_allowed;
+ bool doublescan_allowed;
+ bool stereo_allowed;
+ bool registered;
+ struct list_head modes; /* list of modes on this connector */
+
+ enum drm_connector_status status;
+
+ /* these are modes added by probing with DDC or the BIOS */
+ struct list_head probed_modes;
+
+ /**
+ * @display_info: Display information is filled from EDID information
+ * when a display is detected. For non hot-pluggable displays such as
+ * flat panels in embedded systems, the driver should initialize the
+ * display_info.width_mm and display_info.height_mm fields with the
+ * physical size of the display.
+ */
+ struct drm_display_info display_info;
+ const struct drm_connector_funcs *funcs;
+
+ struct drm_property_blob *edid_blob_ptr;
+ struct drm_object_properties properties;
+
+ /**
+ * @path_blob_ptr:
+ *
+ * DRM blob property data for the DP MST path property.
+ */
+ struct drm_property_blob *path_blob_ptr;
+
+ /**
+ * @tile_blob_ptr:
+ *
+ * DRM blob property data for the tile property (used mostly by DP MST).
+ * This is meant for screens which are driven through separate display
+ * pipelines represented by &drm_crtc, which might not be running with
+ * genlocked clocks. For tiled panels which are genlocked, like
+ * dual-link LVDS or dual-link DSI, the driver should try to not expose
+ * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+ */
+ struct drm_property_blob *tile_blob_ptr;
+
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
+ /**
+ * @polled:
+ *
+ * Connector polling mode, a combination of
+ *
+ * DRM_CONNECTOR_POLL_HPD
+ * The connector generates hotplug events and doesn't need to be
+ * periodically polled. The CONNECT and DISCONNECT flags must not
+ * be set together with the HPD flag.
+ *
+ * DRM_CONNECTOR_POLL_CONNECT
+ * Periodically poll the connector for connection.
+ *
+ * DRM_CONNECTOR_POLL_DISCONNECT
+ * Periodically poll the connector for disconnection.
+ *
+ * Set to 0 for connectors that don't support connection status
+ * discovery.
+ */
+ uint8_t polled;
+
+ /* requested DPMS state */
+ int dpms;
+
+ const struct drm_connector_helper_funcs *helper_private;
+
+ /* forced on connector */
+ struct drm_cmdline_mode cmdline_mode;
+ enum drm_connector_force force;
+ bool override_edid;
+
+#define DRM_CONNECTOR_MAX_ENCODER 3
+ uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+ struct drm_encoder *encoder; /* currently active encoder */
+
+#define MAX_ELD_BYTES 128
+ /* EDID bits */
+ uint8_t eld[MAX_ELD_BYTES];
+ bool latency_present[2];
+ int video_latency[2]; /* [0]: progressive, [1]: interlaced */
+ int audio_latency[2];
+ int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+ unsigned bad_edid_counter;
+
+ /* Flag for raw EDID header corruption - used in Displayport
+ * compliance testing - Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+ */
+ bool edid_corrupt;
+
+ struct dentry *debugfs_entry;
+
+ struct drm_connector_state *state;
+
+ /* DisplayID bits */
+ bool has_tile;
+ struct drm_tile_group *tile_group;
+ bool tile_is_single_monitor;
+
+ uint8_t num_h_tile, num_v_tile;
+ uint8_t tile_h_loc, tile_v_loc;
+ uint16_t tile_h_size, tile_v_size;
+};
+
+#define obj_to_connector(x) container_of(x, struct drm_connector, base)
+
+int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type);
+int drm_connector_register(struct drm_connector *connector);
+void drm_connector_unregister(struct drm_connector *connector);
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+
+void drm_connector_cleanup(struct drm_connector *connector);
+static inline unsigned drm_connector_index(struct drm_connector *connector)
+{
+ return connector->index;
+}
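+
+/*
+ * A sketch of the usual bring-up sequence using the functions declared above;
+ * the funcs table and encoder are assumed to exist already, and the connector
+ * type is arbitrary:
+ *
+ *	ret = drm_connector_init(dev, connector, &foo_connector_funcs,
+ *				 DRM_MODE_CONNECTOR_HDMIA);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = drm_mode_connector_attach_encoder(connector, encoder);
+ *	if (ret)
+ *		return ret;
+ *
+ *	// Expose the connector to userspace (often done via drm_dev_register()).
+ *	return drm_connector_register(connector);
+ */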
+
+/**
+ * drm_connector_lookup - lookup connector object
+ * @dev: DRM device
+ * @id: connector object id
+ *
+ * This function looks up the connector object specified by id
+ * and takes a reference to it.
+ */
+static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
+ return mo ? obj_to_connector(mo) : NULL;
+}
+
+/**
+ * drm_connector_reference - increment the connector refcount
+ * @connector: connector
+ *
+ * This function increments the connector's refcount.
+ */
+static inline void drm_connector_reference(struct drm_connector *connector)
+{
+ drm_mode_object_reference(&connector->base);
+}
+
+/**
+ * drm_connector_unreference - unref a connector
+ * @connector: connector to unref
+ *
+ * This function decrements the connector's refcount and frees it if it drops to zero.
+ */
+static inline void drm_connector_unreference(struct drm_connector *connector)
+{
+ drm_mode_object_unreference(&connector->base);
+}
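+
+/*
+ * The lookup and unreference helpers pair up as usual for refcounted mode
+ * objects; a sketch (connector_id supplied by the caller):
+ *
+ *	connector = drm_connector_lookup(dev, connector_id);
+ *	if (!connector)
+ *		return -ENOENT;
+ *
+ *	// ... inspect connector->status, connector->modes, ...
+ *
+ *	drm_connector_unreference(connector);
+ */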
+
+const char *drm_get_connector_status_name(enum drm_connector_status status);
+const char *drm_get_subpixel_order_name(enum subpixel_order order);
+const char *drm_get_dpms_name(int val);
+const char *drm_get_dvi_i_subconnector_name(int val);
+const char *drm_get_dvi_i_select_name(int val);
+const char *drm_get_tv_subconnector_name(int val);
+const char *drm_get_tv_select_name(int val);
+
+int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int num_modes,
+ const char * const modes[]);
+int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
+
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+ const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ const struct edid *edid);
+
+/**
+ * drm_for_each_connector - iterate over all connectors
+ * @connector: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all connectors of @dev.
+ */
+#define drm_for_each_connector(connector, dev) \
+ for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \
+ connector = list_first_entry(&(dev)->mode_config.connector_list, \
+ struct drm_connector, head); \
+ &connector->head != (&(dev)->mode_config.connector_list); \
+ connector = list_next_entry(connector, head))
+
+#endif
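
A sketch of walking the connector list with drm_for_each_connector(); the assert in the macro expects the caller to hold mode_config.mutex (or the connection_mutex), and DRM_DEBUG_KMS is used here only as an example consumer:

	struct drm_connector *connector;

	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev) {
		DRM_DEBUG_KMS("connector %s: %s\n", connector->name,
			      drm_get_connector_status_name(connector->status));
	}
	mutex_unlock(&dev->mode_config.mutex);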
diff --git a/include/drm/drm_core.h b/include/drm/drm_core.h
deleted file mode 100644
index 4e7523863a4b..000000000000
--- a/include/drm/drm_core.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
-
-#define CORE_NAME "drm"
-#define CORE_DESC "DRM shared core routines"
-#define CORE_DATE "20060810"
-
-#define DRM_IF_MAJOR 1
-#define DRM_IF_MINOR 4
-
-#define CORE_MAJOR 1
-#define CORE_MINOR 1
-#define CORE_PATCHLEVEL 0
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 44e070800b6d..0aa292526567 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -35,40 +35,27 @@
#include <uapi/drm/drm_mode.h>
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_mode_object.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_property.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_color_mgmt.h>
struct drm_device;
struct drm_mode_set;
-struct drm_framebuffer;
-struct drm_object_properties;
struct drm_file;
struct drm_clip_rect;
struct device_node;
struct fence;
struct edid;
-struct drm_mode_object {
- uint32_t id;
- uint32_t type;
- struct drm_object_properties *properties;
- struct kref refcount;
- void (*free_cb)(struct kref *kref);
-};
-
-#define DRM_OBJECT_MAX_PROPERTY 24
-struct drm_object_properties {
- int count, atomic_count;
- /* NOTE: if we ever start dynamically destroying properties (ie.
- * not at drm_mode_config_cleanup() time), then we'd have to do
- * a better job of detaching property from mode objects to avoid
- * dangling property pointers:
- */
- struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
- /* do not read/write values directly, but use drm_object_property_get_value()
- * and drm_object_property_set_value():
- */
- uint64_t values[DRM_OBJECT_MAX_PROPERTY];
-};
-
static inline int64_t U642I64(uint64_t val)
{
return (int64_t)*((int64_t *)&val);
@@ -78,84 +65,6 @@ static inline uint64_t I642U64(int64_t val)
return (uint64_t)*((uint64_t *)&val);
}
-/*
- * Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the
- * specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and
- * DRM_REFLECT_Y reflects the image along the specified axis prior to rotation
- */
-#define DRM_ROTATE_MASK 0x0f
-#define DRM_ROTATE_0 0
-#define DRM_ROTATE_90 1
-#define DRM_ROTATE_180 2
-#define DRM_ROTATE_270 3
-#define DRM_REFLECT_MASK (~DRM_ROTATE_MASK)
-#define DRM_REFLECT_X 4
-#define DRM_REFLECT_Y 5
-
-enum drm_connector_force {
- DRM_FORCE_UNSPECIFIED,
- DRM_FORCE_OFF,
- DRM_FORCE_ON, /* force on analog part normally */
- DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
-};
-
-#include <drm/drm_modes.h>
-
-enum drm_connector_status {
- connector_status_connected = 1,
- connector_status_disconnected = 2,
- connector_status_unknown = 3,
-};
-
-enum subpixel_order {
- SubPixelUnknown = 0,
- SubPixelHorizontalRGB,
- SubPixelHorizontalBGR,
- SubPixelVerticalRGB,
- SubPixelVerticalBGR,
- SubPixelNone,
-};
-
-#define DRM_COLOR_FORMAT_RGB444 (1<<0)
-#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
-#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
-
-#define DRM_BUS_FLAG_DE_LOW (1<<0)
-#define DRM_BUS_FLAG_DE_HIGH (1<<1)
-/* drive data on pos. edge */
-#define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2)
-/* drive data on neg. edge */
-#define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3)
-
-/*
- * Describes a given display (e.g. CRT or flat panel) and its limitations.
- */
-struct drm_display_info {
- char name[DRM_DISPLAY_INFO_LEN];
-
- /* Physical size */
- unsigned int width_mm;
- unsigned int height_mm;
-
- /* Clock limits FIXME: storage format */
- unsigned int min_vfreq, max_vfreq;
- unsigned int min_hfreq, max_hfreq;
- unsigned int pixel_clock;
- unsigned int bpc;
-
- enum subpixel_order subpixel_order;
- u32 color_formats;
-
- const u32 *bus_formats;
- unsigned int num_bus_formats;
- u32 bus_flags;
-
- /* Mask of supported hdmi deep color modes */
- u8 edid_hdmi_dc_modes;
-
- u8 cea_rev;
-};
-
/* data corresponds to displayid vend/prod/serial */
struct drm_tile_group {
struct kref refcount;
@@ -164,130 +73,7 @@ struct drm_tile_group {
u8 group_data[8];
};
-/**
- * struct drm_framebuffer_funcs - framebuffer hooks
- */
-struct drm_framebuffer_funcs {
- /**
- * @destroy:
- *
- * Clean up framebuffer resources, specifically also unreference the
- * backing storage. The core guarantees to call this function for every
- * framebuffer successfully created by ->fb_create() in
- * &drm_mode_config_funcs. Drivers must also call
- * drm_framebuffer_cleanup() to release DRM core resources for this
- * framebuffer.
- */
- void (*destroy)(struct drm_framebuffer *framebuffer);
-
- /**
- * @create_handle:
- *
- * Create a buffer handle in the driver-specific buffer manager (either
- * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
- * the core to implement the GETFB IOCTL, which returns (for
- * sufficiently priviledged user) also a native buffer handle. This can
- * be used for seamless transitions between modesetting clients by
- * copying the current screen contents to a private buffer and blending
- * between that and the new contents.
- *
- * GEM based drivers should call drm_gem_handle_create() to create the
- * handle.
- *
- * RETURNS:
- *
- * 0 on success or a negative error code on failure.
- */
- int (*create_handle)(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle);
- /**
- * @dirty:
- *
- * Optional callback for the dirty fb IOCTL.
- *
- * Userspace can notify the driver via this callback that an area of the
- * framebuffer has changed and should be flushed to the display
- * hardware. This can also be used internally, e.g. by the fbdev
- * emulation, though that's not the case currently.
- *
- * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd
- * for more information as all the semantics and arguments have a one to
- * one mapping on this function.
- *
- * RETURNS:
- *
- * 0 on success or a negative error code on failure.
- */
- int (*dirty)(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv, unsigned flags,
- unsigned color, struct drm_clip_rect *clips,
- unsigned num_clips);
-};
-
-struct drm_framebuffer {
- struct drm_device *dev;
- /*
- * Note that the fb is refcounted for the benefit of driver internals,
- * for example some hw, disabling a CRTC/plane is asynchronous, and
- * scanout does not actually complete until the next vblank. So some
- * cleanup (like releasing the reference(s) on the backing GEM bo(s))
- * should be deferred. In cases like this, the driver would like to
- * hold a ref to the fb even though it has already been removed from
- * userspace perspective.
- * The refcount is stored inside the mode object.
- */
- /*
- * Place on the dev->mode_config.fb_list, access protected by
- * dev->mode_config.fb_lock.
- */
- struct list_head head;
- struct drm_mode_object base;
- const struct drm_framebuffer_funcs *funcs;
- unsigned int pitches[4];
- unsigned int offsets[4];
- uint64_t modifier[4];
- unsigned int width;
- unsigned int height;
- /* depth can be 15 or 16 */
- unsigned int depth;
- int bits_per_pixel;
- int flags;
- uint32_t pixel_format; /* fourcc format */
- int hot_x;
- int hot_y;
- struct list_head filp_head;
-};
-
-struct drm_property_blob {
- struct drm_mode_object base;
- struct drm_device *dev;
- struct list_head head_global;
- struct list_head head_file;
- size_t length;
- unsigned char data[];
-};
-
-struct drm_property_enum {
- uint64_t value;
- struct list_head head;
- char name[DRM_PROP_NAME_LEN];
-};
-
-struct drm_property {
- struct list_head head;
- struct drm_mode_object base;
- uint32_t flags;
- char name[DRM_PROP_NAME_LEN];
- uint32_t num_values;
- uint64_t *values;
- struct drm_device *dev;
-
- struct list_head enum_list;
-};
-
struct drm_crtc;
-struct drm_connector;
struct drm_encoder;
struct drm_pending_vblank_event;
struct drm_plane;
@@ -296,7 +82,6 @@ struct drm_atomic_state;
struct drm_crtc_helper_funcs;
struct drm_encoder_helper_funcs;
-struct drm_connector_helper_funcs;
struct drm_plane_helper_funcs;
/**
@@ -324,8 +109,6 @@ struct drm_plane_helper_funcs;
* @ctm: Transformation matrix
* @gamma_lut: Lookup table for converting pixel data after the
* conversion matrix
- * @event: optional pointer to a DRM event to signal upon completion of the
- * state update
* @state: backpointer to global drm_atomic_state
*
* Note that the distinction between @enable and @active is rather subtile:
@@ -374,6 +157,46 @@ struct drm_crtc_state {
struct drm_property_blob *ctm;
struct drm_property_blob *gamma_lut;
+ /**
+ * @event:
+ *
+ * Optional pointer to a DRM event to signal upon completion of the
+ * state update. The driver must send out the event when the atomic
+ * commit operation completes. There are two cases:
+ *
+ * - The event is for a CRTC which is being disabled through this
+ * atomic commit. In that case the event can be send out any time
+ * after the hardware has stopped scanning out the current
+ * framebuffers. It should contain the timestamp and counter for the
+ * last vblank before the display pipeline was shut off.
+ *
+ * - For a CRTC which is enabled at the end of the commit (even when it
+ * undergoes an full modeset) the vblank timestamp and counter must
+ * be for the vblank right before the first frame that scans out the
+ * new set of buffers. Again the event can only be sent out after the
+ * hardware has stopped scanning out the old buffers.
+ *
+ * - Events for disabled CRTCs are not allowed, and drivers can ignore
+ * that case.
+ *
+ * This can be handled by the drm_crtc_send_vblank_event() function,
+ * which the driver should call on the provided event upon completion of
+ * the atomic commit. Note that if the driver supports vblank signalling
+ * and timestamping the vblank counters and timestamps must agree with
+ * the ones returned from page flip events. With the current vblank
+ * helper infrastructure this can be achieved by holding a vblank
+ * reference while the page flip is pending, acquired through
+ * drm_crtc_vblank_get() and released with drm_crtc_vblank_put().
+ * Drivers are free to implement their own vblank counter and timestamp
+ * tracking though, e.g. if they have accurate timestamp registers in
+ * hardware.
+ *
+ * For hardware which supports some means to synchronize vblank
+ * interrupt delivery with committing display state there's also
+ * drm_crtc_arm_vblank_event(). See the documentation of that function
+ * for a detailed discussion of the constraints it needs to be used
+ * safely.
+ */
struct drm_pending_vblank_event *event;
struct drm_atomic_state *state;
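
A sketch of the completion pattern described for @event: once the hardware has latched the new state the driver hands the event to the vblank code; the exact locking and IRQ context are driver specific, only the use of dev->event_lock and drm_crtc_send_vblank_event() is shown:

	if (crtc->state->event) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		spin_unlock_irq(&crtc->dev->event_lock);
		crtc->state->event = NULL;
	}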
@@ -545,16 +368,6 @@ struct drm_crtc_funcs {
* counter and timestamp tracking though, e.g. if they have accurate
* timestamp registers in hardware.
*
- * FIXME:
- *
- * Up to that point drivers need to manage events themselves and can use
- * even->base.list freely for that. Specifically they need to ensure
- * that they don't send out page flip (or vblank) events for which the
- * corresponding drm file has been closed already. The drm core
- * unfortunately does not (yet) take care of that. Therefore drivers
- * currently must clean up and release pending events in their
- * ->preclose driver function.
- *
* This callback is optional.
*
* NOTE:
@@ -581,6 +394,24 @@ struct drm_crtc_funcs {
uint32_t flags);
/**
+ * @page_flip_target:
+ *
+ * Same as @page_flip but with an additional parameter specifying the
+ * absolute target vertical blank period (as reported by
+ * drm_crtc_vblank_count()) when the flip should take effect.
+ *
+ * Note that the core code calls drm_crtc_vblank_get before this entry
+ * point, and will call drm_crtc_vblank_put if this entry point returns
+ * any non-0 error code. It's the driver's responsibility to call
+ * drm_crtc_vblank_put after this entry point returns 0, typically when
+ * the flip completes.
+ */
+ int (*page_flip_target)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags, uint32_t target);
+
+ /**
* @set_property:
*
* This is the legacy entry point to update a property attached to the
@@ -852,1201 +683,6 @@ struct drm_crtc {
};
/**
- * struct drm_connector_state - mutable connector state
- * @connector: backpointer to the connector
- * @crtc: CRTC to connect connector to, NULL if disabled
- * @best_encoder: can be used by helpers and drivers to select the encoder
- * @state: backpointer to global drm_atomic_state
- */
-struct drm_connector_state {
- struct drm_connector *connector;
-
- struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
-
- struct drm_encoder *best_encoder;
-
- struct drm_atomic_state *state;
-};
-
-/**
- * struct drm_connector_funcs - control connectors on a given device
- *
- * Each CRTC may have one or more connectors attached to it. The functions
- * below allow the core DRM code to control connectors, enumerate available modes,
- * etc.
- */
-struct drm_connector_funcs {
- /**
- * @dpms:
- *
- * Legacy entry point to set the per-connector DPMS state. Legacy DPMS
- * is exposed as a standard property on the connector, but diverted to
- * this callback in the drm core. Note that atomic drivers don't
- * implement the 4 level DPMS support on the connector any more, but
- * instead only have an on/off "ACTIVE" property on the CRTC object.
- *
- * Drivers implementing atomic modeset should use
- * drm_atomic_helper_connector_dpms() to implement this hook.
- *
- * RETURNS:
- *
- * 0 on success or a negative error code on failure.
- */
- int (*dpms)(struct drm_connector *connector, int mode);
-
- /**
- * @reset:
- *
- * Reset connector hardware and software state to off. This function isn't
- * called by the core directly, only through drm_mode_config_reset().
- * It's not a helper hook only for historical reasons.
- *
- * Atomic drivers can use drm_atomic_helper_connector_reset() to reset
- * atomic state using this hook.
- */
- void (*reset)(struct drm_connector *connector);
-
- /**
- * @detect:
- *
- * Check to see if anything is attached to the connector. The parameter
- * force is set to false whilst polling, true when checking the
- * connector due to a user request. force can be used by the driver to
- * avoid expensive, destructive operations during automated probing.
- *
- * FIXME:
- *
- * Note that this hook is only called by the probe helper. It's not in
- * the helper library vtable purely for historical reasons. The only DRM
- * core entry point to probe connector state is @fill_modes.
- *
- * RETURNS:
- *
- * drm_connector_status indicating the connector's status.
- */
- enum drm_connector_status (*detect)(struct drm_connector *connector,
- bool force);
-
- /**
- * @force:
- *
- * This function is called to update internal encoder state when the
- * connector is forced to a certain state by userspace, either through
- * the sysfs interfaces or on the kernel cmdline. In that case the
- * @detect callback isn't called.
- *
- * FIXME:
- *
- * Note that this hook is only called by the probe helper. It's not in
- * the helper library vtable purely for historical reasons. The only DRM
- * core entry point to probe connector state is @fill_modes.
- */
- void (*force)(struct drm_connector *connector);
-
- /**
- * @fill_modes:
- *
- * Entry point for output detection and basic mode validation. The
- * driver should reprobe the output if needed (e.g. when hotplug
- * handling is unreliable), add all detected modes to connector->modes
- * and filter out any the device can't support in any configuration. It
- * also needs to filter out any modes wider or higher than the
- * parameters max_width and max_height indicate.
- *
- * The drivers must also prune any modes no longer valid from
- * connector->modes. Furthermore it must update connector->status and
- * connector->edid. If no EDID has been received for this output
- * connector->edid must be NULL.
- *
- * Drivers using the probe helpers should use
- * drm_helper_probe_single_connector_modes() or
- * drm_helper_probe_single_connector_modes_nomerge() to implement this
- * function.
- *
- * RETURNS:
- *
- * The number of modes detected and filled into connector->modes.
- */
- int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
-
- /**
- * @set_property:
- *
- * This is the legacy entry point to update a property attached to the
- * connector.
- *
- * Drivers implementing atomic modeset should use
- * drm_atomic_helper_connector_set_property() to implement this hook.
- *
- * This callback is optional if the driver does not support any legacy
- * driver-private properties.
- *
- * RETURNS:
- *
- * 0 on success or a negative error code on failure.
- */
- int (*set_property)(struct drm_connector *connector, struct drm_property *property,
- uint64_t val);
-
- /**
- * @late_register:
- *
- * This optional hook can be used to register additional userspace
- * interfaces attached to the connector, light backlight control, i2c,
- * DP aux or similar interfaces. It is called late in the driver load
- * sequence from drm_connector_register() when registering all the
- * core drm connector interfaces. Everything added from this callback
- * should be unregistered in the early_unregister callback.
- *
- * Returns:
- *
- * 0 on success, or a negative error code on failure.
- */
- int (*late_register)(struct drm_connector *connector);
-
- /**
- * @early_unregister:
- *
- * This optional hook should be used to unregister the additional
- * userspace interfaces attached to the connector from
- * late_unregister(). It is called from drm_connector_unregister(),
- * early in the driver unload sequence to disable userspace access
- * before data structures are torndown.
- */
- void (*early_unregister)(struct drm_connector *connector);
-
- /**
- * @destroy:
- *
- * Clean up connector resources. This is called at driver unload time
- * through drm_mode_config_cleanup(). It can also be called at runtime
- * when a connector is being hot-unplugged for drivers that support
- * connector hotplugging (e.g. DisplayPort MST).
- */
- void (*destroy)(struct drm_connector *connector);
-
- /**
- * @atomic_duplicate_state:
- *
- * Duplicate the current atomic state for this connector and return it.
- * The core and helpers gurantee that any atomic state duplicated with
- * this hook and still owned by the caller (i.e. not transferred to the
- * driver by calling ->atomic_commit() from struct
- * &drm_mode_config_funcs) will be cleaned up by calling the
- * @atomic_destroy_state hook in this structure.
- *
- * Atomic drivers which don't subclass struct &drm_connector_state should use
- * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
- * state structure to extend it with driver-private state should use
- * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
- * duplicated in a consistent fashion across drivers.
- *
- * It is an error to call this hook before connector->state has been
- * initialized correctly.
- *
- * NOTE:
- *
- * If the duplicate state references refcounted resources this hook must
- * acquire a reference for each of them. The driver must release these
- * references again in @atomic_destroy_state.
- *
- * RETURNS:
- *
- * Duplicated atomic state or NULL when the allocation failed.
- */
- struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
-
- /**
- * @atomic_destroy_state:
- *
- * Destroy a state duplicated with @atomic_duplicate_state and release
- * or unreference all resources it references
- */
- void (*atomic_destroy_state)(struct drm_connector *connector,
- struct drm_connector_state *state);
-
- /**
- * @atomic_set_property:
- *
- * Decode a driver-private property value and store the decoded value
- * into the passed-in state structure. Since the atomic core decodes all
- * standardized properties (even for extensions beyond the core set of
- * properties which might not be implemented by all drivers) this
- * requires drivers to subclass the state structure.
- *
- * Such driver-private properties should really only be implemented for
- * truly hardware/vendor specific state. Instead it is preferred to
- * standardize atomic extension and decode the properties used to expose
- * such an extension in the core.
- *
- * Do not call this function directly, use
- * drm_atomic_connector_set_property() instead.
- *
- * This callback is optional if the driver does not support any
- * driver-private atomic properties.
- *
- * NOTE:
- *
- * This function is called in the state assembly phase of atomic
- * modesets, which can be aborted for any reason (including on
- * userspace's request to just check whether a configuration would be
- * possible). Drivers MUST NOT touch any persistent state (hardware or
- * software) or data structures except the passed in @state parameter.
- *
- * Also since userspace controls in which order properties are set this
- * function must not do any input validation (since the state update is
- * incomplete and hence likely inconsistent). Instead any such input
- * validation must be done in the various atomic_check callbacks.
- *
- * RETURNS:
- *
- * 0 if the property has been found, -EINVAL if the property isn't
- * implemented by the driver (which shouldn't ever happen, the core only
- * asks for properties attached to this connector). No other validation
- * is allowed by the driver. The core already checks that the property
- * value is within the range (integer, valid enum value, ...) the driver
- * set when registering the property.
- */
- int (*atomic_set_property)(struct drm_connector *connector,
- struct drm_connector_state *state,
- struct drm_property *property,
- uint64_t val);
-
- /**
- * @atomic_get_property:
- *
- * Reads out the decoded driver-private property. This is used to
- * implement the GETCONNECTOR IOCTL.
- *
- * Do not call this function directly, use
- * drm_atomic_connector_get_property() instead.
- *
- * This callback is optional if the driver does not support any
- * driver-private atomic properties.
- *
- * RETURNS:
- *
- * 0 on success, -EINVAL if the property isn't implemented by the
- * driver (which shouldn't ever happen, the core only asks for
- * properties attached to this connector).
- */
- int (*atomic_get_property)(struct drm_connector *connector,
- const struct drm_connector_state *state,
- struct drm_property *property,
- uint64_t *val);
-};
-
-/**
- * struct drm_encoder_funcs - encoder controls
- *
- * Encoders sit between CRTCs and connectors.
- */
-struct drm_encoder_funcs {
- /**
- * @reset:
- *
- * Reset encoder hardware and software state to off. This function isn't
- * called by the core directly, only through drm_mode_config_reset().
- * It's not a helper hook only for historical reasons.
- */
- void (*reset)(struct drm_encoder *encoder);
-
- /**
- * @destroy:
- *
- * Clean up encoder resources. This is only called at driver unload time
- * through drm_mode_config_cleanup() since an encoder cannot be
- * hotplugged in DRM.
- */
- void (*destroy)(struct drm_encoder *encoder);
-
- /**
- * @late_register:
- *
- * This optional hook can be used to register additional userspace
- * interfaces attached to the encoder like debugfs interfaces.
- * It is called late in the driver load sequence from drm_dev_register().
- * Everything added from this callback should be unregistered in
- * the early_unregister callback.
- *
- * Returns:
- *
- * 0 on success, or a negative error code on failure.
- */
- int (*late_register)(struct drm_encoder *encoder);
-
- /**
- * @early_unregister:
- *
- * This optional hook should be used to unregister the additional
- * userspace interfaces attached to the encoder from
- * late_unregister(). It is called from drm_dev_unregister(),
- * early in the driver unload sequence to disable userspace access
- * before data structures are torndown.
- */
- void (*early_unregister)(struct drm_encoder *encoder);
-};
-
-#define DRM_CONNECTOR_MAX_ENCODER 3
-
-/**
- * struct drm_encoder - central DRM encoder structure
- * @dev: parent DRM device
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
- * @possible_crtcs: bitmask of potential CRTC bindings
- * @possible_clones: bitmask of potential sibling encoders for cloning
- * @crtc: currently bound CRTC
- * @bridge: bridge associated to the encoder
- * @funcs: control functions
- * @helper_private: mid-layer private data
- *
- * CRTCs drive pixels to encoders, which convert them into signals
- * appropriate for a given connector or set of connectors.
- */
-struct drm_encoder {
- struct drm_device *dev;
- struct list_head head;
-
- struct drm_mode_object base;
- char *name;
- int encoder_type;
-
- /**
- * @index: Position inside the mode_config.list, can be used as an array
- * index. It is invariant over the lifetime of the encoder.
- */
- unsigned index;
-
- uint32_t possible_crtcs;
- uint32_t possible_clones;
-
- struct drm_crtc *crtc;
- struct drm_bridge *bridge;
- const struct drm_encoder_funcs *funcs;
- const struct drm_encoder_helper_funcs *helper_private;
-};
-
-/* should we poll this connector for connects and disconnects */
-/* hot plug detectable */
-#define DRM_CONNECTOR_POLL_HPD (1 << 0)
-/* poll for connections */
-#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
-/* can cleanly poll for disconnections without flickering the screen */
-/* DACs should rarely do this without a lot of testing */
-#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
-
-#define MAX_ELD_BYTES 128
-
-/**
- * struct drm_connector - central DRM connector control structure
- * @dev: parent DRM device
- * @kdev: kernel device for sysfs attributes
- * @attr: sysfs attributes
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
- * @connector_type_id: index into connector type enum
- * @interlace_allowed: can this connector handle interlaced modes?
- * @doublescan_allowed: can this connector handle doublescan?
- * @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
- * @modes: modes available on this connector (from fill_modes() + user)
- * @status: one of the drm_connector_status enums (connected, not, or unknown)
- * @probed_modes: list of modes derived directly from the display
- * @display_info: information about attached display (e.g. from EDID)
- * @funcs: connector control functions
- * @edid_blob_ptr: DRM property containing EDID if present
- * @properties: property tracking for this connector
- * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
- * @dpms: current dpms state
- * @helper_private: mid-layer private data
- * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
- * @force: a %DRM_FORCE_<foo> state for forced mode sets
- * @override_edid: has the EDID been overwritten through debugfs for testing?
- * @encoder_ids: valid encoders for this connector
- * @encoder: encoder driving this connector, if any
- * @eld: EDID-like data, if present
- * @dvi_dual: dual link DVI, if found
- * @max_tmds_clock: max clock rate, if found
- * @latency_present: AV delay info from ELD, if found
- * @video_latency: video latency info from ELD, if found
- * @audio_latency: audio latency info from ELD, if found
- * @null_edid_counter: track sinks that give us all zeros for the EDID
- * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
- * @edid_corrupt: indicates whether the last read EDID was corrupt
- * @debugfs_entry: debugfs directory for this connector
- * @state: current atomic state for this connector
- * @has_tile: is this connector connected to a tiled monitor
- * @tile_group: tile group for the connected monitor
- * @tile_is_single_monitor: whether the tile is one monitor housing
- * @num_h_tile: number of horizontal tiles in the tile group
- * @num_v_tile: number of vertical tiles in the tile group
- * @tile_h_loc: horizontal location of this tile
- * @tile_v_loc: vertical location of this tile
- * @tile_h_size: horizontal size of this tile.
- * @tile_v_size: vertical size of this tile.
- *
- * Each connector may be connected to one or more CRTCs, or may be clonable by
- * another connector if they can share a CRTC. Each connector also has a specific
- * position in the broader display (referred to as a 'screen' though it could
- * span multiple monitors).
- */
-struct drm_connector {
- struct drm_device *dev;
- struct device *kdev;
- struct device_attribute *attr;
- struct list_head head;
-
- struct drm_mode_object base;
-
- char *name;
-
- /**
- * @index: Compacted connector index, which matches the position inside
- * the mode_config.list for drivers not supporting hot-add/removing. Can
- * be used as an array index. It is invariant over the lifetime of the
- * connector.
- */
- unsigned index;
-
- int connector_type;
- int connector_type_id;
- bool interlace_allowed;
- bool doublescan_allowed;
- bool stereo_allowed;
- bool registered;
- struct list_head modes; /* list of modes on this connector */
-
- enum drm_connector_status status;
-
- /* these are modes added by probing with DDC or the BIOS */
- struct list_head probed_modes;
-
- struct drm_display_info display_info;
- const struct drm_connector_funcs *funcs;
-
- struct drm_property_blob *edid_blob_ptr;
- struct drm_object_properties properties;
-
- /**
- * @path_blob_ptr:
- *
- * DRM blob property data for the DP MST path property.
- */
- struct drm_property_blob *path_blob_ptr;
-
- /**
- * @tile_blob_ptr:
- *
- * DRM blob property data for the tile property (used mostly by DP MST).
- * This is meant for screens which are driven through separate display
- * pipelines represented by &drm_crtc, which might not be running with
- * genlocked clocks. For tiled panels which are genlocked, like
- * dual-link LVDS or dual-link DSI, the driver should try to not expose
- * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
- */
- struct drm_property_blob *tile_blob_ptr;
-
- uint8_t polled; /* DRM_CONNECTOR_POLL_* */
-
- /* requested DPMS state */
- int dpms;
-
- const struct drm_connector_helper_funcs *helper_private;
-
- /* forced on connector */
- struct drm_cmdline_mode cmdline_mode;
- enum drm_connector_force force;
- bool override_edid;
- uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
- struct drm_encoder *encoder; /* currently active encoder */
-
- /* EDID bits */
- uint8_t eld[MAX_ELD_BYTES];
- bool dvi_dual;
- int max_tmds_clock; /* in MHz */
- bool latency_present[2];
- int video_latency[2]; /* [0]: progressive, [1]: interlaced */
- int audio_latency[2];
- int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
- unsigned bad_edid_counter;
-
- /* Flag for raw EDID header corruption - used in Displayport
- * compliance testing - Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
- */
- bool edid_corrupt;
-
- struct dentry *debugfs_entry;
-
- struct drm_connector_state *state;
-
- /* DisplayID bits */
- bool has_tile;
- struct drm_tile_group *tile_group;
- bool tile_is_single_monitor;
-
- uint8_t num_h_tile, num_v_tile;
- uint8_t tile_h_loc, tile_v_loc;
- uint16_t tile_h_size, tile_v_size;
-};
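For orientation, a minimal bring-up of such a connector might look as follows. This is a hedged sketch rather than code from this patch: the foo_ prefix and the choice of HDMI-A are purely illustrative, and error unwinding is elided. It only uses the init/attach/register calls declared further down in this header.

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Create, wire up and expose one HDMI connector. */
static int foo_create_hdmi_connector(struct drm_device *dev,
                                     struct drm_connector *connector,
                                     struct drm_encoder *encoder,
                                     const struct drm_connector_funcs *funcs)
{
        int ret;

        ret = drm_connector_init(dev, connector, funcs,
                                 DRM_MODE_CONNECTOR_HDMIA);
        if (ret)
                return ret;

        /* The connector has a hotplug line, so ask the core to use it. */
        connector->polled = DRM_CONNECTOR_POLL_HPD;

        ret = drm_mode_connector_attach_encoder(connector, encoder);
        if (ret)
                return ret;

        /* Make the connector visible to userspace. */
        return drm_connector_register(connector);
}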
-
-/**
- * struct drm_plane_state - mutable plane state
- * @plane: backpointer to the plane
- * @crtc: currently bound CRTC, NULL if disabled
- * @fb: currently bound framebuffer
- * @fence: optional fence to wait for before scanning out @fb
- * @crtc_x: left position of visible portion of plane on crtc
- * @crtc_y: upper position of visible portion of plane on crtc
- * @crtc_w: width of visible portion of plane on crtc
- * @crtc_h: height of visible portion of plane on crtc
- * @src_x: left position of visible portion of plane within
- * plane (in 16.16)
- * @src_y: upper position of visible portion of plane within
- * plane (in 16.16)
- * @src_w: width of visible portion of plane (in 16.16)
- * @src_h: height of visible portion of plane (in 16.16)
- * @rotation: rotation of the plane
- * @zpos: priority of the given plane on crtc (optional)
- * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
- * where N is the number of active planes for given crtc
- * @state: backpointer to global drm_atomic_state
- */
-struct drm_plane_state {
- struct drm_plane *plane;
-
- struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
- struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
- struct fence *fence;
-
- /* Signed dest location allows it to be partially off screen */
- int32_t crtc_x, crtc_y;
- uint32_t crtc_w, crtc_h;
-
- /* Source values are 16.16 fixed point */
- uint32_t src_x, src_y;
- uint32_t src_h, src_w;
-
- /* Plane rotation */
- unsigned int rotation;
-
- /* Plane zpos */
- unsigned int zpos;
- unsigned int normalized_zpos;
-
- struct drm_atomic_state *state;
-};
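To make the 16.16 fixed-point convention for the source rectangle concrete, here is a small hedged sketch; the helper name and the 1920x1080 figures are illustrative, not part of this patch.

#include <drm/drm_crtc.h>

/* Integer pixels -> 16.16 fixed point, as expected by src_{x,y,w,h}. */
static inline uint32_t foo_px_to_fixed16(uint32_t px)
{
        return px << 16;
}

/* Scan out a full 1920x1080 framebuffer without scaling. */
static void foo_fill_full_src(struct drm_plane_state *state)
{
        state->src_x = foo_px_to_fixed16(0);
        state->src_y = foo_px_to_fixed16(0);
        state->src_w = foo_px_to_fixed16(1920);
        state->src_h = foo_px_to_fixed16(1080);
}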
-
-
-/**
- * struct drm_plane_funcs - driver plane control functions
- */
-struct drm_plane_funcs {
- /**
- * @update_plane:
- *
- * This is the legacy entry point to enable and configure the plane for
- * the given CRTC and framebuffer. It is never called to disable the
- * plane, i.e. the passed-in crtc and fb parameters are never NULL.
- *
- * The source rectangle in frame buffer memory coordinates is given by
- * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
- * values). Devices that don't support subpixel plane coordinates can
- * ignore the fractional part.
- *
- * The destination rectangle in CRTC coordinates is given by the
- * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
- * Devices scale the source rectangle to the destination rectangle. If
- * scaling is not supported, and the source rectangle size doesn't match
- * the destination rectangle size, the driver must return a
- * -EINVAL error.
- *
- * Drivers implementing atomic modeset should use
- * drm_atomic_helper_update_plane() to implement this hook.
- *
- * RETURNS:
- *
- * 0 on success or a negative error code on failure.
- */
- int (*update_plane)(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-
- /**
- * @disable_plane:
- *
- * This is the legacy entry point to disable the plane. The DRM core
- * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
- * with the frame buffer ID set to 0. Disabled planes must not be
- * processed by the CRTC.
- *
- * Drivers implementing atomic modeset should use
- * drm_atomic_helper_disable_plane() to implement this hook.
- *
- * RETURNS:
- *
- * 0 on success or a negative error code on failure.
- */
- int (*disable_plane)(struct drm_plane *plane);
-
- /**
- * @destroy:
- *
- * Clean up plane resources. This is only called at driver unload time
- * through drm_mode_config_cleanup() since a plane cannot be hotplugged
- * in DRM.
- */
- void (*destroy)(struct drm_plane *plane);
-
- /**
- * @reset:
- *
- * Reset plane hardware and software state to off. This function isn't
- * called by the core directly, only through drm_mode_config_reset().
- * That it is not a helper hook is only for historical reasons.
- *
- * Atomic drivers can use drm_atomic_helper_plane_reset() to reset
- * atomic state using this hook.
- */
- void (*reset)(struct drm_plane *plane);
-
- /**
- * @set_property:
- *
- * This is the legacy entry point to update a property attached to the
- * plane.
- *
- * Drivers implementing atomic modeset should use
- * drm_atomic_helper_plane_set_property() to implement this hook.
- *
- * This callback is optional if the driver does not support any legacy
- * driver-private properties.
- *
- * RETURNS:
- *
- * 0 on success or a negative error code on failure.
- */
- int (*set_property)(struct drm_plane *plane,
- struct drm_property *property, uint64_t val);
-
- /**
- * @atomic_duplicate_state:
- *
- * Duplicate the current atomic state for this plane and return it.
- * The core and helpers guarantee that any atomic state duplicated with
- * this hook and still owned by the caller (i.e. not transferred to the
- * driver by calling ->atomic_commit() from struct
- * &drm_mode_config_funcs) will be cleaned up by calling the
- * @atomic_destroy_state hook in this structure.
- *
- * Atomic drivers which don't subclass struct &drm_plane_state should use
- * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
- * state structure to extend it with driver-private state should use
- * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
- * duplicated in a consistent fashion across drivers.
- *
- * It is an error to call this hook before plane->state has been
- * initialized correctly.
- *
- * NOTE:
- *
- * If the duplicate state references refcounted resources this hook must
- * acquire a reference for each of them. The driver must release these
- * references again in @atomic_destroy_state.
- *
- * RETURNS:
- *
- * Duplicated atomic state or NULL when the allocation failed.
- */
- struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
-
- /**
- * @atomic_destroy_state:
- *
- * Destroy a state duplicated with @atomic_duplicate_state and release
- * or unreference all resources it references.
- */
- void (*atomic_destroy_state)(struct drm_plane *plane,
- struct drm_plane_state *state);
-
- /**
- * @atomic_set_property:
- *
- * Decode a driver-private property value and store the decoded value
- * into the passed-in state structure. Since the atomic core decodes all
- * standardized properties (even for extensions beyond the core set of
- * properties which might not be implemented by all drivers) this
- * requires drivers to subclass the state structure.
- *
- * Such driver-private properties should really only be implemented for
- * truly hardware/vendor specific state. Instead it is preferred to
- * standardize atomic extension and decode the properties used to expose
- * such an extension in the core.
- *
- * Do not call this function directly, use
- * drm_atomic_plane_set_property() instead.
- *
- * This callback is optional if the driver does not support any
- * driver-private atomic properties.
- *
- * NOTE:
- *
- * This function is called in the state assembly phase of atomic
- * modesets, which can be aborted for any reason (including on
- * userspace's request to just check whether a configuration would be
- * possible). Drivers MUST NOT touch any persistent state (hardware or
- * software) or data structures except the passed in @state parameter.
- *
- * Also, since userspace controls in which order properties are set, this
- * function must not do any input validation (since the state update is
- * incomplete and hence likely inconsistent). Instead any such input
- * validation must be done in the various atomic_check callbacks.
- *
- * RETURNS:
- *
- * 0 if the property has been found, -EINVAL if the property isn't
- * implemented by the driver (which shouldn't ever happen, the core only
- * asks for properties attached to this plane). No other validation is
- * allowed by the driver. The core already checks that the property
- * value is within the range (integer, valid enum value, ...) the driver
- * set when registering the property.
- */
- int (*atomic_set_property)(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t val);
-
- /**
- * @atomic_get_property:
- *
- * Reads out the decoded driver-private property. This is used to
- * implement the GETPLANE IOCTL.
- *
- * Do not call this function directly, use
- * drm_atomic_plane_get_property() instead.
- *
- * This callback is optional if the driver does not support any
- * driver-private atomic properties.
- *
- * RETURNS:
- *
- * 0 on success, -EINVAL if the property isn't implemented by the
- * driver (which should never happen, the core only asks for
- * properties attached to this plane).
- */
- int (*atomic_get_property)(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t *val);
- /**
- * @late_register:
- *
- * This optional hook can be used to register additional userspace
- * interfaces attached to the plane like debugfs interfaces.
- * It is called late in the driver load sequence from drm_dev_register().
- * Everything added from this callback should be unregistered in
- * the early_unregister callback.
- *
- * Returns:
- *
- * 0 on success, or a negative error code on failure.
- */
- int (*late_register)(struct drm_plane *plane);
-
- /**
- * @early_unregister:
- *
- * This optional hook should be used to unregister the additional
- * userspace interfaces attached to the plane from
- * late_register(). It is called from drm_dev_unregister(),
- * early in the driver unload sequence to disable userspace access
- * before data structures are torn down.
- */
- void (*early_unregister)(struct drm_plane *plane);
-};
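For an atomic driver that does not subclass drm_plane_state, these hooks are typically just routed to the helpers named in the kerneldoc above; a hedged sketch follows, where the foo_ name is hypothetical.

#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>

static const struct drm_plane_funcs foo_plane_funcs = {
        /* Legacy entry points funnelled through the atomic helpers. */
        .update_plane           = drm_atomic_helper_update_plane,
        .disable_plane          = drm_atomic_helper_disable_plane,
        .destroy                = drm_plane_cleanup,
        /* Stock state handling, no driver-private state involved. */
        .reset                  = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
};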
-
-enum drm_plane_type {
- DRM_PLANE_TYPE_OVERLAY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_CURSOR,
-};
-
-
-/**
- * struct drm_plane - central DRM plane control structure
- * @dev: DRM device this plane belongs to
- * @head: for list management
- * @name: human readable name, can be overwritten by the driver
- * @base: base mode object
- * @possible_crtcs: pipes this plane can be bound to
- * @format_types: array of formats supported by this plane
- * @format_count: number of formats supported
- * @format_default: driver hasn't supplied supported formats for the plane
- * @crtc: currently bound CRTC
- * @fb: currently bound fb
- * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
- * drm_mode_set_config_internal() to implement correct refcounting.
- * @funcs: helper functions
- * @properties: property tracking for this plane
- * @type: type of plane (overlay, primary, cursor)
- * @state: current atomic state for this plane
- * @zpos_property: zpos property for this plane
- * @helper_private: mid-layer private data
- */
-struct drm_plane {
- struct drm_device *dev;
- struct list_head head;
-
- char *name;
-
- /**
- * @mutex:
- *
- * Protects modeset plane state, together with the mutex of &drm_crtc
- * this plane is linked to (when active, getting activated or getting
- * disabled).
- */
- struct drm_modeset_lock mutex;
-
- struct drm_mode_object base;
-
- uint32_t possible_crtcs;
- uint32_t *format_types;
- unsigned int format_count;
- bool format_default;
-
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
-
- struct drm_framebuffer *old_fb;
-
- const struct drm_plane_funcs *funcs;
-
- struct drm_object_properties properties;
-
- enum drm_plane_type type;
-
- /**
- * @index: Position inside the mode_config.list, can be used as an array
- * index. It is invariant over the lifetime of the plane.
- */
- unsigned index;
-
- const struct drm_plane_helper_funcs *helper_private;
-
- struct drm_plane_state *state;
-
- struct drm_property *zpos_property;
-};
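A plane like this is typically registered with drm_universal_plane_init(), declared later in this header; a hedged sketch, reusing the hypothetical foo_plane_funcs table from above:

#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>

static const uint32_t foo_plane_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
};

/* Register a primary plane that can only be bound to the first CRTC. */
static int foo_create_primary_plane(struct drm_device *dev,
                                    struct drm_plane *plane)
{
        return drm_universal_plane_init(dev, plane, BIT(0),
                                        &foo_plane_funcs,
                                        foo_plane_formats,
                                        ARRAY_SIZE(foo_plane_formats),
                                        DRM_PLANE_TYPE_PRIMARY, NULL);
}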
-
-/**
- * struct drm_bridge_funcs - drm_bridge control functions
- * @attach: Called during drm_bridge_attach
- */
-struct drm_bridge_funcs {
- int (*attach)(struct drm_bridge *bridge);
-
- /**
- * @mode_fixup:
- *
- * This callback is used to validate and adjust a mode. The parameter
- * mode is the display mode that should be fed to the next element in
- * the display chain, either the final &drm_connector or the next
- * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
- * requires. It can be modified by this callback and does not need to
- * match mode.
- *
- * This is the only hook that allows a bridge to reject a modeset. If
- * this function passes, all other callbacks must succeed for this
- * configuration.
- *
- * NOTE:
- *
- * This function is called in the check phase of atomic modesets, which
- * can be aborted for any reason (including on userspace's request to
- * just check whether a configuration would be possible). Drivers MUST
- * NOT touch any persistent state (hardware or software) or data
- * structures except the passed-in mode and adjusted_mode parameters.
- *
- * RETURNS:
- *
- * True if an acceptable configuration is possible, false if the modeset
- * operation should be rejected.
- */
- bool (*mode_fixup)(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
- /**
- * @disable:
- *
- * This callback should disable the bridge. It is called right before
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's ->disable() function. If the preceding element is a
- * &drm_encoder it's called right before the encoder's ->disable(),
- * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
- *
- * The bridge can assume that the display pipe (i.e. clocks and timing
- * signals) feeding it is still running when this callback is called.
- *
- * The disable callback is optional.
- */
- void (*disable)(struct drm_bridge *bridge);
-
- /**
- * @post_disable:
- *
- * This callback should disable the bridge. It is called right after
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's ->post_disable() function. If the preceding element is a
- * &drm_encoder it's called right after the encoder's ->disable(),
- * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
- *
- * The bridge must assume that the display pipe (i.e. clocks and timing
- * signals) feeding it is no longer running when this callback is
- * called.
- *
- * The post_disable callback is optional.
- */
- void (*post_disable)(struct drm_bridge *bridge);
-
- /**
- * @mode_set:
- *
- * This callback should set the given mode on the bridge. It is called
- * after the ->mode_set() callback for the preceding element in the
- * display pipeline has been called already. The display pipe (i.e.
- * clocks and timing signals) is off when this function is called.
- */
- void (*mode_set)(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
- /**
- * @pre_enable:
- *
- * This callback should enable the bridge. It is called right before
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's ->pre_enable() function. If the preceding element is a
- * &drm_encoder it's called right before the encoder's ->enable(),
- * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
- *
- * The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when this callback is called. The bridge must
- * not enable the display link feeding the next bridge in the chain (if
- * there is one) when this callback is called.
- *
- * The pre_enable callback is optional.
- */
- void (*pre_enable)(struct drm_bridge *bridge);
-
- /**
- * @enable:
- *
- * This callback should enable the bridge. It is called right after
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's ->enable() function. If the preceding element is a
- * &drm_encoder it's called right after the encoder's ->enable(),
- * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
- *
- * The bridge can assume that the display pipe (i.e. clocks and timing
- * signals) feeding it is running when this callback is called. This
- * callback must enable the display link feeding the next bridge in the
- * chain if there is one.
- *
- * The enable callback is optional.
- */
- void (*enable)(struct drm_bridge *bridge);
-};
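The enable/disable pairs above bracket whatever precedes the bridge in the display pipe; here is a skeleton of how a bridge driver usually fills this table. All foo_ symbols are hypothetical and the bodies are reduced to comments.

#include <drm/drm_crtc.h>

static int foo_bridge_attach(struct drm_bridge *bridge)
{
        /* e.g. create and attach a connector for the downstream panel */
        return 0;
}

static void foo_bridge_pre_enable(struct drm_bridge *bridge)
{
        /* power the bridge up before the video signal starts */
}

static void foo_bridge_enable(struct drm_bridge *bridge)
{
        /* video signal is running: start the downstream link */
}

static void foo_bridge_disable(struct drm_bridge *bridge)
{
        /* video signal still running: stop the downstream link */
}

static void foo_bridge_post_disable(struct drm_bridge *bridge)
{
        /* video signal is off: power the bridge down */
}

static const struct drm_bridge_funcs foo_bridge_funcs = {
        .attach = foo_bridge_attach,
        .pre_enable = foo_bridge_pre_enable,
        .enable = foo_bridge_enable,
        .disable = foo_bridge_disable,
        .post_disable = foo_bridge_post_disable,
};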
-
-/**
- * struct drm_bridge - central DRM bridge control structure
- * @dev: DRM device this bridge belongs to
- * @encoder: encoder to which this bridge is connected
- * @next: the next bridge in the encoder chain
- * @of_node: device node pointer to the bridge
- * @list: to keep track of all added bridges
- * @funcs: control functions
- * @driver_private: pointer to the bridge driver's internal context
- */
-struct drm_bridge {
- struct drm_device *dev;
- struct drm_encoder *encoder;
- struct drm_bridge *next;
-#ifdef CONFIG_OF
- struct device_node *of_node;
-#endif
- struct list_head list;
-
- const struct drm_bridge_funcs *funcs;
- void *driver_private;
-};
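Bridges are registered by their own driver and then attached by the KMS driver behind an encoder; a hedged sketch using drm_bridge_add()/drm_bridge_attach() as declared near the end of this header, with error handling kept minimal and the encoder/bridge back-pointers set the way drivers of this era commonly do.

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Bridge driver side: make the bridge discoverable. */
static int foo_bridge_register(struct drm_bridge *bridge)
{
        bridge->funcs = &foo_bridge_funcs;
        return drm_bridge_add(bridge);
}

/* KMS driver side: hook the bridge up behind an encoder. */
static int foo_attach_bridge(struct drm_device *dev,
                             struct drm_encoder *encoder,
                             struct drm_bridge *bridge)
{
        int ret;

        bridge->encoder = encoder;
        ret = drm_bridge_attach(dev, bridge);
        if (ret)
                return ret;

        encoder->bridge = bridge;
        return 0;
}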
-
-/**
- * struct drm_crtc_commit - track modeset commits on a CRTC
- *
- * This structure is used to track pending modeset changes and atomic commit on
- * a per-CRTC basis. Since updating the list should never block this structure
- * is reference counted to allow waiters to safely wait on an event to complete,
- * without holding any locks.
- *
- * It has 3 different events in total to allow a fine-grained synchronization
- * between outstanding updates::
- *
- * atomic commit thread hardware
- *
- * write new state into hardware ----> ...
- * signal hw_done
- * switch to new state on next
- * ... v/hblank
- *
- * wait for buffers to show up ...
- *
- * ... send completion irq
- * irq handler signals flip_done
- * cleanup old buffers
- *
- * signal cleanup_done
- *
- * wait for flip_done <----
- * clean up atomic state
- *
- * The important bit to know is that cleanup_done is the terminal event, but the
- * ordering between flip_done and hw_done is entirely up to the specific driver
- * and modeset state change.
- *
- * For an implementation of how to use this look at
- * drm_atomic_helper_setup_commit() from the atomic helper library.
- */
-struct drm_crtc_commit {
- /**
- * @crtc:
- *
- * DRM CRTC for this commit.
- */
- struct drm_crtc *crtc;
-
- /**
- * @ref:
- *
- * Reference count for this structure. Needed to allow blocking on
- * completions without the risk of the completion disappearing
- * meanwhile.
- */
- struct kref ref;
-
- /**
- * @flip_done:
- *
- * Will be signaled when the hardware has flipped to the new set of
- * buffers. Signals at the same time as when the drm event for this
- * commit is sent to userspace, or when an out-fence is signalled. Note
- * that for most hardware, in most cases this happens after @hw_done is
- * signalled.
- */
- struct completion flip_done;
-
- /**
- * @hw_done:
- *
- * Will be signalled when all hw register changes for this commit have
- * been written out. Especially when disabling a pipe this can be much
- * later than @flip_done, since that can signal already when the
- * screen goes black, whereas to fully shut down a pipe more register
- * I/O is required.
- *
- * Note that this does not need to include separately reference-counted
- * resources like backing storage buffer pinning, or runtime pm
- * management.
- */
- struct completion hw_done;
-
- /**
- * @cleanup_done:
- *
- * Will be signalled after old buffers have been cleaned up by calling
- * drm_atomic_helper_cleanup_planes(). Since this can only happen after
- * a vblank wait completed, it might be a bit later. This completion is
- * useful to throttle updates and avoid hardware updates getting ahead
- * of the buffer cleanup too much.
- */
- struct completion cleanup_done;
-
- /**
- * @commit_entry:
- *
- * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
- */
- struct list_head commit_entry;
-
- /**
- * @event:
- *
- * &drm_pending_vblank_event pointer to clean up private events.
- */
- struct drm_pending_vblank_event *event;
-};
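A hedged illustration of why this structure is refcounted and carries completions: a driver or helper can hold a reference and block on one of the events, e.g. to throttle against the previous commit. The foo_ helper below is hypothetical.

#include <linux/completion.h>
#include <drm/drm_crtc.h>

/* Block until the given commit's buffers are actually on screen.
 * The caller must hold a reference on @commit (the kref above), so the
 * completion cannot disappear while we sleep on it. */
static void foo_wait_for_flip(struct drm_crtc_commit *commit)
{
        wait_for_completion(&commit->flip_done);
}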
-
-struct __drm_planes_state {
- struct drm_plane *ptr;
- struct drm_plane_state *state;
-};
-
-struct __drm_crtcs_state {
- struct drm_crtc *ptr;
- struct drm_crtc_state *state;
- struct drm_crtc_commit *commit;
-};
-
-struct __drm_connnectors_state {
- struct drm_connector *ptr;
- struct drm_connector_state *state;
-};
-
-/**
- * struct drm_atomic_state - the global state object for atomic updates
- * @dev: parent DRM device
- * @allow_modeset: allow full modeset
- * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
- * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
- * @planes: pointer to array of structures with per-plane data
- * @crtcs: pointer to array of CRTC pointers
- * @num_connector: size of the @connectors and @connector_states arrays
- * @connectors: pointer to array of structures with per-connector data
- * @acquire_ctx: acquire context for this atomic modeset state update
- */
-struct drm_atomic_state {
- struct drm_device *dev;
- bool allow_modeset : 1;
- bool legacy_cursor_update : 1;
- bool legacy_set_config : 1;
- struct __drm_planes_state *planes;
- struct __drm_crtcs_state *crtcs;
- int num_connector;
- struct __drm_connnectors_state *connectors;
-
- struct drm_modeset_acquire_ctx *acquire_ctx;
-
- /**
- * @commit_work:
- *
- * Work item which can be used by the driver or helpers to execute the
- * commit without blocking.
- */
- struct work_struct commit_work;
-};
-
-
-/**
* struct drm_mode_set - new values for a CRTC config change
* @fb: framebuffer to use for new config
* @crtc: CRTC whose configuration we're about to change
@@ -2237,17 +873,9 @@ struct drm_mode_config_funcs {
* CRTC index supplied in &drm_event to userspace.
*
* The drm core will supply a struct &drm_event in the event
- * member of each CRTC's &drm_crtc_state structure. This can be handled by the
- * drm_crtc_send_vblank_event() function, which the driver should call on
- * the provided event upon completion of the atomic commit. Note that if
- * the driver supports vblank signalling and timestamping the vblank
- * counters and timestamps must agree with the ones returned from page
- * flip events. With the current vblank helper infrastructure this can
- * be achieved by holding a vblank reference while the page flip is
- * pending, acquired through drm_crtc_vblank_get() and released with
- * drm_crtc_vblank_put(). Drivers are free to implement their own vblank
- * counter and timestamp tracking though, e.g. if they have accurate
- * timestamp registers in hardware.
+ * member of each CRTC's &drm_crtc_state structure. See the
+ * documentation for &drm_crtc_state for more details about the precise
+ * semantics of this event.
*
* NOTE:
*
@@ -2636,12 +1264,6 @@ struct drm_mode_config {
*/
struct drm_property *aspect_ratio_property;
/**
- * @dirty_info_property: Optional connector property to give userspace a
- * hint that the DIRTY_FB ioctl should be used.
- */
- struct drm_property *dirty_info_property;
-
- /**
* @degamma_lut_property: Optional CRTC property to set the LUT used to
* convert the framebuffer's colors to linear gamma.
*/
@@ -2702,43 +1324,7 @@ struct drm_mode_config {
struct drm_mode_config_helper_funcs *helper_private;
};
-/**
- * drm_for_each_plane_mask - iterate over planes specified by bitmask
- * @plane: the loop cursor
- * @dev: the DRM device
- * @plane_mask: bitmask of plane indices
- *
- * Iterate over all planes specified by bitmask.
- */
-#define drm_for_each_plane_mask(plane, dev, plane_mask) \
- list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
- for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
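Typical use of this iterator, as a hedged sketch (the foo_ name is hypothetical): walk the planes named in a mask and force them off via drm_plane_force_disable(), which is declared further down.

static void foo_disable_masked_planes(struct drm_device *dev,
                                      uint32_t plane_mask)
{
        struct drm_plane *plane;

        /* Only planes whose index bit is set in @plane_mask are visited. */
        drm_for_each_plane_mask(plane, dev, plane_mask)
                drm_plane_force_disable(plane);
}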
-
-/**
- * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
- * @encoder: the loop cursor
- * @dev: the DRM device
- * @encoder_mask: bitmask of encoder indices
- *
- * Iterate over all encoders specified by bitmask.
- */
-#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
- list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
- for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
-
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
-#define obj_to_connector(x) container_of(x, struct drm_connector, base)
-#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
-#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
-#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
-#define obj_to_property(x) container_of(x, struct drm_property, base)
-#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
-#define obj_to_plane(x) container_of(x, struct drm_plane, base)
-
-struct drm_prop_enum_list {
- int type;
- char *name;
-};
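This little type feeds the enum/bitmask property constructors declared below; a hedged example that creates and attaches a hypothetical three-value connector property (the "overscan" name and values are illustrative only):

#include <drm/drm_crtc.h>

static const struct drm_prop_enum_list foo_overscan_modes[] = {
        { 0, "off" },
        { 1, "auto" },
        { 2, "on" },
};

static int foo_attach_overscan_property(struct drm_device *dev,
                                        struct drm_connector *connector)
{
        struct drm_property *prop;

        prop = drm_property_create_enum(dev, 0, "overscan",
                                        foo_overscan_modes,
                                        ARRAY_SIZE(foo_overscan_modes));
        if (!prop)
                return -ENOMEM;

        /* Start out with "off" as the initial value. */
        drm_object_attach_property(&connector->base, prop, 0);
        return 0;
}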
extern __printf(6, 7)
int drm_crtc_init_with_planes(struct drm_device *dev,
@@ -2756,7 +1342,7 @@ extern void drm_crtc_cleanup(struct drm_crtc *crtc);
* Given a registered CRTC, return the index of that CRTC within a DRM
* device's list of CRTCs.
*/
-static inline unsigned int drm_crtc_index(struct drm_crtc *crtc)
+static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
{
return crtc->index;
}
@@ -2773,184 +1359,17 @@ static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
return 1 << drm_crtc_index(crtc);
}
-int drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type);
-int drm_connector_register(struct drm_connector *connector);
-void drm_connector_unregister(struct drm_connector *connector);
-
-extern void drm_connector_cleanup(struct drm_connector *connector);
-static inline unsigned drm_connector_index(struct drm_connector *connector)
-{
- return connector->index;
-}
-
-extern __printf(5, 6)
-int drm_encoder_init(struct drm_device *dev,
- struct drm_encoder *encoder,
- const struct drm_encoder_funcs *funcs,
- int encoder_type, const char *name, ...);
-
-/**
- * drm_encoder_index - find the index of a registered encoder
- * @encoder: encoder to find index for
- *
- * Given a registered encoder, return the index of that encoder within a DRM
- * device's list of encoders.
- */
-static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
-{
- return encoder->index;
-}
-
-/**
- * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
- * @encoder: encoder to test
- * @crtc: crtc to test
- *
- * Return false if @encoder can't be driven by @crtc, true otherwise.
- */
-static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
- struct drm_crtc *crtc)
-{
- return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
-}
-
-extern __printf(8, 9)
-int drm_universal_plane_init(struct drm_device *dev,
- struct drm_plane *plane,
- unsigned long possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats,
- unsigned int format_count,
- enum drm_plane_type type,
- const char *name, ...);
-extern int drm_plane_init(struct drm_device *dev,
- struct drm_plane *plane,
- unsigned long possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- bool is_primary);
-extern void drm_plane_cleanup(struct drm_plane *plane);
-
-/**
- * drm_plane_index - find the index of a registered plane
- * @plane: plane to find index for
- *
- * Given a registered plane, return the index of that plane within a DRM
- * device's list of planes.
- */
-static inline unsigned int drm_plane_index(struct drm_plane *plane)
-{
- return plane->index;
-}
-extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
-extern void drm_plane_force_disable(struct drm_plane *plane);
extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay);
extern int drm_crtc_force_disable(struct drm_crtc *crtc);
extern int drm_crtc_force_disable_all(struct drm_device *dev);
-extern void drm_encoder_cleanup(struct drm_encoder *encoder);
-
-extern const char *drm_get_connector_status_name(enum drm_connector_status status);
-extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
-extern const char *drm_get_dpms_name(int val);
-extern const char *drm_get_dvi_i_subconnector_name(int val);
-extern const char *drm_get_dvi_i_select_name(int val);
-extern const char *drm_get_tv_subconnector_name(int val);
-extern const char *drm_get_tv_select_name(int val);
extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
-extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
- const char *path);
-int drm_mode_connector_set_tile_property(struct drm_connector *connector);
-extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- const struct edid *edid);
-
-extern int drm_display_info_set_bus_formats(struct drm_display_info *info,
- const u32 *formats,
- unsigned int num_formats);
-
-static inline bool drm_property_type_is(struct drm_property *property,
- uint32_t type)
-{
- /* instanceof for props.. handles extended type vs original types: */
- if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
- return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
- return property->flags & type;
-}
-
-extern int drm_object_property_set_value(struct drm_mode_object *obj,
- struct drm_property *property,
- uint64_t val);
-extern int drm_object_property_get_value(struct drm_mode_object *obj,
- struct drm_property *property,
- uint64_t *value);
-extern int drm_framebuffer_init(struct drm_device *dev,
- struct drm_framebuffer *fb,
- const struct drm_framebuffer_funcs *funcs);
-extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
- uint32_t id);
-extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
-extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
-extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
-
-extern void drm_object_attach_property(struct drm_mode_object *obj,
- struct drm_property *property,
- uint64_t init_val);
-extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
- const char *name, int num_values);
-extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
- const char *name,
- const struct drm_prop_enum_list *props,
- int num_values);
-struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
- int flags, const char *name,
- const struct drm_prop_enum_list *props,
- int num_props,
- uint64_t supported_bits);
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
- const char *name,
- uint64_t min, uint64_t max);
-struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
- int flags, const char *name,
- int64_t min, int64_t max);
-struct drm_property *drm_property_create_object(struct drm_device *dev,
- int flags, const char *name, uint32_t type);
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
- const char *name);
-struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
- size_t length,
- const void *data);
-struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
- uint32_t id);
-struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
-void drm_property_unreference_blob(struct drm_property_blob *blob);
-extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
-extern int drm_property_add_enum(struct drm_property *property, int index,
- uint64_t value, const char *name);
-extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
-extern int drm_mode_create_tv_properties(struct drm_device *dev,
- unsigned int num_modes,
- const char * const modes[]);
-extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
-extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
-extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
-extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
-
-extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder);
-extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
- int gamma_size);
-
extern int drm_mode_set_config_internal(struct drm_mode_set *set);
-extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
-
extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
char topology[8]);
extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
@@ -2958,40 +1377,7 @@ extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
extern void drm_mode_put_tile_group(struct drm_device *dev,
struct drm_tile_group *tg);
-extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t value);
-
-extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
- unsigned int supported_rotations);
-extern unsigned int drm_rotation_simplify(unsigned int rotation,
- unsigned int supported_rotations);
-extern void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
- uint degamma_lut_size,
- bool has_ctm,
- uint gamma_lut_size);
-
-int drm_plane_create_zpos_property(struct drm_plane *plane,
- unsigned int zpos,
- unsigned int min, unsigned int max);
-
-int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
- unsigned int zpos);
-
/* Helpers */
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
- uint32_t id, uint32_t type);
-void drm_mode_object_reference(struct drm_mode_object *obj);
-void drm_mode_object_unreference(struct drm_mode_object *obj);
-
-static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
- uint32_t id)
-{
- struct drm_mode_object *mo;
- mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
- return mo ? obj_to_plane(mo) : NULL;
-}
-
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
uint32_t id)
{
@@ -3000,120 +1386,6 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
return mo ? obj_to_crtc(mo) : NULL;
}
-static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
- uint32_t id)
-{
- struct drm_mode_object *mo;
- mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
- return mo ? obj_to_encoder(mo) : NULL;
-}
-
-/**
- * drm_connector_lookup - lookup connector object
- * @dev: DRM device
- * @id: connector object id
- *
- * This function looks up the connector object specified by id
- * and takes a reference to it.
- */
-static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
- uint32_t id)
-{
- struct drm_mode_object *mo;
- mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
- return mo ? obj_to_connector(mo) : NULL;
-}
-
-static inline struct drm_property *drm_property_find(struct drm_device *dev,
- uint32_t id)
-{
- struct drm_mode_object *mo;
- mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
- return mo ? obj_to_property(mo) : NULL;
-}
-
-/*
- * Extract a degamma/gamma LUT value provided by user and round it to the
- * precision supported by the hardware.
- */
-static inline uint32_t drm_color_lut_extract(uint32_t user_input,
- uint32_t bit_precision)
-{
- uint32_t val = user_input;
- uint32_t max = 0xffff >> (16 - bit_precision);
-
- /* Round only if we're not using full precision. */
- if (bit_precision < 16) {
- val += 1UL << (16 - bit_precision - 1);
- val >>= 16 - bit_precision;
- }
-
- return clamp_val(val, 0, max);
-}
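For instance, a hypothetical piece of hardware with 10-bit LUT entries would round the 16-bit value 0x8000 up to the nearest representable step, 0x200:

/* Round a 16-bit degamma LUT word to 10 bits of hardware precision;
 * drm_color_lut_extract(0x8000, 10) yields 0x200. */
static uint32_t foo_lut_word_to_hw(uint32_t user_word)
{
        return drm_color_lut_extract(user_word, 10);
}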
-
-/**
- * drm_framebuffer_reference - incr the fb refcnt
- * @fb: framebuffer
- *
- * This function increments the fb's refcount.
- */
-static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
- drm_mode_object_reference(&fb->base);
-}
-
-/**
- * drm_framebuffer_unreference - unref a framebuffer
- * @fb: framebuffer to unref
- *
- * This function decrements the fb's refcount and frees it if it drops to zero.
- */
-static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
- drm_mode_object_unreference(&fb->base);
-}
-
-/**
- * drm_framebuffer_read_refcount - read the framebuffer reference count.
- * @fb: framebuffer
- *
- * This function returns the framebuffer's reference count.
- */
-static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
-{
- return atomic_read(&fb->base.refcount.refcount);
-}
-
-/**
- * drm_connector_reference - incr the connector refcnt
- * @connector: connector
- *
- * This function increments the connector's refcount.
- */
-static inline void drm_connector_reference(struct drm_connector *connector)
-{
- drm_mode_object_reference(&connector->base);
-}
-
-/**
- * drm_connector_unreference - unref a connector
- * @connector: connector to unref
- *
- * This function decrements the connector's refcount and frees it if it drops to zero.
- */
-static inline void drm_connector_unreference(struct drm_connector *connector)
-{
- drm_mode_object_unreference(&connector->base);
-}
-
-/* Plane list iterator for legacy (overlay only) planes. */
-#define drm_for_each_legacy_plane(plane, dev) \
- list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
- for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY)
-
-#define drm_for_each_plane(plane, dev) \
- list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
-
#define drm_for_each_crtc(crtc, dev) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
@@ -3131,67 +1403,4 @@ assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
!drm_modeset_is_locked(&mode_config->connection_mutex));
}
-#define drm_for_each_connector(connector, dev) \
- for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \
- connector = list_first_entry(&(dev)->mode_config.connector_list, \
- struct drm_connector, head); \
- &connector->head != (&(dev)->mode_config.connector_list); \
- connector = list_next_entry(connector, head))
-
-#define drm_for_each_encoder(encoder, dev) \
- list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
-
-#define drm_for_each_fb(fb, dev) \
- for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \
- fb = list_first_entry(&(dev)->mode_config.fb_list, \
- struct drm_framebuffer, head); \
- &fb->head != (&(dev)->mode_config.fb_list); \
- fb = list_next_entry(fb, head))
-
-/* drm_edid.c */
-bool drm_probe_ddc(struct i2c_adapter *adapter);
-struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter);
-struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
- struct i2c_adapter *adapter);
-struct edid *drm_edid_duplicate(const struct edid *edid);
-int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
-
-u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
-bool drm_detect_hdmi_monitor(struct edid *edid);
-bool drm_detect_monitor_audio(struct edid *edid);
-bool drm_rgb_quant_range_selectable(struct edid *edid);
-int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay);
-void drm_set_preferred_mode(struct drm_connector *connector,
- int hpref, int vpref);
-
-int drm_edid_header_is_valid(const u8 *raw_edid);
-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
- bool *edid_corrupt);
-bool drm_edid_is_valid(struct edid *edid);
-void drm_edid_get_monitor_name(struct edid *edid, char *name,
- int buflen);
-struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh,
- bool rb);
-
-/* drm_bridge.c */
-extern int drm_bridge_add(struct drm_bridge *bridge);
-extern void drm_bridge_remove(struct drm_bridge *bridge);
-extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
-extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
-
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-void drm_bridge_disable(struct drm_bridge *bridge);
-void drm_bridge_post_disable(struct drm_bridge *bridge);
-void drm_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-void drm_bridge_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_enable(struct drm_bridge *bridge);
-
#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 4b37afa2b73b..982c299e435a 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -41,6 +41,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
@@ -53,11 +54,6 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
-extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
-
-extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
- const struct drm_mode_fb_cmd2 *mode_cmd);
-
extern void drm_helper_resume_force_mode(struct drm_device *dev);
int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
diff --git a/include/drm/drm_dp_aux_dev.h b/include/drm/drm_dp_aux_dev.h
deleted file mode 100644
index 1b76d990d8ab..000000000000
--- a/include/drm/drm_dp_aux_dev.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright © 2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Rafael Antognolli <rafael.antognolli@intel.com>
- *
- */
-
-#ifndef DRM_DP_AUX_DEV
-#define DRM_DP_AUX_DEV
-
-#include <drm/drm_dp_helper.h>
-
-#ifdef CONFIG_DRM_DP_AUX_CHARDEV
-
-int drm_dp_aux_dev_init(void);
-void drm_dp_aux_dev_exit(void);
-int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
-void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
-
-#else
-
-static inline int drm_dp_aux_dev_init(void)
-{
- return 0;
-}
-
-static inline void drm_dp_aux_dev_exit(void)
-{
-}
-
-static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
-{
- return 0;
-}
-
-static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
-{
-}
-
-#endif
-
-#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 63b8bd502444..2a79882cb68e 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -211,14 +211,16 @@
# define DP_DS_PORT_TYPE_DVI 2
# define DP_DS_PORT_TYPE_HDMI 3
# define DP_DS_PORT_TYPE_NON_EDID 4
+# define DP_DS_PORT_TYPE_DP_DUALMODE 5
+# define DP_DS_PORT_TYPE_WIRELESS 6
# define DP_DS_PORT_HPD (1 << 3)
/* offset 1 for VGA is maximum megapixels per second / 8 */
/* offset 2 */
-# define DP_DS_VGA_MAX_BPC_MASK (3 << 0)
-# define DP_DS_VGA_8BPC 0
-# define DP_DS_VGA_10BPC 1
-# define DP_DS_VGA_12BPC 2
-# define DP_DS_VGA_16BPC 3
+# define DP_DS_MAX_BPC_MASK (3 << 0)
+# define DP_DS_8BPC 0
+# define DP_DS_10BPC 1
+# define DP_DS_12BPC 2
+# define DP_DS_16BPC 3
/* link configuration */
#define DP_LINK_BW_SET 0x100
@@ -443,6 +445,9 @@
#define DP_SOURCE_OUI 0x300
#define DP_SINK_OUI 0x400
#define DP_BRANCH_OUI 0x500
+#define DP_BRANCH_ID 0x503
+#define DP_BRANCH_HW_REV 0x509
+#define DP_BRANCH_SW_REV 0x50A
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
@@ -813,6 +818,13 @@ int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
+void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4], struct drm_dp_aux *aux);
void drm_dp_aux_init(struct drm_dp_aux *aux);
int drm_dp_aux_register(struct drm_dp_aux *aux);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 919933d1beb4..c3a7d440bc11 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -25,6 +25,9 @@
#include <linux/types.h>
+struct drm_device;
+struct i2c_adapter;
+
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
@@ -423,9 +426,36 @@ static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
}
+bool drm_probe_ddc(struct i2c_adapter *adapter);
struct edid *drm_do_get_edid(struct drm_connector *connector,
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
size_t len),
void *data);
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+struct edid *drm_edid_duplicate(const struct edid *edid);
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
+enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
+bool drm_detect_hdmi_monitor(struct edid *edid);
+bool drm_detect_monitor_audio(struct edid *edid);
+bool drm_rgb_quant_range_selectable(struct edid *edid);
+int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay);
+void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref);
+
+int drm_edid_header_is_valid(const u8 *raw_edid);
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
+ bool *edid_corrupt);
+bool drm_edid_is_valid(struct edid *edid);
+void drm_edid_get_monitor_name(struct edid *edid, char *name,
+ int buflen);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb);
#endif /* __DRM_EDID_H__ */
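Taken together, the EDID helpers that now live in drm_edid.h cover the usual connector ->get_modes() flow; a hedged sketch follows, where foo_connector_to_i2c() merely stands in for however a particular driver reaches its DDC adapter.

#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>

/* Hypothetical accessor for the connector's DDC bus. */
struct i2c_adapter *foo_connector_to_i2c(struct drm_connector *connector);

static int foo_connector_get_modes(struct drm_connector *connector)
{
        struct i2c_adapter *ddc = foo_connector_to_i2c(connector);
        struct edid *edid;
        int count;

        /* Read the EDID over DDC and mirror it into the EDID property. */
        edid = drm_get_edid(connector, ddc);
        drm_mode_connector_update_edid_property(connector, edid);
        if (!edid)
                return 0;

        /* Turn the EDID into probed modes on the connector. */
        count = drm_add_edid_modes(connector, edid);
        kfree(edid);
        return count;
}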
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
new file mode 100644
index 000000000000..387e33a4d6ee
--- /dev/null
+++ b/include/drm/drm_encoder.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_ENCODER_H__
+#define __DRM_ENCODER_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+/**
+ * struct drm_encoder_funcs - encoder controls
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
+struct drm_encoder_funcs {
+ /**
+ * @reset:
+ *
+ * Reset encoder hardware and software state to off. This function isn't
+ * called by the core directly, only through drm_mode_config_reset().
+ * That it is not a helper hook is only for historical reasons.
+ */
+ void (*reset)(struct drm_encoder *encoder);
+
+ /**
+ * @destroy:
+ *
+ * Clean up encoder resources. This is only called at driver unload time
+ * through drm_mode_config_cleanup() since an encoder cannot be
+ * hotplugged in DRM.
+ */
+ void (*destroy)(struct drm_encoder *encoder);
+
+ /**
+ * @late_register:
+ *
+ * This optional hook can be used to register additional userspace
+ * interfaces attached to the encoder like debugfs interfaces.
+ * It is called late in the driver load sequence from drm_dev_register().
+ * Everything added from this callback should be unregistered in
+ * the early_unregister callback.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code on failure.
+ */
+ int (*late_register)(struct drm_encoder *encoder);
+
+ /**
+ * @early_unregister:
+ *
+ * This optional hook should be used to unregister the additional
+ * userspace interfaces attached to the encoder from
+ * late_register(). It is called from drm_dev_unregister(),
+ * early in the driver unload sequence to disable userspace access
+ * before data structures are torn down.
+ */
+ void (*early_unregister)(struct drm_encoder *encoder);
+};
+
+/**
+ * struct drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @name: human readable name, can be overwritten by the driver
+ * @crtc: currently bound CRTC
+ * @bridge: bridge associated to the encoder
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
+ */
+struct drm_encoder {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+ char *name;
+ /**
+ * @encoder_type:
+ *
+ * One of the DRM_MODE_ENCODER_<foo> types in drm_mode.h. The following
+ * encoder types are defined thus far:
+ *
+ * - DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A.
+ *
+ * - DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort.
+ *
+ * - DRM_MODE_ENCODER_LVDS for display panels, or in general any panel
+ * with a proprietary parallel connector.
+ *
+ * - DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video,
+ * Component, SCART).
+ *
+ * - DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
+ *
+ * - DRM_MODE_ENCODER_DSI for panels connected using the DSI serial bus.
+ *
+ * - DRM_MODE_ENCODER_DPI for panels connected using the DPI parallel
+ * bus.
+ *
+ * - DRM_MODE_ENCODER_DPMST for special fake encoders used to allow
+ * multiple DP MST streams to share one physical encoder.
+ */
+ int encoder_type;
+
+ /**
+ * @index: Position inside the mode_config.list, can be used as an array
+ * index. It is invariant over the lifetime of the encoder.
+ */
+ unsigned index;
+
+ /**
+ * @possible_crtcs: Bitmask of potential CRTC bindings, using
+ * drm_crtc_index() as the index into the bitfield. The driver must set
+ * the bits for all &drm_crtc objects this encoder can be connected to
+ * before calling drm_encoder_init().
+ *
+ * In reality almost every driver gets this wrong.
+ *
+ * Note that since CRTC objects can't be hotplugged the assigned indices
+ * are stable and hence known before registering all objects.
+ */
+ uint32_t possible_crtcs;
+
+ /**
+ * @possible_clones: Bitmask of potential sibling encoders for cloning,
+ * using drm_encoder_index() as the index into the bitfield. The driver
+ * must set the bits for all &drm_encoder objects which can clone a
+ * &drm_crtc together with this encoder before calling
+ * drm_encoder_init(). Drivers should set the bit representing the
+ * encoder itself, too. Cloning bits should be set such that when two
+ * encoders can be used in a cloned configuration, they should both have
+ * each other's bit set.
+ *
+ * In reality almost every driver gets this wrong.
+ *
+ * Note that since encoder objects can't be hotplugged the assigned indices
+ * are stable and hence known before registering all objects.
+ */
+ uint32_t possible_clones;
+
+ struct drm_crtc *crtc;
+ struct drm_bridge *bridge;
+ const struct drm_encoder_funcs *funcs;
+ const struct drm_encoder_helper_funcs *helper_private;
+};
+
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+
+__printf(5, 6)
+int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...);
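A hedged sketch of how a driver would use this: register a TMDS (HDMI/DVI) encoder that either of the first two CRTCs may drive. The foo_ names are illustrative and error handling is kept minimal.

#include <drm/drmP.h>
#include <drm/drm_encoder.h>

static const struct drm_encoder_funcs foo_encoder_funcs = {
        .destroy = drm_encoder_cleanup,
};

static int foo_create_encoder(struct drm_device *dev,
                              struct drm_encoder *encoder)
{
        /* Must be set before drm_encoder_init(), see @possible_crtcs. */
        encoder->possible_crtcs = BIT(0) | BIT(1);
        return drm_encoder_init(dev, encoder, &foo_encoder_funcs,
                                DRM_MODE_ENCODER_TMDS, NULL);
}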
+
+/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+ return encoder->index;
+}
+
+/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
+static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc);
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Returns false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
+}
+
+/**
+ * drm_encoder_find - find a &drm_encoder
+ * @dev: DRM device
+ * @id: encoder id
+ *
+ * Returns the encoder with @id, NULL if it doesn't exist. Simple wrapper around
+ * drm_mode_object_find().
+ */
+static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+
+ return mo ? obj_to_encoder(mo) : NULL;
+}
+
+void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+/**
+ * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ * @encoder_mask: bitmask of encoder indices
+ *
+ * Iterate over all encoders specified by bitmask.
+ */
+#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
+ list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
+ for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
+
+/**
+ * drm_for_each_encoder - iterate over all encoders
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all encoders of @dev.
+ */
+#define drm_for_each_encoder(encoder, dev) \
+ list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+
+#endif
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index db8d4780eaa2..ed8edfef75b2 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -32,6 +32,7 @@
struct drm_fb_helper;
+#include <drm/drm_crtc.h>
#include <linux/kgdb.h>
enum mode_set_atomic {
@@ -176,6 +177,7 @@ struct drm_fb_helper_connector {
* the screen buffer
* @dirty_lock: spinlock protecting @dirty_clip
* @dirty_work: worker used to flush the framebuffer
+ * @resume_work: worker used during resume if the console lock is already taken
*
* This is the main structure used by the fbdev helpers. Drivers supporting
* fbdev emulation should embedded this into their overall driver structure.
@@ -196,6 +198,7 @@ struct drm_fb_helper {
struct drm_clip_rect dirty_clip;
spinlock_t dirty_lock;
struct work_struct dirty_work;
+ struct work_struct resume_work;
/**
* @kernel_fb_list:
@@ -214,8 +217,20 @@ struct drm_fb_helper {
bool delayed_hotplug;
};
+/**
+ * define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers
+ *
+ * Helper define to register default implementations of drm_fb_helper
+ * functions. To be used in struct fb_ops of drm drivers.
+ */
+#define DRM_FB_HELPER_DEFAULT_OPS \
+ .fb_check_var = drm_fb_helper_check_var, \
+ .fb_set_par = drm_fb_helper_set_par, \
+ .fb_setcmap = drm_fb_helper_setcmap, \
+ .fb_blank = drm_fb_helper_blank, \
+ .fb_pan_display = drm_fb_helper_pan_display
+
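
A minimal sketch of the intended use in a hypothetical "foo" driver's fbdev emulation; the extra cfb helpers shown are just one common choice:

static struct fb_ops foo_fb_ops = {
        .owner          = THIS_MODULE,
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_fillrect    = drm_fb_helper_cfb_fillrect,
        .fb_copyarea    = drm_fb_helper_cfb_copyarea,
        .fb_imageblit   = drm_fb_helper_cfb_imageblit,
};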
#ifdef CONFIG_DRM_FBDEV_EMULATION
-int drm_fb_helper_modinit(void);
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
int drm_fb_helper_init(struct drm_device *dev,
@@ -263,7 +278,9 @@ void drm_fb_helper_cfb_copyarea(struct fb_info *info,
void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image);
-void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend);
+void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
+ bool suspend);
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
@@ -283,11 +300,6 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
#else
-static inline int drm_fb_helper_modinit(void)
-{
- return 0;
-}
-
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
@@ -417,7 +429,12 @@ static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
}
static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
- int state)
+ bool suspend)
+{
+}
+
+static inline void
+drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend)
{
}
@@ -475,5 +492,18 @@ drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
{
return 0;
}
+
+#endif
+
+static inline int
+drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
+{
+#if IS_REACHABLE(CONFIG_FB)
+ return remove_conflicting_framebuffers(a, name, primary);
+#else
+ return 0;
#endif
+}
+
#endif
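
A hedged sketch of how a driver's PM hooks might use the new unlocked suspend helper; struct foo_device and the foo_* names are assumptions:

static int foo_drm_suspend(struct device *dev)
{
        struct foo_device *foo = dev_get_drvdata(dev);

        /* Tell fbdev emulation to stop touching the hardware. */
        drm_fb_helper_set_suspend_unlocked(&foo->fb_helper, true);
        /* ... save hardware state ... */
        return 0;
}

static int foo_drm_resume(struct device *dev)
{
        struct foo_device *foo = dev_get_drvdata(dev);

        /* ... restore hardware state ... */
        /* Falls back to resume_work if the console lock is contended. */
        drm_fb_helper_set_suspend_unlocked(&foo->fb_helper, false);
        return 0;
}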
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 7f90a396cf2b..30c30fa87ee8 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
int drm_format_num_planes(uint32_t format);
int drm_format_plane_cpp(uint32_t format, int plane);
@@ -32,6 +33,6 @@ int drm_format_horz_chroma_subsampling(uint32_t format);
int drm_format_vert_chroma_subsampling(uint32_t format);
int drm_format_plane_width(int width, uint32_t format, int plane);
int drm_format_plane_height(int height, uint32_t format, int plane);
-const char *drm_get_format_name(uint32_t format);
+char *drm_get_format_name(uint32_t format) __malloc;
#endif /* __DRM_FOURCC_H__ */
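
Since drm_get_format_name() now returns a __malloc'ed buffer, callers own the string. A sketch of the new calling convention, assuming the buffer is kfree()-able; foo_debug_format is illustrative:

static void foo_debug_format(struct drm_framebuffer *fb)
{
        char *name = drm_get_format_name(fb->pixel_format);

        DRM_DEBUG_KMS("framebuffer format %s\n", name);
        kfree(name);    /* caller now owns and must free the string */
}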
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
new file mode 100644
index 000000000000..f5ae1f436a4b
--- /dev/null
+++ b/include/drm/drm_framebuffer.h
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_FRAMEBUFFER_H__
+#define __DRM_FRAMEBUFFER_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+struct drm_framebuffer;
+struct drm_file;
+struct drm_device;
+
+/**
+ * struct drm_framebuffer_funcs - framebuffer hooks
+ */
+struct drm_framebuffer_funcs {
+ /**
+ * @destroy:
+ *
+ * Clean up framebuffer resources, specifically also unreference the
+ * backing storage. The core guarantees to call this function for every
+ * framebuffer successfully created by ->fb_create() in
+ * &drm_mode_config_funcs. Drivers must also call
+ * drm_framebuffer_cleanup() to release DRM core resources for this
+ * framebuffer.
+ */
+ void (*destroy)(struct drm_framebuffer *framebuffer);
+
+ /**
+ * @create_handle:
+ *
+ * Create a buffer handle in the driver-specific buffer manager (either
+ * GEM or TTM) valid for the passed-in struct &drm_file. This is used by
+ * the core to implement the GETFB IOCTL, which also returns a native
+ * buffer handle (for sufficiently privileged users). This can
+ * be used for seamless transitions between modesetting clients by
+ * copying the current screen contents to a private buffer and blending
+ * between that and the new contents.
+ *
+ * GEM based drivers should call drm_gem_handle_create() to create the
+ * handle.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*create_handle)(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle);
+ /**
+ * @dirty:
+ *
+ * Optional callback for the dirty fb IOCTL.
+ *
+ * Userspace can notify the driver via this callback that an area of the
+ * framebuffer has changed and should be flushed to the display
+ * hardware. This can also be used internally, e.g. by the fbdev
+ * emulation, though that's not the case currently.
+ *
+ * See the documentation for struct drm_mode_fb_dirty_cmd in drm_mode.h
+ * for more information, as the semantics and arguments map one-to-one
+ * onto this callback.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*dirty)(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips);
+};
+
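
For a GEM-backed driver, a hedged sketch of a vfunc table that follows the rules above; struct foo_framebuffer and the foo_* helpers are assumptions:

struct foo_framebuffer {
        struct drm_framebuffer base;
        struct drm_gem_object *obj;     /* backing storage */
};

static void foo_fb_destroy(struct drm_framebuffer *fb)
{
        struct foo_framebuffer *foo_fb =
                container_of(fb, struct foo_framebuffer, base);

        drm_framebuffer_cleanup(fb);                      /* release core resources */
        drm_gem_object_unreference_unlocked(foo_fb->obj); /* drop backing storage */
        kfree(foo_fb);
}

static int foo_fb_create_handle(struct drm_framebuffer *fb,
                                struct drm_file *file_priv,
                                unsigned int *handle)
{
        struct foo_framebuffer *foo_fb =
                container_of(fb, struct foo_framebuffer, base);

        return drm_gem_handle_create(file_priv, foo_fb->obj, handle);
}

static const struct drm_framebuffer_funcs foo_fb_funcs = {
        .destroy       = foo_fb_destroy,
        .create_handle = foo_fb_create_handle,
};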
+/**
+ * struct drm_framebuffer - frame buffer object
+ *
+ * Note that the fb is refcounted for the benefit of driver internals:
+ * on some hardware, disabling a CRTC/plane is asynchronous and scanout
+ * does not actually complete until the next vblank. Some cleanup (like
+ * releasing the reference(s) on the backing GEM bo(s)) must therefore be
+ * deferred, and the driver needs to hold a reference to the fb even
+ * though, from the userspace perspective, it has already been removed.
+ * See drm_framebuffer_reference() and drm_framebuffer_unreference().
+ *
+ * The refcount is stored inside the mode object @base.
+ */
+struct drm_framebuffer {
+ /**
+ * @dev: DRM device this framebuffer belongs to
+ */
+ struct drm_device *dev;
+ /**
+ * @head: Place on the dev->mode_config.fb_list, access protected by
+ * dev->mode_config.fb_lock.
+ */
+ struct list_head head;
+
+ /**
+ * @base: base modeset object structure, contains the reference count.
+ */
+ struct drm_mode_object base;
+ /**
+ * @funcs: framebuffer vfunc table
+ */
+ const struct drm_framebuffer_funcs *funcs;
+ /**
+ * @pitches: Line stride per buffer. For userspace created object this
+ * is copied from drm_mode_fb_cmd2.
+ */
+ unsigned int pitches[4];
+ /**
+ * @offsets: Offset from buffer start to the actual pixel data in bytes,
+ * per buffer. For userspace created object this is copied from
+ * drm_mode_fb_cmd2.
+ *
+ * Note that this is a linear offset and does not take into account
+ * tiling or buffer layout per @modifier. It is meant to be used when the
+ * actual pixel data for this framebuffer plane starts at an offset,
+ * e.g. when multiple planes are allocated within the same backing
+ * storage buffer object. For tiled layouts this generally means that
+ * @offsets must at least be tile-size aligned, but hardware often has
+ * stricter requirements.
+ *
+ * This should not be used to specify x/y pixel offsets into the buffer
+ * data (even for linear buffers). Specifying an x/y pixel offset is
+ * instead done through the source rectangle in struct &drm_plane_state.
+ */
+ unsigned int offsets[4];
+ /**
+ * @modifier: Data layout modifier, per buffer. This is used to describe
+ * tiling, or also special layouts (like compression) of auxiliary
+ * buffers. For userspace created object this is copied from
+ * drm_mode_fb_cmd2.
+ */
+ uint64_t modifier[4];
+ /**
+ * @width: Logical width of the visible area of the framebuffer, in
+ * pixels.
+ */
+ unsigned int width;
+ /**
+ * @height: Logical height of the visible area of the framebuffer, in
+ * pixels.
+ */
+ unsigned int height;
+ /**
+ * @depth: Depth in bits per pixel for RGB formats. 0 for everything
+ * else. This is legacy information derived from @pixel_format; use the
+ * DRM FOURCC codes and helper functions directly instead.
+ */
+ unsigned int depth;
+ /**
+ * @bits_per_pixel: Storage used bits per pixel for RGB formats. 0 for
+ * everything else. This is legacy information derived from
+ * @pixel_format; use the DRM FOURCC codes and helper functions directly
+ * instead.
+ */
+ int bits_per_pixel;
+ /**
+ * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or
+ * DRM_MODE_FB_MODIFIERS.
+ */
+ int flags;
+ /**
+ * @pixel_format: DRM FOURCC code describing the pixel format.
+ */
+ uint32_t pixel_format; /* fourcc format */
+ /**
+ * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
+ * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
+ * universal plane.
+ */
+ int hot_x;
+ /**
+ * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
+ * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
+ * universal plane.
+ */
+ int hot_y;
+ /**
+ * @filp_head: Placed on struct &drm_file fbs list_head, protected by
+ * fbs_lock in the same structure.
+ */
+ struct list_head filp_head;
+};
+
+#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+
+int drm_framebuffer_init(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs);
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id);
+void drm_framebuffer_remove(struct drm_framebuffer *fb);
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
+ *
+ * This function increments the fb's refcount.
+ */
+static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+ drm_mode_object_reference(&fb->base);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
+ *
+ * This function decrements the fb's refcount and frees the framebuffer if the count drops to zero.
+ */
+static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+ drm_mode_object_unreference(&fb->base);
+}
+
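
To illustrate the deferred-cleanup pattern described in the struct documentation above, a hedged sketch in which a hypothetical driver keeps its own reference until the hardware has really stopped scanning out the old buffer:

struct foo_plane {
        struct drm_framebuffer *scanout_fb;     /* currently scanned out */
        struct drm_framebuffer *pending_fb;     /* takes effect at next vblank */
};

static void foo_plane_program(struct foo_plane *p, struct drm_framebuffer *fb)
{
        drm_framebuffer_reference(fb);          /* hold while hw may scan it out */
        p->pending_fb = fb;
        /* ... write registers, arm the vblank event ... */
}

static void foo_plane_vblank(struct foo_plane *p)
{
        if (p->scanout_fb)
                drm_framebuffer_unreference(p->scanout_fb);
        p->scanout_fb = p->pending_fb;
        p->pending_fb = NULL;
}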
+/**
+ * drm_framebuffer_read_refcount - read the framebuffer reference count.
+ * @fb: framebuffer
+ *
+ * This function returns the framebuffer's reference count.
+ */
+static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb)
+{
+ return atomic_read(&fb->base.refcount.refcount);
+}
+
+/**
+ * drm_for_each_fb - iterate over all framebuffers
+ * @fb: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all framebuffers of @dev. User must hold the fb_lock from
+ * &drm_mode_config.
+ */
+#define drm_for_each_fb(fb, dev) \
+ for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \
+ fb = list_first_entry(&(dev)->mode_config.fb_list, \
+ struct drm_framebuffer, head); \
+ &fb->head != (&(dev)->mode_config.fb_list); \
+ fb = list_next_entry(fb, head))
+#endif
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index fca1cd1b9c26..9f63736e6163 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -210,8 +210,8 @@ drm_gem_object_reference(struct drm_gem_object *obj)
* drm_gem_object_unreference_unlocked().
*
* Drivers should never call this directly in their code. Instead they should
- * wrap it up into a driver_gem_object_unreference(struct driver_gem_object
- * *obj) wrapper function, and use that. Shared code should never call this, to
+ * wrap it up into a ``driver_gem_object_unreference(struct driver_gem_object
+ * *obj)`` wrapper function, and use that. Shared code should never call this, to
* avoid breaking drivers by accident which still depend upon dev->struct_mutex
* locking.
*/
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 47ac92584d76..4fef19064b0f 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -265,11 +265,15 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
-int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode);
int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
+int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
+ u16 brightness);
+int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
+ u16 *brightness);
/**
* struct mipi_dsi_driver - DSI driver
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index fc65118e5077..205ddcf6d55d 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -37,6 +37,7 @@
* Generic range manager structs
*/
#include <linux/bug.h>
+#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -61,6 +62,7 @@ enum drm_mm_allocator_flags {
struct drm_mm_node {
struct list_head node_list;
struct list_head hole_stack;
+ struct rb_node rb;
unsigned hole_follows : 1;
unsigned scanned_block : 1;
unsigned scanned_prev_free : 1;
@@ -70,6 +72,7 @@ struct drm_mm_node {
unsigned long color;
u64 start;
u64 size;
+ u64 __subtree_last;
struct drm_mm *mm;
};
@@ -79,6 +82,9 @@ struct drm_mm {
/* head_node.node_list is the list of all memory nodes, ordered
* according to the (increasing) start address of the memory node. */
struct drm_mm_node head_node;
+ /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
+ struct rb_root interval_tree;
+
unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_color;
@@ -295,6 +301,12 @@ void drm_mm_init(struct drm_mm *mm,
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);
+
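
A small sketch of walking the new interval tree; the semantics are assumed to match the usual interval-tree iterators (visit all nodes overlapping [start, last]):

static void foo_dump_range(struct drm_mm *mm, u64 start, u64 last)
{
        struct drm_mm_node *node;

        for (node = drm_mm_interval_first(mm, start, last);
             node;
             node = drm_mm_interval_next(node, start, last))
                pr_info("node [%llx + %llx]\n", node->start, node->size);
}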
void drm_mm_init_scan(struct drm_mm *mm,
u64 size,
unsigned alignment,
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
new file mode 100644
index 000000000000..43460b21d112
--- /dev/null
+++ b/include/drm/drm_mode_object.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_MODESET_H__
+#define __DRM_MODESET_H__
+
+#include <linux/kref.h>
+struct drm_object_properties;
+struct drm_property;
+struct drm_device;
+
+/**
+ * struct drm_mode_object - base structure for modeset objects
+ * @id: userspace visible identifier
+ * @type: type of the object, one of DRM_MODE_OBJECT\_\*
+ * @properties: properties attached to this object, including values
+ * @refcount: reference count for objects with a dynamic lifetime
+ * @free_cb: free function callback, only set for objects with dynamic lifetime
+ *
+ * Base structure for modeset objects visible to userspace. Objects can be
+ * looked up using drm_mode_object_find(). Besides basic uapi interface
+ * properties like @id and @type it provides two services:
+ *
+ * - It tracks attached properties and their values. This is used by &drm_crtc,
+ * &drm_plane and &drm_connector. Properties are attached by calling
+ * drm_object_attach_property() before the object is visible to userspace.
+ *
+ * - For objects with dynamic lifetimes (as indicated by a non-NULL @free_cb) it
+ * provides reference counting through drm_mode_object_reference() and
+ * drm_mode_object_unreference(). This is used by &drm_framebuffer,
+ * &drm_connector and &drm_property_blob. These objects provide specialized
+ * reference counting wrappers.
+ */
+struct drm_mode_object {
+ uint32_t id;
+ uint32_t type;
+ struct drm_object_properties *properties;
+ struct kref refcount;
+ void (*free_cb)(struct kref *kref);
+};
+
+#define DRM_OBJECT_MAX_PROPERTY 24
+/**
+ * struct drm_object_properties - property tracking for &drm_mode_object
+ */
+struct drm_object_properties {
+ /**
+ * @count: number of valid properties, must be less than or equal to
+ * DRM_OBJECT_MAX_PROPERTY.
+ */
+
+ int count;
+ /**
+ * @properties: Array of pointers to &drm_property.
+ *
+ * NOTE: if we ever start dynamically destroying properties (i.e.
+ * not at drm_mode_config_cleanup() time), then we'd have to do
+ * a better job of detaching properties from mode objects to avoid
+ * dangling property pointers.
+ */
+ struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
+
+ /**
+ * @values: Array to store the property values, matching @properties. Do
+ * not read/write values directly, but use
+ * drm_object_property_get_value() and drm_object_property_set_value().
+ *
+ * Note that atomic drivers do not store mutable properties in this
+ * array, but only the decoded values in the corresponding state
+ * structure. The decoding is done using the ->atomic_get_property and
+ * ->atomic_set_property hooks of the corresponding object. Hence atomic
+ * drivers should not use drm_object_property_set_value() and
+ * drm_object_property_get_value() on mutable properties, i.e. those
+ * without the DRM_MODE_PROP_IMMUTABLE flag set.
+ */
+ uint64_t values[DRM_OBJECT_MAX_PROPERTY];
+};
+
+/* Avoid boilerplate. I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list) \
+ const char *fnname(int val) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(list); i++) { \
+ if (list[i].type == val) \
+ return list[i].name; \
+ } \
+ return "(unknown)"; \
+ }
+
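
An illustrative use of the macro; the list and the generated function name below are made up:

static const struct drm_prop_enum_list foo_plane_type_list[] = {
        { DRM_PLANE_TYPE_OVERLAY, "Overlay" },
        { DRM_PLANE_TYPE_PRIMARY, "Primary" },
        { DRM_PLANE_TYPE_CURSOR,  "Cursor"  },
};

/* Expands to: const char *foo_plane_type_name(int val) { ... } */
DRM_ENUM_NAME_FN(foo_plane_type_name, foo_plane_type_list)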
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
+void drm_mode_object_reference(struct drm_mode_object *obj);
+void drm_mode_object_unreference(struct drm_mode_object *obj);
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t val);
+int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *value);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val);
+#endif
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index ff481770d76b..9934d91619c1 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -27,6 +27,13 @@
#ifndef __DRM_MODES_H__
#define __DRM_MODES_H__
+#include <linux/hdmi.h>
+
+#include <drm/drm_mode_object.h>
+#include <drm/drm_connector.h>
+
+struct videomode;
+
/*
* Note on terminology: here, for brevity and convenience, we refer to connector
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
@@ -400,20 +407,7 @@ struct drm_display_mode {
enum hdmi_picture_aspect picture_aspect_ratio;
};
-/* mode specified on the command line */
-struct drm_cmdline_mode {
- bool specified;
- bool refresh_specified;
- bool bpp_specified;
- int xres, yres;
- int bpp;
- int refresh;
- bool rb;
- bool interlace;
- bool cvt;
- bool margins;
- enum drm_connector_force force;
-};
+#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
/**
* drm_mode_is_stereo - check for stereo mode flags
@@ -434,7 +428,7 @@ struct drm_cmdline_mode;
struct drm_display_mode *drm_mode_create(struct drm_device *dev);
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
- const struct drm_display_mode *in);
+ const struct drm_display_mode *in);
int drm_mode_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in);
void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
@@ -457,8 +451,9 @@ void drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode);
void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
struct videomode *vm);
+void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags);
int of_get_drm_display_mode(struct device_node *np,
- struct drm_display_mode *dmode,
+ struct drm_display_mode *dmode, u32 *bus_flags,
int index);
void drm_mode_set_name(struct drm_display_mode *mode);
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
new file mode 100644
index 000000000000..b8051d5abe10
--- /dev/null
+++ b/include/drm/drm_modeset_helper.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_KMS_HELPER_H__
+#define __DRM_KMS_HELPER_H__
+
+#include <drm/drmP.h>
+
+void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
+
+#endif
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index b55f21857a98..10e449c86dbd 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -266,6 +266,8 @@ struct drm_crtc_helper_funcs {
* disable anything at the CRTC level. To ensure that runtime PM
* handling (using either DPMS or the new "ACTIVE" property) works
* @disable must be the inverse of @enable for atomic drivers.
+ * Atomic drivers should consider using @atomic_disable instead of
+ * this one.
*
* NOTE:
*
@@ -391,6 +393,28 @@ struct drm_crtc_helper_funcs {
*/
void (*atomic_flush)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
+
+ /**
+ * @atomic_disable:
+ *
+ * This callback should be used to disable the CRTC. With the atomic
+ * drivers it is called after all encoders connected to this CRTC have
+ * been shut off already using their own ->disable hook. If that
+ * sequence is too simple drivers can just add their own hooks and call
+ * it from this CRTC callback here by looping over all encoders
+ * connected to it using for_each_encoder_on_crtc().
+ *
+ * This hook is used only by atomic helpers. Atomic drivers don't
+ * need to implement it if there's no need to disable anything at the
+ * CRTC level.
+ *
+ * Compared to @disable, this hook provides the additional input
+ * parameter @old_crtc_state, which can be used to access the old
+ * state. Atomic drivers should consider using this hook instead
+ * of @disable.
+ */
+ void (*atomic_disable)(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state);
};
/**
@@ -523,12 +547,41 @@ struct drm_encoder_helper_funcs {
*
* This callback is used both by the legacy CRTC helpers and the atomic
* modeset helpers. It is optional in the atomic helpers.
+ *
+ * NOTE:
+ *
+ * If the driver uses the atomic modeset helpers and needs to inspect
+ * the connector state or connector display info during mode setting,
+ * @atomic_mode_set can be used instead.
*/
void (*mode_set)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/**
+ * @atomic_mode_set:
+ *
+ * This callback is used to update the display mode of an encoder.
+ *
+ * Note that the display pipe is completely off when this function is
+ * called. Drivers which need hardware to be running before they program
+ * the new display mode (because they implement runtime PM) should not
+ * use this hook, because the helper library calls it only once and not
+ * every time the display pipeline is suspended using either DPMS or the
+ * new "ACTIVE" property. Such drivers should instead move all their
+ * encoder setup into the ->enable() callback.
+ *
+ * This callback is used by the atomic modeset helpers in place of the
+ * @mode_set callback, if set by the driver. It is optional and should
+ * be used instead of @mode_set if the driver needs to inspect the
+ * connector state or display info, since there is no direct way to
+ * go from the encoder to the current connector.
+ */
+ void (*atomic_mode_set)(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+
+ /**
* @get_crtc:
*
* This callback is used by the legacy CRTC helpers to work around
@@ -826,7 +879,7 @@ struct drm_plane_helper_funcs {
* everything else must complete successfully.
*/
int (*prepare_fb)(struct drm_plane *plane,
- const struct drm_plane_state *new_state);
+ struct drm_plane_state *new_state);
/**
* @cleanup_fb:
*
@@ -837,7 +890,7 @@ struct drm_plane_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*cleanup_fb)(struct drm_plane *plane,
- const struct drm_plane_state *old_state);
+ struct drm_plane_state *old_state);
/**
* @atomic_check:
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
new file mode 100644
index 000000000000..43cf193e54d6
--- /dev/null
+++ b/include/drm/drm_plane.h
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_PLANE_H__
+#define __DRM_PLANE_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+struct drm_crtc;
+
+/**
+ * struct drm_plane_state - mutable plane state
+ * @plane: backpointer to the plane
+ * @crtc: currently bound CRTC, NULL if disabled
+ * @fb: currently bound framebuffer
+ * @fence: optional fence to wait for before scanning out @fb
+ * @crtc_x: left position of visible portion of plane on crtc
+ * @crtc_y: upper position of visible portion of plane on crtc
+ * @crtc_w: width of visible portion of plane on crtc
+ * @crtc_h: height of visible portion of plane on crtc
+ * @src_x: left position of visible portion of plane within
+ * plane (in 16.16)
+ * @src_y: upper position of visible portion of plane within
+ * plane (in 16.16)
+ * @src_w: width of visible portion of plane (in 16.16)
+ * @src_h: height of visible portion of plane (in 16.16)
+ * @rotation: rotation of the plane
+ * @zpos: priority of the given plane on crtc (optional)
+ * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
+ * where N is the number of active planes for a given crtc
+ * @src: clipped source coordinates of the plane (in 16.16)
+ * @dst: clipped destination coordinates of the plane
+ * @visible: visibility of the plane
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_plane_state {
+ struct drm_plane *plane;
+
+ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
+ struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
+ struct fence *fence;
+
+ /* Signed dest location allows it to be partially off screen */
+ int32_t crtc_x, crtc_y;
+ uint32_t crtc_w, crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ uint32_t src_x, src_y;
+ uint32_t src_h, src_w;
+
+ /* Plane rotation */
+ unsigned int rotation;
+
+ /* Plane zpos */
+ unsigned int zpos;
+ unsigned int normalized_zpos;
+
+ /* Clipped coordinates */
+ struct drm_rect src, dst;
+
+ /*
+ * Is the plane actually visible? Can be false even
+ * if fb!=NULL and crtc!=NULL, due to clipping.
+ */
+ bool visible;
+
+ struct drm_atomic_state *state;
+};
+
+
+/**
+ * struct drm_plane_funcs - driver plane control functions
+ */
+struct drm_plane_funcs {
+ /**
+ * @update_plane:
+ *
+ * This is the legacy entry point to enable and configure the plane for
+ * the given CRTC and framebuffer. It is never called to disable the
+ * plane, i.e. the passed-in crtc and fb parameters are never NULL.
+ *
+ * The source rectangle in frame buffer memory coordinates is given by
+ * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
+ * values). Devices that don't support subpixel plane coordinates can
+ * ignore the fractional part.
+ *
+ * The destination rectangle in CRTC coordinates is given by the
+ * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
+ * Devices scale the source rectangle to the destination rectangle. If
+ * scaling is not supported, and the source rectangle size doesn't match
+ * the destination rectangle size, the driver must return a
+ * -EINVAL error.
+ *
+ * Drivers implementing atomic modeset should use
+ * drm_atomic_helper_update_plane() to implement this hook.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*update_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+
+ /**
+ * @disable_plane:
+ *
+ * This is the legacy entry point to disable the plane. The DRM core
+ * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
+ * with the frame buffer ID set to 0. Disabled planes must not be
+ * processed by the CRTC.
+ *
+ * Drivers implementing atomic modeset should use
+ * drm_atomic_helper_disable_plane() to implement this hook.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*disable_plane)(struct drm_plane *plane);
+
+ /**
+ * @destroy:
+ *
+ * Clean up plane resources. This is only called at driver unload time
+ * through drm_mode_config_cleanup() since a plane cannot be hotplugged
+ * in DRM.
+ */
+ void (*destroy)(struct drm_plane *plane);
+
+ /**
+ * @reset:
+ *
+ * Reset plane hardware and software state to off. This function isn't
+ * called by the core directly, only through drm_mode_config_reset().
+ * It's not a helper hook only for historical reasons.
+ *
+ * Atomic drivers can use drm_atomic_helper_plane_reset() to reset
+ * atomic state using this hook.
+ */
+ void (*reset)(struct drm_plane *plane);
+
+ /**
+ * @set_property:
+ *
+ * This is the legacy entry point to update a property attached to the
+ * plane.
+ *
+ * Drivers implementing atomic modeset should use
+ * drm_atomic_helper_plane_set_property() to implement this hook.
+ *
+ * This callback is optional if the driver does not support any legacy
+ * driver-private properties.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*set_property)(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val);
+
+ /**
+ * @atomic_duplicate_state:
+ *
+ * Duplicate the current atomic state for this plane and return it.
+ * The core and helpers guarantee that any atomic state duplicated with
+ * this hook and still owned by the caller (i.e. not transferred to the
+ * driver by calling ->atomic_commit() from struct
+ * &drm_mode_config_funcs) will be cleaned up by calling the
+ * @atomic_destroy_state hook in this structure.
+ *
+ * Atomic drivers which don't subclass struct &drm_plane_state should use
+ * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
+ * state structure to extend it with driver-private state should use
+ * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
+ * duplicated in a consistent fashion across drivers.
+ *
+ * It is an error to call this hook before plane->state has been
+ * initialized correctly.
+ *
+ * NOTE:
+ *
+ * If the duplicate state references refcounted resources this hook must
+ * acquire a reference for each of them. The driver must release these
+ * references again in @atomic_destroy_state.
+ *
+ * RETURNS:
+ *
+ * Duplicated atomic state or NULL when the allocation failed.
+ */
+ struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
+
+ /**
+ * @atomic_destroy_state:
+ *
+ * Destroy a state duplicated with @atomic_duplicate_state and release
+ * or unreference all resources it references.
+ */
+ void (*atomic_destroy_state)(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+ /**
+ * @atomic_set_property:
+ *
+ * Decode a driver-private property value and store the decoded value
+ * into the passed-in state structure. Since the atomic core decodes all
+ * standardized properties (even for extensions beyond the core set of
+ * properties which might not be implemented by all drivers) this
+ * requires drivers to subclass the state structure.
+ *
+ * Such driver-private properties should really only be implemented for
+ * truly hardware/vendor-specific state. Instead it is preferred to
+ * standardize atomic extensions and to decode the properties used to
+ * expose such an extension in the core.
+ *
+ * Do not call this function directly, use
+ * drm_atomic_plane_set_property() instead.
+ *
+ * This callback is optional if the driver does not support any
+ * driver-private atomic properties.
+ *
+ * NOTE:
+ *
+ * This function is called in the state assembly phase of atomic
+ * modesets, which can be aborted for any reason (including on
+ * userspace's request to just check whether a configuration would be
+ * possible). Drivers MUST NOT touch any persistent state (hardware or
+ * software) or data structures except the passed in @state parameter.
+ *
+ * Also since userspace controls in which order properties are set this
+ * function must not do any input validation (since the state update is
+ * incomplete and hence likely inconsistent). Instead any such input
+ * validation must be done in the various atomic_check callbacks.
+ *
+ * RETURNS:
+ *
+ * 0 if the property has been found, -EINVAL if the property isn't
+ * implemented by the driver (which shouldn't ever happen, the core only
+ * asks for properties attached to this plane). No other validation is
+ * allowed by the driver. The core already checks that the property
+ * value is within the range (integer, valid enum value, ...) the driver
+ * set when registering the property.
+ */
+ int (*atomic_set_property)(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val);
+
+ /**
+ * @atomic_get_property:
+ *
+ * Reads out the decoded driver-private property. This is used to
+ * implement the GETPLANE IOCTL.
+ *
+ * Do not call this function directly, use
+ * drm_atomic_plane_get_property() instead.
+ *
+ * This callback is optional if the driver does not support any
+ * driver-private atomic properties.
+ *
+ * RETURNS:
+ *
+ * 0 on success, -EINVAL if the property isn't implemented by the
+ * driver (which should never happen, the core only asks for
+ * properties attached to this plane).
+ */
+ int (*atomic_get_property)(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+ /**
+ * @late_register:
+ *
+ * This optional hook can be used to register additional userspace
+ * interfaces attached to the plane like debugfs interfaces.
+ * It is called late in the driver load sequence from drm_dev_register().
+ * Everything added from this callback should be unregistered in
+ * the early_unregister callback.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code on failure.
+ */
+ int (*late_register)(struct drm_plane *plane);
+
+ /**
+ * @early_unregister:
+ *
+ * This optional hook should be used to unregister the additional
+ * userspace interfaces attached to the plane from
+ * @late_register. It is called from drm_dev_unregister(),
+ * early in the driver unload sequence to disable userspace access
+ * before data structures are torn down.
+ */
+ void (*early_unregister)(struct drm_plane *plane);
+};
+
+/**
+ * enum drm_plane_type - uapi plane type enumeration
+ *
+ * For historical reasons not all planes are made the same. This enumeration is
+ * used to tell the different types of planes apart to implement the different
+ * uapi semantics for them. For userspace which is universal-plane aware and
+ * which uses the atomic IOCTL there's no difference between these planes
+ * (beyond what the driver and hardware can support, of course).
+ *
+ * For compatibility with legacy userspace, only overlay planes are made
+ * available to userspace by default. Userspace clients may set the
+ * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
+ * wish to receive a universal plane list containing all plane types. See also
+ * drm_for_each_legacy_plane().
+ *
+ * WARNING: The values of this enum are UABI since they're exposed in the "type"
+ * property.
+ */
+enum drm_plane_type {
+ /**
+ * @DRM_PLANE_TYPE_OVERLAY:
+ *
+ * Overlay planes represent all non-primary, non-cursor planes. Some
+ * drivers refer to these types of planes as "sprites" internally.
+ */
+ DRM_PLANE_TYPE_OVERLAY,
+
+ /**
+ * @DRM_PLANE_TYPE_PRIMARY:
+ *
+ * Primary planes represent a "main" plane for a CRTC. Primary planes
+ * are the planes operated upon by CRTC modesetting and flipping
+ * operations described in the page_flip and set_config hooks in struct
+ * &drm_crtc_funcs.
+ */
+ DRM_PLANE_TYPE_PRIMARY,
+
+ /**
+ * @DRM_PLANE_TYPE_CURSOR:
+ *
+ * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes
+ * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
+ * DRM_IOCTL_MODE_CURSOR2 IOCTLs.
+ */
+ DRM_PLANE_TYPE_CURSOR,
+};
+
+
+/**
+ * struct drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @name: human readable name, can be overwritten by the driver
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @format_default: driver hasn't supplied supported formats for the plane
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
+ * drm_mode_set_config_internal() to implement correct refcounting.
+ * @funcs: helper functions
+ * @properties: property tracking for this plane
+ * @type: type of plane (overlay, primary, cursor)
+ * @state: current atomic state for this plane
+ * @zpos_property: zpos property for this plane
+ * @helper_private: mid-layer private data
+ */
+struct drm_plane {
+ struct drm_device *dev;
+ struct list_head head;
+
+ char *name;
+
+ /**
+ * @mutex:
+ *
+ * Protects modeset plane state, together with the mutex of &drm_crtc
+ * this plane is linked to (when active, getting activated or getting
+ * disabled).
+ */
+ struct drm_modeset_lock mutex;
+
+ struct drm_mode_object base;
+
+ uint32_t possible_crtcs;
+ uint32_t *format_types;
+ unsigned int format_count;
+ bool format_default;
+
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ struct drm_framebuffer *old_fb;
+
+ const struct drm_plane_funcs *funcs;
+
+ struct drm_object_properties properties;
+
+ enum drm_plane_type type;
+
+ /**
+ * @index: Position inside the mode_config.list, can be used as an array
+ * index. It is invariant over the lifetime of the plane.
+ */
+ unsigned index;
+
+ const struct drm_plane_helper_funcs *helper_private;
+
+ struct drm_plane_state *state;
+
+ struct drm_property *zpos_property;
+};
+
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
+
+extern __printf(8, 9)
+int drm_universal_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ enum drm_plane_type type,
+ const char *name, ...);
+extern int drm_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ bool is_primary);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+
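
A hedged sketch of registering a primary plane with the universal init function; the format list, the CRTC mask and foo_plane_funcs are illustrative:

static const uint32_t foo_primary_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_RGB565,
};

static int foo_primary_plane_init(struct drm_device *dev, struct drm_plane *plane)
{
        return drm_universal_plane_init(dev, plane,
                                        BIT(0),         /* CRTC 0 only */
                                        &foo_plane_funcs,
                                        foo_primary_formats,
                                        ARRAY_SIZE(foo_primary_formats),
                                        DRM_PLANE_TYPE_PRIMARY,
                                        "foo-primary");
}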
+/**
+ * drm_plane_index - find the index of a registered plane
+ * @plane: plane to find index for
+ *
+ * Given a registered plane, return the index of that plane within a DRM
+ * device's list of planes.
+ */
+static inline unsigned int drm_plane_index(struct drm_plane *plane)
+{
+ return plane->index;
+}
+extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
+extern void drm_plane_force_disable(struct drm_plane *plane);
+
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t value);
+
+/**
+ * drm_plane_find - find a &drm_plane
+ * @dev: DRM device
+ * @id: plane id
+ *
+ * Returns the plane with @id, NULL if it doesn't exist. Simple wrapper around
+ * drm_mode_object_find().
+ */
+static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
+ return mo ? obj_to_plane(mo) : NULL;
+}
+
+/**
+ * drm_for_each_plane_mask - iterate over planes specified by bitmask
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ * @plane_mask: bitmask of plane indices
+ *
+ * Iterate over all planes specified by bitmask.
+ */
+#define drm_for_each_plane_mask(plane, dev, plane_mask) \
+ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
+ for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
+
+/**
+ * drm_for_each_legacy_plane - iterate over all planes for legacy userspace
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all legacy planes of @dev, excluding primary and cursor planes.
+ * This is useful for implementing userspace apis when userspace is not
+ * universal plane aware. See also enum &drm_plane_type.
+ */
+#define drm_for_each_legacy_plane(plane, dev) \
+ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
+ for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+
+/**
+ * drm_for_each_plane - iterate over all planes
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ *
+ * Iterate over all planes of @dev, including primary and cursor planes.
+ */
+#define drm_for_each_plane(plane, dev) \
+ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+
+
+#endif
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 0e0c3573cce0..c18959685c06 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -27,6 +27,7 @@
#include <drm/drm_rect.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_modeset_helper.h>
/*
* Drivers that don't allow primary plane scaling may pass this macro in place
@@ -37,9 +38,11 @@
*/
#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs);
-
+int drm_plane_helper_check_state(struct drm_plane_state *state,
+ const struct drm_rect *clip,
+ int min_scale, int max_scale,
+ bool can_position,
+ bool can_update_disabled);
int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
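
A hedged sketch of a plane ->atomic_check implementation built on the new drm_plane_helper_check_state(); the clip rectangle, scaling limits and foo_* name are illustrative:

static int foo_plane_atomic_check(struct drm_plane *plane,
                                  struct drm_plane_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_rect clip = { 0 };

        if (!state->crtc)
                return 0;

        crtc_state = drm_atomic_get_existing_crtc_state(state->state,
                                                        state->crtc);
        if (WARN_ON(!crtc_state))
                return -EINVAL;

        clip.x2 = crtc_state->adjusted_mode.hdisplay;
        clip.y2 = crtc_state->adjusted_mode.vdisplay;

        return drm_plane_helper_check_state(state, &clip,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            false, true);
}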
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
new file mode 100644
index 000000000000..43c4b6a2046d
--- /dev/null
+++ b/include/drm/drm_property.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __DRM_PROPERTY_H__
+#define __DRM_PROPERTY_H__
+
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <drm/drm_mode_object.h>
+
+/**
+ * struct drm_property_enum - symbolic values for enumerations
+ * @value: numeric property value for this enum entry
+ * @head: list of enum values, linked to enum_list in &drm_property
+ * @name: symbolic name for the enum
+ *
+ * For enumeration and bitmask properties this structure stores the symbolic
+ * decoding for each value. This is used for example for the rotation property.
+ */
+struct drm_property_enum {
+ uint64_t value;
+ struct list_head head;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+/**
+ * struct drm_property - modeset object property
+ *
+ * This structure represents a modeset object property. It combines both the
+ * name of the property and the set of permissible values. This means that when
+ * a driver wants to use a property with the same name on different objects, but
+ * with different value ranges, it must create a property for each one. An
+ * example would be rotation of &drm_plane, when e.g. the primary plane cannot
+ * be rotated. But if both the name and the value range match, then the same
+ * property structure can be instantiated multiple times for the same object.
+ * Userspace must be able to cope with this and cannot assume that the same
+ * symbolic property will have the same modeset object ID on all modeset
+ * objects.
+ *
+ * Properties are created by one of the special functions, as explained in
+ * detail in the @flags structure member.
+ *
+ * To actually expose a property it must be attached to each object using
+ * drm_object_attach_property(). Currently properties can only be attached to
+ * &drm_connector, &drm_crtc and &drm_plane.
+ *
+ * Properties are also used as the generic metadata transport for the atomic
+ * IOCTL. Everything that was set directly in structures in the legacy modeset
+ * IOCTLs (like the plane source or destination windows, or e.g. the links to
+ * the CRTC) is exposed as a property with the DRM_MODE_PROP_ATOMIC flag set.
+ */
+struct drm_property {
+ /**
+ * @head: per-device list of properties, for cleanup.
+ */
+ struct list_head head;
+
+ /**
+ * @base: base KMS object
+ */
+ struct drm_mode_object base;
+
+ /**
+ * @flags:
+ *
+ * Property flags and type. A property needs to be one of the following
+ * types:
+ *
+ * DRM_MODE_PROP_RANGE
+ * Range properties report their minimum and maximum admissible unsigned values.
+ * The KMS core verifies that values set by application fit in that
+ * range. The range is unsigned. Range properties are created using
+ * drm_property_create_range().
+ *
+ * DRM_MODE_PROP_SIGNED_RANGE
+ * Range properties report their minimum and maximum admissible values.
+ * The KMS core verifies that values set by application fit in that
+ * range. The range is signed. Range properties are created using
+ * drm_property_create_signed_range().
+ *
+ * DRM_MODE_PROP_ENUM
+ * Enumerated properties take a numerical value that ranges from 0 to
+ * the number of enumerated values defined by the property minus one,
+ * and associate a free-formed string name to each value. Applications
+ * can retrieve the list of defined value-name pairs and use the
+ * numerical value to get and set property instance values. Enum
+ * properties are created using drm_property_create_enum().
+ *
+ * DRM_MODE_PROP_BITMASK
+ * Bitmask properties are enumeration properties that additionally
+ * restrict all enumerated values to the 0..63 range. Bitmask property
+ * instance values combine one or more of the enumerated bits defined
+ * by the property. Bitmask properties are created using
+ * drm_property_create_bitmask().
+ *
+ * DRM_MODE_PROP_OBJECT
+ * Object properties are used to link modeset objects. This is used
+ * extensively in the atomic support to create the display pipeline,
+ * by linking &drm_framebuffer to &drm_plane, &drm_plane to
+ * &drm_crtc and &drm_connector to &drm_crtc. An object property can
+ * only link to a specific type of &drm_mode_object, this limit is
+ * enforced by the core. Object properties are created using
+ * drm_property_create_object().
+ *
+ * Object properties work like blob properties, but in a more
+ * general fashion. They are limited to atomic drivers and must have
+ * the DRM_MODE_PROP_ATOMIC flag set.
+ *
+ * DRM_MODE_PROP_BLOB
+ * Blob properties store a binary blob without any format restriction.
+ * The binary blobs are created as KMS standalone objects, and blob
+ * property instance values store the ID of their associated blob
+ * object. Blob properties are created by calling
+ * drm_property_create() with DRM_MODE_PROP_BLOB as the type.
+ *
+ * Actual blob objects to contain blob data are created using
+ * drm_property_create_blob(), or through the corresponding IOCTL.
+ *
+ * Besides the built-in limit to only accept blob objects, blob
+ * properties work exactly like object properties. The only reason
+ * blob properties exist is backwards compatibility with existing
+ * userspace.
+ *
+ * In addition a property can have any combination of the below flags:
+ *
+ * DRM_MODE_PROP_ATOMIC
+ * Set for properties which encode atomic modeset state. Such
+ * properties are not exposed to legacy userspace.
+ *
+ * DRM_MODE_PROP_IMMUTABLE
+ * Set for properties whose values cannot be changed by
+ * userspace. The kernel is allowed to update the value of these
+ * properties. This is generally used to expose probe state to
+ * userspace, e.g. the EDID, or the connector path property on DP
+ * MST sinks.
+ */
+ uint32_t flags;
+
+ /**
+ * @name: symbolic name of the property
+ */
+ char name[DRM_PROP_NAME_LEN];
+
+ /**
+ * @num_values: size of the @values array.
+ */
+ uint32_t num_values;
+
+ /**
+ * @values:
+ *
+ * Array with limits and values for the property. The
+ * interpretation of these limits depends upon the property type in @flags.
+ */
+ uint64_t *values;
+
+ /**
+ * @dev: DRM device
+ */
+ struct drm_device *dev;
+
+ /**
+ * @enum_list:
+ *
+ * List of &drm_prop_enum_list structures with the symbolic names for
+ * enum and bitmask values.
+ */
+ struct list_head enum_list;
+};
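+
+/*
+ * Illustrative sketch (not part of this patch): creating an enum property and
+ * attaching it to a connector with drm_object_attach_property(). The property
+ * name and the enum entries below are hypothetical.
+ *
+ *	static const struct drm_prop_enum_list demo_modes[] = {
+ *		{ 0, "full" },
+ *		{ 1, "center" },
+ *	};
+ *
+ *	struct drm_property *prop;
+ *
+ *	prop = drm_property_create_enum(dev, 0, "demo mode", demo_modes,
+ *					ARRAY_SIZE(demo_modes));
+ *	if (prop)
+ *		drm_object_attach_property(&connector->base, prop, 0);
+ */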
+
+/**
+ * struct drm_property_blob - Blob data for &drm_property
+ * @base: base KMS object
+ * @dev: DRM device
+ * @head_global: entry on the global blob list in &drm_mode_config
+ * property_blob_list.
+ * @head_file: entry on the per-file blob list in &drm_file blobs list.
+ * @length: size of the blob in bytes, invariant over the lifetime of the object
+ * @data: actual data, embedded at the end of this structure
+ *
+ * Blobs are used to store bigger values than what fits directly into the 64
+ * bits available for a &drm_property.
+ *
+ * Blobs are reference counted using drm_property_reference_blob() and
+ * drm_property_unreference_blob(). They are created using
+ * drm_property_create_blob().
+ */
+struct drm_property_blob {
+ struct drm_mode_object base;
+ struct drm_device *dev;
+ struct list_head head_global;
+ struct list_head head_file;
+ size_t length;
+ unsigned char data[];
+};
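+
+/*
+ * Illustrative sketch (not part of this patch): creating a blob object and
+ * dropping the reference once it is no longer needed. The data being wrapped
+ * here is a placeholder; error handling beyond the IS_ERR() check is omitted.
+ *
+ *	struct drm_property_blob *blob;
+ *
+ *	blob = drm_property_create_blob(dev, data_len, data);
+ *	if (IS_ERR(blob))
+ *		return PTR_ERR(blob);
+ *
+ *	... use blob->base.id as the property instance value ...
+ *
+ *	drm_property_unreference_blob(blob);
+ */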
+
+struct drm_prop_enum_list {
+ int type;
+ char *name;
+};
+
+#define obj_to_property(x) container_of(x, struct drm_property, base)
+#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+
+/**
+ * drm_property_type_is - check the type of a property
+ * @property: property to check
+ * @type: property type to compare with
+ *
+ * This is a helper function because the uapi encoding of property types is
+ * a bit special for historical reasons.
+ */
+static inline bool drm_property_type_is(struct drm_property *property,
+ uint32_t type)
+{
+ /* instanceof for props.. handles extended type vs original types: */
+ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+ return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
+ return property->flags & type;
+}
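+
+/*
+ * Illustrative sketch (not part of this patch): using drm_property_type_is()
+ * to dispatch on the property type when validating a value from userspace.
+ *
+ *	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM)) {
+ *		/- value must be one of the enumerated values -/
+ *	} else if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
+ *		/- value must lie between values[0] and values[1] -/
+ *	}
+ */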
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values);
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_props,
+ uint64_t supported_bits);
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max);
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+ int flags, const char *name,
+ int64_t min, int64_t max);
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+ int flags, const char *name, uint32_t type);
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+ const char *name);
+int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name);
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+
+struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
+ size_t length,
+ const void *data);
+struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
+ uint32_t id);
+int drm_property_replace_global_blob(struct drm_device *dev,
+ struct drm_property_blob **replace,
+ size_t length,
+ const void *data,
+ struct drm_mode_object *obj_holds_id,
+ struct drm_property *prop_holds_id);
+struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
+void drm_property_unreference_blob(struct drm_property_blob *blob);
+
+/**
+ * drm_property_find - find property object
+ * @dev: DRM device
+ * @id: property object id
+ *
+ * This function looks up the property object specified by id and returns it.
+ */
+static inline struct drm_property *drm_property_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
+ return mo ? obj_to_property(mo) : NULL;
+}
+
+#endif
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 269039722f91..01a8436ccb0a 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -60,9 +60,35 @@ struct drm_simple_display_pipe_funcs {
*
* This function is called when the underlying plane state is updated.
* This hook is optional.
+ *
+ * This is the function from which drivers should submit the
+ * &drm_pending_vblank_event, using either
+ * drm_crtc_arm_vblank_event() when the driver supports vblank
+ * interrupt handling, or drm_crtc_send_vblank_event() directly when
+ * the hardware lacks vblank support entirely.
*/
void (*update)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
+
+ /**
+ * @prepare_fb:
+ *
+ * Optional, called by struct &drm_plane_helper_funcs ->prepare_fb.
+ * Please read the documentation for the ->prepare_fb hook in
+ * struct &drm_plane_helper_funcs for more details.
+ */
+ int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+ /**
+ * @cleanup_fb:
+ *
+ * Optional, called by struct &drm_plane_helper_funcs ->cleanup_fb.
+ * Please read the documentation for the ->cleanup_fb hook in
+ * struct &drm_plane_helper_funcs for more details.
+ */
+ void (*cleanup_fb)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
};
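+
+/*
+ * Illustrative sketch (not part of this patch): a minimal @update hook that
+ * completes the pending vblank event on hardware without vblank interrupt
+ * support. The function name is hypothetical.
+ *
+ *	static void demo_pipe_update(struct drm_simple_display_pipe *pipe,
+ *				     struct drm_plane_state *plane_state)
+ *	{
+ *		struct drm_crtc *crtc = &pipe->crtc;
+ *		struct drm_pending_vblank_event *event = crtc->state->event;
+ *
+ *		if (event) {
+ *			crtc->state->event = NULL;
+ *			spin_lock_irq(&crtc->dev->event_lock);
+ *			drm_crtc_send_vblank_event(crtc, event);
+ *			spin_unlock_irq(&crtc->dev->event_lock);
+ *		}
+ *	}
+ */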
/**
@@ -85,6 +111,11 @@ struct drm_simple_display_pipe {
const struct drm_simple_display_pipe_funcs *funcs;
};
+int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
+ struct drm_bridge *bridge);
+
+void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe);
+
int drm_simple_display_pipe_init(struct drm_device *dev,
struct drm_simple_display_pipe *pipe,
const struct drm_simple_display_pipe_funcs *funcs,
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 06ea8e077ec2..9c03895dc479 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -24,29 +24,28 @@
*/
#include <drm/drm_mm.h>
-#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+struct drm_file;
+
struct drm_vma_offset_file {
struct rb_node vm_rb;
- struct file *vm_filp;
+ struct drm_file *vm_tag;
unsigned long vm_count;
};
struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
- struct rb_node vm_rb;
struct rb_root vm_files;
};
struct drm_vma_offset_manager {
rwlock_t vm_lock;
- struct rb_root vm_addr_space_rb;
struct drm_mm vm_addr_space_mm;
};
@@ -62,10 +61,11 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node);
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
-void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+ struct drm_file *tag);
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
- struct file *filp);
+ struct drm_file *tag);
/**
* drm_vma_offset_exact_lookup_locked() - Look up node by exact address
@@ -216,9 +216,9 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
/**
* drm_vma_node_verify_access() - Access verification helper for TTM
* @node: Offset node
- * @filp: Open-file
+ * @tag: Tag of file to check
*
- * This checks whether @filp is granted access to @node. It is the same as
+ * This checks whether @tag is granted access to @node. It is the same as
* drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
* verify_access() callbacks.
*
@@ -226,9 +226,9 @@ static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
* 0 if access is granted, -EACCES otherwise.
*/
static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
- struct file *filp)
+ struct drm_file *tag)
{
- return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
+ return drm_vma_node_is_allowed(node, tag) ? 0 : -EACCES;
}
#endif /* __DRM_VMA_MANAGER_H__ */
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
index 3e419d92cf5a..a25483090cd5 100644
--- a/include/drm/i2c/tda998x.h
+++ b/include/drm/i2c/tda998x.h
@@ -1,6 +1,24 @@
#ifndef __DRM_I2C_TDA998X_H__
#define __DRM_I2C_TDA998X_H__
+#include <linux/hdmi.h>
+#include <dt-bindings/display/tda998x.h>
+
+enum {
+ AFMT_UNUSED = 0,
+ AFMT_SPDIF = TDA998x_SPDIF,
+ AFMT_I2S = TDA998x_I2S,
+};
+
+struct tda998x_audio_params {
+ u8 config;
+ u8 format;
+ unsigned sample_width;
+ unsigned sample_rate;
+ struct hdmi_audio_infoframe cea;
+ u8 status[5];
+};
+
struct tda998x_encoder_params {
u8 swap_b:3;
u8 mirr_b:1;
@@ -15,16 +33,7 @@ struct tda998x_encoder_params {
u8 swap_e:3;
u8 mirr_e:1;
- u8 audio_cfg;
- u8 audio_clk_cfg;
- u8 audio_frame[6];
-
- enum {
- AFMT_SPDIF,
- AFMT_I2S
- } audio_format;
-
- unsigned audio_sample_rate;
+ struct tda998x_audio_params audio_params;
};
#endif
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b1755f8db36b..4e1b274e1164 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -93,6 +93,6 @@ extern bool i915_gpu_turbo_disable(void);
#define I845_TSEG_SIZE_1M (3 << 1)
#define INTEL_BSM 0x5c
-#define INTEL_BSM_MASK (0xFFFF << 20)
+#define INTEL_BSM_MASK (-(1u << 20))
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 33466bfc6440..0d5f4268d75f 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -134,7 +134,7 @@
#define INTEL_IVB_Q_IDS(info) \
INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
-#define INTEL_HSW_D_IDS(info) \
+#define INTEL_HSW_IDS(info) \
INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
@@ -179,9 +179,7 @@
INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */ \
-
-#define INTEL_HSW_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
@@ -198,17 +196,15 @@
INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
-#define INTEL_VLV_M_IDS(info) \
+#define INTEL_VLV_IDS(info) \
INTEL_VGA_DEVICE(0x0f30, info), \
INTEL_VGA_DEVICE(0x0f31, info), \
INTEL_VGA_DEVICE(0x0f32, info), \
INTEL_VGA_DEVICE(0x0f33, info), \
- INTEL_VGA_DEVICE(0x0157, info)
-
-#define INTEL_VLV_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0157, info), \
INTEL_VGA_DEVICE(0x0155, info)
-#define INTEL_BDW_GT12M_IDS(info) \
+#define INTEL_BDW_GT12_IDS(info) \
INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
@@ -216,21 +212,17 @@
INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
- INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
-
-#define INTEL_BDW_GT12D_IDS(info) \
+ INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \
INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
-#define INTEL_BDW_GT3M_IDS(info) \
+#define INTEL_BDW_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
- INTEL_VGA_DEVICE(0x162E, info) /* ULX */
-
-#define INTEL_BDW_GT3D_IDS(info) \
+ INTEL_VGA_DEVICE(0x162E, info), /* ULX */\
INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
@@ -244,14 +236,12 @@
INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
-#define INTEL_BDW_M_IDS(info) \
- INTEL_BDW_GT12M_IDS(info), \
- INTEL_BDW_GT3M_IDS(info), \
- INTEL_BDW_RSVDM_IDS(info)
-
-#define INTEL_BDW_D_IDS(info) \
- INTEL_BDW_GT12D_IDS(info), \
- INTEL_BDW_GT3D_IDS(info), \
+#define INTEL_BDW_IDS(info) \
+ INTEL_BDW_GT12_IDS(info), \
+ INTEL_BDW_GT3_IDS(info), \
+ INTEL_BDW_RSVDM_IDS(info), \
+ INTEL_BDW_GT12_IDS(info), \
+ INTEL_BDW_GT3_IDS(info), \
INTEL_BDW_RSVDD_IDS(info)
#define INTEL_CHV_IDS(info) \
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 6f2c59887ba6..9eb940d6755f 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -45,37 +45,7 @@ struct ttm_bo_device;
struct drm_mm_node;
-/**
- * struct ttm_place
- *
- * @fpfn: first valid page frame number to put the object
- * @lpfn: last valid page frame number to put the object
- * @flags: memory domain and caching flags for the object
- *
- * Structure indicating a possible place to put an object.
- */
-struct ttm_place {
- unsigned fpfn;
- unsigned lpfn;
- uint32_t flags;
-};
-
-/**
- * struct ttm_placement
- *
- * @num_placement: number of preferred placements
- * @placement: preferred placements
- * @num_busy_placement: number of preferred placements when need to evict buffer
- * @busy_placement: preferred placements when need to evict buffer
- *
- * Structure indicating the placement you request for an object.
- */
-struct ttm_placement {
- unsigned num_placement;
- const struct ttm_place *placement;
- unsigned num_busy_placement;
- const struct ttm_place *busy_placement;
-};
+struct ttm_placement;
/**
* struct ttm_bus_placement
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 99c6d01d24f2..4f0a92185995 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -133,7 +133,6 @@ struct ttm_tt {
* struct ttm_dma_tt
*
* @ttm: Base ttm_tt struct.
- * @cpu_address: The CPU address of the pages
* @dma_address: The DMA (bus) addresses of the pages
* @pages_list: used by some page allocation backend
*
@@ -143,7 +142,6 @@ struct ttm_tt {
*/
struct ttm_dma_tt {
struct ttm_tt ttm;
- void **cpu_address;
dma_addr_t *dma_address;
struct list_head pages_list;
};
@@ -961,7 +959,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
* ttm_bo_move_ttm
*
* @bo: A pointer to a struct ttm_buffer_object.
- * @evict: 1: This is an eviction. Don't try to pipeline.
* @interruptible: Sleep interruptible if waiting.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
@@ -977,14 +974,13 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
*/
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait_gpu,
+ bool interruptible, bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
* ttm_bo_move_memcpy
*
* @bo: A pointer to a struct ttm_buffer_object.
- * @evict: 1: This is an eviction. Don't try to pipeline.
* @interruptible: Sleep interruptible if waiting.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
@@ -1000,8 +996,7 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+ bool interruptible, bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 72dcbe81dd07..c4520890f267 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -155,4 +155,5 @@ extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
struct page *page);
extern size_t ttm_round_pot(size_t size);
+extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
#endif
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index 8ed44f9bbdfb..932be0c8086e 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -30,6 +30,9 @@
#ifndef _TTM_PLACEMENT_H_
#define _TTM_PLACEMENT_H_
+
+#include <linux/types.h>
+
/*
* Memory regions for data placement.
*/
@@ -37,24 +40,12 @@
#define TTM_PL_SYSTEM 0
#define TTM_PL_TT 1
#define TTM_PL_VRAM 2
-#define TTM_PL_PRIV0 3
-#define TTM_PL_PRIV1 4
-#define TTM_PL_PRIV2 5
-#define TTM_PL_PRIV3 6
-#define TTM_PL_PRIV4 7
-#define TTM_PL_PRIV5 8
-#define TTM_PL_SWAPPED 15
+#define TTM_PL_PRIV 3
#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
-#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
-#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
-#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
-#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
-#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
-#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
-#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
+#define TTM_PL_FLAG_PRIV (1 << TTM_PL_PRIV)
#define TTM_PL_MASK_MEM 0x0000FFFF
/*
@@ -72,7 +63,6 @@
#define TTM_PL_FLAG_CACHED (1 << 16)
#define TTM_PL_FLAG_UNCACHED (1 << 17)
#define TTM_PL_FLAG_WC (1 << 18)
-#define TTM_PL_FLAG_SHARED (1 << 20)
#define TTM_PL_FLAG_NO_EVICT (1 << 21)
#define TTM_PL_FLAG_TOPDOWN (1 << 22)
@@ -82,14 +72,36 @@
#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
-/*
- * Access flags to be used for CPU- and GPU- mappings.
- * The idea is that the TTM synchronization mechanism will
- * allow concurrent READ access and exclusive write access.
- * Currently GPU- and CPU accesses are exclusive.
+/**
+ * struct ttm_place
+ *
+ * @fpfn: first valid page frame number to put the object
+ * @lpfn: last valid page frame number to put the object
+ * @flags: memory domain and caching flags for the object
+ *
+ * Structure indicating a possible place to put an object.
*/
+struct ttm_place {
+ unsigned fpfn;
+ unsigned lpfn;
+ uint32_t flags;
+};
-#define TTM_ACCESS_READ (1 << 0)
-#define TTM_ACCESS_WRITE (1 << 1)
+/**
+ * struct ttm_placement
+ *
+ * @num_placement: number of preferred placements
+ * @placement: preferred placements
+ * @num_busy_placement: number of preferred placements when need to evict buffer
+ * @busy_placement: preferred placements when need to evict buffer
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+ unsigned num_placement;
+ const struct ttm_place *placement;
+ unsigned num_busy_placement;
+ const struct ttm_place *busy_placement;
+};
#endif
diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h
index 85b467b3a207..6cb4e90f81fc 100644
--- a/include/dt-bindings/clock/exynos5410.h
+++ b/include/dt-bindings/clock/exynos5410.h
@@ -19,6 +19,7 @@
#define CLK_FOUT_MPLL 4
#define CLK_FOUT_BPLL 5
#define CLK_FOUT_KPLL 6
+#define CLK_FOUT_EPLL 7
/* gate for special clocks (sclk) */
#define CLK_SCLK_UART0 128
@@ -55,6 +56,8 @@
#define CLK_MMC0 351
#define CLK_MMC1 352
#define CLK_MMC2 353
+#define CLK_PDMA0 362
+#define CLK_PDMA1 363
#define CLK_USBH20 365
#define CLK_USBD300 366
#define CLK_USBD301 367
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 17ab8394bec7..6fd21c291416 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -214,6 +214,9 @@
#define CLK_MOUT_SW_ACLK400 651
#define CLK_MOUT_USER_ACLK300_GSCL 652
#define CLK_MOUT_SW_ACLK300_GSCL 653
+#define CLK_MOUT_MCLK_CDREX 654
+#define CLK_MOUT_BPLL 655
+#define CLK_MOUT_MX_MSPLL_CCORE 656
/* divider clocks */
#define CLK_DOUT_PIXEL 768
@@ -239,8 +242,14 @@
#define CLK_DOUT_ACLK300_DISP1 788
#define CLK_DOUT_ACLK300_GSCL 789
#define CLK_DOUT_ACLK400_DISP1 790
+#define CLK_DOUT_PCLK_CDREX 791
+#define CLK_DOUT_SCLK_CDREX 792
+#define CLK_DOUT_ACLK_CDREX1 793
+#define CLK_DOUT_CCLK_DREX0 794
+#define CLK_DOUT_CLK2X_PHY0 795
+#define CLK_DOUT_PCLK_CORE_MEM 796
/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 791
+#define CLK_NR_CLKS 797
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */
diff --git a/include/dt-bindings/clock/exynos5440.h b/include/dt-bindings/clock/exynos5440.h
index c66fc405a79a..842cdc0adff1 100644
--- a/include/dt-bindings/clock/exynos5440.h
+++ b/include/dt-bindings/clock/exynos5440.h
@@ -14,6 +14,8 @@
#define CLK_XTAL 1
#define CLK_ARM_CLK 2
+#define CLK_CPLLA 3
+#define CLK_CPLLB 4
#define CLK_SPI_BAUD 16
#define CLK_PB0_250 17
#define CLK_PR0_250 18
diff --git a/include/dt-bindings/clock/gxbb-aoclkc.h b/include/dt-bindings/clock/gxbb-aoclkc.h
new file mode 100644
index 000000000000..31751482d13c
--- /dev/null
+++ b/include/dt-bindings/clock/gxbb-aoclkc.h
@@ -0,0 +1,66 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DT_BINDINGS_CLOCK_AMLOGIC_MESON_GXBB_AOCLK
+#define DT_BINDINGS_CLOCK_AMLOGIC_MESON_GXBB_AOCLK
+
+#define CLKID_AO_REMOTE 0
+#define CLKID_AO_I2C_MASTER 1
+#define CLKID_AO_I2C_SLAVE 2
+#define CLKID_AO_UART1 3
+#define CLKID_AO_UART2 4
+#define CLKID_AO_IR_BLASTER 5
+
+#endif
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index f889d80246cb..baade6f429d0 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -6,7 +6,23 @@
#define __GXBB_CLKC_H
#define CLKID_CPUCLK 1
+#define CLKID_HDMI_PLL 2
+#define CLKID_FCLK_DIV2 4
+#define CLKID_FCLK_DIV3 5
+#define CLKID_FCLK_DIV4 6
#define CLKID_CLK81 12
+#define CLKID_MPLL2 15
+#define CLKID_SPI 34
+#define CLKID_I2C 22
#define CLKID_ETH 36
+#define CLKID_USB0 50
+#define CLKID_USB1 51
+#define CLKID_USB 55
+#define CLKID_USB1_DDR_BRIDGE 64
+#define CLKID_USB0_DDR_BRIDGE 65
+#define CLKID_AO_I2C 93
+#define CLKID_SD_EMMC_A 94
+#define CLKID_SD_EMMC_B 95
+#define CLKID_SD_EMMC_C 96
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/imx5-clock.h b/include/dt-bindings/clock/imx5-clock.h
index f4b7478e23c8..d382fc71aa83 100644
--- a/include/dt-bindings/clock/imx5-clock.h
+++ b/include/dt-bindings/clock/imx5-clock.h
@@ -201,6 +201,19 @@
#define IMX5_CLK_STEP_SEL 189
#define IMX5_CLK_CPU_PODF_SEL 190
#define IMX5_CLK_ARM 191
-#define IMX5_CLK_END 192
+#define IMX5_CLK_FIRI_PRED 192
+#define IMX5_CLK_FIRI_SEL 193
+#define IMX5_CLK_FIRI_PODF 194
+#define IMX5_CLK_FIRI_SERIAL_GATE 195
+#define IMX5_CLK_FIRI_IPG_GATE 196
+#define IMX5_CLK_CSI0_MCLK1_PRED 197
+#define IMX5_CLK_CSI0_MCLK1_SEL 198
+#define IMX5_CLK_CSI0_MCLK1_PODF 199
+#define IMX5_CLK_CSI0_MCLK1_GATE 200
+#define IMX5_CLK_IEEE1588_PRED 201
+#define IMX5_CLK_IEEE1588_SEL 202
+#define IMX5_CLK_IEEE1588_PODF 203
+#define IMX5_CLK_IEEE1588_GATE 204
+#define IMX5_CLK_END 205
#endif /* __DT_BINDINGS_CLOCK_IMX5_H */
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 29050337d9d5..da59fd9cdb5e 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -269,6 +269,8 @@
#define IMX6QDL_CLK_PRG0_APB 256
#define IMX6QDL_CLK_PRG1_APB 257
#define IMX6QDL_CLK_PRE_AXI 258
-#define IMX6QDL_CLK_END 259
+#define IMX6QDL_CLK_MLB_SEL 259
+#define IMX6QDL_CLK_MLB_PODF 260
+#define IMX6QDL_CLK_END 261
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/maxim,max77620.h b/include/dt-bindings/clock/maxim,max77620.h
new file mode 100644
index 000000000000..82aba2849681
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max77620.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Device Tree binding constants for the clocks of the Maxim 77620 PMIC.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H
+#define _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H
+
+/* Fixed rate clocks. */
+
+#define MAX77620_CLK_32K_OUT0 0
+
+/* Total number of clocks. */
+#define MAX77620_CLKS_NUM (MAX77620_CLK_32K_OUT0 + 1)
+
+#endif /* _DT_BINDINGS_CLOCK_MAXIM_MAX77620_CLOCK_H */
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 595a58d0969a..a55ff8c9b30f 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -22,6 +22,4 @@
#define CLKID_MPEG_SEL 14
#define CLKID_MPEG_DIV 15
-#define CLK_NR_CLKS (CLKID_MPEG_DIV + 1)
-
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
new file mode 100644
index 000000000000..2062c67e2e51
--- /dev/null
+++ b/include/dt-bindings/clock/mt2701-clk.h
@@ -0,0 +1,486 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT2701_H
+#define _DT_BINDINGS_CLK_MT2701_H
+
+/* TOPCKGEN */
+#define CLK_TOP_SYSPLL 1
+#define CLK_TOP_SYSPLL_D2 2
+#define CLK_TOP_SYSPLL_D3 3
+#define CLK_TOP_SYSPLL_D5 4
+#define CLK_TOP_SYSPLL_D7 5
+#define CLK_TOP_SYSPLL1_D2 6
+#define CLK_TOP_SYSPLL1_D4 7
+#define CLK_TOP_SYSPLL1_D8 8
+#define CLK_TOP_SYSPLL1_D16 9
+#define CLK_TOP_SYSPLL2_D2 10
+#define CLK_TOP_SYSPLL2_D4 11
+#define CLK_TOP_SYSPLL2_D8 12
+#define CLK_TOP_SYSPLL3_D2 13
+#define CLK_TOP_SYSPLL3_D4 14
+#define CLK_TOP_SYSPLL4_D2 15
+#define CLK_TOP_SYSPLL4_D4 16
+#define CLK_TOP_UNIVPLL 17
+#define CLK_TOP_UNIVPLL_D2 18
+#define CLK_TOP_UNIVPLL_D3 19
+#define CLK_TOP_UNIVPLL_D5 20
+#define CLK_TOP_UNIVPLL_D7 21
+#define CLK_TOP_UNIVPLL_D26 22
+#define CLK_TOP_UNIVPLL_D52 23
+#define CLK_TOP_UNIVPLL_D108 24
+#define CLK_TOP_USB_PHY48M 25
+#define CLK_TOP_UNIVPLL1_D2 26
+#define CLK_TOP_UNIVPLL1_D4 27
+#define CLK_TOP_UNIVPLL1_D8 28
+#define CLK_TOP_UNIVPLL2_D2 29
+#define CLK_TOP_UNIVPLL2_D4 30
+#define CLK_TOP_UNIVPLL2_D8 31
+#define CLK_TOP_UNIVPLL2_D16 32
+#define CLK_TOP_UNIVPLL2_D32 33
+#define CLK_TOP_UNIVPLL3_D2 34
+#define CLK_TOP_UNIVPLL3_D4 35
+#define CLK_TOP_UNIVPLL3_D8 36
+#define CLK_TOP_MSDCPLL 37
+#define CLK_TOP_MSDCPLL_D2 38
+#define CLK_TOP_MSDCPLL_D4 39
+#define CLK_TOP_MSDCPLL_D8 40
+#define CLK_TOP_MMPLL 41
+#define CLK_TOP_MMPLL_D2 42
+#define CLK_TOP_DMPLL 43
+#define CLK_TOP_DMPLL_D2 44
+#define CLK_TOP_DMPLL_D4 45
+#define CLK_TOP_DMPLL_X2 46
+#define CLK_TOP_TVDPLL 47
+#define CLK_TOP_TVDPLL_D2 48
+#define CLK_TOP_TVDPLL_D4 49
+#define CLK_TOP_TVD2PLL 50
+#define CLK_TOP_TVD2PLL_D2 51
+#define CLK_TOP_HADDS2PLL_98M 52
+#define CLK_TOP_HADDS2PLL_294M 53
+#define CLK_TOP_HADDS2_FB 54
+#define CLK_TOP_MIPIPLL_D2 55
+#define CLK_TOP_MIPIPLL_D4 56
+#define CLK_TOP_HDMIPLL 57
+#define CLK_TOP_HDMIPLL_D2 58
+#define CLK_TOP_HDMIPLL_D3 59
+#define CLK_TOP_HDMI_SCL_RX 60
+#define CLK_TOP_HDMI_0_PIX340M 61
+#define CLK_TOP_HDMI_0_DEEP340M 62
+#define CLK_TOP_HDMI_0_PLL340M 63
+#define CLK_TOP_AUD1PLL_98M 64
+#define CLK_TOP_AUD2PLL_90M 65
+#define CLK_TOP_AUDPLL 66
+#define CLK_TOP_AUDPLL_D4 67
+#define CLK_TOP_AUDPLL_D8 68
+#define CLK_TOP_AUDPLL_D16 69
+#define CLK_TOP_AUDPLL_D24 70
+#define CLK_TOP_ETHPLL_500M 71
+#define CLK_TOP_VDECPLL 72
+#define CLK_TOP_VENCPLL 73
+#define CLK_TOP_MIPIPLL 74
+#define CLK_TOP_ARMPLL_1P3G 75
+
+#define CLK_TOP_MM_SEL 76
+#define CLK_TOP_DDRPHYCFG_SEL 77
+#define CLK_TOP_MEM_SEL 78
+#define CLK_TOP_AXI_SEL 79
+#define CLK_TOP_CAMTG_SEL 80
+#define CLK_TOP_MFG_SEL 81
+#define CLK_TOP_VDEC_SEL 82
+#define CLK_TOP_PWM_SEL 83
+#define CLK_TOP_MSDC30_0_SEL 84
+#define CLK_TOP_USB20_SEL 85
+#define CLK_TOP_SPI0_SEL 86
+#define CLK_TOP_UART_SEL 87
+#define CLK_TOP_AUDINTBUS_SEL 88
+#define CLK_TOP_AUDIO_SEL 89
+#define CLK_TOP_MSDC30_2_SEL 90
+#define CLK_TOP_MSDC30_1_SEL 91
+#define CLK_TOP_DPI1_SEL 92
+#define CLK_TOP_DPI0_SEL 93
+#define CLK_TOP_SCP_SEL 94
+#define CLK_TOP_PMICSPI_SEL 95
+#define CLK_TOP_APLL_SEL 96
+#define CLK_TOP_HDMI_SEL 97
+#define CLK_TOP_TVE_SEL 98
+#define CLK_TOP_EMMC_HCLK_SEL 99
+#define CLK_TOP_NFI2X_SEL 100
+#define CLK_TOP_RTC_SEL 101
+#define CLK_TOP_OSD_SEL 102
+#define CLK_TOP_NR_SEL 103
+#define CLK_TOP_DI_SEL 104
+#define CLK_TOP_FLASH_SEL 105
+#define CLK_TOP_ASM_M_SEL 106
+#define CLK_TOP_ASM_I_SEL 107
+#define CLK_TOP_INTDIR_SEL 108
+#define CLK_TOP_HDMIRX_BIST_SEL 109
+#define CLK_TOP_ETHIF_SEL 110
+#define CLK_TOP_MS_CARD_SEL 111
+#define CLK_TOP_ASM_H_SEL 112
+#define CLK_TOP_SPI1_SEL 113
+#define CLK_TOP_CMSYS_SEL 114
+#define CLK_TOP_MSDC30_3_SEL 115
+#define CLK_TOP_HDMIRX26_24_SEL 116
+#define CLK_TOP_AUD2DVD_SEL 117
+#define CLK_TOP_8BDAC_SEL 118
+#define CLK_TOP_SPI2_SEL 119
+#define CLK_TOP_AUD_MUX1_SEL 120
+#define CLK_TOP_AUD_MUX2_SEL 121
+#define CLK_TOP_AUDPLL_MUX_SEL 122
+#define CLK_TOP_AUD_K1_SRC_SEL 123
+#define CLK_TOP_AUD_K2_SRC_SEL 124
+#define CLK_TOP_AUD_K3_SRC_SEL 125
+#define CLK_TOP_AUD_K4_SRC_SEL 126
+#define CLK_TOP_AUD_K5_SRC_SEL 127
+#define CLK_TOP_AUD_K6_SRC_SEL 128
+#define CLK_TOP_PADMCLK_SEL 129
+#define CLK_TOP_AUD_EXTCK1_DIV 130
+#define CLK_TOP_AUD_EXTCK2_DIV 131
+#define CLK_TOP_AUD_MUX1_DIV 132
+#define CLK_TOP_AUD_MUX2_DIV 133
+#define CLK_TOP_AUD_K1_SRC_DIV 134
+#define CLK_TOP_AUD_K2_SRC_DIV 135
+#define CLK_TOP_AUD_K3_SRC_DIV 136
+#define CLK_TOP_AUD_K4_SRC_DIV 137
+#define CLK_TOP_AUD_K5_SRC_DIV 138
+#define CLK_TOP_AUD_K6_SRC_DIV 139
+#define CLK_TOP_AUD_I2S1_MCLK 140
+#define CLK_TOP_AUD_I2S2_MCLK 141
+#define CLK_TOP_AUD_I2S3_MCLK 142
+#define CLK_TOP_AUD_I2S4_MCLK 143
+#define CLK_TOP_AUD_I2S5_MCLK 144
+#define CLK_TOP_AUD_I2S6_MCLK 145
+#define CLK_TOP_AUD_48K_TIMING 146
+#define CLK_TOP_AUD_44K_TIMING 147
+
+#define CLK_TOP_32K_INTERNAL 148
+#define CLK_TOP_32K_EXTERNAL 149
+#define CLK_TOP_CLK26M_D8 150
+#define CLK_TOP_8BDAC 151
+#define CLK_TOP_WBG_DIG_416M 152
+#define CLK_TOP_DPI 153
+#define CLK_TOP_HDMITX_CLKDIG_CTS 154
+#define CLK_TOP_DSI0_LNTC_DSI 155
+#define CLK_TOP_AUD_EXT1 156
+#define CLK_TOP_AUD_EXT2 157
+#define CLK_TOP_NFI1X_PAD 158
+#define CLK_TOP_NR 159
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL 1
+#define CLK_APMIXED_MAINPLL 2
+#define CLK_APMIXED_UNIVPLL 3
+#define CLK_APMIXED_MMPLL 4
+#define CLK_APMIXED_MSDCPLL 5
+#define CLK_APMIXED_TVDPLL 6
+#define CLK_APMIXED_AUD1PLL 7
+#define CLK_APMIXED_TRGPLL 8
+#define CLK_APMIXED_ETHPLL 9
+#define CLK_APMIXED_VDECPLL 10
+#define CLK_APMIXED_HADDS2PLL 11
+#define CLK_APMIXED_AUD2PLL 12
+#define CLK_APMIXED_TVD2PLL 13
+#define CLK_APMIXED_NR 14
+
+/* DDRPHY */
+
+#define CLK_DDRPHY_VENCPLL 1
+#define CLK_DDRPHY_NR 2
+
+/* INFRACFG */
+
+#define CLK_INFRA_DBG 1
+#define CLK_INFRA_SMI 2
+#define CLK_INFRA_QAXI_CM4 3
+#define CLK_INFRA_AUD_SPLIN_B 4
+#define CLK_INFRA_AUDIO 5
+#define CLK_INFRA_EFUSE 6
+#define CLK_INFRA_L2C_SRAM 7
+#define CLK_INFRA_M4U 8
+#define CLK_INFRA_CONNMCU 9
+#define CLK_INFRA_TRNG 10
+#define CLK_INFRA_RAMBUFIF 11
+#define CLK_INFRA_CPUM 12
+#define CLK_INFRA_KP 13
+#define CLK_INFRA_CEC 14
+#define CLK_INFRA_IRRX 15
+#define CLK_INFRA_PMICSPI 16
+#define CLK_INFRA_PMICWRAP 17
+#define CLK_INFRA_DDCCI 18
+#define CLK_INFRA_CLK_13M 19
+#define CLK_INFRA_NR 20
+
+/* PERICFG */
+
+#define CLK_PERI_NFI 1
+#define CLK_PERI_THERM 2
+#define CLK_PERI_PWM1 3
+#define CLK_PERI_PWM2 4
+#define CLK_PERI_PWM3 5
+#define CLK_PERI_PWM4 6
+#define CLK_PERI_PWM5 7
+#define CLK_PERI_PWM6 8
+#define CLK_PERI_PWM7 9
+#define CLK_PERI_PWM 10
+#define CLK_PERI_USB0 11
+#define CLK_PERI_USB1 12
+#define CLK_PERI_AP_DMA 13
+#define CLK_PERI_MSDC30_0 14
+#define CLK_PERI_MSDC30_1 15
+#define CLK_PERI_MSDC30_2 16
+#define CLK_PERI_MSDC30_3 17
+#define CLK_PERI_MSDC50_3 18
+#define CLK_PERI_NLI 19
+#define CLK_PERI_UART0 20
+#define CLK_PERI_UART1 21
+#define CLK_PERI_UART2 22
+#define CLK_PERI_UART3 23
+#define CLK_PERI_BTIF 24
+#define CLK_PERI_I2C0 25
+#define CLK_PERI_I2C1 26
+#define CLK_PERI_I2C2 27
+#define CLK_PERI_I2C3 28
+#define CLK_PERI_AUXADC 29
+#define CLK_PERI_SPI0 30
+#define CLK_PERI_ETH 31
+#define CLK_PERI_USB0_MCU 32
+
+#define CLK_PERI_USB1_MCU 33
+#define CLK_PERI_USB_SLV 34
+#define CLK_PERI_GCPU 35
+#define CLK_PERI_NFI_ECC 36
+#define CLK_PERI_NFI_PAD 37
+#define CLK_PERI_FLASH 38
+#define CLK_PERI_HOST89_INT 39
+#define CLK_PERI_HOST89_SPI 40
+#define CLK_PERI_HOST89_DVD 41
+#define CLK_PERI_SPI1 42
+#define CLK_PERI_SPI2 43
+#define CLK_PERI_FCI 44
+
+#define CLK_PERI_UART0_SEL 45
+#define CLK_PERI_UART1_SEL 46
+#define CLK_PERI_UART2_SEL 47
+#define CLK_PERI_UART3_SEL 48
+#define CLK_PERI_NR 49
+
+/* AUDIO */
+
+#define CLK_AUD_AFE 1
+#define CLK_AUD_LRCK_DETECT 2
+#define CLK_AUD_I2S 3
+#define CLK_AUD_APLL_TUNER 4
+#define CLK_AUD_HDMI 5
+#define CLK_AUD_SPDF 6
+#define CLK_AUD_SPDF2 7
+#define CLK_AUD_APLL 8
+#define CLK_AUD_TML 9
+#define CLK_AUD_AHB_IDLE_EXT 10
+#define CLK_AUD_AHB_IDLE_INT 11
+
+#define CLK_AUD_I2SIN1 12
+#define CLK_AUD_I2SIN2 13
+#define CLK_AUD_I2SIN3 14
+#define CLK_AUD_I2SIN4 15
+#define CLK_AUD_I2SIN5 16
+#define CLK_AUD_I2SIN6 17
+#define CLK_AUD_I2SO1 18
+#define CLK_AUD_I2SO2 19
+#define CLK_AUD_I2SO3 20
+#define CLK_AUD_I2SO4 21
+#define CLK_AUD_I2SO5 22
+#define CLK_AUD_I2SO6 23
+#define CLK_AUD_ASRCI1 24
+#define CLK_AUD_ASRCI2 25
+#define CLK_AUD_ASRCO1 26
+#define CLK_AUD_ASRCO2 27
+#define CLK_AUD_ASRC11 28
+#define CLK_AUD_ASRC12 29
+#define CLK_AUD_HDMIRX 30
+#define CLK_AUD_INTDIR 31
+#define CLK_AUD_A1SYS 32
+#define CLK_AUD_A2SYS 33
+#define CLK_AUD_AFE_CONN 34
+#define CLK_AUD_AFE_PCMIF 35
+#define CLK_AUD_AFE_MRGIF 36
+
+#define CLK_AUD_MMIF_UL1 37
+#define CLK_AUD_MMIF_UL2 38
+#define CLK_AUD_MMIF_UL3 39
+#define CLK_AUD_MMIF_UL4 40
+#define CLK_AUD_MMIF_UL5 41
+#define CLK_AUD_MMIF_UL6 42
+#define CLK_AUD_MMIF_DL1 43
+#define CLK_AUD_MMIF_DL2 44
+#define CLK_AUD_MMIF_DL3 45
+#define CLK_AUD_MMIF_DL4 46
+#define CLK_AUD_MMIF_DL5 47
+#define CLK_AUD_MMIF_DL6 48
+#define CLK_AUD_MMIF_DLMCH 49
+#define CLK_AUD_MMIF_ARB1 50
+#define CLK_AUD_MMIF_AWB1 51
+#define CLK_AUD_MMIF_AWB2 52
+#define CLK_AUD_MMIF_DAI 53
+
+#define CLK_AUD_DMIC1 54
+#define CLK_AUD_DMIC2 55
+#define CLK_AUD_ASRCI3 56
+#define CLK_AUD_ASRCI4 57
+#define CLK_AUD_ASRCI5 58
+#define CLK_AUD_ASRCI6 59
+#define CLK_AUD_ASRCO3 60
+#define CLK_AUD_ASRCO4 61
+#define CLK_AUD_ASRCO5 62
+#define CLK_AUD_ASRCO6 63
+#define CLK_AUD_MEM_ASRC1 64
+#define CLK_AUD_MEM_ASRC2 65
+#define CLK_AUD_MEM_ASRC3 66
+#define CLK_AUD_MEM_ASRC4 67
+#define CLK_AUD_MEM_ASRC5 68
+#define CLK_AUD_DSD_ENC 69
+#define CLK_AUD_ASRC_BRG 70
+#define CLK_AUD_NR 71
+
+/* MMSYS */
+
+#define CLK_MM_SMI_COMMON 1
+#define CLK_MM_SMI_LARB0 2
+#define CLK_MM_CMDQ 3
+#define CLK_MM_MUTEX 4
+#define CLK_MM_DISP_COLOR 5
+#define CLK_MM_DISP_BLS 6
+#define CLK_MM_DISP_WDMA 7
+#define CLK_MM_DISP_RDMA 8
+#define CLK_MM_DISP_OVL 9
+#define CLK_MM_MDP_TDSHP 10
+#define CLK_MM_MDP_WROT 11
+#define CLK_MM_MDP_WDMA 12
+#define CLK_MM_MDP_RSZ1 13
+#define CLK_MM_MDP_RSZ0 14
+#define CLK_MM_MDP_RDMA 15
+#define CLK_MM_MDP_BLS_26M 16
+#define CLK_MM_CAM_MDP 17
+#define CLK_MM_FAKE_ENG 18
+#define CLK_MM_MUTEX_32K 19
+#define CLK_MM_DISP_RDMA1 20
+#define CLK_MM_DISP_UFOE 21
+
+#define CLK_MM_DSI_ENGINE 22
+#define CLK_MM_DSI_DIG 23
+#define CLK_MM_DPI_DIGL 24
+#define CLK_MM_DPI_ENGINE 25
+#define CLK_MM_DPI1_DIGL 26
+#define CLK_MM_DPI1_ENGINE 27
+#define CLK_MM_TVE_OUTPUT 28
+#define CLK_MM_TVE_INPUT 29
+#define CLK_MM_HDMI_PIXEL 30
+#define CLK_MM_HDMI_PLL 31
+#define CLK_MM_HDMI_AUDIO 32
+#define CLK_MM_HDMI_SPDIF 33
+#define CLK_MM_TVE_FMM 34
+#define CLK_MM_NR 35
+
+/* IMGSYS */
+
+#define CLK_IMG_SMI_COMM 1
+#define CLK_IMG_RESZ 2
+#define CLK_IMG_JPGDEC_SMI 3
+#define CLK_IMG_JPGDEC 4
+#define CLK_IMG_VENC_LT 5
+#define CLK_IMG_VENC 6
+#define CLK_IMG_NR 7
+
+/* VDEC */
+
+#define CLK_VDEC_CKGEN 1
+#define CLK_VDEC_LARB 2
+#define CLK_VDEC_NR 3
+
+/* HIFSYS */
+
+#define CLK_HIFSYS_USB0PHY 1
+#define CLK_HIFSYS_USB1PHY 2
+#define CLK_HIFSYS_PCIE0 3
+#define CLK_HIFSYS_PCIE1 4
+#define CLK_HIFSYS_PCIE2 5
+#define CLK_HIFSYS_NR 6
+
+/* ETHSYS */
+#define CLK_ETHSYS_HSDMA 1
+#define CLK_ETHSYS_ESW 2
+#define CLK_ETHSYS_GP2 3
+#define CLK_ETHSYS_GP1 4
+#define CLK_ETHSYS_PCM 5
+#define CLK_ETHSYS_GDMA 6
+#define CLK_ETHSYS_I2S 7
+#define CLK_ETHSYS_CRYPTO 8
+#define CLK_ETHSYS_NR 9
+
+/* BDP */
+
+#define CLK_BDP_BRG_BA 1
+#define CLK_BDP_BRG_DRAM 2
+#define CLK_BDP_LARB_DRAM 3
+#define CLK_BDP_WR_VDI_PXL 4
+#define CLK_BDP_WR_VDI_DRAM 5
+#define CLK_BDP_WR_B 6
+#define CLK_BDP_DGI_IN 7
+#define CLK_BDP_DGI_OUT 8
+#define CLK_BDP_FMT_MAST_27 9
+#define CLK_BDP_FMT_B 10
+#define CLK_BDP_OSD_B 11
+#define CLK_BDP_OSD_DRAM 12
+#define CLK_BDP_OSD_AGENT 13
+#define CLK_BDP_OSD_PXL 14
+#define CLK_BDP_RLE_B 15
+#define CLK_BDP_RLE_AGENT 16
+#define CLK_BDP_RLE_DRAM 17
+#define CLK_BDP_F27M 18
+#define CLK_BDP_F27M_VDOUT 19
+#define CLK_BDP_F27_74_74 20
+#define CLK_BDP_F2FS 21
+#define CLK_BDP_F2FS74_148 22
+#define CLK_BDP_FB 23
+#define CLK_BDP_VDO_DRAM 24
+#define CLK_BDP_VDO_2FS 25
+#define CLK_BDP_VDO_B 26
+#define CLK_BDP_WR_DI_PXL 27
+#define CLK_BDP_WR_DI_DRAM 28
+#define CLK_BDP_WR_DI_B 29
+#define CLK_BDP_NR_PXL 30
+#define CLK_BDP_NR_DRAM 31
+#define CLK_BDP_NR_B 32
+
+#define CLK_BDP_RX_F 33
+#define CLK_BDP_RX_X 34
+#define CLK_BDP_RXPDT 35
+#define CLK_BDP_RX_CSCL_N 36
+#define CLK_BDP_RX_CSCL 37
+#define CLK_BDP_RX_DDCSCL_N 38
+#define CLK_BDP_RX_DDCSCL 39
+#define CLK_BDP_RX_VCO 40
+#define CLK_BDP_RX_DP 41
+#define CLK_BDP_RX_P 42
+#define CLK_BDP_RX_M 43
+#define CLK_BDP_RX_PLL 44
+#define CLK_BDP_BRG_RT_B 45
+#define CLK_BDP_BRG_RT_DRAM 46
+#define CLK_BDP_LARBRT_DRAM 47
+#define CLK_BDP_TMDS_SYN 48
+#define CLK_BDP_HDMI_MON 49
+#define CLK_BDP_NR 50
+
+#endif /* _DT_BINDINGS_CLK_MT2701_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9615.h b/include/dt-bindings/clock/qcom,gcc-mdm9615.h
new file mode 100644
index 000000000000..9ab2c4087120
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-mdm9615.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MDM_GCC_9615_H
+#define _DT_BINDINGS_CLK_MDM_GCC_9615_H
+
+#define AFAB_CLK_SRC 0
+#define AFAB_CORE_CLK 1
+#define SFAB_MSS_Q6_SW_A_CLK 2
+#define SFAB_MSS_Q6_FW_A_CLK 3
+#define QDSS_STM_CLK 4
+#define SCSS_A_CLK 5
+#define SCSS_H_CLK 6
+#define SCSS_XO_SRC_CLK 7
+#define AFAB_EBI1_CH0_A_CLK 8
+#define AFAB_EBI1_CH1_A_CLK 9
+#define AFAB_AXI_S0_FCLK 10
+#define AFAB_AXI_S1_FCLK 11
+#define AFAB_AXI_S2_FCLK 12
+#define AFAB_AXI_S3_FCLK 13
+#define AFAB_AXI_S4_FCLK 14
+#define SFAB_CORE_CLK 15
+#define SFAB_AXI_S0_FCLK 16
+#define SFAB_AXI_S1_FCLK 17
+#define SFAB_AXI_S2_FCLK 18
+#define SFAB_AXI_S3_FCLK 19
+#define SFAB_AXI_S4_FCLK 20
+#define SFAB_AHB_S0_FCLK 21
+#define SFAB_AHB_S1_FCLK 22
+#define SFAB_AHB_S2_FCLK 23
+#define SFAB_AHB_S3_FCLK 24
+#define SFAB_AHB_S4_FCLK 25
+#define SFAB_AHB_S5_FCLK 26
+#define SFAB_AHB_S6_FCLK 27
+#define SFAB_AHB_S7_FCLK 28
+#define QDSS_AT_CLK_SRC 29
+#define QDSS_AT_CLK 30
+#define QDSS_TRACECLKIN_CLK_SRC 31
+#define QDSS_TRACECLKIN_CLK 32
+#define QDSS_TSCTR_CLK_SRC 33
+#define QDSS_TSCTR_CLK 34
+#define SFAB_ADM0_M0_A_CLK 35
+#define SFAB_ADM0_M1_A_CLK 36
+#define SFAB_ADM0_M2_H_CLK 37
+#define ADM0_CLK 38
+#define ADM0_PBUS_CLK 39
+#define MSS_XPU_CLK 40
+#define IMEM0_A_CLK 41
+#define QDSS_H_CLK 42
+#define PCIE_A_CLK 43
+#define PCIE_AUX_CLK 44
+#define PCIE_PHY_REF_CLK 45
+#define PCIE_H_CLK 46
+#define SFAB_CLK_SRC 47
+#define MAHB0_CLK 48
+#define Q6SW_CLK_SRC 49
+#define Q6SW_CLK 50
+#define Q6FW_CLK_SRC 51
+#define Q6FW_CLK 52
+#define SFAB_MSS_M_A_CLK 53
+#define SFAB_USB3_M_A_CLK 54
+#define SFAB_LPASS_Q6_A_CLK 55
+#define SFAB_AFAB_M_A_CLK 56
+#define AFAB_SFAB_M0_A_CLK 57
+#define AFAB_SFAB_M1_A_CLK 58
+#define SFAB_SATA_S_H_CLK 59
+#define DFAB_CLK_SRC 60
+#define DFAB_CLK 61
+#define SFAB_DFAB_M_A_CLK 62
+#define DFAB_SFAB_M_A_CLK 63
+#define DFAB_SWAY0_H_CLK 64
+#define DFAB_SWAY1_H_CLK 65
+#define DFAB_ARB0_H_CLK 66
+#define DFAB_ARB1_H_CLK 67
+#define PPSS_H_CLK 68
+#define PPSS_PROC_CLK 69
+#define PPSS_TIMER0_CLK 70
+#define PPSS_TIMER1_CLK 71
+#define PMEM_A_CLK 72
+#define DMA_BAM_H_CLK 73
+#define SIC_H_CLK 74
+#define SPS_TIC_H_CLK 75
+#define SLIMBUS_H_CLK 76
+#define SLIMBUS_XO_SRC_CLK 77
+#define CFPB_2X_CLK_SRC 78
+#define CFPB_CLK 79
+#define CFPB0_H_CLK 80
+#define CFPB1_H_CLK 81
+#define CFPB2_H_CLK 82
+#define SFAB_CFPB_M_H_CLK 83
+#define CFPB_MASTER_H_CLK 84
+#define SFAB_CFPB_S_H_CLK 85
+#define CFPB_SPLITTER_H_CLK 86
+#define TSIF_H_CLK 87
+#define TSIF_INACTIVITY_TIMERS_CLK 88
+#define TSIF_REF_SRC 89
+#define TSIF_REF_CLK 90
+#define CE1_H_CLK 91
+#define CE1_CORE_CLK 92
+#define CE1_SLEEP_CLK 93
+#define CE2_H_CLK 94
+#define CE2_CORE_CLK 95
+#define SFPB_H_CLK_SRC 97
+#define SFPB_H_CLK 98
+#define SFAB_SFPB_M_H_CLK 99
+#define SFAB_SFPB_S_H_CLK 100
+#define RPM_PROC_CLK 101
+#define RPM_BUS_H_CLK 102
+#define RPM_SLEEP_CLK 103
+#define RPM_TIMER_CLK 104
+#define RPM_MSG_RAM_H_CLK 105
+#define PMIC_ARB0_H_CLK 106
+#define PMIC_ARB1_H_CLK 107
+#define PMIC_SSBI2_SRC 108
+#define PMIC_SSBI2_CLK 109
+#define SDC1_H_CLK 110
+#define SDC2_H_CLK 111
+#define SDC3_H_CLK 112
+#define SDC4_H_CLK 113
+#define SDC5_H_CLK 114
+#define SDC1_SRC 115
+#define SDC2_SRC 116
+#define SDC3_SRC 117
+#define SDC4_SRC 118
+#define SDC5_SRC 119
+#define SDC1_CLK 120
+#define SDC2_CLK 121
+#define SDC3_CLK 122
+#define SDC4_CLK 123
+#define SDC5_CLK 124
+#define DFAB_A2_H_CLK 125
+#define USB_HS1_H_CLK 126
+#define USB_HS1_XCVR_SRC 127
+#define USB_HS1_XCVR_CLK 128
+#define USB_HSIC_H_CLK 129
+#define USB_HSIC_XCVR_FS_SRC 130
+#define USB_HSIC_XCVR_FS_CLK 131
+#define USB_HSIC_SYSTEM_CLK_SRC 132
+#define USB_HSIC_SYSTEM_CLK 133
+#define CFPB0_C0_H_CLK 134
+#define CFPB0_C1_H_CLK 135
+#define CFPB0_D0_H_CLK 136
+#define CFPB0_D1_H_CLK 137
+#define USB_FS1_H_CLK 138
+#define USB_FS1_XCVR_FS_SRC 139
+#define USB_FS1_XCVR_FS_CLK 140
+#define USB_FS1_SYSTEM_CLK 141
+#define USB_FS2_H_CLK 142
+#define USB_FS2_XCVR_FS_SRC 143
+#define USB_FS2_XCVR_FS_CLK 144
+#define USB_FS2_SYSTEM_CLK 145
+#define GSBI_COMMON_SIM_SRC 146
+#define GSBI1_H_CLK 147
+#define GSBI2_H_CLK 148
+#define GSBI3_H_CLK 149
+#define GSBI4_H_CLK 150
+#define GSBI5_H_CLK 151
+#define GSBI6_H_CLK 152
+#define GSBI7_H_CLK 153
+#define GSBI8_H_CLK 154
+#define GSBI9_H_CLK 155
+#define GSBI10_H_CLK 156
+#define GSBI11_H_CLK 157
+#define GSBI12_H_CLK 158
+#define GSBI1_UART_SRC 159
+#define GSBI1_UART_CLK 160
+#define GSBI2_UART_SRC 161
+#define GSBI2_UART_CLK 162
+#define GSBI3_UART_SRC 163
+#define GSBI3_UART_CLK 164
+#define GSBI4_UART_SRC 165
+#define GSBI4_UART_CLK 166
+#define GSBI5_UART_SRC 167
+#define GSBI5_UART_CLK 168
+#define GSBI6_UART_SRC 169
+#define GSBI6_UART_CLK 170
+#define GSBI7_UART_SRC 171
+#define GSBI7_UART_CLK 172
+#define GSBI8_UART_SRC 173
+#define GSBI8_UART_CLK 174
+#define GSBI9_UART_SRC 175
+#define GSBI9_UART_CLK 176
+#define GSBI10_UART_SRC 177
+#define GSBI10_UART_CLK 178
+#define GSBI11_UART_SRC 179
+#define GSBI11_UART_CLK 180
+#define GSBI12_UART_SRC 181
+#define GSBI12_UART_CLK 182
+#define GSBI1_QUP_SRC 183
+#define GSBI1_QUP_CLK 184
+#define GSBI2_QUP_SRC 185
+#define GSBI2_QUP_CLK 186
+#define GSBI3_QUP_SRC 187
+#define GSBI3_QUP_CLK 188
+#define GSBI4_QUP_SRC 189
+#define GSBI4_QUP_CLK 190
+#define GSBI5_QUP_SRC 191
+#define GSBI5_QUP_CLK 192
+#define GSBI6_QUP_SRC 193
+#define GSBI6_QUP_CLK 194
+#define GSBI7_QUP_SRC 195
+#define GSBI7_QUP_CLK 196
+#define GSBI8_QUP_SRC 197
+#define GSBI8_QUP_CLK 198
+#define GSBI9_QUP_SRC 199
+#define GSBI9_QUP_CLK 200
+#define GSBI10_QUP_SRC 201
+#define GSBI10_QUP_CLK 202
+#define GSBI11_QUP_SRC 203
+#define GSBI11_QUP_CLK 204
+#define GSBI12_QUP_SRC 205
+#define GSBI12_QUP_CLK 206
+#define GSBI1_SIM_CLK 207
+#define GSBI2_SIM_CLK 208
+#define GSBI3_SIM_CLK 209
+#define GSBI4_SIM_CLK 210
+#define GSBI5_SIM_CLK 211
+#define GSBI6_SIM_CLK 212
+#define GSBI7_SIM_CLK 213
+#define GSBI8_SIM_CLK 214
+#define GSBI9_SIM_CLK 215
+#define GSBI10_SIM_CLK 216
+#define GSBI11_SIM_CLK 217
+#define GSBI12_SIM_CLK 218
+#define USB_HSIC_HSIC_CLK_SRC 219
+#define USB_HSIC_HSIC_CLK 220
+#define USB_HSIC_HSIO_CAL_CLK 221
+#define SPDM_CFG_H_CLK 222
+#define SPDM_MSTR_H_CLK 223
+#define SPDM_FF_CLK_SRC 224
+#define SPDM_FF_CLK 225
+#define SEC_CTRL_CLK 226
+#define SEC_CTRL_ACC_CLK_SRC 227
+#define SEC_CTRL_ACC_CLK 228
+#define TLMM_H_CLK 229
+#define TLMM_CLK 230
+#define SFAB_MSS_S_H_CLK 231
+#define MSS_SLP_CLK 232
+#define MSS_Q6SW_JTAG_CLK 233
+#define MSS_Q6FW_JTAG_CLK 234
+#define MSS_S_H_CLK 235
+#define MSS_CXO_SRC_CLK 236
+#define SATA_H_CLK 237
+#define SATA_CLK_SRC 238
+#define SATA_RXOOB_CLK 239
+#define SATA_PMALIVE_CLK 240
+#define SATA_PHY_REF_CLK 241
+#define TSSC_CLK_SRC 242
+#define TSSC_CLK 243
+#define PDM_SRC 244
+#define PDM_CLK 245
+#define GP0_SRC 246
+#define GP0_CLK 247
+#define GP1_SRC 248
+#define GP1_CLK 249
+#define GP2_SRC 250
+#define GP2_CLK 251
+#define MPM_CLK 252
+#define EBI1_CLK_SRC 253
+#define EBI1_CH0_CLK 254
+#define EBI1_CH1_CLK 255
+#define EBI1_2X_CLK 256
+#define EBI1_CH0_DQ_CLK 257
+#define EBI1_CH1_DQ_CLK 258
+#define EBI1_CH0_CA_CLK 259
+#define EBI1_CH1_CA_CLK 260
+#define EBI1_XO_CLK 261
+#define SFAB_SMPSS_S_H_CLK 262
+#define PRNG_SRC 263
+#define PRNG_CLK 264
+#define PXO_SRC 265
+#define LPASS_CXO_CLK 266
+#define LPASS_PXO_CLK 267
+#define SPDM_CY_PORT0_CLK 268
+#define SPDM_CY_PORT1_CLK 269
+#define SPDM_CY_PORT2_CLK 270
+#define SPDM_CY_PORT3_CLK 271
+#define SPDM_CY_PORT4_CLK 272
+#define SPDM_CY_PORT5_CLK 273
+#define SPDM_CY_PORT6_CLK 274
+#define SPDM_CY_PORT7_CLK 275
+#define PLL0 276
+#define PLL0_VOTE 277
+#define PLL3 278
+#define PLL3_VOTE 279
+#define PLL4_VOTE 280
+#define PLL5 281
+#define PLL5_VOTE 282
+#define PLL6 283
+#define PLL6_VOTE 284
+#define PLL7_VOTE 285
+#define PLL8 286
+#define PLL8_VOTE 287
+#define PLL9 288
+#define PLL10 289
+#define PLL11 290
+#define PLL12 291
+#define PLL13 292
+#define PLL14 293
+#define PLL14_VOTE 294
+#define USB_HS3_H_CLK 295
+#define USB_HS3_XCVR_SRC 296
+#define USB_HS3_XCVR_CLK 297
+#define USB_HS4_H_CLK 298
+#define USB_HS4_XCVR_SRC 299
+#define USB_HS4_XCVR_CLK 300
+#define SATA_PHY_CFG_CLK 301
+#define SATA_A_CLK 302
+#define CE3_SRC 303
+#define CE3_CORE_CLK 304
+#define CE3_H_CLK 305
+#define USB_HS1_SYSTEM_CLK_SRC 306
+#define USB_HS1_SYSTEM_CLK 307
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h
index 6f814db11c7e..1828723eb621 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8996.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h
@@ -335,6 +335,11 @@
#define GCC_MSMPU_BCR 98
#define GCC_MSS_Q6_BCR 99
#define GCC_QREFS_VBG_CAL_BCR 100
+#define GCC_PCIE_PHY_COM_BCR 101
+#define GCC_PCIE_PHY_COM_NOCSR_BCR 102
+#define GCC_USB3_PHY_BCR 103
+#define GCC_USB3PHY_PHY_BCR 104
+
/* Indexes for GDSCs */
#define AGGRE0_NOC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,lcc-mdm9615.h b/include/dt-bindings/clock/qcom,lcc-mdm9615.h
new file mode 100644
index 000000000000..cac963a2fdcb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lcc-mdm9615.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_MDM9615_H
+#define _DT_BINDINGS_CLK_LCC_MDM9615_H
+
+#define PLL4 0
+#define MI2S_OSR_SRC 1
+#define MI2S_OSR_CLK 2
+#define MI2S_DIV_CLK 3
+#define MI2S_BIT_DIV_CLK 4
+#define MI2S_BIT_CLK 5
+#define PCM_SRC 6
+#define PCM_CLK_OUT 7
+#define PCM_CLK 8
+#define SLIMBUS_SRC 9
+#define AUDIO_SLIMBUS_CLK 10
+#define SPS_SLIMBUS_CLK 11
+#define CODEC_I2S_MIC_OSR_SRC 12
+#define CODEC_I2S_MIC_OSR_CLK 13
+#define CODEC_I2S_MIC_DIV_CLK 14
+#define CODEC_I2S_MIC_BIT_DIV_CLK 15
+#define CODEC_I2S_MIC_BIT_CLK 16
+#define SPARE_I2S_MIC_OSR_SRC 17
+#define SPARE_I2S_MIC_OSR_CLK 18
+#define SPARE_I2S_MIC_DIV_CLK 19
+#define SPARE_I2S_MIC_BIT_DIV_CLK 20
+#define SPARE_I2S_MIC_BIT_CLK 21
+#define CODEC_I2S_SPKR_OSR_SRC 22
+#define CODEC_I2S_SPKR_OSR_CLK 23
+#define CODEC_I2S_SPKR_DIV_CLK 24
+#define CODEC_I2S_SPKR_BIT_DIV_CLK 25
+#define CODEC_I2S_SPKR_BIT_CLK 26
+#define SPARE_I2S_SPKR_OSR_SRC 27
+#define SPARE_I2S_SPKR_OSR_CLK 28
+#define SPARE_I2S_SPKR_DIV_CLK 29
+#define SPARE_I2S_SPKR_BIT_DIV_CLK 30
+#define SPARE_I2S_SPKR_BIT_CLK 31
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8996.h b/include/dt-bindings/clock/qcom,mmcc-msm8996.h
index 7d3a7fa1a1bd..5abc445ad815 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8996.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8996.h
@@ -298,5 +298,6 @@
#define FD_GDSC 12
#define MDSS_GDSC 13
#define GPU_GX_GDSC 14
+#define MMAGIC_BIMC_GDSC 15
#endif
diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h
index 5128f4d94f44..3cd813896d08 100644
--- a/include/dt-bindings/clock/r7s72100-clock.h
+++ b/include/dt-bindings/clock/r7s72100-clock.h
@@ -25,6 +25,9 @@
#define R7S72100_CLK_SCIF6 1
#define R7S72100_CLK_SCIF7 0
+/* MSTP7 */
+#define R7S72100_CLK_ETHER 4
+
/* MSTP9 */
#define R7S72100_CLK_I2C0 7
#define R7S72100_CLK_I2C1 6
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index a3491ba2f6ec..9d02f5317c7c 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -67,6 +67,7 @@
#define R8A7794_CLK_IRQC 7
/* MSTP5 */
+#define R8A7794_CLK_AUDIO_DMAC0 2
#define R8A7794_CLK_PWM 23
/* MSTP7 */
@@ -107,6 +108,30 @@
#define R8A7794_CLK_I2C1 30
#define R8A7794_CLK_I2C0 31
+/* MSTP10 */
+#define R8A7794_CLK_SSI_ALL 5
+#define R8A7794_CLK_SSI9 6
+#define R8A7794_CLK_SSI8 7
+#define R8A7794_CLK_SSI7 8
+#define R8A7794_CLK_SSI6 9
+#define R8A7794_CLK_SSI5 10
+#define R8A7794_CLK_SSI4 11
+#define R8A7794_CLK_SSI3 12
+#define R8A7794_CLK_SSI2 13
+#define R8A7794_CLK_SSI1 14
+#define R8A7794_CLK_SSI0 15
+#define R8A7794_CLK_SCU_ALL 17
+#define R8A7794_CLK_SCU_DVC1 18
+#define R8A7794_CLK_SCU_DVC0 19
+#define R8A7794_CLK_SCU_CTU1_MIX1 20
+#define R8A7794_CLK_SCU_CTU0_MIX0 21
+#define R8A7794_CLK_SCU_SRC6 25
+#define R8A7794_CLK_SCU_SRC5 26
+#define R8A7794_CLK_SCU_SRC4 27
+#define R8A7794_CLK_SCU_SRC3 28
+#define R8A7794_CLK_SCU_SRC2 29
+#define R8A7794_CLK_SCU_SRC1 30
+
/* MSTP11 */
#define R8A7794_CLK_SCIFA3 6
#define R8A7794_CLK_SCIFA4 7
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
index 50a44cffb070..220a60f20d3b 100644
--- a/include/dt-bindings/clock/rk3399-cru.h
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -131,12 +131,15 @@
#define SCLK_DPHY_RX0_CFG 165
#define SCLK_RMII_SRC 166
#define SCLK_PCIEPHY_REF100M 167
+#define SCLK_DDRC 168
#define DCLK_VOP0 180
#define DCLK_VOP1 181
#define DCLK_VOP0_DIV 182
#define DCLK_VOP1_DIV 183
#define DCLK_M0_PERILP 184
+#define DCLK_VOP0_FRAC 185
+#define DCLK_VOP1_FRAC 186
#define FCLK_CM0S 190
diff --git a/include/dt-bindings/clock/sun6i-a31-ccu.h b/include/dt-bindings/clock/sun6i-a31-ccu.h
new file mode 100644
index 000000000000..4482530fb6f5
--- /dev/null
+++ b/include/dt-bindings/clock/sun6i-a31-ccu.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN6I_A31_H_
+#define _DT_BINDINGS_CLK_SUN6I_A31_H_
+
+#define CLK_PLL_PERIPH 10
+
+#define CLK_CPU 18
+
+#define CLK_AHB1_MIPIDSI 23
+#define CLK_AHB1_SS 24
+#define CLK_AHB1_DMA 25
+#define CLK_AHB1_MMC0 26
+#define CLK_AHB1_MMC1 27
+#define CLK_AHB1_MMC2 28
+#define CLK_AHB1_MMC3 29
+#define CLK_AHB1_NAND1 30
+#define CLK_AHB1_NAND0 31
+#define CLK_AHB1_SDRAM 32
+#define CLK_AHB1_EMAC 33
+#define CLK_AHB1_TS 34
+#define CLK_AHB1_HSTIMER 35
+#define CLK_AHB1_SPI0 36
+#define CLK_AHB1_SPI1 37
+#define CLK_AHB1_SPI2 38
+#define CLK_AHB1_SPI3 39
+#define CLK_AHB1_OTG 40
+#define CLK_AHB1_EHCI0 41
+#define CLK_AHB1_EHCI1 42
+#define CLK_AHB1_OHCI0 43
+#define CLK_AHB1_OHCI1 44
+#define CLK_AHB1_OHCI2 45
+#define CLK_AHB1_VE 46
+#define CLK_AHB1_LCD0 47
+#define CLK_AHB1_LCD1 48
+#define CLK_AHB1_CSI 49
+#define CLK_AHB1_HDMI 50
+#define CLK_AHB1_BE0 51
+#define CLK_AHB1_BE1 52
+#define CLK_AHB1_FE0 53
+#define CLK_AHB1_FE1 54
+#define CLK_AHB1_MP 55
+#define CLK_AHB1_GPU 56
+#define CLK_AHB1_DEU0 57
+#define CLK_AHB1_DEU1 58
+#define CLK_AHB1_DRC0 59
+#define CLK_AHB1_DRC1 60
+
+#define CLK_APB1_CODEC 61
+#define CLK_APB1_SPDIF 62
+#define CLK_APB1_DIGITAL_MIC 63
+#define CLK_APB1_PIO 64
+#define CLK_APB1_DAUDIO0 65
+#define CLK_APB1_DAUDIO1 66
+
+#define CLK_APB2_I2C0 67
+#define CLK_APB2_I2C1 68
+#define CLK_APB2_I2C2 69
+#define CLK_APB2_I2C3 70
+#define CLK_APB2_UART0 71
+#define CLK_APB2_UART1 72
+#define CLK_APB2_UART2 73
+#define CLK_APB2_UART3 74
+#define CLK_APB2_UART4 75
+#define CLK_APB2_UART5 76
+
+#define CLK_NAND0 77
+#define CLK_NAND1 78
+#define CLK_MMC0 79
+#define CLK_MMC0_SAMPLE 80
+#define CLK_MMC0_OUTPUT 81
+#define CLK_MMC1 82
+#define CLK_MMC1_SAMPLE 83
+#define CLK_MMC1_OUTPUT 84
+#define CLK_MMC2 85
+#define CLK_MMC2_SAMPLE 86
+#define CLK_MMC2_OUTPUT 87
+#define CLK_MMC3 88
+#define CLK_MMC3_SAMPLE 89
+#define CLK_MMC3_OUTPUT 90
+#define CLK_TS 91
+#define CLK_SS 92
+#define CLK_SPI0 93
+#define CLK_SPI1 94
+#define CLK_SPI2 95
+#define CLK_SPI3 96
+#define CLK_DAUDIO0 97
+#define CLK_DAUDIO1 98
+#define CLK_SPDIF 99
+#define CLK_USB_PHY0 100
+#define CLK_USB_PHY1 101
+#define CLK_USB_PHY2 102
+#define CLK_USB_OHCI0 103
+#define CLK_USB_OHCI1 104
+#define CLK_USB_OHCI2 105
+
+#define CLK_DRAM_VE 110
+#define CLK_DRAM_CSI_ISP 111
+#define CLK_DRAM_TS 112
+#define CLK_DRAM_DRC0 113
+#define CLK_DRAM_DRC1 114
+#define CLK_DRAM_DEU0 115
+#define CLK_DRAM_DEU1 116
+#define CLK_DRAM_FE0 117
+#define CLK_DRAM_FE1 118
+#define CLK_DRAM_BE0 119
+#define CLK_DRAM_BE1 120
+#define CLK_DRAM_MP 121
+
+#define CLK_BE0 122
+#define CLK_BE1 123
+#define CLK_FE0 124
+#define CLK_FE1 125
+#define CLK_MP 126
+#define CLK_LCD0_CH0 127
+#define CLK_LCD1_CH0 128
+#define CLK_LCD0_CH1 129
+#define CLK_LCD1_CH1 130
+#define CLK_CSI0_SCLK 131
+#define CLK_CSI0_MCLK 132
+#define CLK_CSI1_MCLK 133
+#define CLK_VE 134
+#define CLK_CODEC 135
+#define CLK_AVS 136
+#define CLK_DIGITAL_MIC 137
+#define CLK_HDMI 138
+#define CLK_HDMI_DDC 139
+#define CLK_PS 140
+
+#define CLK_MIPI_DSI 143
+#define CLK_MIPI_DSI_DPHY 144
+#define CLK_MIPI_CSI_DPHY 145
+#define CLK_IEP_DRC0 146
+#define CLK_IEP_DRC1 147
+#define CLK_IEP_DEU0 148
+#define CLK_IEP_DEU1 149
+#define CLK_GPU_CORE 150
+#define CLK_GPU_MEMORY 151
+#define CLK_GPU_HYD 152
+#define CLK_ATS 153
+#define CLK_TRACE 154
+
+#define CLK_OUT_A 155
+#define CLK_OUT_B 156
+#define CLK_OUT_C 157
+
+#endif /* _DT_BINDINGS_CLK_SUN6I_A31_H_ */
diff --git a/include/dt-bindings/clock/sun8i-a23-a33-ccu.h b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
new file mode 100644
index 000000000000..f8222b6b2cc3
--- /dev/null
+++ b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN8I_A23_A33_H_
+#define _DT_BINDINGS_CLK_SUN8I_A23_A33_H_
+
+#define CLK_CPUX 18
+
+#define CLK_BUS_MIPI_DSI 23
+#define CLK_BUS_SS 24
+#define CLK_BUS_DMA 25
+#define CLK_BUS_MMC0 26
+#define CLK_BUS_MMC1 27
+#define CLK_BUS_MMC2 28
+#define CLK_BUS_NAND 29
+#define CLK_BUS_DRAM 30
+#define CLK_BUS_HSTIMER 31
+#define CLK_BUS_SPI0 32
+#define CLK_BUS_SPI1 33
+#define CLK_BUS_OTG 34
+#define CLK_BUS_EHCI 35
+#define CLK_BUS_OHCI 36
+#define CLK_BUS_VE 37
+#define CLK_BUS_LCD 38
+#define CLK_BUS_CSI 39
+#define CLK_BUS_DE_BE 40
+#define CLK_BUS_DE_FE 41
+#define CLK_BUS_GPU 42
+#define CLK_BUS_MSGBOX 43
+#define CLK_BUS_SPINLOCK 44
+#define CLK_BUS_DRC 45
+#define CLK_BUS_SAT 46
+#define CLK_BUS_CODEC 47
+#define CLK_BUS_PIO 48
+#define CLK_BUS_I2S0 49
+#define CLK_BUS_I2S1 50
+#define CLK_BUS_I2C0 51
+#define CLK_BUS_I2C1 52
+#define CLK_BUS_I2C2 53
+#define CLK_BUS_UART0 54
+#define CLK_BUS_UART1 55
+#define CLK_BUS_UART2 56
+#define CLK_BUS_UART3 57
+#define CLK_BUS_UART4 58
+#define CLK_NAND 59
+#define CLK_MMC0 60
+#define CLK_MMC0_SAMPLE 61
+#define CLK_MMC0_OUTPUT 62
+#define CLK_MMC1 63
+#define CLK_MMC1_SAMPLE 64
+#define CLK_MMC1_OUTPUT 65
+#define CLK_MMC2 66
+#define CLK_MMC2_SAMPLE 67
+#define CLK_MMC2_OUTPUT 68
+#define CLK_SS 69
+#define CLK_SPI0 70
+#define CLK_SPI1 71
+#define CLK_I2S0 72
+#define CLK_I2S1 73
+#define CLK_USB_PHY0 74
+#define CLK_USB_PHY1 75
+#define CLK_USB_HSIC 76
+#define CLK_USB_HSIC_12M 77
+#define CLK_USB_OHCI 78
+
+#define CLK_DRAM_VE 80
+#define CLK_DRAM_CSI 81
+#define CLK_DRAM_DRC 82
+#define CLK_DRAM_DE_FE 83
+#define CLK_DRAM_DE_BE 84
+#define CLK_DE_BE 85
+#define CLK_DE_FE 86
+#define CLK_LCD_CH0 87
+#define CLK_LCD_CH1 88
+#define CLK_CSI_SCLK 89
+#define CLK_CSI_MCLK 90
+#define CLK_VE 91
+#define CLK_AC_DIG 92
+#define CLK_AC_DIG_4X 93
+#define CLK_AVS 94
+
+#define CLK_DSI_SCLK 96
+#define CLK_DSI_DPHY 97
+#define CLK_DRC 98
+#define CLK_GPU 99
+#define CLK_ATS 100
+
+#endif /* _DT_BINDINGS_CLK_SUN8I_A23_A33_H_ */
diff --git a/include/dt-bindings/clock/zx296718-clock.h b/include/dt-bindings/clock/zx296718-clock.h
new file mode 100644
index 000000000000..822d52385080
--- /dev/null
+++ b/include/dt-bindings/clock/zx296718-clock.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2015 - 2016 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __DT_BINDINGS_CLOCK_ZX296718_H
+#define __DT_BINDINGS_CLOCK_ZX296718_H
+
+/* PLL */
+#define ZX296718_PLL_CPU 1
+#define ZX296718_PLL_MAC 2
+#define ZX296718_PLL_MM0 3
+#define ZX296718_PLL_MM1 4
+#define ZX296718_PLL_VGA 5
+#define ZX296718_PLL_DDR 6
+#define ZX296718_PLL_AUDIO 7
+#define ZX296718_PLL_HSIC 8
+#define CPU_DBG_GATE 9
+#define A72_GATE 10
+#define CPU_PERI_GATE 11
+#define A53_GATE 12
+#define DDR1_GATE 13
+#define DDR0_GATE 14
+#define SD1_WCLK 15
+#define SD1_AHB 16
+#define SD0_WCLK 17
+#define SD0_AHB 18
+#define EMMC_WCLK 19
+#define EMMC_NAND_AXI 20
+#define NAND_WCLK 21
+#define EMMC_NAND_AHB 22
+#define LSP1_148M5 23
+#define LSP1_99M 24
+#define LSP1_24M 25
+#define LSP0_74M25 26
+#define LSP0_32K 27
+#define LSP0_148M5 28
+#define LSP0_99M 29
+#define LSP0_24M 30
+#define DEMUX_AXI 31
+#define DEMUX_APB 32
+#define DEMUX_148M5 33
+#define DEMUX_108M 34
+#define AUDIO_APB 35
+#define AUDIO_99M 36
+#define AUDIO_24M 37
+#define AUDIO_16M384 38
+#define AUDIO_32K 39
+#define WDT_WCLK 40
+#define TIMER_WCLK 41
+#define VDE_ACLK 42
+#define VCE_ACLK 43
+#define HDE_ACLK 44
+#define GPU_ACLK 45
+#define SAPPU_ACLK 46
+#define SAPPU_WCLK 47
+#define VOU_ACLK 48
+#define VOU_MAIN_WCLK 49
+#define VOU_AUX_WCLK 50
+#define VOU_PPU_WCLK 51
+#define MIPI_CFG_CLK 52
+#define VGA_I2C_WCLK 53
+#define MIPI_REF_CLK 54
+#define HDMI_OSC_CEC 55
+#define HDMI_OSC_CLK 56
+#define HDMI_XCLK 57
+#define VIU_M0_ACLK 58
+#define VIU_M1_ACLK 59
+#define VIU_WCLK 60
+#define VIU_JPEG_WCLK 61
+#define VIU_CFG_CLK 62
+#define TS_SYS_WCLK 63
+#define TS_SYS_108M 64
+#define USB20_HCLK 65
+#define USB20_PHY_CLK 66
+#define USB21_HCLK 67
+#define USB21_PHY_CLK 68
+#define GMAC_RMIICLK 69
+#define GMAC_PCLK 70
+#define GMAC_ACLK 71
+#define GMAC_RFCLK 72
+#define TEMPSENSOR_GATE 73
+
+#define TOP_NR_CLKS 74
+
+
+#define LSP0_TIMER3_PCLK 1
+#define LSP0_TIMER3_WCLK 2
+#define LSP0_TIMER4_PCLK 3
+#define LSP0_TIMER4_WCLK 4
+#define LSP0_TIMER5_PCLK 5
+#define LSP0_TIMER5_WCLK 6
+#define LSP0_UART3_PCLK 7
+#define LSP0_UART3_WCLK 8
+#define LSP0_UART1_PCLK 9
+#define LSP0_UART1_WCLK 10
+#define LSP0_UART2_PCLK 11
+#define LSP0_UART2_WCLK 12
+#define LSP0_SPIFC0_PCLK 13
+#define LSP0_SPIFC0_WCLK 14
+#define LSP0_I2C4_PCLK 15
+#define LSP0_I2C4_WCLK 16
+#define LSP0_I2C5_PCLK 17
+#define LSP0_I2C5_WCLK 18
+#define LSP0_SSP0_PCLK 19
+#define LSP0_SSP0_WCLK 20
+#define LSP0_SSP1_PCLK 21
+#define LSP0_SSP1_WCLK 22
+#define LSP0_USIM_PCLK 23
+#define LSP0_USIM_WCLK 24
+#define LSP0_GPIO_PCLK 25
+#define LSP0_GPIO_WCLK 26
+#define LSP0_I2C3_PCLK 27
+#define LSP0_I2C3_WCLK 28
+
+#define LSP0_NR_CLKS 29
+
+
+#define LSP1_UART4_PCLK 1
+#define LSP1_UART4_WCLK 2
+#define LSP1_UART5_PCLK 3
+#define LSP1_UART5_WCLK 4
+#define LSP1_PWM_PCLK 5
+#define LSP1_PWM_WCLK 6
+#define LSP1_I2C2_PCLK 7
+#define LSP1_I2C2_WCLK 8
+#define LSP1_SSP2_PCLK 9
+#define LSP1_SSP2_WCLK 10
+#define LSP1_SSP3_PCLK 11
+#define LSP1_SSP3_WCLK 12
+#define LSP1_SSP4_PCLK 13
+#define LSP1_SSP4_WCLK 14
+#define LSP1_USIM1_PCLK 15
+#define LSP1_USIM1_WCLK 16
+
+#define LSP1_NR_CLKS 17
+
+
+#define AUDIO_I2S0_WCLK 1
+#define AUDIO_I2S0_PCLK 2
+#define AUDIO_I2S1_WCLK 3
+#define AUDIO_I2S1_PCLK 4
+#define AUDIO_I2S2_WCLK 5
+#define AUDIO_I2S2_PCLK 6
+#define AUDIO_I2S3_WCLK 7
+#define AUDIO_I2S3_PCLK 8
+#define AUDIO_I2C0_WCLK 9
+#define AUDIO_I2C0_PCLK 10
+#define AUDIO_SPDIF0_WCLK 11
+#define AUDIO_SPDIF0_PCLK 12
+#define AUDIO_SPDIF1_WCLK 13
+#define AUDIO_SPDIF1_PCLK 14
+#define AUDIO_TIMER_WCLK 15
+#define AUDIO_TIMER_PCLK 16
+#define AUDIO_TDM_WCLK 17
+#define AUDIO_TDM_PCLK 18
+#define AUDIO_TS_PCLK 19
+
+#define AUDIO_NR_CLKS 20
+
+#endif
diff --git a/include/dt-bindings/display/tda998x.h b/include/dt-bindings/display/tda998x.h
new file mode 100644
index 000000000000..34757a3847ef
--- /dev/null
+++ b/include/dt-bindings/display/tda998x.h
@@ -0,0 +1,7 @@
+#ifndef _DT_BINDINGS_TDA998X_H
+#define _DT_BINDINGS_TDA998X_H
+
+#define TDA998x_SPDIF 1
+#define TDA998x_I2S 2
+
+#endif /*_DT_BINDINGS_TDA998X_H */
diff --git a/include/dt-bindings/memory/mt2701-larb-port.h b/include/dt-bindings/memory/mt2701-larb-port.h
index 78f66786da91..6764d7447422 100644
--- a/include/dt-bindings/memory/mt2701-larb-port.h
+++ b/include/dt-bindings/memory/mt2701-larb-port.h
@@ -26,7 +26,7 @@
#define LARB0_PORT_OFFSET 0
#define LARB1_PORT_OFFSET 11
#define LARB2_PORT_OFFSET 21
-#define LARB3_PORT_OFFSET 43
+#define LARB3_PORT_OFFSET 44
#define MT2701_M4U_ID_LARB0(port) ((port) + LARB0_PORT_OFFSET)
#define MT2701_M4U_ID_LARB1(port) ((port) + LARB1_PORT_OFFSET)
diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h
index 13a9d4bf2662..54aef5e21763 100644
--- a/include/dt-bindings/mfd/qcom-rpm.h
+++ b/include/dt-bindings/mfd/qcom-rpm.h
@@ -147,6 +147,28 @@
#define QCOM_RPM_SMB208_S1b 137
#define QCOM_RPM_SMB208_S2a 138
#define QCOM_RPM_SMB208_S2b 139
+#define QCOM_RPM_PM8018_SMPS1 140
+#define QCOM_RPM_PM8018_SMPS2 141
+#define QCOM_RPM_PM8018_SMPS3 142
+#define QCOM_RPM_PM8018_SMPS4 143
+#define QCOM_RPM_PM8018_SMPS5 144
+#define QCOM_RPM_PM8018_LDO1 145
+#define QCOM_RPM_PM8018_LDO2 146
+#define QCOM_RPM_PM8018_LDO3 147
+#define QCOM_RPM_PM8018_LDO4 148
+#define QCOM_RPM_PM8018_LDO5 149
+#define QCOM_RPM_PM8018_LDO6 150
+#define QCOM_RPM_PM8018_LDO7 151
+#define QCOM_RPM_PM8018_LDO8 152
+#define QCOM_RPM_PM8018_LDO9 153
+#define QCOM_RPM_PM8018_LDO10 154
+#define QCOM_RPM_PM8018_LDO11 155
+#define QCOM_RPM_PM8018_LDO12 156
+#define QCOM_RPM_PM8018_LDO13 157
+#define QCOM_RPM_PM8018_LDO14 158
+#define QCOM_RPM_PM8018_LVS1 159
+#define QCOM_RPM_PM8018_NCP 160
+#define QCOM_RPM_VOLTAGE_CORNER 161
/*
* Constants used to select force mode for regulators.
diff --git a/include/dt-bindings/mfd/stm32f4-rcc.h b/include/dt-bindings/mfd/stm32f4-rcc.h
new file mode 100644
index 000000000000..e98942dc0d44
--- /dev/null
+++ b/include/dt-bindings/mfd/stm32f4-rcc.h
@@ -0,0 +1,98 @@
+/*
+ * This header provides constants for the STM32F4 RCC IP
+ */
+
+#ifndef _DT_BINDINGS_MFD_STM32F4_RCC_H
+#define _DT_BINDINGS_MFD_STM32F4_RCC_H
+
+/* AHB1 */
+#define STM32F4_RCC_AHB1_GPIOA 0
+#define STM32F4_RCC_AHB1_GPIOB 1
+#define STM32F4_RCC_AHB1_GPIOC 2
+#define STM32F4_RCC_AHB1_GPIOD 3
+#define STM32F4_RCC_AHB1_GPIOE 4
+#define STM32F4_RCC_AHB1_GPIOF 5
+#define STM32F4_RCC_AHB1_GPIOG 6
+#define STM32F4_RCC_AHB1_GPIOH 7
+#define STM32F4_RCC_AHB1_GPIOI 8
+#define STM32F4_RCC_AHB1_GPIOJ 9
+#define STM32F4_RCC_AHB1_GPIOK 10
+#define STM32F4_RCC_AHB1_CRC 12
+#define STM32F4_RCC_AHB1_DMA1 21
+#define STM32F4_RCC_AHB1_DMA2 22
+#define STM32F4_RCC_AHB1_DMA2D 23
+#define STM32F4_RCC_AHB1_ETHMAC 25
+#define STM32F4_RCC_AHB1_OTGHS 29
+
+#define STM32F4_AHB1_RESET(bit) (STM32F4_RCC_AHB1_##bit + (0x10 * 8))
+#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit + (0x30 * 8))
+
+
+/* AHB2 */
+#define STM32F4_RCC_AHB2_DCMI 0
+#define STM32F4_RCC_AHB2_CRYP 4
+#define STM32F4_RCC_AHB2_HASH 5
+#define STM32F4_RCC_AHB2_RNG 6
+#define STM32F4_RCC_AHB2_OTGFS 7
+
+#define STM32F4_AHB2_RESET(bit) (STM32F4_RCC_AHB2_##bit + (0x14 * 8))
+#define STM32F4_AHB2_CLOCK(bit) (STM32F4_RCC_AHB2_##bit + (0x34 * 8))
+
+/* AHB3 */
+#define STM32F4_RCC_AHB3_FMC 0
+
+#define STM32F4_AHB3_RESET(bit) (STM32F4_RCC_AHB3_##bit + (0x18 * 8))
+#define STM32F4_AHB3_CLOCK(bit) (STM32F4_RCC_AHB3_##bit + (0x38 * 8))
+
+/* APB1 */
+#define STM32F4_RCC_APB1_TIM2 0
+#define STM32F4_RCC_APB1_TIM3 1
+#define STM32F4_RCC_APB1_TIM4 2
+#define STM32F4_RCC_APB1_TIM5 3
+#define STM32F4_RCC_APB1_TIM6 4
+#define STM32F4_RCC_APB1_TIM7 5
+#define STM32F4_RCC_APB1_TIM12 6
+#define STM32F4_RCC_APB1_TIM13 7
+#define STM32F4_RCC_APB1_TIM14 8
+#define STM32F4_RCC_APB1_WWDG 11
+#define STM32F4_RCC_APB1_SPI2 14
+#define STM32F4_RCC_APB1_SPI3 15
+#define STM32F4_RCC_APB1_UART2 17
+#define STM32F4_RCC_APB1_UART3 18
+#define STM32F4_RCC_APB1_UART4 19
+#define STM32F4_RCC_APB1_UART5 20
+#define STM32F4_RCC_APB1_I2C1 21
+#define STM32F4_RCC_APB1_I2C2 22
+#define STM32F4_RCC_APB1_I2C3 23
+#define STM32F4_RCC_APB1_CAN1 25
+#define STM32F4_RCC_APB1_CAN2 26
+#define STM32F4_RCC_APB1_PWR 28
+#define STM32F4_RCC_APB1_DAC 29
+#define STM32F4_RCC_APB1_UART7 30
+#define STM32F4_RCC_APB1_UART8 31
+
+#define STM32F4_APB1_RESET(bit) (STM32F4_RCC_APB1_##bit + (0x20 * 8))
+#define STM32F4_APB1_CLOCK(bit) (STM32F4_RCC_APB1_##bit + (0x40 * 8))
+
+/* APB2 */
+#define STM32F4_RCC_APB2_TIM1 0
+#define STM32F4_RCC_APB2_TIM8 1
+#define STM32F4_RCC_APB2_USART1 4
+#define STM32F4_RCC_APB2_USART6 5
+#define STM32F4_RCC_APB2_ADC 8
+#define STM32F4_RCC_APB2_SDIO 11
+#define STM32F4_RCC_APB2_SPI1 12
+#define STM32F4_RCC_APB2_SPI4 13
+#define STM32F4_RCC_APB2_SYSCFG 14
+#define STM32F4_RCC_APB2_TIM9 16
+#define STM32F4_RCC_APB2_TIM10 17
+#define STM32F4_RCC_APB2_TIM11 18
+#define STM32F4_RCC_APB2_SPI5 20
+#define STM32F4_RCC_APB2_SPI6 21
+#define STM32F4_RCC_APB2_SAI1 22
+#define STM32F4_RCC_APB2_LTDC 26
+
+#define STM32F4_APB2_RESET(bit) (STM32F4_RCC_APB2_##bit + (0x24 * 8))
+#define STM32F4_APB2_CLOCK(bit) (STM32F4_RCC_APB2_##bit + (0x44 * 8))
+
+#endif /* _DT_BINDINGS_MFD_STM32F4_RCC_H */
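
The RESET()/CLOCK() helpers above encode one DT cell as a global bit index: the RCC register byte offset (0x24 and 0x44 here correspond to the APB2 reset and clock-enable registers) is multiplied by 8 and the peripheral's bit position is added. A minimal standalone sketch of that arithmetic, reusing only the APB2 USART1 definitions from this header:

#include <stdio.h>

#define STM32F4_RCC_APB2_USART1	4
#define STM32F4_APB2_RESET(bit)	(STM32F4_RCC_APB2_##bit + (0x24 * 8))
#define STM32F4_APB2_CLOCK(bit)	(STM32F4_RCC_APB2_##bit + (0x44 * 8))

int main(void)
{
	/* 4 + 0x24 * 8 = 292 and 4 + 0x44 * 8 = 548 */
	printf("USART1 reset line: %d\n", STM32F4_APB2_RESET(USART1));
	printf("USART1 clock gate: %d\n", STM32F4_APB2_CLOCK(USART1));
	return 0;
}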
diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h
new file mode 100644
index 000000000000..2383dd20ff43
--- /dev/null
+++ b/include/dt-bindings/net/mscc-phy-vsc8531.h
@@ -0,0 +1,21 @@
+/*
+ * Device Tree constants for Microsemi VSC8531 PHY
+ *
+ * Author: Nagaraju Lakkaraju
+ *
+ * License: Dual MIT/GPL
+ * Copyright (c) 2016 Microsemi Corporation
+ */
+
+#ifndef _DT_BINDINGS_MSCC_VSC8531_H
+#define _DT_BINDINGS_MSCC_VSC8531_H
+
+/* MAC interface edge rate control: VDDMAC in millivolts */
+#define MSCC_VDDMAC_3300 3300
+#define MSCC_VDDMAC_2500 2500
+#define MSCC_VDDMAC_1800 1800
+#define MSCC_VDDMAC_1500 1500
+#define MSCC_VDDMAC_MAX 4
+#define MSCC_SLOWDOWN_MAX 8
+
+#endif
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
new file mode 100644
index 000000000000..6276eb785e2b
--- /dev/null
+++ b/include/dt-bindings/pinctrl/samsung.h
@@ -0,0 +1,57 @@
+/*
+ * Samsung's Exynos pinctrl bindings
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Author: Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__
+#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__
+
+#define EXYNOS_PIN_PULL_NONE 0
+#define EXYNOS_PIN_PULL_DOWN 1
+#define EXYNOS_PIN_PULL_UP 3
+
+#define S3C64XX_PIN_PULL_NONE 0
+#define S3C64XX_PIN_PULL_DOWN 1
+#define S3C64XX_PIN_PULL_UP 2
+
+/* Pin function in power down mode */
+#define EXYNOS_PIN_PDN_OUT0 0
+#define EXYNOS_PIN_PDN_OUT1 1
+#define EXYNOS_PIN_PDN_INPUT 2
+#define EXYNOS_PIN_PDN_PREV 3
+
+/* Drive strengths for Exynos3250, Exynos4 (all) and Exynos5250 */
+#define EXYNOS4_PIN_DRV_LV1 0
+#define EXYNOS4_PIN_DRV_LV2 2
+#define EXYNOS4_PIN_DRV_LV3 1
+#define EXYNOS4_PIN_DRV_LV4 3
+
+/* Drive strengths for Exynos5260 */
+#define EXYNOS5260_PIN_DRV_LV1 0
+#define EXYNOS5260_PIN_DRV_LV2 1
+#define EXYNOS5260_PIN_DRV_LV4 2
+#define EXYNOS5260_PIN_DRV_LV6 3
+
+/* Drive strengths for Exynos5410, Exynos542x and Exynos5800 */
+#define EXYNOS5420_PIN_DRV_LV1 0
+#define EXYNOS5420_PIN_DRV_LV2 1
+#define EXYNOS5420_PIN_DRV_LV3 2
+#define EXYNOS5420_PIN_DRV_LV4 3
+
+#define EXYNOS_PIN_FUNC_INPUT 0
+#define EXYNOS_PIN_FUNC_OUTPUT 1
+#define EXYNOS_PIN_FUNC_2 2
+#define EXYNOS_PIN_FUNC_3 3
+#define EXYNOS_PIN_FUNC_4 4
+#define EXYNOS_PIN_FUNC_5 5
+#define EXYNOS_PIN_FUNC_6 6
+#define EXYNOS_PIN_FUNC_F 0xf
+
+#endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */
diff --git a/include/dt-bindings/reset/gxbb-aoclkc.h b/include/dt-bindings/reset/gxbb-aoclkc.h
new file mode 100644
index 000000000000..9e3fd60c309c
--- /dev/null
+++ b/include/dt-bindings/reset/gxbb-aoclkc.h
@@ -0,0 +1,66 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DT_BINDINGS_RESET_AMLOGIC_MESON_GXBB_AOCLK
+#define DT_BINDINGS_RESET_AMLOGIC_MESON_GXBB_AOCLK
+
+#define RESET_AO_REMOTE 0
+#define RESET_AO_I2C_MASTER 1
+#define RESET_AO_I2C_SLAVE 2
+#define RESET_AO_UART1 3
+#define RESET_AO_UART2 4
+#define RESET_AO_IR_BLASTER 5
+
+#endif
diff --git a/include/dt-bindings/reset/mt2701-resets.h b/include/dt-bindings/reset/mt2701-resets.h
new file mode 100644
index 000000000000..aaf03057f755
--- /dev/null
+++ b/include/dt-bindings/reset/mt2701-resets.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015 MediaTek, Shunli Wang <shunli.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT2701
+#define _DT_BINDINGS_RESET_CONTROLLER_MT2701
+
+/* INFRACFG resets */
+#define MT2701_INFRA_EMI_REG_RST 0
+#define MT2701_INFRA_DRAMC0_A0_RST 1
+#define MT2701_INFRA_FHCTL_RST 2
+#define MT2701_INFRA_APCIRQ_EINT_RST 3
+#define MT2701_INFRA_APXGPT_RST 4
+#define MT2701_INFRA_SCPSYS_RST 5
+#define MT2701_INFRA_KP_RST 6
+#define MT2701_INFRA_PMIC_WRAP_RST 7
+#define MT2701_INFRA_MIPI_RST 8
+#define MT2701_INFRA_IRRX_RST 9
+#define MT2701_INFRA_CEC_RST 10
+#define MT2701_INFRA_EMI_RST 32
+#define MT2701_INFRA_DRAMC0_RST 34
+#define MT2701_INFRA_TRNG_RST 37
+#define MT2701_INFRA_SYSIRQ_RST 38
+
+/* PERICFG resets */
+#define MT2701_PERI_UART0_SW_RST 0
+#define MT2701_PERI_UART1_SW_RST 1
+#define MT2701_PERI_UART2_SW_RST 2
+#define MT2701_PERI_UART3_SW_RST 3
+#define MT2701_PERI_GCPU_SW_RST 5
+#define MT2701_PERI_BTIF_SW_RST 6
+#define MT2701_PERI_PWM_SW_RST 8
+#define MT2701_PERI_AUXADC_SW_RST 10
+#define MT2701_PERI_DMA_SW_RST 11
+#define MT2701_PERI_NFI_SW_RST 14
+#define MT2701_PERI_NLI_SW_RST 15
+#define MT2701_PERI_THERM_SW_RST 16
+#define MT2701_PERI_MSDC2_SW_RST 17
+#define MT2701_PERI_MSDC0_SW_RST 19
+#define MT2701_PERI_MSDC1_SW_RST 20
+#define MT2701_PERI_I2C0_SW_RST 22
+#define MT2701_PERI_I2C1_SW_RST 23
+#define MT2701_PERI_I2C2_SW_RST 24
+#define MT2701_PERI_I2C3_SW_RST 25
+#define MT2701_PERI_USB_SW_RST 28
+#define MT2701_PERI_ETH_SW_RST 29
+#define MT2701_PERI_SPI0_SW_RST 33
+
+/* TOPRGU resets */
+#define MT2701_TOPRGU_INFRA_RST 0
+#define MT2701_TOPRGU_MM_RST 1
+#define MT2701_TOPRGU_MFG_RST 2
+#define MT2701_TOPRGU_ETHDMA_RST 3
+#define MT2701_TOPRGU_VDEC_RST 4
+#define MT2701_TOPRGU_VENC_IMG_RST 5
+#define MT2701_TOPRGU_DDRPHY_RST 6
+#define MT2701_TOPRGU_MD_RST 7
+#define MT2701_TOPRGU_INFRA_AO_RST 8
+#define MT2701_TOPRGU_CONN_RST 9
+#define MT2701_TOPRGU_APMIXED_RST 10
+#define MT2701_TOPRGU_HIFSYS_RST 11
+#define MT2701_TOPRGU_CONN_MCU_RST 12
+#define MT2701_TOPRGU_BDP_DISP_RST 13
+
+/* HIFSYS resets */
+#define MT2701_HIFSYS_UHOST0_RST 3
+#define MT2701_HIFSYS_UHOST1_RST 4
+#define MT2701_HIFSYS_UPHY0_RST 21
+#define MT2701_HIFSYS_UPHY1_RST 22
+#define MT2701_HIFSYS_PCIE0_RST 24
+#define MT2701_HIFSYS_PCIE1_RST 25
+#define MT2701_HIFSYS_PCIE2_RST 26
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT2701 */
diff --git a/include/dt-bindings/reset/qcom,gcc-mdm9615.h b/include/dt-bindings/reset/qcom,gcc-mdm9615.h
new file mode 100644
index 000000000000..7f86e9a59df4
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-mdm9615.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_GCC_MDM9615_H
+#define _DT_BINDINGS_RESET_GCC_MDM9615_H
+
+#define SFAB_MSS_Q6_SW_RESET 0
+#define SFAB_MSS_Q6_FW_RESET 1
+#define QDSS_STM_RESET 2
+#define AFAB_SMPSS_S_RESET 3
+#define AFAB_SMPSS_M1_RESET 4
+#define AFAB_SMPSS_M0_RESET 5
+#define AFAB_EBI1_CH0_RESET 6
+#define AFAB_EBI1_CH1_RESET 7
+#define SFAB_ADM0_M0_RESET 8
+#define SFAB_ADM0_M1_RESET 9
+#define SFAB_ADM0_M2_RESET 10
+#define ADM0_C2_RESET 11
+#define ADM0_C1_RESET 12
+#define ADM0_C0_RESET 13
+#define ADM0_PBUS_RESET 14
+#define ADM0_RESET 15
+#define QDSS_CLKS_SW_RESET 16
+#define QDSS_POR_RESET 17
+#define QDSS_TSCTR_RESET 18
+#define QDSS_HRESET_RESET 19
+#define QDSS_AXI_RESET 20
+#define QDSS_DBG_RESET 21
+#define PCIE_A_RESET 22
+#define PCIE_AUX_RESET 23
+#define PCIE_H_RESET 24
+#define SFAB_PCIE_M_RESET 25
+#define SFAB_PCIE_S_RESET 26
+#define SFAB_MSS_M_RESET 27
+#define SFAB_USB3_M_RESET 28
+#define SFAB_RIVA_M_RESET 29
+#define SFAB_LPASS_RESET 30
+#define SFAB_AFAB_M_RESET 31
+#define AFAB_SFAB_M0_RESET 32
+#define AFAB_SFAB_M1_RESET 33
+#define SFAB_SATA_S_RESET 34
+#define SFAB_DFAB_M_RESET 35
+#define DFAB_SFAB_M_RESET 36
+#define DFAB_SWAY0_RESET 37
+#define DFAB_SWAY1_RESET 38
+#define DFAB_ARB0_RESET 39
+#define DFAB_ARB1_RESET 40
+#define PPSS_PROC_RESET 41
+#define PPSS_RESET 42
+#define DMA_BAM_RESET 43
+#define SPS_TIC_H_RESET 44
+#define SLIMBUS_H_RESET 45
+#define SFAB_CFPB_M_RESET 46
+#define SFAB_CFPB_S_RESET 47
+#define TSIF_H_RESET 48
+#define CE1_H_RESET 49
+#define CE1_CORE_RESET 50
+#define CE1_SLEEP_RESET 51
+#define CE2_H_RESET 52
+#define CE2_CORE_RESET 53
+#define SFAB_SFPB_M_RESET 54
+#define SFAB_SFPB_S_RESET 55
+#define RPM_PROC_RESET 56
+#define PMIC_SSBI2_RESET 57
+#define SDC1_RESET 58
+#define SDC2_RESET 59
+#define SDC3_RESET 60
+#define SDC4_RESET 61
+#define SDC5_RESET 62
+#define DFAB_A2_RESET 63
+#define USB_HS1_RESET 64
+#define USB_HSIC_RESET 65
+#define USB_FS1_XCVR_RESET 66
+#define USB_FS1_RESET 67
+#define USB_FS2_XCVR_RESET 68
+#define USB_FS2_RESET 69
+#define GSBI1_RESET 70
+#define GSBI2_RESET 71
+#define GSBI3_RESET 72
+#define GSBI4_RESET 73
+#define GSBI5_RESET 74
+#define GSBI6_RESET 75
+#define GSBI7_RESET 76
+#define GSBI8_RESET 77
+#define GSBI9_RESET 78
+#define GSBI10_RESET 79
+#define GSBI11_RESET 80
+#define GSBI12_RESET 81
+#define SPDM_RESET 82
+#define TLMM_H_RESET 83
+#define SFAB_MSS_S_RESET 84
+#define MSS_SLP_RESET 85
+#define MSS_Q6SW_JTAG_RESET 86
+#define MSS_Q6FW_JTAG_RESET 87
+#define MSS_RESET 88
+#define SATA_H_RESET 89
+#define SATA_RXOOB_RESE 90
+#define SATA_PMALIVE_RESET 91
+#define SATA_SFAB_M_RESET 92
+#define TSSC_RESET 93
+#define PDM_RESET 94
+#define MPM_H_RESET 95
+#define MPM_RESET 96
+#define SFAB_SMPSS_S_RESET 97
+#define PRNG_RESET 98
+#define RIVA_RESET 99
+#define USB_HS3_RESET 100
+#define USB_HS4_RESET 101
+#define CE3_RESET 102
+#define PCIE_EXT_PCI_RESET 103
+#define PCIE_PHY_RESET 104
+#define PCIE_PCI_RESET 105
+#define PCIE_POR_RESET 106
+#define PCIE_HCLK_RESET 107
+#define PCIE_ACLK_RESET 108
+#define CE3_H_RESET 109
+#define SFAB_CE3_M_RESET 110
+#define SFAB_CE3_S_RESET 111
+#define SATA_RESET 112
+#define CE3_SLEEP_RESET 113
+#define GSS_SLP_RESET 114
+#define GSS_RESET 115
+
+#endif
diff --git a/include/dt-bindings/reset/sun6i-a31-ccu.h b/include/dt-bindings/reset/sun6i-a31-ccu.h
new file mode 100644
index 000000000000..fbff365ed6e1
--- /dev/null
+++ b/include/dt-bindings/reset/sun6i-a31-ccu.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN6I_A31_H_
+#define _DT_BINDINGS_RST_SUN6I_A31_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_USB_PHY2 2
+
+#define RST_AHB1_MIPI_DSI 3
+#define RST_AHB1_SS 4
+#define RST_AHB1_DMA 5
+#define RST_AHB1_MMC0 6
+#define RST_AHB1_MMC1 7
+#define RST_AHB1_MMC2 8
+#define RST_AHB1_MMC3 9
+#define RST_AHB1_NAND1 10
+#define RST_AHB1_NAND0 11
+#define RST_AHB1_SDRAM 12
+#define RST_AHB1_EMAC 13
+#define RST_AHB1_TS 14
+#define RST_AHB1_HSTIMER 15
+#define RST_AHB1_SPI0 16
+#define RST_AHB1_SPI1 17
+#define RST_AHB1_SPI2 18
+#define RST_AHB1_SPI3 19
+#define RST_AHB1_OTG 20
+#define RST_AHB1_EHCI0 21
+#define RST_AHB1_EHCI1 22
+#define RST_AHB1_OHCI0 23
+#define RST_AHB1_OHCI1 24
+#define RST_AHB1_OHCI2 25
+#define RST_AHB1_VE 26
+#define RST_AHB1_LCD0 27
+#define RST_AHB1_LCD1 28
+#define RST_AHB1_CSI 29
+#define RST_AHB1_HDMI 30
+#define RST_AHB1_BE0 31
+#define RST_AHB1_BE1 32
+#define RST_AHB1_FE0 33
+#define RST_AHB1_FE1 34
+#define RST_AHB1_MP 35
+#define RST_AHB1_GPU 36
+#define RST_AHB1_DEU0 37
+#define RST_AHB1_DEU1 38
+#define RST_AHB1_DRC0 39
+#define RST_AHB1_DRC1 40
+#define RST_AHB1_LVDS 41
+
+#define RST_APB1_CODEC 42
+#define RST_APB1_SPDIF 43
+#define RST_APB1_DIGITAL_MIC 44
+#define RST_APB1_DAUDIO0 45
+#define RST_APB1_DAUDIO1 46
+#define RST_APB2_I2C0 47
+#define RST_APB2_I2C1 48
+#define RST_APB2_I2C2 49
+#define RST_APB2_I2C3 50
+#define RST_APB2_UART0 51
+#define RST_APB2_UART1 52
+#define RST_APB2_UART2 53
+#define RST_APB2_UART3 54
+#define RST_APB2_UART4 55
+#define RST_APB2_UART5 56
+
+#endif /* _DT_BINDINGS_RST_SUN6I_A31_H_ */
diff --git a/include/dt-bindings/reset/sun8i-a23-a33-ccu.h b/include/dt-bindings/reset/sun8i-a23-a33-ccu.h
new file mode 100644
index 000000000000..6121f2b0cd0a
--- /dev/null
+++ b/include/dt-bindings/reset/sun8i-a23-a33-ccu.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN8I_A23_A33_H_
+#define _DT_BINDINGS_RST_SUN8I_A23_A33_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_USB_HSIC 2
+#define RST_MBUS 3
+#define RST_BUS_MIPI_DSI 4
+#define RST_BUS_SS 5
+#define RST_BUS_DMA 6
+#define RST_BUS_MMC0 7
+#define RST_BUS_MMC1 8
+#define RST_BUS_MMC2 9
+#define RST_BUS_NAND 10
+#define RST_BUS_DRAM 11
+#define RST_BUS_HSTIMER 12
+#define RST_BUS_SPI0 13
+#define RST_BUS_SPI1 14
+#define RST_BUS_OTG 15
+#define RST_BUS_EHCI 16
+#define RST_BUS_OHCI 17
+#define RST_BUS_VE 18
+#define RST_BUS_LCD 19
+#define RST_BUS_CSI 20
+#define RST_BUS_DE_BE 21
+#define RST_BUS_DE_FE 22
+#define RST_BUS_GPU 23
+#define RST_BUS_MSGBOX 24
+#define RST_BUS_SPINLOCK 25
+#define RST_BUS_DRC 26
+#define RST_BUS_SAT 27
+#define RST_BUS_LVDS 28
+#define RST_BUS_CODEC 29
+#define RST_BUS_I2S0 30
+#define RST_BUS_I2S1 31
+#define RST_BUS_I2C0 32
+#define RST_BUS_I2C1 33
+#define RST_BUS_I2C2 34
+#define RST_BUS_UART0 35
+#define RST_BUS_UART1 36
+#define RST_BUS_UART2 37
+#define RST_BUS_UART3 38
+#define RST_BUS_UART4 39
+
+#endif /* _DT_BINDINGS_RST_SUN8I_A23_A33_H_ */
diff --git a/include/dt-bindings/soc/rockchip,boot-mode.h b/include/dt-bindings/soc/rockchip,boot-mode.h
new file mode 100644
index 000000000000..ae7c867e736a
--- /dev/null
+++ b/include/dt-bindings/soc/rockchip,boot-mode.h
@@ -0,0 +1,15 @@
+#ifndef __ROCKCHIP_BOOT_MODE_H
+#define __ROCKCHIP_BOOT_MODE_H
+
+/* high 24 bits are the tag, low 8 bits are the type */
+#define REBOOT_FLAG 0x5242C300
+/* normal boot */
+#define BOOT_NORMAL (REBOOT_FLAG + 0)
+/* enter bootloader rockusb mode */
+#define BOOT_BL_DOWNLOAD (REBOOT_FLAG + 1)
+/* enter recovery */
+#define BOOT_RECOVERY (REBOOT_FLAG + 3)
+/* enter fastboot mode */
+#define BOOT_FASTBOOT (REBOOT_FLAG + 9)
+
+#endif
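
As the comment above notes, the magic tag sits in the high 24 bits and the boot type in the low 8 bits; a short sketch of how a reboot handler might validate and decode such a value (the helper names are illustrative, not part of the binding):

#define REBOOT_FLAG	0x5242C300

/* true if the register still carries the magic tag */
static inline int boot_mode_is_valid(unsigned int val)
{
	return (val & 0xffffff00) == REBOOT_FLAG;
}

/* 0 = normal, 1 = bootloader download, 3 = recovery, 9 = fastboot */
static inline unsigned int boot_mode_type(unsigned int val)
{
	return val & 0xff;
}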
diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h
index 729ab9fc325e..2a99f1d52bb5 100644
--- a/include/dt-bindings/thermal/tegra124-soctherm.h
+++ b/include/dt-bindings/thermal/tegra124-soctherm.h
@@ -11,4 +11,9 @@
#define TEGRA124_SOCTHERM_SENSOR_PLLX 3
#define TEGRA124_SOCTHERM_SENSOR_NUM 4
+#define TEGRA_SOCTHERM_THROT_LEVEL_LOW 0
+#define TEGRA_SOCTHERM_THROT_LEVEL_MED 1
+#define TEGRA_SOCTHERM_THROT_LEVEL_HIGH 2
+#define TEGRA_SOCTHERM_THROT_LEVEL_NONE -1
+
#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 19b698ef3336..002f0922cd92 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -20,9 +20,11 @@
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
+#include <linux/static_key.h>
#include <linux/types.h>
#include <kvm/iodev.h>
#include <linux/list.h>
+#include <linux/jump_label.h>
#define VGIC_V3_MAX_CPUS 255
#define VGIC_V2_MAX_CPUS 8
@@ -49,6 +51,9 @@ struct vgic_global {
/* Physical address of vgic virtual cpu interface */
phys_addr_t vcpu_base;
+ /* GICV mapping */
+ void __iomem *vcpu_base_va;
+
/* virtual control interface mapping */
void __iomem *vctrl_base;
@@ -63,6 +68,9 @@ struct vgic_global {
/* Only needed for the legacy KVM_CREATE_IRQCHIP */
bool can_emulate_gicv2;
+
+ /* GIC system register CPU interface */
+ struct static_key_false gicv3_cpuif;
};
extern struct vgic_global kvm_vgic_global_state;
@@ -217,7 +225,6 @@ struct vgic_v2_cpu_if {
};
struct vgic_v3_cpu_if {
-#ifdef CONFIG_KVM_ARM_VGIC_V3
u32 vgic_hcr;
u32 vgic_vmcr;
u32 vgic_sre; /* Restored only, change ignored */
@@ -227,7 +234,6 @@ struct vgic_v3_cpu_if {
u32 vgic_ap0r[4];
u32 vgic_ap1r[4];
u64 vgic_lr[VGIC_V3_MAX_LRS];
-#endif
};
struct vgic_cpu {
@@ -265,6 +271,8 @@ struct vgic_cpu {
bool lpis_enabled;
};
+extern struct static_key_false vgic_v2_cpuif_trap;
+
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm, u32 type);
@@ -294,13 +302,7 @@ bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
-#ifdef CONFIG_KVM_ARM_VGIC_V3
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
-#else
-static inline void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
-{
-}
-#endif
/**
* kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
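
The gicv3_cpuif member added above is a static key, so hot paths can test it with a static branch instead of a run-time flag; a small sketch, assuming the <linux/jump_label.h> include this patch adds (the function name is illustrative):

static inline bool vgic_use_gicv3_cpuif(void)
{
	/* patched in at runtime once the GICv3 sysreg CPU interface is detected */
	return static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif);
}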
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c5eaf2f80a4c..ddbeda6dbdc8 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -85,6 +85,8 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
return dev_name(&adev->dev);
}
+struct device *acpi_get_first_physical_node(struct acpi_device *adev);
+
enum acpi_irq_model_id {
ACPI_IRQ_MODEL_PIC = 0,
ACPI_IRQ_MODEL_IOAPIC,
@@ -267,12 +269,18 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
return phys_id == PHYS_CPUID_INVALID;
}
+/* Validate the processor object's proc_id */
+bool acpi_processor_validate_proc_id(int proc_id);
+
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
int acpi_unmap_cpu(int cpu);
+int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+void acpi_set_processor_mapping(void);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
@@ -634,6 +642,11 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
return NULL;
}
+static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline void acpi_early_init(void) { }
static inline void acpi_subsystem_init(void) { }
@@ -751,6 +764,12 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
#endif /* !CONFIG_ACPI */
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_ioapic_add(acpi_handle root);
+#else
+static inline int acpi_ioapic_add(acpi_handle root) { return 0; }
+#endif
+
#ifdef CONFIG_ACPI
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
u32 pm1a_ctrl, u32 pm1b_ctrl));
@@ -927,9 +946,17 @@ struct acpi_reference_args {
#ifdef CONFIG_ACPI
int acpi_dev_get_property(struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj);
-int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, size_t index,
- struct acpi_reference_args *args);
+int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *name, size_t index, size_t num_args,
+ struct acpi_reference_args *args);
+
+static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *name, size_t index,
+ struct acpi_reference_args *args)
+{
+ return __acpi_node_get_property_reference(fwnode, name, index,
+ MAX_ACPI_REFERENCE_ARGS, args);
+}
int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
void **valptr);
@@ -1005,6 +1032,14 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
return -ENXIO;
}
+static inline int
+__acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *name, size_t index, size_t num_args,
+ struct acpi_reference_args *args)
+{
+ return -ENXIO;
+}
+
static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
const char *name, size_t index,
struct acpi_reference_args *args)
@@ -1074,4 +1109,16 @@ void acpi_table_upgrade(void);
static inline void acpi_table_upgrade(void) { }
#endif
+#if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG)
+extern bool acpi_has_watchdog(void);
+#else
+static inline bool acpi_has_watchdog(void) { return false; }
+#endif
+
+#ifdef CONFIG_ACPI_SPCR_TABLE
+int parse_spcr(bool earlycon);
+#else
+static inline int parse_spcr(bool earlycon) { return 0; }
+#endif
+
#endif /*_LINUX_ACPI_H*/
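
The acpi_node_get_property_reference() wrapper above keeps the old signature and simply forwards with num_args = MAX_ACPI_REFERENCE_ARGS. A usage sketch, assuming kernel driver context; the "dmas" property name and the function name are only examples:

static int resolve_first_dma_ref(struct fwnode_handle *fwnode)
{
	struct acpi_reference_args args;
	int ret;

	/* index 0: first reference listed in the (example) "dmas" property */
	ret = acpi_node_get_property_reference(fwnode, "dmas", 0, &args);
	if (ret)
		return ret;

	pr_info("reference target: %s\n", acpi_dev_name(args.adev));
	return 0;
}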
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
new file mode 100644
index 000000000000..0e32dac8fd03
--- /dev/null
+++ b/include/linux/acpi_iort.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016, Semihalf
+ * Author: Tomasz Nowicki <tn@semihalf.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __ACPI_IORT_H__
+#define __ACPI_IORT_H__
+
+#include <linux/acpi.h>
+#include <linux/fwnode.h>
+#include <linux/irqdomain.h>
+
+int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
+void iort_deregister_domain_token(int trans_id);
+struct fwnode_handle *iort_find_domain_token(int trans_id);
+#ifdef CONFIG_ACPI_IORT
+void acpi_iort_init(void);
+u32 iort_msi_map_rid(struct device *dev, u32 req_id);
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+#else
+static inline void acpi_iort_init(void) { }
+static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
+{ return req_id; }
+static inline struct irq_domain *iort_get_device_domain(struct device *dev,
+ u32 req_id)
+{ return NULL; }
+#endif
+
+#endif /* __ACPI_IORT_H__ */
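
A usage sketch for the new IORT helpers, assuming kernel driver context (the function name is illustrative): map a requester ID through IORT and look up the matching MSI irqdomain; with !CONFIG_ACPI_IORT the stubs above turn this into an identity mapping with a NULL domain.

static struct irq_domain *iort_pick_msi_domain(struct device *dev, u32 rid)
{
	u32 dev_id = iort_msi_map_rid(dev, rid);

	return iort_get_device_domain(dev, dev_id);
}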
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 164049357e5c..04602cbe85dc 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -63,7 +63,7 @@ static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
}
#endif
-void cper_print_aer(struct pci_dev *dev, int cper_severity,
+void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 3d8dcdd1aeae..d143c13bed26 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -53,8 +53,14 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
AMBA_VENDOR_QCOM = 0x51,
AMBA_VENDOR_LSI = 0xb6,
+ AMBA_VENDOR_LINUX = 0xfe, /* This value is not official */
};
+/* This is used to generate a pseudo-ID for an AMBA device */
+#define AMBA_LINUX_ID(conf, rev, part) \
+ (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \
+ AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff))
+
extern struct bus_type amba_bustype;
#define to_amba_device(d) container_of(d, struct amba_device, dev)
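
AMBA_LINUX_ID() packs the configuration byte into bits 31:24, the revision into bits 23:20, the unofficial 0xfe vendor code into bits 19:12 and the part number into bits 11:0. A standalone check of that packing:

#define AMBA_VENDOR_LINUX	0xfe
#define AMBA_LINUX_ID(conf, rev, part) \
	(((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \
	 AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff))

/* conf 0, rev 1, part 0x123 -> 0x001fe123 */
_Static_assert(AMBA_LINUX_ID(0, 1, 0x123) == 0x001fe123, "pseudo-ID packing");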
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
index e82e3ee2c54a..1035879b322c 100644
--- a/include/linux/amba/clcd.h
+++ b/include/linux/amba/clcd.h
@@ -67,6 +67,17 @@
#define CNTL_LDMAFIFOTIME (1 << 15)
#define CNTL_WATERMARK (1 << 16)
+/* ST Microelectronics variant bits */
+#define CNTL_ST_1XBPP_444 0x0
+#define CNTL_ST_1XBPP_5551 (1 << 17)
+#define CNTL_ST_1XBPP_565 (1 << 18)
+#define CNTL_ST_CDWID_12 0x0
+#define CNTL_ST_CDWID_16 (1 << 19)
+#define CNTL_ST_CDWID_18 (1 << 20)
+#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20))
+#define CNTL_ST_CEAEN (1 << 21)
+#define CNTL_ST_LCDBPP24_PACKED (6 << 1)
+
enum {
/* individual formats */
CLCD_CAP_RGB444 = (1 << 0),
@@ -93,6 +104,8 @@ enum {
CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB,
};
+struct backlight_device;
+
struct clcd_panel {
struct fb_videomode mode;
signed short width; /* width in mm */
@@ -105,6 +118,13 @@ struct clcd_panel {
fixedtimings:1,
grayscale:1;
unsigned int connector;
+ struct backlight_device *backlight;
+ /*
+ * If the B/R lines are switched between the CLCD
+ * and the panel we need to know this and not try to
+ * compensate with the BGR bit in the control register.
+ */
+ bool bgr_connection;
};
struct clcd_regs {
@@ -170,11 +190,38 @@ struct clcd_board {
struct amba_device;
struct clk;
+/**
+ * struct clcd_vendor_data - holds hardware (IP-block) vendor-specific
+ * variant information
+ *
+ * @clock_timregs: the CLCD needs to be clocked when accessing the
+ * timer registers, or the hardware will hang.
+ * @packed_24_bit_pixels: this variant supports 24bit packed pixel data,
+ * so that RGB accesses 3 bytes at a time, not just on even 32bit
+ * boundaries, packing the pixel data in memory. ST Microelectronics
+ * have this.
+ * @st_bitmux_control: ST Microelectronics have implemented output
+ * bit line multiplexing into the CLCD control register. This indicates
+ * that we need to use this.
+ * @init_board: custom board init function for this variant
+ * @init_panel: custom panel init function for this variant
+ */
+struct clcd_vendor_data {
+ bool clock_timregs;
+ bool packed_24_bit_pixels;
+ bool st_bitmux_control;
+ int (*init_board)(struct amba_device *adev,
+ struct clcd_board *board);
+ int (*init_panel)(struct clcd_fb *fb,
+ struct device_node *panel);
+};
+
/* this data structure describes each frame buffer device we find */
struct clcd_fb {
struct fb_info fb;
struct amba_device *dev;
struct clk *clk;
+ struct clcd_vendor_data *vendor;
struct clcd_panel *panel;
struct clcd_board *board;
void *board_data;
@@ -231,16 +278,22 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
if (var->grayscale)
val |= CNTL_LCDBW;
- if (fb->panel->caps && fb->board->caps &&
- var->bits_per_pixel >= 16) {
+ if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) {
/*
* if board and panel supply capabilities, we can support
- * changing BGR/RGB depending on supplied parameters
+ * changing BGR/RGB depending on supplied parameters. Here
+ * we switch to what the framebuffer is providing if need
+ * be, so if the framebuffer is BGR but the display connection
+ * is RGB (first case) we switch it around. Vice versa mutatis
+ * mutandis if the framebuffer is RGB but the display connection
+ * is BGR, we flip it around.
*/
if (var->red.offset == 0)
val &= ~CNTL_BGR;
else
val |= CNTL_BGR;
+ if (fb->panel->bgr_connection)
+ val ^= CNTL_BGR;
}
switch (var->bits_per_pixel) {
@@ -270,6 +323,10 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
else
val |= CNTL_LCDBPP16_444;
break;
+ case 24:
+ /* Modified variant supporting 24 bit packed pixels */
+ val |= CNTL_ST_LCDBPP24_PACKED;
+ break;
case 32:
val |= CNTL_LCDBPP24;
break;
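
The BGR handling in clcdfb_decode() above first follows the framebuffer's channel order and then applies the new bgr_connection flag as an extra flip. A condensed sketch of just that decision, assuming this header is included (the helper name is illustrative):

static u32 clcd_bgr_bit(u32 red_offset, bool bgr_connection)
{
	u32 val = 0;

	if (red_offset != 0)	/* framebuffer stores BGR */
		val |= CNTL_BGR;
	if (bgr_connection)	/* B/R lines swapped between CLCD and panel */
		val ^= CNTL_BGR;

	return val;
}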
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index d76a19ba2cff..ad0965e21a5e 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -104,6 +104,15 @@
#define UART01x_FR_CTS 0x001
#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
+/*
+ * Some bits of the Flag Register on ZTE devices sit at different positions
+ * than the standard ones.
+ */
+#define ZX_UART01x_FR_BUSY 0x100
+#define ZX_UART01x_FR_DSR 0x008
+#define ZX_UART01x_FR_CTS 0x002
+#define ZX_UART011_FR_RI 0x001
+
#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */
#define UART011_CR_OUT2 0x2000 /* OUT2 */
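
Since the ZTE part moves these Flag Register bits, a driver cannot rely on the fixed UART01x_FR_* positions alone; one plausible approach, sketched here with illustrative structure and field names, is to carry per-vendor masks:

struct vendor_fr_bits {
	unsigned int fr_busy;
	unsigned int fr_dsr;
	unsigned int fr_cts;
	unsigned int fr_ri;
};

static const struct vendor_fr_bits zte_fr_bits = {
	.fr_busy = ZX_UART01x_FR_BUSY,
	.fr_dsr  = ZX_UART01x_FR_DSR,
	.fr_cts  = ZX_UART01x_FR_CTS,
	.fr_ri   = ZX_UART011_FR_RI,
};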
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 2b08e79f5100..09751d349963 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -22,6 +22,20 @@
#include <linux/types.h>
+/*
+ * This is mainly used to communicate information back-and-forth
+ * between SVM and the IOMMU for setting up and tearing down posted
+ * interrupts.
+ */
+struct amd_iommu_pi_data {
+ u32 ga_tag;
+ u32 prev_ga_tag;
+ u64 base;
+ bool is_guest_mode;
+ struct vcpu_data *vcpu_data;
+ void *ir_data;
+};
+
#ifdef CONFIG_AMD_IOMMU
struct task_struct;
@@ -168,11 +182,34 @@ typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
amd_iommu_invalidate_ctx cb);
-
-#else
+#else /* CONFIG_AMD_IOMMU */
static inline int amd_iommu_detect(void) { return -ENODEV; }
-#endif
+#endif /* CONFIG_AMD_IOMMU */
+
+#if defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP)
+
+/* IOMMU AVIC Function */
+extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
+
+extern int
+amd_iommu_update_ga(int cpu, bool is_run, void *data);
+
+#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
+
+static inline int
+amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
+{
+ return 0;
+}
+
+static inline int
+amd_iommu_update_ga(int cpu, bool is_run, void *data)
+{
+ return 0;
+}
+
+#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
#endif /* _ASM_X86_AMD_IOMMU_H */
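
A usage sketch for the AVIC additions above, assuming CONFIG_AMD_IOMMU and CONFIG_IRQ_REMAP: the hypervisor registers a GA-log callback keyed by the 32-bit tag it previously stored in struct amd_iommu_pi_data (the callback and function names are illustrative):

static int svm_ga_log_notifier(u32 ga_tag)
{
	/* look up the vCPU encoded in ga_tag and wake it up */
	return 0;
}

static int __init svm_setup_ga_log(void)
{
	return amd_iommu_register_ga_log_notifier(&svm_ga_log_notifier);
}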
diff --git a/include/linux/ata.h b/include/linux/ata.h
index adbc812c009b..fdb180367ba1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -105,6 +105,7 @@ enum {
ATA_ID_CFA_KEY_MGMT = 162,
ATA_ID_CFA_MODES = 163,
ATA_ID_DATA_SET_MGMT = 169,
+ ATA_ID_SCT_CMD_XPORT = 206,
ATA_ID_ROT_SPEED = 217,
ATA_ID_PIO4 = (1 << 1),
@@ -789,6 +790,48 @@ static inline bool ata_id_sense_reporting_enabled(const u16 *id)
}
/**
+ *
+ * Word: 206 - SCT Command Transport
+ * 15:12 - Vendor Specific
+ * 11:6 - Reserved
+ * 5 - SCT Command Transport Data Tables supported
+ * 4 - SCT Command Transport Features Control supported
+ * 3 - SCT Command Transport Error Recovery Control supported
+ * 2 - SCT Command Transport Write Same supported
+ * 1 - SCT Command Transport Long Sector Access supported
+ * 0 - SCT Command Transport supported
+ */
+static inline bool ata_id_sct_data_tables(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false;
+}
+
+static inline bool ata_id_sct_features_ctrl(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? true : false;
+}
+
+static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false;
+}
+
+static inline bool ata_id_sct_write_same(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? true : false;
+}
+
+static inline bool ata_id_sct_long_sector_access(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false;
+}
+
+static inline bool ata_id_sct_supported(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false;
+}
+
+/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data
*
@@ -1071,32 +1114,6 @@ static inline void ata_id_to_hd_driveid(u16 *id)
#endif
}
-/*
- * Write LBA Range Entries to the buffer that will cover the extent from
- * sector to sector + count. This is used for TRIM and for ADD LBA(S)
- * TO NV CACHE PINNED SET.
- */
-static inline unsigned ata_set_lba_range_entries(void *_buffer,
- unsigned num, u64 sector, unsigned long count)
-{
- __le64 *buffer = _buffer;
- unsigned i = 0, used_bytes;
-
- while (i < num) {
- u64 entry = sector |
- ((u64)(count > 0xffff ? 0xffff : count) << 48);
- buffer[i++] = __cpu_to_le64(entry);
- if (count <= 0xffff)
- break;
- count -= 0xffff;
- sector += 0xffff;
- }
-
- used_bytes = ALIGN(i * 8, 512);
- memset(buffer + i, 0, used_bytes - i * 8);
- return used_bytes;
-}
-
static inline bool ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
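
A small usage sketch for the new SCT helpers above (the function name is illustrative): gate an SCT Write Same path on the word 206 capability bits decoded from the IDENTIFY data.

static bool dev_supports_sct_write_same(const u16 *id)
{
	return ata_id_sct_supported(id) && ata_id_sct_write_same(id);
}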
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index 5a4d664af87a..bd2560502f3c 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -118,6 +118,8 @@
#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
+#define ATMEL_US_FP_OFFSET 16 /* Fractional Part */
+#define ATMEL_US_FP_MASK 0x7
#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register for USART */
#define ATMEL_UA_RTOR 0x28 /* Receiver Time-out Register for UART */
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 7caaf298f539..28c15050ebe6 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -10,214 +10,5 @@
#ifndef _LINUX_AUTO_DEV_IOCTL_H
#define _LINUX_AUTO_DEV_IOCTL_H
-#include <linux/auto_fs.h>
-#include <linux/string.h>
-
-#define AUTOFS_DEVICE_NAME "autofs"
-
-#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
-#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
-
-#define AUTOFS_DEVID_LEN 16
-
-#define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl)
-
-/*
- * An ioctl interface for autofs mount point control.
- */
-
-struct args_protover {
- __u32 version;
-};
-
-struct args_protosubver {
- __u32 sub_version;
-};
-
-struct args_openmount {
- __u32 devid;
-};
-
-struct args_ready {
- __u32 token;
-};
-
-struct args_fail {
- __u32 token;
- __s32 status;
-};
-
-struct args_setpipefd {
- __s32 pipefd;
-};
-
-struct args_timeout {
- __u64 timeout;
-};
-
-struct args_requester {
- __u32 uid;
- __u32 gid;
-};
-
-struct args_expire {
- __u32 how;
-};
-
-struct args_askumount {
- __u32 may_umount;
-};
-
-struct args_ismountpoint {
- union {
- struct args_in {
- __u32 type;
- } in;
- struct args_out {
- __u32 devid;
- __u32 magic;
- } out;
- };
-};
-
-/*
- * All the ioctls use this structure.
- * When sending a path size must account for the total length
- * of the chunk of memory otherwise is is the size of the
- * structure.
- */
-
-struct autofs_dev_ioctl {
- __u32 ver_major;
- __u32 ver_minor;
- __u32 size; /* total size of data passed in
- * including this struct */
- __s32 ioctlfd; /* automount command fd */
-
- /* Command parameters */
-
- union {
- struct args_protover protover;
- struct args_protosubver protosubver;
- struct args_openmount openmount;
- struct args_ready ready;
- struct args_fail fail;
- struct args_setpipefd setpipefd;
- struct args_timeout timeout;
- struct args_requester requester;
- struct args_expire expire;
- struct args_askumount askumount;
- struct args_ismountpoint ismountpoint;
- };
-
- char path[0];
-};
-
-static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
-{
- memset(in, 0, sizeof(struct autofs_dev_ioctl));
- in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
- in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
- in->size = sizeof(struct autofs_dev_ioctl);
- in->ioctlfd = -1;
-}
-
-/*
- * If you change this make sure you make the corresponding change
- * to autofs-dev-ioctl.c:lookup_ioctl()
- */
-enum {
- /* Get various version info */
- AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
- AUTOFS_DEV_IOCTL_PROTOVER_CMD,
- AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
-
- /* Open mount ioctl fd */
- AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
-
- /* Close mount ioctl fd */
- AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
-
- /* Mount/expire status returns */
- AUTOFS_DEV_IOCTL_READY_CMD,
- AUTOFS_DEV_IOCTL_FAIL_CMD,
-
- /* Activate/deactivate autofs mount */
- AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
- AUTOFS_DEV_IOCTL_CATATONIC_CMD,
-
- /* Expiry timeout */
- AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
-
- /* Get mount last requesting uid and gid */
- AUTOFS_DEV_IOCTL_REQUESTER_CMD,
-
- /* Check for eligible expire candidates */
- AUTOFS_DEV_IOCTL_EXPIRE_CMD,
-
- /* Request busy status */
- AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
-
- /* Check if path is a mountpoint */
- AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
-};
-
-#define AUTOFS_IOCTL 0x93
-
-#define AUTOFS_DEV_IOCTL_VERSION \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_PROTOVER \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_OPENMOUNT \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_READY \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_FAIL \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_SETPIPEFD \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_CATATONIC \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_TIMEOUT \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_REQUESTER \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_EXPIRE \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
- _IOWR(AUTOFS_IOCTL, \
- AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
-
+#include <uapi/linux/auto_dev-ioctl.h>
#endif /* _LINUX_AUTO_DEV_IOCTL_H */
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index b4066bb89083..b8f814c95cf5 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -10,7 +10,6 @@
#define _LINUX_AUTO_FS_H
#include <linux/fs.h>
-#include <linux/limits.h>
#include <linux/ioctl.h>
#include <uapi/linux/auto_fs.h>
#endif /* _LINUX_AUTO_FS_H */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 3db25df396cb..8eeedb2db924 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -205,6 +205,9 @@ struct bcma_host_ops {
#define BCMA_PKG_ID_BCM4709 0
#define BCMA_CHIP_ID_BCM47094 53030
#define BCMA_CHIP_ID_BCM53018 53018
+#define BCMA_CHIP_ID_BCM53573 53573
+#define BCMA_PKG_ID_BCM53573 0
+#define BCMA_PKG_ID_BCM47189 1
/* Board types (on PCI usually equals to the subsystem dev id) */
/* BCM4313 */
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index ebd5c1fcdea4..9986f8288d01 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -10,6 +10,7 @@
#define BCMA_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */
#define BCMA_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */
#define BCMA_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */
+#define BCMA_CLKCTLST_HQCLKREQ 0x00000040 /* HQ Clock */
#define BCMA_CLKCTLST_EXTRESREQ 0x00000700 /* Mask of external resource requests */
#define BCMA_CLKCTLST_EXTRESREQ_SHIFT 8
#define BCMA_CLKCTLST_HAVEALP 0x00010000 /* ALP available */
@@ -23,6 +24,7 @@
#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */
/* Agent registers (common for every core) */
+#define BCMA_OOB_SEL_OUT_A30 0x0100
#define BCMA_IOCTL 0x0408 /* IO control */
#define BCMA_IOCTL_CLK 0x0001
#define BCMA_IOCTL_FGC 0x0002
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 23ddf4b46a9b..97cb48f03dc7 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -1,6 +1,4 @@
/*
- * 2.5 block I/O model
- *
* Copyright (C) 2001 Jens Axboe <axboe@suse.de>
*
* This program is free software; you can redistribute it and/or modify
@@ -461,6 +459,7 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
+extern void bio_free_pages(struct bio *bio);
extern struct bio *bio_copy_user_iov(struct request_queue *,
struct rq_map_data *,
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
new file mode 100644
index 000000000000..f6505d83069d
--- /dev/null
+++ b/include/linux/bitfield.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BITFIELD_H
+#define _LINUX_BITFIELD_H
+
+#include <linux/bug.h>
+
+/*
+ * Bitfield access macros
+ *
+ * FIELD_{GET,PREP} macros take as first parameter shifted mask
+ * from which they extract the base mask and shift amount.
+ * Mask must be a compilation time constant.
+ *
+ * Example:
+ *
+ * #define REG_FIELD_A GENMASK(6, 0)
+ * #define REG_FIELD_B BIT(7)
+ * #define REG_FIELD_C GENMASK(15, 8)
+ * #define REG_FIELD_D GENMASK(31, 16)
+ *
+ * Get:
+ * a = FIELD_GET(REG_FIELD_A, reg);
+ * b = FIELD_GET(REG_FIELD_B, reg);
+ *
+ * Set:
+ * reg = FIELD_PREP(REG_FIELD_A, 1) |
+ * FIELD_PREP(REG_FIELD_B, 0) |
+ * FIELD_PREP(REG_FIELD_C, c) |
+ * FIELD_PREP(REG_FIELD_D, 0x40);
+ *
+ * Modify:
+ * reg &= ~REG_FIELD_C;
+ * reg |= FIELD_PREP(REG_FIELD_C, c);
+ */
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+ ({ \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
+ _pfx "mask is not constant"); \
+ BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \
+ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
+ ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+ _pfx "value too large for the field"); \
+ BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
+ _pfx "type of reg too small for mask"); \
+ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
+ (1ULL << __bf_shf(_mask))); \
+ })
+
+/**
+ * FIELD_PREP() - prepare a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ */
+#define FIELD_PREP(_mask, _val) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
+ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ })
+
+/**
+ * FIELD_GET() - extract a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg: 32bit value of entire bitfield
+ *
+ * FIELD_GET() extracts the field specified by @_mask from the
+ * bitfield passed in as @_reg by masking and shifting it down.
+ */
+#define FIELD_GET(_mask, _reg) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
+ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ })
+
+#endif
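
A concrete, made-up register layout on top of the documentation block in the new header: DEMO_SPEED and DEMO_ENABLE are invented names, and GENMASK()/BIT() are assumed to come from linux/bitops.h. With a constant mask the range checks are compile-time; a non-constant value is only warned about at runtime.

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_SPEED	GENMASK(3, 0)	/* hypothetical 4-bit speed field */
#define DEMO_ENABLE	BIT(31)		/* hypothetical enable bit */

static u32 demo_pack(u32 reg, unsigned int speed)
{
	/* Clear the field, then insert the new value already shifted in place. */
	reg &= ~DEMO_SPEED;
	reg |= FIELD_PREP(DEMO_SPEED, speed);
	return reg | DEMO_ENABLE;
}

static unsigned int demo_unpack(u32 reg)
{
	/* Mask and shift the field back down to its natural value. */
	return FIELD_GET(DEMO_SPEED, reg);
}
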
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 598bc999f4c2..3b77588a9360 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -339,6 +339,24 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
+/*
+ * bitmap_from_u64 - Check and swap words within u64.
+ * @mask: source bitmap
+ * @dst: destination bitmap
+ *
+ * On a 32-bit big-endian kernel, reading the u64 mask through
+ * (u32 *)(&val) returns the words in the wrong order: index [0]
+ * yields the upper 32 bits where the lower 32 bits are expected,
+ * so the two words are assigned explicitly here instead.
+ */
+static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+{
+ dst[0] = mask & ULONG_MAX;
+
+ if (sizeof(mask) > sizeof(unsigned long))
+ dst[1] = mask >> 32;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
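
A small hedged sketch of the intended call pattern for bitmap_from_u64(); the 64-bit event mask and its consumer are made up for illustration.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/printk.h>

/* Hypothetical: turn a u64 hardware event mask into a bitmap for iteration. */
static void demo_report_events(u64 events)
{
	DECLARE_BITMAP(bits, 64);
	unsigned int bit;

	bitmap_from_u64(bits, events);	/* handles 32-bit big-endian word order */

	for_each_set_bit(bit, bits, 64)
		pr_info("event %u pending\n", bit);
}
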
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 299e76b59fe9..a83c822c35c2 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -65,16 +65,6 @@ static inline int get_bitmask_order(unsigned int count)
return order; /* We could be slightly more clever with -1 here... */
}
-static inline int get_count_order(unsigned int count)
-{
- int order;
-
- order = fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
-}
-
static __always_inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
@@ -191,6 +181,32 @@ static inline unsigned fls_long(unsigned long l)
return fls64(l);
}
+static inline int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
+
+/**
+ * get_count_order_long - get order after rounding @l up to power of 2
+ * @l: parameter
+ *
+ * it is the same as get_count_order() but with a long type parameter
+ */
+static inline int get_count_order_long(unsigned long l)
+{
+ if (l == 0UL)
+ return -1;
+ else if (l & (l - 1UL))
+ return (int)fls_long(l);
+ else
+ return (int)fls_long(l) - 1;
+}
+
/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
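
Sanity-checking the new helper against its fls_long() definition: get_count_order_long(32) is 5 (32 is already a power of two), get_count_order_long(33) is 6 (rounded up to 64), and get_count_order_long(0) is defined to return -1.
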
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 10648e300c93..3bf5d33800ab 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -45,7 +45,7 @@ struct blkcg {
spinlock_t lock;
struct radix_tree_root blkg_tree;
- struct blkcg_gq *blkg_hint;
+ struct blkcg_gq __rcu *blkg_hint;
struct hlist_head blkg_list;
struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
@@ -343,16 +343,7 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
*/
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
- char *p;
-
- p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
- if (!p) {
- strncpy(buf, "<unavailable>", buflen);
- return -ENAMETOOLONG;
- }
-
- memmove(buf, p, buf + buflen - p);
- return 0;
+ return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}
/**
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
new file mode 100644
index 000000000000..6ab595259112
--- /dev/null
+++ b/include/linux/blk-mq-pci.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_BLK_MQ_PCI_H
+#define _LINUX_BLK_MQ_PCI_H
+
+struct blk_mq_tag_set;
+struct pci_dev;
+
+int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev);
+
+#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e43bbffb5b7a..535ab2e13d2e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -2,31 +2,19 @@
#define BLK_MQ_H
#include <linux/blkdev.h>
+#include <linux/sbitmap.h>
struct blk_mq_tags;
struct blk_flush_queue;
-struct blk_mq_cpu_notifier {
- struct list_head list;
- void *data;
- int (*notify)(void *data, unsigned long action, unsigned int cpu);
-};
-
-struct blk_mq_ctxmap {
- unsigned int size;
- unsigned int bits_per_word;
- struct blk_align_bitmap *map;
-};
-
struct blk_mq_hw_ctx {
struct {
spinlock_t lock;
struct list_head dispatch;
+ unsigned long state; /* BLK_MQ_S_* flags */
} ____cacheline_aligned_in_smp;
- unsigned long state; /* BLK_MQ_S_* flags */
- struct delayed_work run_work;
- struct delayed_work delay_work;
+ struct work_struct run_work;
cpumask_var_t cpumask;
int next_cpu;
int next_cpu_batch;
@@ -38,10 +26,10 @@ struct blk_mq_hw_ctx {
void *driver_data;
- struct blk_mq_ctxmap ctx_map;
+ struct sbitmap ctx_map;
- unsigned int nr_ctx;
struct blk_mq_ctx **ctxs;
+ unsigned int nr_ctx;
atomic_t wait_index;
@@ -49,7 +37,7 @@ struct blk_mq_hw_ctx {
unsigned long queued;
unsigned long run;
-#define BLK_MQ_MAX_DISPATCH_ORDER 10
+#define BLK_MQ_MAX_DISPATCH_ORDER 7
unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
unsigned int numa_node;
@@ -57,14 +45,18 @@ struct blk_mq_hw_ctx {
atomic_t nr_active;
- struct blk_mq_cpu_notifier cpu_notifier;
+ struct delayed_work delay_work;
+
+ struct hlist_node cpuhp_dead;
struct kobject kobj;
+ unsigned long poll_considered;
unsigned long poll_invoked;
unsigned long poll_success;
};
struct blk_mq_tag_set {
+ unsigned int *mq_map;
struct blk_mq_ops *ops;
unsigned int nr_hw_queues;
unsigned int queue_depth; /* max hw supported */
@@ -88,7 +80,6 @@ struct blk_mq_queue_data {
};
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -102,6 +93,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
struct blk_mq_ops {
@@ -111,11 +103,6 @@ struct blk_mq_ops {
queue_rq_fn *queue_rq;
/*
- * Map to specific hardware queue
- */
- map_queue_fn *map_queue;
-
- /*
* Called on request timeout
*/
timeout_fn *timeout;
@@ -147,6 +134,8 @@ struct blk_mq_ops {
init_request_fn *init_request;
exit_request_fn *exit_request;
reinit_request_fn *reinit_request;
+
+ map_queues_fn *map_queues;
};
enum {
@@ -158,6 +147,7 @@ enum {
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
BLK_MQ_F_DEFER_ISSUE = 1 << 4,
+ BLK_MQ_F_BLOCKING = 1 << 5,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -178,8 +168,8 @@ enum {
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
-int blk_mq_register_disk(struct gendisk *);
-void blk_mq_unregister_disk(struct gendisk *);
+int blk_mq_register_dev(struct device *, struct request_queue *);
+void blk_mq_unregister_dev(struct device *, struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
@@ -201,7 +191,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
-struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -220,8 +209,6 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
@@ -232,6 +219,7 @@ void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);
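
To show how the new ->map_queues hook and blk_mq_pci_map_queues() (from the header added above) fit together, a hedged sketch follows; the demo_* names are invented and it assumes the driver stored its struct pci_dev in set->driver_data, as PCI block drivers commonly do.

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>

/* Hypothetical callback: spread hardware queues by the device's MSI-X affinity. */
static int demo_map_queues(struct blk_mq_tag_set *set)
{
	struct pci_dev *pdev = set->driver_data;

	return blk_mq_pci_map_queues(set, pdev);
}

/* Only the new member is shown; .queue_rq and friends are omitted for brevity. */
static struct blk_mq_ops demo_mq_ops = {
	.map_queues	= demo_map_queues,	/* replaces the removed ->map_queue */
};
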
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 436f43f87da9..cd395ecec99d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -16,7 +16,6 @@ struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
-typedef void (bio_destructor_t) (struct bio *);
#ifdef CONFIG_BLOCK
/*
@@ -89,14 +88,22 @@ struct bio {
struct bio_vec bi_inline_vecs[0];
};
-#define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS)
+#define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
+#define bio_flags(bio) ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT)
-#define bio_set_op_attrs(bio, op, op_flags) do { \
- WARN_ON(op >= (1 << REQ_OP_BITS)); \
- (bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1); \
- (bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT); \
- (bio)->bi_opf |= op_flags; \
+#define bio_set_op_attrs(bio, op, op_flags) do { \
+ if (__builtin_constant_p(op)) \
+ BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \
+ else \
+ WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \
+ if (__builtin_constant_p(op_flags)) \
+ BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
+ else \
+ WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
+ (bio)->bi_opf = bio_flags(bio); \
+ (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \
+ (bio)->bi_opf |= (op_flags); \
} while (0)
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
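
A small hedged example of the reworked bio_set_op_attrs(): with constant arguments the new range checks become compile-time BUILD_BUG_ONs. The helper below and its GFP choice are illustrative only.

#include <linux/bio.h>

/* Hypothetical: allocate a bio and mark it as a plain read. */
static struct bio *demo_prep_read_bio(void)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return NULL;

	/* Existing flag bits are kept via bio_flags(); only the op is replaced. */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	return bio;
}
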
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e79055c8b577..c47c358ba052 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -449,7 +449,7 @@ struct request_queue {
struct list_head requeue_list;
spinlock_t requeue_lock;
- struct work_struct requeue_work;
+ struct delayed_work requeue_work;
struct mutex sysfs_lock;
@@ -1440,8 +1440,8 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
return bio_will_gap(req->q, bio, req->bio);
}
-struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h
index e44b88ba552b..225bdb7daec7 100644
--- a/include/linux/blockgroup_lock.h
+++ b/include/linux/blockgroup_lock.h
@@ -10,28 +10,10 @@
#include <linux/cache.h>
#ifdef CONFIG_SMP
-
-/*
- * We want a power-of-two. Is there a better way than this?
- */
-
-#if NR_CPUS >= 32
-#define NR_BG_LOCKS 128
-#elif NR_CPUS >= 16
-#define NR_BG_LOCKS 64
-#elif NR_CPUS >= 8
-#define NR_BG_LOCKS 32
-#elif NR_CPUS >= 4
-#define NR_BG_LOCKS 16
-#elif NR_CPUS >= 2
-#define NR_BG_LOCKS 8
+#define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32))
#else
-#define NR_BG_LOCKS 4
-#endif
-
-#else /* CONFIG_SMP */
#define NR_BG_LOCKS 1
-#endif /* CONFIG_SMP */
+#endif
struct bgl_lock {
spinlock_t lock;
@@ -49,14 +31,10 @@ static inline void bgl_lock_init(struct blockgroup_lock *bgl)
spin_lock_init(&bgl->locks[i].lock);
}
-/*
- * The accessor is a macro so we can embed a blockgroup_lock into different
- * superblock types
- */
static inline spinlock_t *
bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
{
- return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock;
+ return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock;
}
#endif
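
The one-line replacement reproduces the table it deletes: for NR_CPUS = 2 it gives 4 << ilog2(2) = 8, for NR_CPUS = 8 it gives 32, for NR_CPUS = 16 it gives 64, and everything from 32 CPUs upward is clamped at 4 << 5 = 128, the previous maximum.
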
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index f9be32691718..962164d36506 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -7,6 +7,7 @@
#include <linux/mmzone.h>
#include <linux/mm_types.h>
#include <asm/dma.h>
+#include <asm/processor.h>
/*
* simple boot-time physical memory area allocator.
@@ -119,6 +120,10 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
#endif
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_align(x, align) \
@@ -180,10 +185,6 @@ static inline void * __init memblock_virt_alloc_nopanic(
NUMA_NO_NODE);
}
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
-#endif
-
static inline void * __init memblock_virt_alloc_low(
phys_addr_t size, phys_addr_t align)
{
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 11134238417d..c201017b5730 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -96,6 +96,7 @@ enum bpf_return_type {
struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
+ bool pkt_access;
enum bpf_return_type ret_type;
enum bpf_arg_type arg1_type;
enum bpf_arg_type arg2_type;
@@ -138,6 +139,13 @@ enum bpf_reg_type {
*/
PTR_TO_PACKET,
PTR_TO_PACKET_END, /* skb->data + headlen */
+
+ /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
+ * elem value. We only allow this if we can statically verify that
+ * accesses from this register are going to fall within the size of the
+ * map element.
+ */
+ PTR_TO_MAP_VALUE_ADJ,
};
struct bpf_prog;
@@ -151,7 +159,8 @@ struct bpf_verifier_ops {
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
enum bpf_reg_type *reg_type);
-
+ int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
+ const struct bpf_prog *prog);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn, struct bpf_prog *prog);
@@ -297,6 +306,10 @@ static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
+static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
new file mode 100644
index 000000000000..7035b997aaa5
--- /dev/null
+++ b/include/linux/bpf_verifier.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_BPF_VERIFIER_H
+#define _LINUX_BPF_VERIFIER_H 1
+
+#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/filter.h> /* for MAX_BPF_STACK */
+
+ /* Just some arbitrary values so we can safely do math without overflowing and
+ * are obviously wrong for any sort of memory access.
+ */
+#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
+#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
+
+struct bpf_reg_state {
+ enum bpf_reg_type type;
+ /*
+ * Used to determine if any memory access using this register will
+ * result in a bad access.
+ */
+ u64 min_value, max_value;
+ union {
+ /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
+ s64 imm;
+
+ /* valid when type == PTR_TO_PACKET* */
+ struct {
+ u32 id;
+ u16 off;
+ u16 range;
+ };
+
+ /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
+ * PTR_TO_MAP_VALUE_OR_NULL
+ */
+ struct bpf_map *map_ptr;
+ };
+};
+
+enum bpf_stack_slot_type {
+ STACK_INVALID, /* nothing was stored in this stack slot */
+ STACK_SPILL, /* register spilled into stack */
+ STACK_MISC /* BPF program wrote some data into this slot */
+};
+
+#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+
+/* state of the program:
+ * type of all registers and stack info
+ */
+struct bpf_verifier_state {
+ struct bpf_reg_state regs[MAX_BPF_REG];
+ u8 stack_slot_type[MAX_BPF_STACK];
+ struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+};
+
+/* linked list of verifier states used to prune search */
+struct bpf_verifier_state_list {
+ struct bpf_verifier_state state;
+ struct bpf_verifier_state_list *next;
+};
+
+struct bpf_insn_aux_data {
+ enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
+};
+
+#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+
+struct bpf_verifier_env;
+struct bpf_ext_analyzer_ops {
+ int (*insn_hook)(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+};
+
+/* single container for all structs
+ * one verifier_env per bpf_check() call
+ */
+struct bpf_verifier_env {
+ struct bpf_prog *prog; /* eBPF program being verified */
+ struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
+ int stack_size; /* number of states to be processed */
+ struct bpf_verifier_state cur_state; /* current verifier state */
+ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
+ const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
+ void *analyzer_priv; /* pointer to external analyzer's private data */
+ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+ u32 used_map_cnt; /* number of used maps */
+ u32 id_gen; /* used to generate unique reg IDs */
+ bool allow_ptr_leaks;
+ bool seen_direct_write;
+ bool varlen_map_value_access;
+ struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+};
+
+int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
+ void *priv);
+
+#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bug.h b/include/linux/bug.h
index e51b0709e78d..292d6a10b0c2 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -13,6 +13,7 @@ enum bug_trap_type {
struct pt_regs;
#ifdef __CHECKER__
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
@@ -24,6 +25,8 @@ struct pt_regs;
#else /* __CHECKER__ */
/* Force a compilation error if a constant expression is not a power of 2 */
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5261751f6bd4..5f5270941ba0 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -32,6 +32,7 @@ enum can_mode {
* CAN common private data
*/
struct can_priv {
+ struct net_device *dev;
struct can_device_stats can_stats;
struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
- struct timer_list restart_timer;
+ struct delayed_work restart_work;
int (*do_set_bittiming)(struct net_device *dev);
int (*do_set_data_bittiming)(struct net_device *dev);
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 7c2bb27c067c..a7653339fedb 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -238,9 +238,6 @@ struct ccp_xts_aes_engine {
};
/***** SHA engine *****/
-#define CCP_SHA_BLOCKSIZE SHA256_BLOCK_SIZE
-#define CCP_SHA_CTXSIZE SHA256_DIGEST_SIZE
-
/**
* ccp_sha_type - type of SHA operation
*
diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h
index 82c3d3b7269d..138bbf721e70 100644
--- a/include/linux/cec-funcs.h
+++ b/include/linux/cec-funcs.h
@@ -162,10 +162,11 @@ static inline void cec_msg_standby(struct cec_msg *msg)
/* One Touch Record Feature */
-static inline void cec_msg_record_off(struct cec_msg *msg)
+static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_RECORD_OFF;
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
}
struct cec_op_arib_data {
@@ -227,7 +228,7 @@ static inline void cec_set_digital_service_id(__u8 *msg,
if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
*msg++ = (digital->channel.channel_number_fmt << 2) |
(digital->channel.major >> 8);
- *msg++ = digital->channel.major && 0xff;
+ *msg++ = digital->channel.major & 0xff;
*msg++ = digital->channel.minor >> 8;
*msg++ = digital->channel.minor & 0xff;
*msg++ = 0;
@@ -323,6 +324,7 @@ static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
}
static inline void cec_msg_record_on(struct cec_msg *msg,
+ bool reply,
const struct cec_op_record_src *rec_src)
{
switch (rec_src->type) {
@@ -346,6 +348,7 @@ static inline void cec_msg_record_on(struct cec_msg *msg,
rec_src->ext_phys_addr.phys_addr);
break;
}
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
}
static inline void cec_ops_record_on(const struct cec_msg *msg,
@@ -1141,6 +1144,75 @@ static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
}
+static inline void cec_msg_vendor_command(struct cec_msg *msg,
+ __u8 size, const __u8 *vendor_cmd)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
+ memcpy(msg->msg + 2, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *vendor_cmd = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
+ __u32 vendor_id, __u8 size,
+ const __u8 *vendor_cmd)
+{
+ if (size > 11)
+ size = 11;
+ msg->len = 5 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+ msg->msg[2] = vendor_id >> 16;
+ msg->msg[3] = (vendor_id >> 8) & 0xff;
+ msg->msg[4] = vendor_id & 0xff;
+ memcpy(msg->msg + 5, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
+ __u32 *vendor_id, __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 5;
+
+ if (*size > 11)
+ *size = 11;
+ *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+ *vendor_cmd = msg->msg + 5;
+}
+
+static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
+ __u8 size,
+ const __u8 *rc_code)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
+ memcpy(msg->msg + 2, rc_code, size);
+}
+
+static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **rc_code)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *rc_code = msg->msg + 2;
+}
+
static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
{
msg->len = 2;
@@ -1277,7 +1349,7 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
msg->len += 4;
msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
(ui_cmd->channel_identifier.major >> 8);
- msg->msg[4] = ui_cmd->channel_identifier.major && 0xff;
+ msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
break;
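
A minimal sketch of building (not transmitting) a vendor command with the helpers added above; the vendor ID and payload are placeholders, and cec_msg_init() plus the logical-address constants are assumed to come from linux/cec.h.

#include <linux/cec-funcs.h>

/* Hypothetical: fill a vendor command carrying three opaque payload bytes. */
static void demo_build_vendor_cmd(struct cec_msg *msg)
{
	static const __u8 payload[] = { 0x01, 0x02, 0x03 };

	cec_msg_init(msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
	/* 0x000c03 is only a placeholder vendor ID; at most 11 payload bytes fit. */
	cec_msg_vendor_command_with_id(msg, 0x000c03, sizeof(payload), payload);
}
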
diff --git a/include/linux/cec.h b/include/linux/cec.h
index b3e22893a002..851968e803fa 100644
--- a/include/linux/cec.h
+++ b/include/linux/cec.h
@@ -364,7 +364,7 @@ struct cec_caps {
* @num_log_addrs: how many logical addresses should be claimed. Set by the
* caller.
* @vendor_id: the vendor ID of the device. Set by the caller.
- * @flags: set to 0.
+ * @flags: set of CEC_LOG_ADDRS_FL_* flags.
* @osd_name: the OSD name of the device. Set by the caller.
* @primary_device_type: the primary device type for each logical address.
* Set by the caller.
@@ -389,6 +389,9 @@ struct cec_log_addrs {
__u8 features[CEC_MAX_LOG_ADDRS][12];
};
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0)
+
/* Events */
/* Event that occurs when the adapter state changes */
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 1563265d2097..374bb1c4ef52 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -104,7 +104,7 @@ extern int ceph_auth_build_hello(struct ceph_auth_client *ac,
extern int ceph_handle_auth_reply(struct ceph_auth_client *ac,
void *buf, size_t len,
void *reply_buf, size_t reply_len);
-extern int ceph_entity_name_encode(const char *name, void **p, void *end);
+int ceph_auth_entity_name_encode(const char *name, void **p, void *end);
extern int ceph_build_auth(struct ceph_auth_client *ac,
void *msg_buf, size_t msg_len);
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 7868d602c0a0..f96de8de4fa7 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -138,6 +138,9 @@ struct ceph_dir_layout {
#define CEPH_MSG_POOLOP_REPLY 48
#define CEPH_MSG_POOLOP 49
+/* mon commands */
+#define CEPH_MSG_MON_COMMAND 50
+#define CEPH_MSG_MON_COMMAND_ACK 51
/* osd */
#define CEPH_MSG_OSD_MAP 41
@@ -176,6 +179,14 @@ struct ceph_mon_statfs_reply {
struct ceph_statfs st;
} __attribute__ ((packed));
+struct ceph_mon_command {
+ struct ceph_mon_request_header monhdr;
+ struct ceph_fsid fsid;
+ __le32 num_strs; /* always 1 */
+ __le32 str_len;
+ char str[];
+} __attribute__ ((packed));
+
struct ceph_osd_getmap {
struct ceph_mon_request_header monhdr;
struct ceph_fsid fsid;
@@ -270,6 +281,7 @@ enum {
CEPH_SESSION_FLUSHMSG,
CEPH_SESSION_FLUSHMSG_ACK,
CEPH_SESSION_FORCE_RO,
+ CEPH_SESSION_REJECT,
};
extern const char *ceph_session_op_name(int op);
diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h
new file mode 100644
index 000000000000..84884d8d4710
--- /dev/null
+++ b/include/linux/ceph/cls_lock_client.h
@@ -0,0 +1,49 @@
+#ifndef _LINUX_CEPH_CLS_LOCK_CLIENT_H
+#define _LINUX_CEPH_CLS_LOCK_CLIENT_H
+
+#include <linux/ceph/osd_client.h>
+
+enum ceph_cls_lock_type {
+ CEPH_CLS_LOCK_NONE = 0,
+ CEPH_CLS_LOCK_EXCLUSIVE = 1,
+ CEPH_CLS_LOCK_SHARED = 2,
+};
+
+struct ceph_locker_id {
+ struct ceph_entity_name name; /* locker's client name */
+ char *cookie; /* locker's cookie */
+};
+
+struct ceph_locker_info {
+ struct ceph_entity_addr addr; /* locker's address */
+};
+
+struct ceph_locker {
+ struct ceph_locker_id id;
+ struct ceph_locker_info info;
+};
+
+int ceph_cls_lock(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ char *lock_name, u8 type, char *cookie,
+ char *tag, char *desc, u8 flags);
+int ceph_cls_unlock(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ char *lock_name, char *cookie);
+int ceph_cls_break_lock(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ char *lock_name, char *cookie,
+ struct ceph_entity_name *locker);
+
+void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers);
+
+int ceph_cls_lock_info(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ char *lock_name, u8 *type, char **tag,
+ struct ceph_locker **lockers, u32 *num_lockers);
+
+#endif
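
A hedged sketch of the new lock class client calls; the osdc/oid/oloc objects are assumed to be resolved by the caller (as rbd does), and the lock name, cookie, tag and description strings are placeholders.

#include <linux/ceph/cls_lock_client.h>

/* Hypothetical: take and drop an exclusive lock on an already-resolved object. */
static int demo_lock_object(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc)
{
	int ret;

	ret = ceph_cls_lock(osdc, oid, oloc, "demo_lock",
			    CEPH_CLS_LOCK_EXCLUSIVE, "demo_cookie",
			    "", "demo lock", 0);
	if (ret)
		return ret;

	return ceph_cls_unlock(osdc, oid, oloc, "demo_lock", "demo_cookie");
}
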
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 83fc1fff7061..1816c5e26581 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -264,7 +264,8 @@ extern struct ceph_client *ceph_create_client(struct ceph_options *opt,
void *private,
u64 supported_features,
u64 required_features);
-extern u64 ceph_client_id(struct ceph_client *client);
+struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
+u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
extern int __ceph_open_session(struct ceph_client *client,
unsigned long started);
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index 24d704d1ea5c..d5a3ecea578d 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -141,6 +141,9 @@ int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
ceph_monc_callback_t cb, u64 private_data);
+int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
+ struct ceph_entity_addr *client_addr);
+
extern int ceph_monc_open_session(struct ceph_mon_client *monc);
extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 858932304260..96337b15a60d 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -121,6 +121,9 @@ struct ceph_osd_req_op {
struct ceph_osd_data response_data;
} notify;
struct {
+ struct ceph_osd_data response_data;
+ } list_watchers;
+ struct {
u64 expected_object_size;
u64 expected_write_size;
} alloc_hint;
@@ -249,6 +252,12 @@ struct ceph_osd_linger_request {
size_t *preply_len;
};
+struct ceph_watch_item {
+ struct ceph_entity_name name;
+ u64 cookie;
+ struct ceph_entity_addr addr;
+};
+
struct ceph_osd_client {
struct ceph_client *client;
@@ -346,7 +355,6 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
-
extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
const char *class, const char *method);
@@ -389,6 +397,14 @@ extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc);
+int ceph_osdc_call(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ const char *class, const char *method,
+ unsigned int flags,
+ struct page *req_page, size_t req_len,
+ struct page *resp_page, size_t *resp_len);
+
extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
struct ceph_vino vino,
struct ceph_file_layout *layout,
@@ -434,5 +450,10 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
size_t *preply_len);
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
struct ceph_osd_linger_request *lreq);
+int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ struct ceph_watch_item **watchers,
+ u32 *num_watchers);
#endif
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 984f73b719a9..c83c23f0577b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -97,7 +97,7 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
-char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
@@ -497,6 +497,23 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}
+/**
+ * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
+ * @task: the task to be tested
+ * @ancestor: possible ancestor of @task's cgroup
+ *
+ * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
+ * It follows all the same rules as cgroup_is_descendant, and only applies
+ * to the default hierarchy.
+ */
+static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
+ struct cgroup *ancestor)
+{
+ struct css_set *cset = task_css_set(task);
+
+ return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
+}
+
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
@@ -538,8 +555,7 @@ static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
return kernfs_name(cgrp->kn, buf, buflen);
}
-static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
- size_t buflen)
+static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
return kernfs_path(cgrp->kn, buf, buflen);
}
@@ -557,6 +573,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
+struct cgroup;
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
@@ -574,6 +591,11 @@ static inline void cgroup_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
+static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
+ struct cgroup *ancestor)
+{
+ return true;
+}
#endif /* !CONFIG_CGROUPS */
/*
@@ -621,6 +643,7 @@ struct cgroup_namespace {
atomic_t count;
struct ns_common ns;
struct user_namespace *user_ns;
+ struct ucounts *ucounts;
struct css_set *root_cset;
};
@@ -634,8 +657,8 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
struct user_namespace *user_ns,
struct cgroup_namespace *old_ns);
-char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
- struct cgroup_namespace *ns);
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+ struct cgroup_namespace *ns);
#else /* !CONFIG_CGROUPS */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index a39c0c530778..af596381fa0f 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -772,7 +772,7 @@ struct clk_onecell_data {
};
struct clk_hw_onecell_data {
- size_t num;
+ unsigned int num;
struct clk_hw *hws[];
};
@@ -780,6 +780,18 @@ extern struct of_device_id __clk_of_table;
#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
+/*
+ * Use this macro when you have a driver that requires two initialization
+ * routines, one at of_clk_init(), and one at platform device probe
+ */
+#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
+ static void name##_of_clk_init_driver(struct device_node *np) \
+ { \
+ of_node_clear_flag(np, OF_POPULATED); \
+ fn(np); \
+ } \
+ OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver)
+
#ifdef CONFIG_OF
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *args,
@@ -842,7 +854,7 @@ of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
return ERR_PTR(-ENOENT);
}
-static inline int of_clk_get_parent_count(struct device_node *np)
+static inline unsigned int of_clk_get_parent_count(struct device_node *np)
{
return 0;
}
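
How the new macro is meant to be used, sketched with invented names: the early routine registers only what is needed before timers come up, and clearing OF_POPULATED lets a platform driver bound to the same compatible probe the node again later.

#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of.h>

/* Hypothetical early init: register only the clocks needed before timers. */
static void __init demo_clk_early_init(struct device_node *np)
{
	/* minimal early registration would go here */
}

/* "vendor,demo-clk" is a placeholder compatible string. */
CLK_OF_DECLARE_DRIVER(demo_clk, "vendor,demo-clk", demo_clk_early_init);
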
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index d4e106b5dc27..0d8415820fc3 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -6,8 +6,10 @@
* Lower value means higher priority, analogically to reclaim priority.
*/
enum compact_priority {
+ COMPACT_PRIO_SYNC_FULL,
+ MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
COMPACT_PRIO_SYNC_LIGHT,
- MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
+ MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
COMPACT_PRIO_ASYNC,
INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
@@ -49,14 +51,37 @@ enum compact_result {
COMPACT_CONTENDED,
/*
- * direct compaction partially compacted a zone and there might be
- * suitable pages
+ * direct compaction terminated after concluding that the allocation
+ * should now succeed
*/
- COMPACT_PARTIAL,
+ COMPACT_SUCCESS,
};
struct alloc_context; /* in mm/internal.h */
+/*
+ * Number of free order-0 pages that should be available above given watermark
+ * to make sure compaction has reasonable chance of not running out of free
+ * pages that it needs to isolate as migration target during its work.
+ */
+static inline unsigned long compact_gap(unsigned int order)
+{
+ /*
+ * Although all the isolations for migration are temporary, compaction
+ * free scanner may have up to 1 << order pages on its list and then
+ * try to split an (order - 1) free page. At that point, a gap of
+ * 1 << order might not be enough, so it's safer to require twice that
+ * amount. Note that the number of pages on the list is also
+ * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum
+ * that the migrate scanner can have isolated on migrate list, and free
+ * scanner is only invoked when the number of isolated free pages is
+ * lower than that. But it's not worth complicating the formula here
+ * as a bigger gap for higher orders than strictly necessary can also
+ * improve chances of compaction success.
+ */
+ return 2UL << order;
+}
+
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -70,7 +95,6 @@ extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, unsigned int alloc_flags,
const struct alloc_context *ac, enum compact_priority prio);
-extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
unsigned int alloc_flags, int classzone_idx);
@@ -89,7 +113,7 @@ static inline bool compaction_made_progress(enum compact_result result)
* that the compaction successfully isolated and migrated some
* pageblocks.
*/
- if (result == COMPACT_PARTIAL)
+ if (result == COMPACT_SUCCESS)
return true;
return false;
@@ -154,10 +178,6 @@ extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
#else
-static inline void compact_pgdat(pg_data_t *pgdat, int order)
-{
-}
-
static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
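
As a worked example of the new gap helper: for an order-3 (8-page) allocation, compact_gap(3) = 2 << 3 = 16 free order-0 pages are required above the watermark, i.e. twice the request size, for the reasons spelled out in the comment.
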
diff --git a/include/linux/compat.h b/include/linux/compat.h
index f964ef79e0ad..63609398ef9f 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -432,7 +432,6 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
-extern __printf(1, 2) int compat_printk(const char *fmt, ...);
extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 573c5a18908f..432f5c97e18f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -188,6 +188,13 @@
#endif /* GCC_VERSION >= 40300 */
#if GCC_VERSION >= 40500
+
+#ifndef __CHECKER__
+#ifdef LATENT_ENTROPY_PLUGIN
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+#endif
+
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 436aa4e42221..cf0fa5d86059 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -182,6 +182,29 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define unreachable() do { } while (1)
#endif
+/*
+ * KENTRY - kernel entry point
+ * This can be used to annotate symbols (functions or data) that are used
+ * without their linker symbol being referenced explicitly. For example,
+ * interrupt vector handlers, or functions in the kernel image that are found
+ * programmatically.
+ *
+ * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
+ * are handled in their own way (with KEEP() in linker scripts).
+ *
+ * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
+ * linker script. For example an architecture could KEEP() its entire
+ * boot/exception vector code rather than annotate each function and data.
+ */
+#ifndef KENTRY
+# define KENTRY(sym) \
+ extern typeof(sym) sym; \
+ static const unsigned long __kentry_##sym \
+ __used \
+ __attribute__((section("___kentry" "+" #sym ), used)) \
+ = (unsigned long)&sym;
+#endif
+
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
@@ -406,6 +429,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __attribute_const__ /* unimplemented */
#endif
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
@@ -527,13 +554,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*
- * The seemingly unused size_t variable is to validate @p is indeed a pointer
- * type by making sure it can be dereferenced.
+ * The seemingly unused variable ___typecheck_p validates that @p is
+ * indeed a pointer type by using a pointer to typeof(*p) as the type.
+ * Taking a pointer to typeof(*p) again is needed in case p is void *.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = READ_ONCE(p); \
- size_t __maybe_unused __size_of_ptr = sizeof(*(p)); \
+ typeof(*(p)) *___typecheck_p __maybe_unused; \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
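
An illustrative, hypothetical use of the new KENTRY() annotation: a table that is referenced only by name from assembly would otherwise be a candidate for a gc-sections link to discard, so its symbol is pinned explicitly.

#include <linux/compiler.h>

/* Hypothetical table referenced only from assembly by its symbol name. */
const unsigned long demo_vector_table[4] = { 0x10, 0x20, 0x30, 0x40 };

/* Pin the symbol so a gc-sections link cannot drop it. */
KENTRY(demo_vector_table);
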
diff --git a/include/linux/console.h b/include/linux/console.h
index d530c4627e54..3672809234a7 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -173,6 +173,12 @@ static inline void console_sysfs_notify(void)
#endif
extern bool console_suspend_enabled;
+#ifdef CONFIG_OF
+extern void console_set_by_of(void);
+#else
+static inline void console_set_by_of(void) {}
+#endif
+
/* Suspend and resume console messages over PM events */
extern void suspend_console(void);
extern void resume_console(void);
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 385d62e64abb..2a5982c37dfb 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -232,8 +232,9 @@ struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
int (*trace_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev,
- struct perf_event_attr *attr, u32 mode);
- void (*disable)(struct coresight_device *csdev);
+ struct perf_event *event, u32 mode);
+ void (*disable)(struct coresight_device *csdev,
+ struct perf_event *event);
};
struct coresight_ops {
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 797d9c8e9a1b..b886dc17f2f3 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -61,17 +61,8 @@ struct notifier_block;
#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
-#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
- * not handling interrupts, soon dead.
- * Called on the dying cpu, interrupts
- * are already disabled. Must not
- * sleep, must not fail */
#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
* lock is dropped */
-#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running.
- * Called on the new cpu, just before
- * enabling interrupts. Must not sleep,
- * must not fail */
#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly,
* perhaps due to preemption. */
@@ -86,9 +77,6 @@ struct notifier_block;
#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
-#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
-#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)
-
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
@@ -228,7 +216,11 @@ static inline void cpu_hotplug_done(void) {}
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PM_SLEEP_SMP
-extern int disable_nonboot_cpus(void);
+extern int freeze_secondary_cpus(int primary);
+static inline int disable_nonboot_cpus(void)
+{
+ return freeze_secondary_cpus(0);
+}
extern void enable_nonboot_cpus(void);
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
@@ -239,6 +231,11 @@ void cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
+/* Attach to any functions which should be considered cpuidle. */
+#define __cpuidle __attribute__((__section__(".cpuidle.text")))
+
+bool cpu_in_idle(unsigned long pc);
+
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
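
A hypothetical sketch of the new __cpuidle annotation: the attribute places an idle-loop helper into .cpuidle.text, so cpu_in_idle() can recognize a sampled program counter that lands inside it.

#include <linux/cpu.h>
#include <asm/processor.h>

/* Hypothetical platform idle helper; the section placement is the point here. */
static void __cpuidle demo_wait_for_interrupt(void)
{
	cpu_relax();	/* stand-in for an architecture-specific wait instruction */
}
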
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 242bf530edfc..9b207a8c5af3 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -1,6 +1,8 @@
#ifndef __CPUHOTPLUG_H
#define __CPUHOTPLUG_H
+#include <linux/types.h>
+
enum cpuhp_state {
CPUHP_OFFLINE,
CPUHP_CREATE_THREADS,
@@ -14,15 +16,42 @@ enum cpuhp_state {
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
CPUHP_X86_APB_DEAD,
+ CPUHP_VIRT_NET_DEAD,
+ CPUHP_SLUB_DEAD,
+ CPUHP_MM_WRITEBACK_DEAD,
+ CPUHP_SOFTIRQ_DEAD,
+ CPUHP_NET_MVNETA_DEAD,
+ CPUHP_CPUIDLE_DEAD,
+ CPUHP_ARM64_FPSIMD_DEAD,
+ CPUHP_ARM_OMAP_WAKE_DEAD,
+ CPUHP_IRQ_POLL_DEAD,
+ CPUHP_BLOCK_SOFTIRQ_DEAD,
+ CPUHP_VIRT_SCSI_DEAD,
+ CPUHP_ACPI_CPUDRV_DEAD,
+ CPUHP_S390_PFAULT_DEAD,
+ CPUHP_BLK_MQ_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
CPUHP_PROFILE_PREPARE,
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
+ CPUHP_RELAY_PREPARE,
+ CPUHP_SLAB_PREPARE,
+ CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
+ CPUHP_CPUIDLE_COUPLED_PREPARE,
+ CPUHP_POWERPC_PMAC_PREPARE,
+ CPUHP_POWERPC_MMU_CTX_PREPARE,
+ CPUHP_XEN_PREPARE,
+ CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_NOTIFY_PREPARE,
+ CPUHP_ARM_SHMOBILE_SCU_PREPARE,
+ CPUHP_SH_SH3X_PREPARE,
+ CPUHP_BLK_MQ_PREPARE,
CPUHP_TIMERS_DEAD,
+ CPUHP_NOTF_ERR_INJ_PREPARE,
+ CPUHP_MIPS_SOC_PREPARE,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
@@ -45,6 +74,8 @@ enum cpuhp_state {
CPUHP_AP_PERF_METAG_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
+ CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
+ CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
@@ -68,7 +99,6 @@ enum cpuhp_state {
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
- CPUHP_AP_NOTIFY_STARTING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
CPUHP_AP_ONLINE_IDLE,
@@ -86,6 +116,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_NOTIFY_ONLINE,
@@ -99,7 +130,7 @@ enum cpuhp_state {
int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
int (*startup)(unsigned int cpu),
- int (*teardown)(unsigned int cpu));
+ int (*teardown)(unsigned int cpu), bool multi_instance);
/**
* cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
@@ -116,7 +147,7 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu))
{
- return __cpuhp_setup_state(state, name, true, startup, teardown);
+ return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}
/**
@@ -135,7 +166,66 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
int (*startup)(unsigned int cpu),
int (*teardown)(unsigned int cpu))
{
- return __cpuhp_setup_state(state, name, false, startup, teardown);
+ return __cpuhp_setup_state(state, name, false, startup, teardown,
+ false);
+}
+
+/**
+ * cpuhp_setup_state_multi - Add callbacks for multi state
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback.
+ * @startup: startup callback function
+ * @teardown: teardown callback function
+ *
+ * Sets the internal multi_instance flag and prepares a state to work as a
+ * multi-instance callback. No callbacks are invoked at this point. The
+ * callbacks are invoked once an instance for this state is registered via
+ * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
+ */
+static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
+ const char *name,
+ int (*startup)(unsigned int cpu,
+ struct hlist_node *node),
+ int (*teardown)(unsigned int cpu,
+ struct hlist_node *node))
+{
+ return __cpuhp_setup_state(state, name, false,
+ (void *) startup,
+ (void *) teardown, true);
+}
+
+int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ bool invoke);
+
+/**
+ * cpuhp_state_add_instance - Add an instance for a state and invoke startup
+ * callback.
+ * @state: The state for which the instance is installed
+ * @node: The node for this individual state.
+ *
+ * Installs the instance for the @state and invokes the startup callback on
+ * the present cpus which have already reached the @state. The @state must have
+ * been earlier marked as multi-instance by @cpuhp_setup_state_multi.
+ */
+static inline int cpuhp_state_add_instance(enum cpuhp_state state,
+ struct hlist_node *node)
+{
+ return __cpuhp_state_add_instance(state, node, true);
+}
+
+/**
+ * cpuhp_state_add_instance_nocalls - Add an instance for a state without
+ * invoking the startup callback.
+ * @state: The state for which the instance is installed
+ * @node: The node for this individual state.
+ *
+ * Installs the instance for the @state. The @state must have been earlier
+ * marked as multi-instance by @cpuhp_setup_state_multi.
+ */
+static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
+ struct hlist_node *node)
+{
+ return __cpuhp_state_add_instance(state, node, false);
}
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
@@ -162,6 +252,51 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
__cpuhp_remove_state(state, false);
}
+/**
+ * cpuhp_remove_multi_state - Remove hotplug multi state callback
+ * @state: The state for which the calls are removed
+ *
+ * Removes the callback functions from a multi state. This is the reverse of
+ * cpuhp_setup_state_multi(). All instances should have been removed before
+ * invoking this function.
+ */
+static inline void cpuhp_remove_multi_state(enum cpuhp_state state)
+{
+ __cpuhp_remove_state(state, false);
+}
+
+int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ struct hlist_node *node, bool invoke);
+
+/**
+ * cpuhp_state_remove_instance - Remove hotplug instance from state and invoke
+ * the teardown callback
+ * @state: The state from which the instance is removed
+ * @node: The node for this individual state.
+ *
+ * Removes the instance and invokes the teardown callback on the present cpus
+ * which have already reached the @state.
+ */
+static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
+ struct hlist_node *node)
+{
+ return __cpuhp_state_remove_instance(state, node, true);
+}
+
+/**
+ * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
+ * without invoking the teardown callback
+ * @state: The state from which the instance is removed
+ * @node: The node for this individual state.
+ *
+ * Removes the instance without invoking the teardown callback.
+ */
+static inline int cpuhp_state_remove_instance_nocalls(enum cpuhp_state state,
+ struct hlist_node *node)
+{
+ return __cpuhp_state_remove_instance(state, node, false);
+}
+
#ifdef CONFIG_SMP
void cpuhp_online_idle(enum cpuhp_state state);
#else
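
To illustrate the multi-instance flow added above: a driver installs the state once with cpuhp_setup_state_multi() and then attaches one hlist_node per device with cpuhp_state_add_instance(). The sketch below is not part of the patch; my_drv, CPUHP_MY_DRV_PREPARE and the callback bodies are invented, only the prototypes follow the new declarations:

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/list.h>

struct my_drv {
	struct hlist_node node;		/* handed to the cpuhp core */
	/* ... per-device state ... */
};

static int my_drv_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct my_drv *drv = hlist_entry(node, struct my_drv, node);

	/* allocate per-CPU resources for this particular instance */
	(void)drv;
	return 0;
}

static int my_drv_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	/* release per-CPU resources for this particular instance */
	return 0;
}

static int __init my_drv_module_init(void)
{
	/* installs the callbacks once; invokes nothing yet */
	return cpuhp_setup_state_multi(CPUHP_MY_DRV_PREPARE, "my_drv:prepare",
				       my_drv_cpu_prepare, my_drv_cpu_dead);
}

static int my_drv_probe_one(struct my_drv *drv)
{
	/* runs my_drv_cpu_prepare() on all CPUs already past the state */
	return cpuhp_state_add_instance(CPUHP_MY_DRV_PREPARE, &drv->node);
}

Tear-down would mirror this with cpuhp_state_remove_instance() per device and cpuhp_remove_multi_state() at module exit, per the declarations above.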
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 257db64562e5..f0e70a1bb3ac 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -26,15 +26,10 @@ struct inode;
/*
* COW Supplementary groups list
*/
-#define NGROUPS_SMALL 32
-#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(kgid_t)))
-
struct group_info {
atomic_t usage;
int ngroups;
- int nblocks;
- kgid_t small_block[NGROUPS_SMALL];
- kgid_t *blocks[0];
+ kgid_t gid[0];
};
/**
@@ -88,10 +83,6 @@ extern void set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, kgid_t);
extern bool may_setgroups(void);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
- ((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
-
/*
* The security context of a task
*
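
With the blocks[]/small_block[] indirection and GROUP_AT() removed above, supplementary groups now sit in one flat gid[] array. A hedged sketch of what a walk over that array looks like (my_groups_contain is an invented helper; groups_search() remains the in-tree way to do this):

#include <linux/cred.h>
#include <linux/uidgid.h>

static bool my_groups_contain(const struct group_info *gi, kgid_t gid)
{
	int i;

	for (i = 0; i < gi->ngroups; i++)
		if (gid_eq(gi->gid[i], gid))	/* was GROUP_AT(gi, i) */
			return true;
	return false;
}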
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index 653589e3e30e..f13e4ff6835a 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -22,7 +22,10 @@ extern const unsigned char _ctype[];
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
-#define isdigit(c) ((__ismask(c)&(_D)) != 0)
+static inline int isdigit(int c)
+{
+ return '0' <= c && c <= '9';
+}
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 9c6dc7704043..add6c4bc568f 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -6,13 +6,19 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>
+struct iomap_ops;
+
/* We use lowest available exceptional entry bit for locking */
#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+ struct iomap_ops *ops);
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
get_block_t, dio_iodone_t, int flags);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
+int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+ struct iomap_ops *ops);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 5ff3e9a4fe5f..5beed7b30561 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -584,9 +584,10 @@ static inline struct dentry *d_real(struct dentry *dentry,
* If dentry is on an union/overlay, then return the underlying, real inode.
* Otherwise return d_inode().
*/
-static inline struct inode *d_real_inode(struct dentry *dentry)
+static inline struct inode *d_real_inode(const struct dentry *dentry)
{
- return d_backing_inode(d_real(dentry, NULL, 0));
+ /* This usage of d_real() results in const dentry */
+ return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
}
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 1438e2322d5c..4d3f0d1aec73 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -45,6 +45,23 @@ extern struct dentry *arch_debugfs_dir;
extern struct srcu_struct debugfs_srcu;
+/**
+ * debugfs_real_fops - getter for the real file operation
+ * @filp: a pointer to a struct file
+ *
+ * Must only be called under the protection established by
+ * debugfs_use_file_start().
+ */
+static inline const struct file_operations *debugfs_real_fops(struct file *filp)
+ __must_hold(&debugfs_srcu)
+{
+ /*
+ * Neither the pointer to the struct file_operations, nor its
+ * contents ever change -- srcu_dereference() is not needed here.
+ */
+ return filp->f_path.dentry->d_fsdata;
+}
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_create_file(const char *name, umode_t mode,
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
index 0a83a1e648b0..4db00b02ca3f 100644
--- a/include/linux/devfreq-event.h
+++ b/include/linux/devfreq-event.h
@@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
return -EINVAL;
}
-static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
-{
- return ERR_PTR(-EINVAL);
-}
-
static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
struct device *dev, int index)
{
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 91acfce74a22..ef7962e84444 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -590,6 +590,7 @@ extern struct ratelimit_state dm_ratelimit_state;
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
+#define DM_MAPIO_DELAY_REQUEUE 3
#define dm_sector_div64(x, y)( \
{ \
diff --git a/include/linux/device.h b/include/linux/device.h
index 38f02814d53a..bc41e87a969b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -41,6 +41,7 @@ struct device_node;
struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
+struct iommu_fwspec;
struct bus_attribute {
struct attribute attr;
@@ -765,6 +766,7 @@ struct device_dma_parameters {
* gone away. This should be set by the allocator of the
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
+ * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
*
* @offline_disabled: If set, the device is permanently online.
* @offline: Set after successful invocation of bus type's .offline().
@@ -849,6 +851,7 @@ struct device {
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
+ struct iommu_fwspec *iommu_fwspec;
bool offline_disabled:1;
bool offline:1;
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index fe8cb610deac..c7d844f09c3a 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -56,6 +56,13 @@ extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
extern void debug_dma_free_coherent(struct device *dev, size_t size,
void *virt, dma_addr_t addr);
+extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
+ size_t size, int direction,
+ dma_addr_t dma_addr);
+
+extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
+ size_t size, int direction);
+
extern void debug_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
int direction);
@@ -141,6 +148,18 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
{
}
+static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
+ size_t size, int direction,
+ dma_addr_t dma_addr)
+{
+}
+
+static inline void debug_dma_unmap_resource(struct device *dev,
+ dma_addr_t dma_addr, size_t size,
+ int direction)
+{
+}
+
static inline void debug_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction)
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 81c5c8d167ad..32c589062bd9 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -21,6 +21,7 @@
#ifdef CONFIG_IOMMU_DMA
#include <linux/iommu.h>
+#include <linux/msi.h>
int iommu_dma_init(void);
@@ -29,7 +30,8 @@ int iommu_get_dma_cookie(struct iommu_domain *domain);
void iommu_put_dma_cookie(struct iommu_domain *domain);
/* Setup call for arch DMA mapping code */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+ u64 size, struct device *dev);
/* General helpers for DMA-API <-> IOMMU-API interaction */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
@@ -62,9 +64,13 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
int iommu_dma_supported(struct device *dev, u64 mask);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+/* The DMA API isn't _quite_ the whole story, though... */
+void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+
#else
struct iommu_domain;
+struct msi_msg;
static inline int iommu_dma_init(void)
{
@@ -80,6 +86,10 @@ static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}
+static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+{
+}
+
#endif /* CONFIG_IOMMU_DMA */
#endif /* __KERNEL__ */
#endif /* __DMA_IOMMU_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 66533e18276c..08528afdf58b 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -56,6 +56,11 @@
* that gives better TLB efficiency.
*/
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
+/*
+ * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
+ * allocation failure reports (similarly to __GFP_NOWARN).
+ */
+#define DMA_ATTR_NO_WARN (1UL << 8)
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
@@ -95,6 +100,12 @@ struct dma_map_ops {
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
unsigned long attrs);
+ dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
@@ -258,6 +269,41 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
debug_dma_unmap_page(dev, addr, size, dir, false);
}
+static inline dma_addr_t dma_map_resource(struct device *dev,
+ phys_addr_t phys_addr,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ BUG_ON(!valid_dma_direction(dir));
+
+ /* Don't allow RAM to be mapped */
+ BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
+
+ addr = phys_addr;
+ if (ops->map_resource)
+ addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
+
+ debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+
+ return addr;
+}
+
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_resource)
+ ops->unmap_resource(dev, addr, size, dir, attrs);
+ debug_dma_unmap_resource(dev, addr, size, dir);
+}
+
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir)
@@ -718,7 +764,7 @@ static inline int dma_mmap_wc(struct device *dev,
#define dma_mmap_writecombine dma_mmap_wc
#endif
-#ifdef CONFIG_NEED_DMA_MAP_STATE
+#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
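
The map_resource/unmap_resource hooks and the dma_map_resource()/dma_unmap_resource() wrappers above let a driver hand a non-RAM physical region, such as a slave DMA FIFO in device MMIO space, to the DMA API (DMA_ATTR_NO_WARN, also added here, only tunes allocation-failure reporting). A rough usage sketch; dev, fifo_phys and MY_FIFO_LEN are made up:

#include <linux/dma-mapping.h>

#define MY_FIFO_LEN	64	/* hypothetical size of the FIFO window */

static dma_addr_t my_map_fifo(struct device *dev, phys_addr_t fifo_phys)
{
	dma_addr_t dma;

	/* fifo_phys must be MMIO, not RAM: the wrapper BUG()s on pfn_valid() */
	dma = dma_map_resource(dev, fifo_phys, MY_FIFO_LEN,
			       DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, dma))
		dev_err(dev, "mapping the slave FIFO failed\n");

	return dma;
}

static void my_unmap_fifo(struct device *dev, dma_addr_t dma)
{
	dma_unmap_resource(dev, dma, MY_FIFO_LEN, DMA_BIDIRECTIONAL, 0);
}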
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index f2e538aaddad..ccfd0c3777df 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -40,8 +40,13 @@ struct dw_dma_chip {
};
/* Export to the platform drivers */
+#if IS_ENABLED(CONFIG_DW_DMAC_CORE)
int dw_dma_probe(struct dw_dma_chip *chip);
int dw_dma_remove(struct dw_dma_chip *chip);
+#else
+static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
+static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; }
+#endif /* CONFIG_DW_DMAC_CORE */
/* DMA API extensions */
struct dw_desc;
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
index aaff68efba5d..197eec63e501 100644
--- a/include/linux/dma/hsu.h
+++ b/include/linux/dma/hsu.h
@@ -41,8 +41,7 @@ struct hsu_dma_chip {
/* Export to the internal users */
int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
u32 *status);
-irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
- u32 status);
+int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status);
/* Export to the platform drivers */
int hsu_dma_probe(struct hsu_dma_chip *chip);
@@ -53,10 +52,10 @@ static inline int hsu_dma_get_status(struct hsu_dma_chip *chip,
{
return 0;
}
-static inline irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip,
- unsigned short nr, u32 status)
+static inline int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
+ u32 status)
{
- return IRQ_NONE;
+ return 0;
}
static inline int hsu_dma_probe(struct hsu_dma_chip *chip) { return -ENODEV; }
static inline int hsu_dma_remove(struct hsu_dma_chip *chip) { return 0; }
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 30de0197263a..cc535a478bae 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -441,6 +441,21 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
typedef void (*dma_async_tx_callback)(void *dma_async_param);
+enum dmaengine_tx_result {
+ DMA_TRANS_NOERROR = 0, /* SUCCESS */
+ DMA_TRANS_READ_FAILED, /* Source DMA read failed */
+ DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */
+ DMA_TRANS_ABORTED, /* Op never submitted / aborted */
+};
+
+struct dmaengine_result {
+ enum dmaengine_tx_result result;
+ u32 residue;
+};
+
+typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
+ const struct dmaengine_result *result);
+
struct dmaengine_unmap_data {
u8 map_cnt;
u8 to_cnt;
@@ -478,6 +493,7 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
int (*desc_free)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
void *callback_param;
struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
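
The new callback_result hook lets a dmaengine client learn why a descriptor completed, not just that it completed. A hedged sketch of such a callback (my_dma_done and the completion it signals are invented); the client would set desc->callback_result and desc->callback_param before tx_submit() instead of the legacy callback field:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/printk.h>

static void my_dma_done(void *param, const struct dmaengine_result *result)
{
	struct completion *done = param;

	if (result->result != DMA_TRANS_NOERROR)
		pr_warn("DMA transfer failed (%d), residue %u bytes\n",
			result->result, result->residue);

	complete(done);
}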
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 7f5a58225385..2d089487d2da 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -20,6 +20,7 @@
#include <linux/ioport.h>
#include <linux/pfn.h>
#include <linux/pstore.h>
+#include <linux/range.h>
#include <linux/reboot.h>
#include <linux/uuid.h>
#include <linux/screen_info.h>
@@ -37,6 +38,7 @@
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
typedef unsigned long efi_status_t;
@@ -118,6 +120,15 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
+struct efi_boot_memmap {
+ efi_memory_desc_t **map;
+ unsigned long *map_size;
+ unsigned long *desc_size;
+ u32 *desc_ver;
+ unsigned long *key_ptr;
+ unsigned long *buff_size;
+};
+
/*
* EFI capsule flags
*/
@@ -669,6 +680,18 @@ typedef struct {
unsigned long tables;
} efi_system_table_t;
+/*
+ * Architecture independent structure for describing a memory map for the
+ * benefit of efi_memmap_init_early(), saving us the need to pass four
+ * parameters.
+ */
+struct efi_memory_map_data {
+ phys_addr_t phys_map;
+ unsigned long size;
+ unsigned long desc_version;
+ unsigned long desc_size;
+};
+
struct efi_memory_map {
phys_addr_t phys_map;
void *map;
@@ -676,6 +699,12 @@ struct efi_memory_map {
int nr_map;
unsigned long desc_version;
unsigned long desc_size;
+ bool late;
+};
+
+struct efi_mem_range {
+ struct range range;
+ u64 attribute;
};
struct efi_fdt_params {
@@ -900,6 +929,16 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
}
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+
+extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
+extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
+extern void __init efi_memmap_unmap(void);
+extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
+extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
+ struct range *range);
+extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
+ void *buf, struct efi_mem_range *mem);
+
extern int efi_config_init(efi_config_table_type_t *arch_tables);
#ifdef CONFIG_EFI_ESRT
extern void __init efi_esrt_init(void);
@@ -915,6 +954,7 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
+extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_reserve_boot_services(void);
@@ -946,7 +986,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
- ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
+ (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
(md) = (void *)(md) + (m)->desc_size)
/**
@@ -1127,12 +1167,6 @@ struct efivar_operations {
};
struct efivars {
- /*
- * ->lock protects two things:
- * 1) efivarfs_list and efivars_sysfs_list
- * 2) ->ops calls
- */
- spinlock_t lock;
struct kset *kset;
struct kobject *kobject;
const struct efivar_operations *ops;
@@ -1273,8 +1307,8 @@ struct kobject *efivars_kobject(void);
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
void *data, bool duplicates, struct list_head *head);
-void efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-void efivar_entry_remove(struct efivar_entry *entry);
+int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
+int efivar_entry_remove(struct efivar_entry *entry);
int __efivar_entry_delete(struct efivar_entry *entry);
int efivar_entry_delete(struct efivar_entry *entry);
@@ -1291,7 +1325,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
bool block, unsigned long size, void *data);
-void efivar_entry_iter_begin(void);
+int efivar_entry_iter_begin(void);
void efivar_entry_iter_end(void);
int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
@@ -1327,7 +1361,6 @@ extern int efi_capsule_update(efi_capsule_header_t *capsule,
#ifdef CONFIG_EFI_RUNTIME_MAP
int efi_runtime_map_init(struct kobject *);
-void efi_runtime_map_setup(void *, int, u32);
int efi_get_runtime_map_size(void);
int efi_get_runtime_map_desc_size(void);
int efi_runtime_map_copy(void *buf, size_t bufsz);
@@ -1337,9 +1370,6 @@ static inline int efi_runtime_map_init(struct kobject *kobj)
return 0;
}
-static inline void
-efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
-
static inline int efi_get_runtime_map_size(void)
{
return 0;
@@ -1371,11 +1401,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
efi_loaded_image_t *image, int *cmd_line_len);
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- efi_memory_desc_t **map,
- unsigned long *map_size,
- unsigned long *desc_size,
- u32 *desc_ver,
- unsigned long *key_ptr);
+ struct efi_boot_memmap *map);
efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
@@ -1457,4 +1483,14 @@ extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
arch_efi_call_virt_teardown(); \
})
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+ efi_system_table_t *sys_table_arg,
+ struct efi_boot_memmap *map,
+ void *priv);
+
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
+ void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func);
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/export.h b/include/linux/export.h
index c565f87f005e..2a0f61fbc731 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -1,5 +1,6 @@
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
+
/*
* Export symbols from the kernel to modules. Forked from module.h
* to reduce the amount of pointless cruft we feed to gcc when only
@@ -42,27 +43,26 @@ extern struct module __this_module;
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
* generate a checksums for some symbols */
-#define __CRC_SYMBOL(sym, sec) \
- extern __visible void *__crc_##sym __attribute__((weak)); \
- static const unsigned long __kcrctab_##sym \
- __used \
- __attribute__((section("___kcrctab" sec "+" #sym), unused)) \
+#define __CRC_SYMBOL(sym, sec) \
+ extern __visible void *__crc_##sym __attribute__((weak)); \
+ static const unsigned long __kcrctab_##sym \
+ __used \
+ __attribute__((section("___kcrctab" sec "+" #sym), used)) \
= (unsigned long) &__crc_##sym;
#else
#define __CRC_SYMBOL(sym, sec)
#endif
/* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL(sym, sec) \
- extern typeof(sym) sym; \
- __CRC_SYMBOL(sym, sec) \
- static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
- = VMLINUX_SYMBOL_STR(sym); \
- extern const struct kernel_symbol __ksymtab_##sym; \
- __visible const struct kernel_symbol __ksymtab_##sym \
- __used \
- __attribute__((section("___ksymtab" sec "+" #sym), unused)) \
+#define ___EXPORT_SYMBOL(sym, sec) \
+ extern typeof(sym) sym; \
+ __CRC_SYMBOL(sym, sec) \
+ static const char __kstrtab_##sym[] \
+ __attribute__((section("__ksymtab_strings"), aligned(1))) \
+ = VMLINUX_SYMBOL_STR(sym); \
+ static const struct kernel_symbol __ksymtab_##sym \
+ __used \
+ __attribute__((section("___ksymtab" sec "+" #sym), used)) \
= { (unsigned long)&sym, __kstrtab_##sym }
#if defined(__KSYM_DEPS__)
@@ -78,7 +78,6 @@ extern struct module __this_module;
#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
-#include <linux/kconfig.h>
#include <generated/autoksyms.h>
#define __EXPORT_SYMBOL(sym, sec) \
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index b03c0625fa6e..5ab958cdc50b 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -157,12 +157,13 @@ struct fid {
* @fh_to_dentry is given a &struct super_block (@sb) and a file handle
* fragment (@fh, @fh_len). It should return a &struct dentry which refers
* to the same file that the file handle fragment refers to. If it cannot,
- * it should return a %NULL pointer if the file was found but no acceptable
- * &dentries were available, or an %ERR_PTR error code indicating why it
- * couldn't be found (e.g. %ENOENT or %ENOMEM). Any suitable dentry can be
- * returned including, if necessary, a new dentry created with d_alloc_root.
- * The caller can then find any other extant dentries by following the
- * d_alias links.
+ * it should return a %NULL pointer if the file cannot be found, or an
+ * %ERR_PTR error code of %ENOMEM if a memory allocation failure occurred.
+ * Any other error code is treated like %NULL, and will cause an %ESTALE error
+ * for callers of exportfs_decode_fh().
+ * Any suitable dentry can be returned including, if necessary, a new dentry
+ * created with d_alloc_root. The caller can then find any other extant
+ * dentries by following the d_alias links.
*
* fh_to_parent:
* Same as @fh_to_dentry, except that it returns a pointer to the parent
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 61004413dc64..b871c0cb1f02 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -29,6 +29,15 @@
#include <linux/device.h>
/*
+ * Define the type of supported external connectors
+ */
+#define EXTCON_TYPE_USB BIT(0) /* USB connector */
+#define EXTCON_TYPE_CHG BIT(1) /* Charger connector */
+#define EXTCON_TYPE_JACK BIT(2) /* Jack connector */
+#define EXTCON_TYPE_DISP BIT(3) /* Display connector */
+#define EXTCON_TYPE_MISC BIT(4) /* Miscellaneous connector */
+
+/*
* Define the unique id of supported external connectors
*/
#define EXTCON_NONE 0
@@ -44,6 +53,7 @@
#define EXTCON_CHG_USB_ACA 8 /* Accessory Charger Adapter */
#define EXTCON_CHG_USB_FAST 9
#define EXTCON_CHG_USB_SLOW 10
+#define EXTCON_CHG_WPT 11 /* Wireless Power Transfer */
/* Jack external connector */
#define EXTCON_JACK_MICROPHONE 20
@@ -60,6 +70,8 @@
#define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */
#define EXTCON_DISP_DVI 42 /* Digital Visual Interface */
#define EXTCON_DISP_VGA 43 /* Video Graphics Array */
+#define EXTCON_DISP_DP 44 /* Display Port */
+#define EXTCON_DISP_HMD 45 /* Head-Mounted Display */
/* Miscellaneous external connector */
#define EXTCON_DOCK 60
@@ -68,6 +80,85 @@
#define EXTCON_NUM 63
+/*
+ * Define the property of supported external connectors.
+ *
+ * When adding a new extcon property, it *must* have
+ * the type/value/default information. Also, you *have to*
+ * modify the EXTCON_PROP_[type]_MIN/MAX definitions,
+ * which define the range of the supported properties
+ * for each extcon type.
+ *
+ * The naming style of property
+ * : EXTCON_PROP_[type]_[property name]
+ *
+ * EXTCON_PROP_USB_[property name] : USB property
+ * EXTCON_PROP_CHG_[property name] : Charger property
+ * EXTCON_PROP_JACK_[property name] : Jack property
+ * EXTCON_PROP_DISP_[property name] : Display property
+ */
+
+/*
+ * Properties of EXTCON_TYPE_USB.
+ *
+ * - EXTCON_PROP_USB_VBUS
+ * @type: integer (intval)
+ * @value: 0 (low) or 1 (high)
+ * @default: 0 (low)
+ * - EXTCON_PROP_USB_TYPEC_POLARITY
+ * @type: integer (intval)
+ * @value: 0 (normal) or 1 (flip)
+ * @default: 0 (normal)
+ * - EXTCON_PROP_USB_SS (SuperSpeed)
+ * @type: integer (intval)
+ * @value: 0 (USB/USB2) or 1 (USB3)
+ * @default: 0 (USB/USB2)
+ *
+ */
+#define EXTCON_PROP_USB_VBUS 0
+#define EXTCON_PROP_USB_TYPEC_POLARITY 1
+#define EXTCON_PROP_USB_SS 2
+
+#define EXTCON_PROP_USB_MIN 0
+#define EXTCON_PROP_USB_MAX 2
+#define EXTCON_PROP_USB_CNT (EXTCON_PROP_USB_MAX - EXTCON_PROP_USB_MIN + 1)
+
+/* Properties of EXTCON_TYPE_CHG. */
+#define EXTCON_PROP_CHG_MIN 50
+#define EXTCON_PROP_CHG_MAX 50
+#define EXTCON_PROP_CHG_CNT (EXTCON_PROP_CHG_MAX - EXTCON_PROP_CHG_MIN + 1)
+
+/* Properties of EXTCON_TYPE_JACK. */
+#define EXTCON_PROP_JACK_MIN 100
+#define EXTCON_PROP_JACK_MAX 100
+#define EXTCON_PROP_JACK_CNT (EXTCON_PROP_JACK_MAX - EXTCON_PROP_JACK_MIN + 1)
+
+/*
+ * Properties of EXTCON_TYPE_DISP.
+ *
+ * - EXTCON_PROP_DISP_HPD (Hot Plug Detect)
+ * @type: integer (intval)
+ * @value: 0 (no hpd) or 1 (hpd)
+ * @default: 0 (no hpd)
+ *
+ */
+#define EXTCON_PROP_DISP_HPD 150
+
+/* Properties of EXTCON_TYPE_DISP. */
+#define EXTCON_PROP_DISP_MIN 150
+#define EXTCON_PROP_DISP_MAX 151
+#define EXTCON_PROP_DISP_CNT (EXTCON_PROP_DISP_MAX - EXTCON_PROP_DISP_MIN + 1)
+
+/*
+ * Define the type of property's value.
+ *
+ * Define the property's value as a union type, because each property
+ * may need a different data type to store it.
+ */
+union extcon_property_value {
+ int intval; /* type : integer (intval) */
+};
+
struct extcon_cable;
/**
@@ -150,26 +241,42 @@ extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
/*
- * get/set/update_state access the 32b encoded state value, which represents
- * states of all possible cables of the multistate port. For example, if one
- * calls extcon_set_state(edev, 0x7), it may mean that all the three cables
- * are attached to the port.
+ * get/set_state access each bit of the 32b encoded state value.
+ * They are used to access the status of each cable based on the cable id.
*/
-static inline u32 extcon_get_state(struct extcon_dev *edev)
-{
- return edev->state;
-}
+extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
+extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool cable_state);
+extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+ bool cable_state);
+/*
+ * Synchronize the state and property data for a specific external connector.
+ */
+extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
-extern int extcon_set_state(struct extcon_dev *edev, u32 state);
-extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
+/*
+ * get/set_property access the property value of each external connector.
+ * They are used to access the property of each cable based on the property id.
+ */
+extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value *prop_val);
+extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val);
+extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val);
/*
- * get/set_cable_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable id.
+ * get/set_property_capability query or declare whether a given property is
+ * supported by an external connector, based on the connector id and the
+ * property id.
*/
-extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
-extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
- bool cable_state);
+extern int extcon_get_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop);
+extern int extcon_set_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop);
/*
* Following APIs are to monitor every action of a notifier.
@@ -232,30 +339,57 @@ static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }
-static inline u32 extcon_get_state(struct extcon_dev *edev)
+
+static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
+{
+ return 0;
+}
+
+static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool cable_state)
+{
+ return 0;
+}
+
+static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+ bool cable_state)
{
return 0;
}
-static inline int extcon_set_state(struct extcon_dev *edev, u32 state)
+static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
{
return 0;
}
-static inline int extcon_update_state(struct extcon_dev *edev, u32 mask,
- u32 state)
+static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value *prop_val)
+{
+ return 0;
+}
+static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val)
{
return 0;
}
-static inline int extcon_get_cable_state_(struct extcon_dev *edev,
- unsigned int id)
+static inline int extcon_set_property_sync(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop,
+ union extcon_property_value prop_val)
{
return 0;
}
-static inline int extcon_set_cable_state_(struct extcon_dev *edev,
- unsigned int id, bool cable_state)
+static inline int extcon_get_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop)
+{
+ return 0;
+}
+
+static inline int extcon_set_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop)
{
return 0;
}
@@ -320,4 +454,15 @@ static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
{
return -EINVAL;
}
+
+static inline int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id)
+{
+ return extcon_get_state(edev, id);
+}
+
+static inline int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
+ bool cable_state)
+{
+ return extcon_set_state_sync(edev, id, cable_state);
+}
#endif /* __LINUX_EXTCON_H__ */
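
Pulling the reworked extcon API together, a provider might report a USB Type-C attach roughly as below. This is an illustration only: it assumes edev was allocated and registered elsewhere and that both properties were declared supported with extcon_set_property_capability() at probe time; the helper name and values are invented.

#include <linux/extcon.h>

static void my_report_usb_attach(struct extcon_dev *edev, bool flipped)
{
	union extcon_property_value prop;

	/* raw state first, then its properties, then one notification */
	extcon_set_state(edev, EXTCON_USB, true);

	prop.intval = flipped ? 1 : 0;
	extcon_set_property(edev, EXTCON_USB,
			    EXTCON_PROP_USB_TYPEC_POLARITY, prop);

	prop.intval = 1;			/* SuperSpeed-capable link */
	extcon_set_property(edev, EXTCON_USB, EXTCON_PROP_USB_SS, prop);

	extcon_sync(edev, EXTCON_USB);
}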
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index ac85f2061351..a0e03b13b449 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -20,8 +20,8 @@
/**
* struct adc_jack_cond - condition to use an extcon state
- * @state: the corresponding extcon state (if 0, this struct
* denotes the last adc_jack_cond element among the array)
+ * @id: the unique id of each external connector
* @min_adc: min adc value for this condition
* @max_adc: max adc value for this condition
*
@@ -33,7 +33,7 @@
* because when no adc_jack_cond is met, state = 0 is automatically chosen.
*/
struct adc_jack_cond {
- u32 state; /* extcon state value. 0 if invalid */
+ unsigned int id;
u32 min_adc;
u32 max_adc;
};
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 4c02c6521fef..422630b8e588 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -100,6 +100,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_CRC_RECOVERY_FLAG 0x00000040
#define CP_FASTBOOT_FLAG 0x00000020
#define CP_FSCK_FLAG 0x00000010
#define CP_ERROR_FLAG 0x00000008
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 996111000a8c..7494dc67c66f 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -25,6 +25,7 @@ struct space_resv {
FALLOC_FL_PUNCH_HOLE | \
FALLOC_FL_COLLAPSE_RANGE | \
FALLOC_FL_ZERO_RANGE | \
- FALLOC_FL_INSERT_RANGE)
+ FALLOC_FL_INSERT_RANGE | \
+ FALLOC_FL_UNSHARE_RANGE)
#endif /* _FALLOC_H_ */
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 5295535b60c6..6e84b2cae6ad 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -30,12 +30,12 @@ struct fdtable {
struct rcu_head rcu;
};
-static inline bool close_on_exec(int fd, const struct fdtable *fdt)
+static inline bool close_on_exec(unsigned int fd, const struct fdtable *fdt)
{
return test_bit(fd, fdt->close_on_exec);
}
-static inline bool fd_is_open(int fd, const struct fdtable *fdt)
+static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
{
return test_bit(fd, fdt->open_fds);
}
@@ -57,7 +57,7 @@ struct files_struct {
* written part on a separate cache line in SMP
*/
spinlock_t file_lock ____cacheline_aligned_in_smp;
- int next_fd;
+ unsigned int next_fd;
unsigned long close_on_exec_init[1];
unsigned long open_fds_init[1];
unsigned long full_fds_bits_init[1];
@@ -105,7 +105,7 @@ struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
void reset_files_struct(struct files_struct *);
int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *);
+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
index 86baaa45567c..a44794e508df 100644
--- a/include/linux/fence-array.h
+++ b/include/linux/fence-array.h
@@ -52,6 +52,16 @@ struct fence_array {
extern const struct fence_ops fence_array_ops;
/**
+ * fence_is_array - check if a fence is from the array subclass
+ *
+ * Return true if it is a fence_array and false otherwise.
+ */
+static inline bool fence_is_array(struct fence *fence)
+{
+ return fence->ops == &fence_array_ops;
+}
+
+/**
* to_fence_array - cast a fence to a fence_array
* @fence: fence to cast to a fence_array
*
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 2ac6fa5f4712..0d763053f97a 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -60,7 +60,7 @@ struct fence_cb;
* implementer of the fence for its own purposes. Can be used in different
* ways by different fence implementers, so do not rely on this.
*
- * *) Since atomic bitops are used, this is not guaranteed to be the case.
+ * Since atomic bitops are used, this is not guaranteed to be the case.
* Particularly, if the bit was set, but fence_signal was called right
* before this bit was set, it would have been able to set the
* FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a16439b99fd9..1f09c521adfe 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -314,6 +314,70 @@ struct bpf_prog_aux;
bpf_size; \
})
+#define BPF_SIZEOF(type) \
+ ({ \
+ const int __size = bytes_to_bpf_size(sizeof(type)); \
+ BUILD_BUG_ON(__size < 0); \
+ __size; \
+ })
+
+#define BPF_FIELD_SIZEOF(type, field) \
+ ({ \
+ const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+ BUILD_BUG_ON(__size < 0); \
+ __size; \
+ })
+
+#define __BPF_MAP_0(m, v, ...) v
+#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
+#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
+#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
+#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
+#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
+
+#define __BPF_REG_0(...) __BPF_PAD(5)
+#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
+#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
+#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
+#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
+#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
+
+#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
+#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
+
+#define __BPF_CAST(t, a) \
+ (__force t) \
+ (__force \
+ typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
+ (unsigned long)0, (t)0))) a
+#define __BPF_V void
+#define __BPF_N
+
+#define __BPF_DECL_ARGS(t, a) t a
+#define __BPF_DECL_REGS(t, a) u64 a
+
+#define __BPF_PAD(n) \
+ __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
+ u64, __ur_3, u64, __ur_4, u64, __ur_5)
+
+#define BPF_CALL_x(x, name, ...) \
+ static __always_inline \
+ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ { \
+ return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ } \
+ static __always_inline \
+ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
+
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
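
The BPF_CALL_x() family added above wraps a helper so the eBPF side sees the usual five u64 register arguments while the body is written with natural C types. A tiny hedged sketch (the helper name and its registration in a bpf_func_proto are invented):

#include <linux/filter.h>

/* expands to a u64(u64, u64, u64, u64, u64) wrapper around a typed body */
BPF_CALL_2(bpf_my_add, u32, a, u32, b)
{
	return a + b;
}

/*
 * A verifier-visible description would then point at it, e.g.:
 *	.func = bpf_my_add, .ret_type = RET_INTEGER, ...
 */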
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
new file mode 100644
index 000000000000..8e953c6f394a
--- /dev/null
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MESON_SM_FW_H_
+#define _MESON_SM_FW_H_
+
+enum {
+ SM_EFUSE_READ,
+ SM_EFUSE_WRITE,
+ SM_EFUSE_USER_MAX,
+};
+
+struct meson_sm_firmware;
+
+int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1,
+ u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0, u32 arg1,
+ u32 arg2, u32 arg3, u32 arg4);
+
+#endif /* _MESON_SM_FW_H_ */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 901e25d495cc..16d2b6e874d6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -63,7 +63,7 @@ extern void __init files_maxfiles_init(void);
extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
-extern int sysctl_nr_open;
+extern unsigned int sysctl_nr_open;
extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
extern int sysctl_protected_symlinks;
@@ -224,6 +224,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_KILL_PRIV (1 << 14)
#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET (1 << 16)
+#define ATTR_TOUCH (1 << 17)
/*
* Whiteout is represented by a char device. The following constants define the
@@ -439,8 +440,9 @@ struct address_space {
unsigned long nrexceptional;
pgoff_t writeback_index;/* writeback starts here */
const struct address_space_operations *a_ops; /* methods */
- unsigned long flags; /* error bits/gfp mask */
+ unsigned long flags; /* error bits */
spinlock_t private_lock; /* for use by the address_space */
+ gfp_t gfp_mask; /* implicit gfp mask for allocations */
struct list_head private_list; /* ditto */
void *private_data; /* ditto */
} __attribute__((aligned(sizeof(long))));
@@ -591,6 +593,7 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_FASTPERM 0x0001
#define IOP_LOOKUP 0x0002
#define IOP_NOFOLLOW 0x0004
+#define IOP_XATTR 0x0008
/*
* Keep mostly read-only and often accessed (especially for
@@ -1064,6 +1067,18 @@ struct file_lock_context {
extern void send_sigio(struct fown_struct *fown, int fd, int band);
+/*
+ * Return the inode to use for locking
+ *
+ * For overlayfs this should be the overlay inode, not the real inode returned
+ * by file_inode(). For any other fs file_inode(filp) and locks_inode(filp) are
+ * equal.
+ */
+static inline struct inode *locks_inode(const struct file *f)
+{
+ return f->f_path.dentry->d_inode;
+}
+
#ifdef CONFIG_FILE_LOCKING
extern int fcntl_getlk(struct file *, unsigned int, struct flock __user *);
extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
@@ -1251,7 +1266,7 @@ static inline struct dentry *file_dentry(const struct file *file)
static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
{
- return locks_lock_inode_wait(file_inode(filp), fl);
+ return locks_lock_inode_wait(locks_inode(filp), fl);
}
struct fasync_struct {
@@ -1459,6 +1474,7 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
}
extern struct timespec current_fs_time(struct super_block *sb);
+extern struct timespec current_time(struct inode *inode);
/*
* Snapshotting support.
@@ -1733,17 +1749,10 @@ struct inode_operations {
int (*rmdir) (struct inode *,struct dentry *);
int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
int (*rename) (struct inode *, struct dentry *,
- struct inode *, struct dentry *);
- int (*rename2) (struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
- int (*setxattr) (struct dentry *, struct inode *,
- const char *, const void *, size_t, int);
- ssize_t (*getxattr) (struct dentry *, struct inode *,
- const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
- int (*removexattr) (struct dentry *, const char *);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
int (*update_time)(struct inode *, struct timespec *, int);
@@ -2006,7 +2015,6 @@ enum file_time_flags {
S_VERSION = 8,
};
-extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
static inline void file_accessed(struct file *file)
{
@@ -2075,10 +2083,19 @@ struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
-extern struct dentry *mount_pseudo(struct file_system_type *, char *,
- const struct super_operations *ops,
- const struct dentry_operations *dops,
- unsigned long);
+extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *,
+ const struct super_operations *ops,
+ const struct xattr_handler **xattr,
+ const struct dentry_operations *dops,
+ unsigned long);
+
+static inline struct dentry *
+mount_pseudo(struct file_system_type *fs_type, char *name,
+ const struct super_operations *ops,
+ const struct dentry_operations *dops, unsigned long magic)
+{
+ return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic);
+}
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
@@ -2155,7 +2172,7 @@ static inline int mandatory_lock(struct inode *ino)
static inline int locks_verify_locked(struct file *file)
{
- if (mandatory_lock(file_inode(file)))
+ if (mandatory_lock(locks_inode(file)))
return locks_mandatory_locked(file);
return 0;
}
@@ -2794,8 +2811,6 @@ extern void block_sync_page(struct page *page);
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
-extern ssize_t default_file_splice_read(struct file *, loff_t *,
- struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
@@ -2919,6 +2934,7 @@ extern int vfs_stat(const char __user *, struct kstat *);
extern int vfs_lstat(const char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
+extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int __generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
@@ -2950,7 +2966,8 @@ extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+extern int simple_rename(struct inode *, struct dentry *,
+ struct inode *, struct dentry *, unsigned int);
extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern int simple_empty(struct dentry *);
extern int simple_readpage(struct file *file, struct page *page);
@@ -2995,7 +3012,7 @@ extern int buffer_migrate_page(struct address_space *,
#define buffer_migrate_page NULL
#endif
-extern int inode_change_ok(const struct inode *, struct iattr *);
+extern int setattr_prepare(struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
extern void setattr_copy(struct inode *inode, const struct iattr *attr);
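
As the hunk above shows, inode_change_ok() becomes setattr_prepare() and now takes the dentry, matching the ->setattr() prototype. A minimal sketch of a filesystem ->setattr method using the renamed helper (my_setattr is invented and truncation handling is elided, along the lines of simple_setattr()):

#include <linux/fs.h>

static int my_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, attr);	/* was inode_change_ok(inode, attr) */
	if (error)
		return error;

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}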
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index cfa6cde25f8e..ff8b11b26f31 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -111,23 +111,6 @@ struct fscrypt_completion_result {
struct fscrypt_completion_result ecr = { \
COMPLETION_INITIALIZER((ecr).completion), 0 }
-static inline int fscrypt_key_size(int mode)
-{
- switch (mode) {
- case FS_ENCRYPTION_MODE_AES_256_XTS:
- return FS_AES_256_XTS_KEY_SIZE;
- case FS_ENCRYPTION_MODE_AES_256_GCM:
- return FS_AES_256_GCM_KEY_SIZE;
- case FS_ENCRYPTION_MODE_AES_256_CBC:
- return FS_AES_256_CBC_KEY_SIZE;
- case FS_ENCRYPTION_MODE_AES_256_CTS:
- return FS_AES_256_CTS_KEY_SIZE;
- default:
- BUG();
- }
- return 0;
-}
-
#define FS_FNAME_NUM_SCATTER_ENTRIES 4
#define FS_CRYPTO_BLOCK_SIZE 16
#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
@@ -202,13 +185,6 @@ static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
}
-static inline u32 fscrypt_validate_encryption_key_size(u32 mode, u32 size)
-{
- if (size == fscrypt_key_size(mode))
- return size;
- return 0;
-}
-
static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
{
if (str->len == 1 && str->name[0] == '.')
@@ -274,8 +250,7 @@ extern void fscrypt_restore_control_page(struct page *);
extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
unsigned int);
/* policy.c */
-extern int fscrypt_process_policy(struct inode *,
- const struct fscrypt_policy *);
+extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
@@ -345,7 +320,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
}
/* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct inode *i,
+static inline int fscrypt_notsupp_process_policy(struct file *f,
const struct fscrypt_policy *p)
{
return -EOPNOTSUPP;
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index eed9e853a06f..b8bcc058e031 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -29,7 +29,11 @@ static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u3
static inline int fsnotify_perm(struct file *file, int mask)
{
struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
+ /*
+ * Do not use file_inode() here or anywhere in this file to get the
+ * inode. That would break *notify on overlayfs.
+ */
+ struct inode *inode = path->dentry->d_inode;
__u32 fsnotify_mask = 0;
int ret;
@@ -173,7 +177,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
static inline void fsnotify_access(struct file *file)
{
struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
@@ -191,7 +195,7 @@ static inline void fsnotify_access(struct file *file)
static inline void fsnotify_modify(struct file *file)
{
struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
@@ -209,7 +213,7 @@ static inline void fsnotify_modify(struct file *file)
static inline void fsnotify_open(struct file *file)
{
struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
@@ -225,7 +229,7 @@ static inline void fsnotify_open(struct file *file)
static inline void fsnotify_close(struct file *file)
{
struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
+ struct inode *inode = path->dentry->d_inode;
fmode_t mode = file->f_mode;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 58205f33af02..79467b239fcf 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -135,7 +135,7 @@ struct fsnotify_group {
const struct fsnotify_ops *ops; /* how this group handles things */
/* needed to send notification to userspace */
- struct mutex notification_mutex; /* protect the notification_list */
+ spinlock_t notification_lock; /* protect the notification_list */
struct list_head notification_list; /* list of event_holder this group needs to send to userspace */
wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */
unsigned int q_len; /* events on the queue */
@@ -148,6 +148,7 @@ struct fsnotify_group {
#define FS_PRIO_1 1 /* fanotify content based access control */
#define FS_PRIO_2 2 /* fanotify pre-content access */
unsigned int priority;
+ bool shutdown; /* group is being shut down, don't queue more events */
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
@@ -176,10 +177,8 @@ struct fsnotify_group {
struct fanotify_group_private_data {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
/* allows a group to block waiting for a userspace response */
- spinlock_t access_lock;
struct list_head access_list;
wait_queue_head_t access_waitq;
- atomic_t bypass_perm;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
unsigned int max_marks;
@@ -292,6 +291,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
+/* group destruction begins, stop queuing new events */
+extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
/* destroy group */
extern void fsnotify_destroy_group(struct fsnotify_group *group);
/* fasync handler function */
@@ -304,8 +305,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
-/* Remove passed event from groups notification queue */
-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 7d565afe35d2..b3d34d3e0e7e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -794,8 +794,15 @@ struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
unsigned long long calltime;
+#ifdef CONFIG_FUNCTION_PROFILER
unsigned long long subtime;
+#endif
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
unsigned long fp;
+#endif
+#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+ unsigned long *retp;
+#endif
};
/*
@@ -807,7 +814,10 @@ extern void return_to_handler(void);
extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
- unsigned long frame_pointer);
+ unsigned long frame_pointer, unsigned long *retp);
+
+unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
+ unsigned long ret, unsigned long *retp);
/*
* Sometimes we don't want to trace a function with the function
@@ -870,6 +880,13 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
return -1;
}
+static inline unsigned long
+ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
+ unsigned long *retp)
+{
+ return ret;
+}
+
static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
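The retp argument and ftrace_graph_ret_addr() added above are aimed at stack unwinders: while the function graph tracer is active, saved return addresses point at return_to_handler, and the helper maps them back to the real caller. A minimal sketch of that use, assuming the architecture selects HAVE_FUNCTION_GRAPH_RET_ADDR_PTR; foo_walk_stack() and consume_entry() are hypothetical and not part of this patch:

static void foo_walk_stack(struct task_struct *task, unsigned long *stack,
			   unsigned int nr_words,
			   void (*consume_entry)(unsigned long addr))
{
	int graph_idx = 0;
	unsigned int i;

	for (i = 0; i < nr_words; i++) {
		unsigned long addr = stack[i];

		/*
		 * Map a return_to_handler address back to the real return
		 * address, using the stack slot address as the retp cookie;
		 * other addresses are passed through unchanged (see the
		 * !CONFIG_FUNCTION_GRAPH_TRACER stub above).
		 */
		addr = ftrace_graph_ret_addr(task, &graph_idx, addr,
					     &stack[i]);
		consume_entry(addr);
	}
}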
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index dca7bf8cffe2..4ec2c9b205f2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -3,11 +3,34 @@
#ifdef CONFIG_FTRACE_NMI_ENTER
-extern void ftrace_nmi_enter(void);
-extern void ftrace_nmi_exit(void);
+extern void arch_ftrace_nmi_enter(void);
+extern void arch_ftrace_nmi_exit(void);
#else
-static inline void ftrace_nmi_enter(void) { }
-static inline void ftrace_nmi_exit(void) { }
+static inline void arch_ftrace_nmi_enter(void) { }
+static inline void arch_ftrace_nmi_exit(void) { }
#endif
+#ifdef CONFIG_HWLAT_TRACER
+extern bool trace_hwlat_callback_enabled;
+extern void trace_hwlat_callback(bool enter);
+#endif
+
+static inline void ftrace_nmi_enter(void)
+{
+#ifdef CONFIG_HWLAT_TRACER
+ if (trace_hwlat_callback_enabled)
+ trace_hwlat_callback(true);
+#endif
+ arch_ftrace_nmi_enter();
+}
+
+static inline void ftrace_nmi_exit(void)
+{
+ arch_ftrace_nmi_exit();
+#ifdef CONFIG_HWLAT_TRACER
+ if (trace_hwlat_callback_enabled)
+ trace_hwlat_callback(false);
+#endif
+}
+
#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 1dbf52f9c24b..e0341af6950e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -437,7 +437,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
/* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk);
+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
extern void rand_initialize_disk(struct gendisk *disk);
static inline sector_t get_start_sect(struct block_device *bdev)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 50882e09289b..24e2cc56beb1 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -3,19 +3,18 @@
#include <linux/device.h>
#include <linux/types.h>
-#include <linux/module.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
-#include <linux/kconfig.h>
struct gpio_desc;
struct of_phandle_args;
struct device_node;
struct seq_file;
struct gpio_device;
+struct module;
#ifdef CONFIG_GPIOLIB
@@ -112,6 +111,10 @@ enum single_ended_mode {
* initialization, provided by GPIO driver
* @irq_parent: GPIO IRQ chip parent/bank linux irq number,
* provided by GPIO driver
+ * @irq_need_valid_mask: If set, the core allocates @irq_valid_mask with all
+ * bits set to one
+ * @irq_valid_mask: If not %NULL, holds a bitmask of GPIOs which are valid to
+ * be included in the IRQ domain of the chip
* @lock_key: per GPIO IRQ chip lockdep class
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
@@ -190,6 +193,8 @@ struct gpio_chip {
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
int irq_parent;
+ bool irq_need_valid_mask;
+ unsigned long *irq_valid_mask;
struct lock_class_key *lock_key;
#endif
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index c02b5ce6c5cd..dd85f3503410 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -236,6 +236,7 @@ struct hid_sensor_common {
struct hid_sensor_hub_attribute_info report_state;
struct hid_sensor_hub_attribute_info power_state;
struct hid_sensor_hub_attribute_info sensitivity;
+ struct work_struct work;
};
/* Convert from hid unit expo to regular exponent */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 75b66eccc692..b2ec82712baa 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -837,7 +837,7 @@ __u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
*/
static inline void hid_device_io_start(struct hid_device *hid) {
if (hid->io_started) {
- dev_warn(&hid->dev, "io already started");
+ dev_warn(&hid->dev, "io already started\n");
return;
}
hid->io_started = true;
@@ -857,7 +857,7 @@ static inline void hid_device_io_start(struct hid_device *hid) {
*/
static inline void hid_device_io_stop(struct hid_device *hid) {
if (!hid->io_started) {
- dev_warn(&hid->dev, "io already stopped");
+ dev_warn(&hid->dev, "io already stopped\n");
return;
}
hid->io_started = false;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 6f14de45b5ce..9b9f65d99873 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -87,6 +87,10 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
extern unsigned long transparent_hugepage_flags;
+extern unsigned long thp_get_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
@@ -152,8 +156,8 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
return is_huge_zero_page(pmd_page(pmd));
}
-struct page *get_huge_zero_page(void);
-void put_huge_zero_page(void);
+struct page *mm_get_huge_zero_page(struct mm_struct *mm);
+void mm_put_huge_zero_page(struct mm_struct *mm);
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
@@ -169,6 +173,9 @@ void put_huge_zero_page(void);
static inline void prep_transhuge_page(struct page *page) {}
#define transparent_hugepage_flags 0UL
+
+#define thp_get_unmapped_area NULL
+
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
@@ -213,9 +220,9 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
-static inline void put_huge_zero_page(void)
+static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
- BUILD_BUG();
+ return;
}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c26d4638f665..48c76d612d40 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -90,7 +90,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
-void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
+void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
struct vm_area_struct *vma,
@@ -450,8 +450,8 @@ static inline pgoff_t basepage_index(struct page *page)
return __basepage_index(page);
}
-extern void dissolve_free_huge_pages(unsigned long start_pfn,
- unsigned long end_pfn);
+extern int dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
@@ -518,7 +518,7 @@ static inline pgoff_t basepage_index(struct page *page)
{
return page->index;
}
-#define dissolve_free_huge_pages(s, e) do {} while (0)
+#define dissolve_free_huge_pages(s, e) 0
#define hugepage_migration_supported(h) false
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 4f7d8f4b1e9a..34a0dc18f327 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -29,7 +29,9 @@
* Returns the number of lower random bytes in "data".
* Must not be NULL. *OBSOLETE*
* @read: New API. drivers can fill up to max bytes of data
- * into the buffer. The buffer is aligned for any type.
+ * into the buffer. The buffer is aligned for any type
+ * and max is guaranteed to be >= that alignment
+ * (either 4 or 8 depending on architecture).
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
* (per mill).
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 09354f6c1d63..9d2f8bde7d12 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -14,9 +14,341 @@
#ifndef _HWMON_H_
#define _HWMON_H_
+#include <linux/bitops.h>
+
struct device;
struct attribute_group;
+enum hwmon_sensor_types {
+ hwmon_chip,
+ hwmon_temp,
+ hwmon_in,
+ hwmon_curr,
+ hwmon_power,
+ hwmon_energy,
+ hwmon_humidity,
+ hwmon_fan,
+ hwmon_pwm,
+};
+
+enum hwmon_chip_attributes {
+ hwmon_chip_temp_reset_history,
+ hwmon_chip_in_reset_history,
+ hwmon_chip_curr_reset_history,
+ hwmon_chip_power_reset_history,
+ hwmon_chip_register_tz,
+ hwmon_chip_update_interval,
+ hwmon_chip_alarms,
+};
+
+#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
+#define HWMON_C_IN_RESET_HISTORY BIT(hwmon_chip_in_reset_history)
+#define HWMON_C_CURR_RESET_HISTORY BIT(hwmon_chip_curr_reset_history)
+#define HWMON_C_POWER_RESET_HISTORY BIT(hwmon_chip_power_reset_history)
+#define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz)
+#define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval)
+#define HWMON_C_ALARMS BIT(hwmon_chip_alarms)
+
+enum hwmon_temp_attributes {
+ hwmon_temp_input = 0,
+ hwmon_temp_type,
+ hwmon_temp_lcrit,
+ hwmon_temp_lcrit_hyst,
+ hwmon_temp_min,
+ hwmon_temp_min_hyst,
+ hwmon_temp_max,
+ hwmon_temp_max_hyst,
+ hwmon_temp_crit,
+ hwmon_temp_crit_hyst,
+ hwmon_temp_emergency,
+ hwmon_temp_emergency_hyst,
+ hwmon_temp_alarm,
+ hwmon_temp_lcrit_alarm,
+ hwmon_temp_min_alarm,
+ hwmon_temp_max_alarm,
+ hwmon_temp_crit_alarm,
+ hwmon_temp_emergency_alarm,
+ hwmon_temp_fault,
+ hwmon_temp_offset,
+ hwmon_temp_label,
+ hwmon_temp_lowest,
+ hwmon_temp_highest,
+ hwmon_temp_reset_history,
+};
+
+#define HWMON_T_INPUT BIT(hwmon_temp_input)
+#define HWMON_T_TYPE BIT(hwmon_temp_type)
+#define HWMON_T_LCRIT BIT(hwmon_temp_lcrit)
+#define HWMON_T_LCRIT_HYST BIT(hwmon_temp_lcrit_hyst)
+#define HWMON_T_MIN BIT(hwmon_temp_min)
+#define HWMON_T_MIN_HYST BIT(hwmon_temp_min_hyst)
+#define HWMON_T_MAX BIT(hwmon_temp_max)
+#define HWMON_T_MAX_HYST BIT(hwmon_temp_max_hyst)
+#define HWMON_T_CRIT BIT(hwmon_temp_crit)
+#define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst)
+#define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency)
+#define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst)
+#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
+#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
+#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
+#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm)
+#define HWMON_T_FAULT BIT(hwmon_temp_fault)
+#define HWMON_T_OFFSET BIT(hwmon_temp_offset)
+#define HWMON_T_LABEL BIT(hwmon_temp_label)
+#define HWMON_T_LOWEST BIT(hwmon_temp_lowest)
+#define HWMON_T_HIGHEST BIT(hwmon_temp_highest)
+#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
+
+enum hwmon_in_attributes {
+ hwmon_in_input,
+ hwmon_in_min,
+ hwmon_in_max,
+ hwmon_in_lcrit,
+ hwmon_in_crit,
+ hwmon_in_average,
+ hwmon_in_lowest,
+ hwmon_in_highest,
+ hwmon_in_reset_history,
+ hwmon_in_label,
+ hwmon_in_alarm,
+ hwmon_in_min_alarm,
+ hwmon_in_max_alarm,
+ hwmon_in_lcrit_alarm,
+ hwmon_in_crit_alarm,
+};
+
+#define HWMON_I_INPUT BIT(hwmon_in_input)
+#define HWMON_I_MIN BIT(hwmon_in_min)
+#define HWMON_I_MAX BIT(hwmon_in_max)
+#define HWMON_I_LCRIT BIT(hwmon_in_lcrit)
+#define HWMON_I_CRIT BIT(hwmon_in_crit)
+#define HWMON_I_AVERAGE BIT(hwmon_in_average)
+#define HWMON_I_LOWEST BIT(hwmon_in_lowest)
+#define HWMON_I_HIGHEST BIT(hwmon_in_highest)
+#define HWMON_I_RESET_HISTORY BIT(hwmon_in_reset_history)
+#define HWMON_I_LABEL BIT(hwmon_in_label)
+#define HWMON_I_ALARM BIT(hwmon_in_alarm)
+#define HWMON_I_MIN_ALARM BIT(hwmon_in_min_alarm)
+#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
+#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
+#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
+
+enum hwmon_curr_attributes {
+ hwmon_curr_input,
+ hwmon_curr_min,
+ hwmon_curr_max,
+ hwmon_curr_lcrit,
+ hwmon_curr_crit,
+ hwmon_curr_average,
+ hwmon_curr_lowest,
+ hwmon_curr_highest,
+ hwmon_curr_reset_history,
+ hwmon_curr_label,
+ hwmon_curr_alarm,
+ hwmon_curr_min_alarm,
+ hwmon_curr_max_alarm,
+ hwmon_curr_lcrit_alarm,
+ hwmon_curr_crit_alarm,
+};
+
+#define HWMON_C_INPUT BIT(hwmon_curr_input)
+#define HWMON_C_MIN BIT(hwmon_curr_min)
+#define HWMON_C_MAX BIT(hwmon_curr_max)
+#define HWMON_C_LCRIT BIT(hwmon_curr_lcrit)
+#define HWMON_C_CRIT BIT(hwmon_curr_crit)
+#define HWMON_C_AVERAGE BIT(hwmon_curr_average)
+#define HWMON_C_LOWEST BIT(hwmon_curr_lowest)
+#define HWMON_C_HIGHEST BIT(hwmon_curr_highest)
+#define HWMON_C_RESET_HISTORY BIT(hwmon_curr_reset_history)
+#define HWMON_C_LABEL BIT(hwmon_curr_label)
+#define HWMON_C_ALARM BIT(hwmon_curr_alarm)
+#define HWMON_C_MIN_ALARM BIT(hwmon_curr_min_alarm)
+#define HWMON_C_MAX_ALARM BIT(hwmon_curr_max_alarm)
+#define HWMON_C_LCRIT_ALARM BIT(hwmon_curr_lcrit_alarm)
+#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
+
+enum hwmon_power_attributes {
+ hwmon_power_average,
+ hwmon_power_average_interval,
+ hwmon_power_average_interval_max,
+ hwmon_power_average_interval_min,
+ hwmon_power_average_highest,
+ hwmon_power_average_lowest,
+ hwmon_power_average_max,
+ hwmon_power_average_min,
+ hwmon_power_input,
+ hwmon_power_input_highest,
+ hwmon_power_input_lowest,
+ hwmon_power_reset_history,
+ hwmon_power_accuracy,
+ hwmon_power_cap,
+ hwmon_power_cap_hyst,
+ hwmon_power_cap_max,
+ hwmon_power_cap_min,
+ hwmon_power_max,
+ hwmon_power_crit,
+ hwmon_power_label,
+ hwmon_power_alarm,
+ hwmon_power_cap_alarm,
+ hwmon_power_max_alarm,
+ hwmon_power_crit_alarm,
+};
+
+#define HWMON_P_AVERAGE BIT(hwmon_power_average)
+#define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval)
+#define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max)
+#define HWMON_P_AVERAGE_INTERVAL_MIN BIT(hwmon_power_average_interval_min)
+#define HWMON_P_AVERAGE_HIGHEST BIT(hwmon_power_average_highest)
+#define HWMON_P_AVERAGE_LOWEST BIT(hwmon_power_average_lowest)
+#define HWMON_P_AVERAGE_MAX BIT(hwmon_power_average_max)
+#define HWMON_P_AVERAGE_MIN BIT(hwmon_power_average_min)
+#define HWMON_P_INPUT BIT(hwmon_power_input)
+#define HWMON_P_INPUT_HIGHEST BIT(hwmon_power_input_highest)
+#define HWMON_P_INPUT_LOWEST BIT(hwmon_power_input_lowest)
+#define HWMON_P_RESET_HISTORY BIT(hwmon_power_reset_history)
+#define HWMON_P_ACCURACY BIT(hwmon_power_accuracy)
+#define HWMON_P_CAP BIT(hwmon_power_cap)
+#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst)
+#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max)
+#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min)
+#define HWMON_P_MAX BIT(hwmon_power_max)
+#define HWMON_P_CRIT BIT(hwmon_power_crit)
+#define HWMON_P_LABEL BIT(hwmon_power_label)
+#define HWMON_P_ALARM BIT(hwmon_power_alarm)
+#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
+#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
+#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
+
+enum hwmon_energy_attributes {
+ hwmon_energy_input,
+ hwmon_energy_label,
+};
+
+#define HWMON_E_INPUT BIT(hwmon_energy_input)
+#define HWMON_E_LABEL BIT(hwmon_energy_label)
+
+enum hwmon_humidity_attributes {
+ hwmon_humidity_input,
+ hwmon_humidity_label,
+ hwmon_humidity_min,
+ hwmon_humidity_min_hyst,
+ hwmon_humidity_max,
+ hwmon_humidity_max_hyst,
+ hwmon_humidity_alarm,
+ hwmon_humidity_fault,
+};
+
+#define HWMON_H_INPUT BIT(hwmon_humidity_input)
+#define HWMON_H_LABEL BIT(hwmon_humidity_label)
+#define HWMON_H_MIN BIT(hwmon_humidity_min)
+#define HWMON_H_MIN_HYST BIT(hwmon_humidity_min_hyst)
+#define HWMON_H_MAX BIT(hwmon_humidity_max)
+#define HWMON_H_MAX_HYST BIT(hwmon_humidity_max_hyst)
+#define HWMON_H_ALARM BIT(hwmon_humidity_alarm)
+#define HWMON_H_FAULT BIT(hwmon_humidity_fault)
+
+enum hwmon_fan_attributes {
+ hwmon_fan_input,
+ hwmon_fan_label,
+ hwmon_fan_min,
+ hwmon_fan_max,
+ hwmon_fan_div,
+ hwmon_fan_pulses,
+ hwmon_fan_target,
+ hwmon_fan_alarm,
+ hwmon_fan_min_alarm,
+ hwmon_fan_max_alarm,
+ hwmon_fan_fault,
+};
+
+#define HWMON_F_INPUT BIT(hwmon_fan_input)
+#define HWMON_F_LABEL BIT(hwmon_fan_label)
+#define HWMON_F_MIN BIT(hwmon_fan_min)
+#define HWMON_F_MAX BIT(hwmon_fan_max)
+#define HWMON_F_DIV BIT(hwmon_fan_div)
+#define HWMON_F_PULSES BIT(hwmon_fan_pulses)
+#define HWMON_F_TARGET BIT(hwmon_fan_target)
+#define HWMON_F_ALARM BIT(hwmon_fan_alarm)
+#define HWMON_F_MIN_ALARM BIT(hwmon_fan_min_alarm)
+#define HWMON_F_MAX_ALARM BIT(hwmon_fan_max_alarm)
+#define HWMON_F_FAULT BIT(hwmon_fan_fault)
+
+enum hwmon_pwm_attributes {
+ hwmon_pwm_input,
+ hwmon_pwm_enable,
+ hwmon_pwm_mode,
+ hwmon_pwm_freq,
+};
+
+#define HWMON_PWM_INPUT BIT(hwmon_pwm_input)
+#define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable)
+#define HWMON_PWM_MODE BIT(hwmon_pwm_mode)
+#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq)
+
+/**
+ * struct hwmon_ops - hwmon device operations
+ * @is_visible: Callback to return attribute visibility. Mandatory.
+ * Parameters are:
+ * @const void *drvdata:
+ * Pointer to driver-private data structure passed
+ * as argument to hwmon_device_register_with_info().
+ * @type: Sensor type
+ * @attr: Sensor attribute
+ * @channel:
+ * Channel number
+ * The function returns the file permissions.
+ * If the return value is 0, no attribute will be created.
+ * @read: Read callback. Optional. If not provided, attributes
+ * will not be readable.
+ * Parameters are:
+ * @dev: Pointer to hardware monitoring device
+ * @type: Sensor type
+ * @attr: Sensor attribute
+ * @channel:
+ * Channel number
+ * @val: Pointer to returned value
+ * The function returns 0 on success or a negative error number.
+ * @write: Write callback. Optional. If not provided, attributes
+ * will not be writable.
+ * Parameters are:
+ * @dev: Pointer to hardware monitoring device
+ * @type: Sensor type
+ * @attr: Sensor attribute
+ * @channel:
+ * Channel number
+ * @val: Value to write
+ * The function returns 0 on success or a negative error number.
+ */
+struct hwmon_ops {
+ umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel);
+ int (*read)(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val);
+ int (*write)(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val);
+};
+
+/**
+ * Channel information
+ * @type: Channel type.
+ * @config: Pointer to NULL-terminated list of channel parameters.
+ * Use for per-channel attributes.
+ */
+struct hwmon_channel_info {
+ enum hwmon_sensor_types type;
+ const u32 *config;
+};
+
+/**
+ * Chip configuration
+ * @ops: Pointer to hwmon operations.
+ * @info: Null-terminated list of channel information.
+ */
+struct hwmon_chip_info {
+ const struct hwmon_ops *ops;
+ const struct hwmon_channel_info **info;
+};
+
struct device *hwmon_device_register(struct device *dev);
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
@@ -26,6 +358,16 @@ struct device *
devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
const struct attribute_group **groups);
+struct device *
+hwmon_device_register_with_info(struct device *dev,
+ const char *name, void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **groups);
+struct device *
+devm_hwmon_device_register_with_info(struct device *dev,
+ const char *name, void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **groups);
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
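As a rough illustration of how a driver consumes the new info-based registration interface declared above, here is a single-channel temperature sensor sketch. It is not part of this patch; all foo_* names and the fixed 42000 millidegree reading are made up:

static umode_t foo_is_visible(const void *drvdata, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	if (type == hwmon_temp && attr == hwmon_temp_input)
		return 0444;
	return 0;
}

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val)
{
	if (type != hwmon_temp || attr != hwmon_temp_input)
		return -EOPNOTSUPP;
	*val = 42000;	/* temp1_input, in millidegrees Celsius */
	return 0;
}

static const u32 foo_temp_config[] = {
	HWMON_T_INPUT,
	0	/* terminator of the per-channel config list */
};

static const struct hwmon_channel_info foo_temp = {
	.type = hwmon_temp,
	.config = foo_temp_config,
};

static const struct hwmon_channel_info *foo_info[] = {
	&foo_temp,
	NULL
};

static const struct hwmon_ops foo_hwmon_ops = {
	.is_visible = foo_is_visible,
	.read = foo_read,
	/* no .write: attributes stay read-only */
};

static const struct hwmon_chip_info foo_chip_info = {
	.ops = &foo_hwmon_ops,
	.info = foo_info,
};

In probe(), registration then reduces to a single call:

	hwmon_dev = devm_hwmon_device_register_with_info(dev, "foo", drvdata,
							  &foo_chip_info, NULL);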
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b10954a66939..6824556d37ed 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -674,6 +674,11 @@ enum hv_signal_policy {
HV_SIGNAL_POLICY_EXPLICIT,
};
+enum hv_numa_policy {
+ HV_BALANCED = 0,
+ HV_LOCALIZED,
+};
+
enum vmbus_device_type {
HV_IDE = 0,
HV_SCSI,
@@ -701,9 +706,6 @@ struct vmbus_device {
};
struct vmbus_channel {
- /* Unique channel id */
- int id;
-
struct list_head listentry;
struct hv_device *device_obj;
@@ -850,6 +852,43 @@ struct vmbus_channel {
* ring lock to preserve the current behavior.
*/
bool acquire_ring_lock;
+ /*
+ * For performance critical channels (storage, networking
+ * etc,), Hyper-V has a mechanism to enhance the throughput
+ * at the expense of latency:
+ * When the host is to be signaled, we just set a bit in a shared page
+ * and this bit will be inspected by the hypervisor within a certain
+ * window and if the bit is set, the host will be signaled. The window
+ * of time is the monitor latency - currently around 100 usecs. This
+ * mechanism improves throughput by:
+ *
+ * A) Making the host more efficient - each time it wakes up,
+ * potentially it will process more packets. The
+ * monitor latency allows a batch to build up.
+ * B) By deferring the hypercall to signal, we will also minimize
+ * the interrupts.
+ *
+ * Clearly, these optimizations improve throughput at the expense of
+ * latency. Furthermore, since the channel is shared for both
+ * control and data messages, control messages currently suffer
+ * unnecessary latency adversely impacting performance and boot
+ * time. To fix this issue, permit tagging the channel as being
+ * in "low latency" mode. In this mode, we will bypass the monitor
+ * mechanism.
+ */
+ bool low_latency;
+
+ /*
+ * NUMA distribution policy:
+ * We support two policies:
+ * 1) Balanced: Here all performance critical channels are
+ * distributed evenly amongst all the NUMA nodes.
+ * This policy will be the default policy.
+ * 2) Localized: All channels of a given instance of a
+ * performance critical service will be assigned CPUs
+ * within a selected NUMA node.
+ */
+ enum hv_numa_policy affinity_policy;
};
@@ -870,6 +909,12 @@ static inline void set_channel_signal_state(struct vmbus_channel *c,
c->signal_policy = policy;
}
+static inline void set_channel_affinity_state(struct vmbus_channel *c,
+ enum hv_numa_policy policy)
+{
+ c->affinity_policy = policy;
+}
+
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
c->batched_reading = state;
@@ -891,6 +936,16 @@ static inline void set_channel_pending_send_size(struct vmbus_channel *c,
c->outbound.ring_buffer->pending_send_sz = size;
}
+static inline void set_low_latency_mode(struct vmbus_channel *c)
+{
+ c->low_latency = true;
+}
+
+static inline void clear_low_latency_mode(struct vmbus_channel *c)
+{
+ c->low_latency = false;
+}
+
void vmbus_onmessage(void *context);
int vmbus_request_offers(void);
@@ -1114,6 +1169,13 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
+static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
+{
+ const struct kobject *kobj = &device_obj->device.kobj;
+
+ return kobj->name;
+}
+
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
@@ -1257,6 +1319,27 @@ u64 hv_do_hypercall(u64 control, void *input, void *output);
0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
+ * Linux doesn't support these 3 devices: the first two are for
+ * Automatic Virtual Machine Activation, and the third is for
+ * Remote Desktop Virtualization.
+ * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
+ * {3375baf4-9e15-4b30-b765-67acb10d607b}
+ * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
+ */
+
+#define HV_AVMA1_GUID \
+ .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
+ 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
+
+#define HV_AVMA2_GUID \
+ .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
+ 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
+
+#define HV_RDV_GUID \
+ .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
+ 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+
+/*
* Common header for Hyper-V ICs
*/
@@ -1344,6 +1427,15 @@ struct ictimesync_data {
u8 flags;
} __packed;
+struct ictimesync_ref_data {
+ u64 parenttime;
+ u64 vmreferencetime;
+ u8 flags;
+ char leapflags;
+ char stratum;
+ u8 reserved[3];
+} __packed;
+
struct hyperv_service_callback {
u8 msg_type;
char *log_msg;
@@ -1357,6 +1449,9 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
struct icmsg_negotiate *, u8 *, int,
int);
+void hv_event_tasklet_disable(struct vmbus_channel *channel);
+void hv_event_tasklet_enable(struct vmbus_channel *channel);
+
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
/*
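The low_latency flag and affinity_policy field documented above are driven through the new inline setters. A sketch of how a performance-critical VMBus driver might opt in from its channel setup path; foo_vmbus_tune_channel() is hypothetical:

static void foo_vmbus_tune_channel(struct vmbus_channel *channel)
{
	/* Skip the monitor-page batching window when signaling the host. */
	set_low_latency_mode(channel);

	/* Keep this device's channels on CPUs within one NUMA node. */
	set_channel_affinity_state(channel, HV_LOCALIZED);
}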
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
new file mode 100644
index 000000000000..3fa5ef2b3759
--- /dev/null
+++ b/include/linux/hypervisor.h
@@ -0,0 +1,17 @@
+#ifndef __LINUX_HYPERVISOR_H
+#define __LINUX_HYPERVISOR_H
+
+/*
+ * Generic Hypervisor support
+ * Juergen Gross <jgross@suse.com>
+ */
+
+#ifdef CONFIG_HYPERVISOR_GUEST
+#include <asm/hypervisor.h>
+#else
+static inline void hypervisor_pin_vcpu(int cpu)
+{
+}
+#endif
+
+#endif /* __LINUX_HYPERVISOR_H */
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index d4c1d12f900d..bd74d5706f3b 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -32,7 +32,9 @@
struct i2c_mux_core {
struct i2c_adapter *parent;
struct device *dev;
- bool mux_locked;
+ unsigned int mux_locked:1;
+ unsigned int arbitrator:1;
+ unsigned int gate:1;
void *priv;
@@ -51,7 +53,9 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
int (*deselect)(struct i2c_mux_core *, u32));
/* flags for i2c_mux_alloc */
-#define I2C_MUX_LOCKED BIT(0)
+#define I2C_MUX_LOCKED BIT(0)
+#define I2C_MUX_ARBITRATOR BIT(1)
+#define I2C_MUX_GATE BIT(2)
static inline void *i2c_mux_priv(struct i2c_mux_core *muxc)
{
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index fffdc270ca18..6422eef428c4 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -427,6 +427,20 @@ struct i2c_algorithm {
};
/**
+ * struct i2c_lock_operations - represent I2C locking operations
+ * @lock_bus: Get exclusive access to an I2C bus segment
+ * @trylock_bus: Try to get exclusive access to an I2C bus segment
+ * @unlock_bus: Release exclusive access to an I2C bus segment
+ *
+ * The main operations are wrapped by i2c_lock_bus and i2c_unlock_bus.
+ */
+struct i2c_lock_operations {
+ void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
+ int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
+ void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
+};
+
+/**
* struct i2c_timings - I2C timing information
* @bus_freq_hz: the bus frequency in Hz
* @scl_rise_ns: time SCL signal takes to rise in ns; t(r) in the I2C specification
@@ -536,6 +550,7 @@ struct i2c_adapter {
void *algo_data;
/* data fields that are valid for all devices */
+ const struct i2c_lock_operations *lock_ops;
struct rt_mutex bus_lock;
struct rt_mutex mux_lock;
@@ -552,10 +567,6 @@ struct i2c_adapter {
struct i2c_bus_recovery_info *bus_recovery_info;
const struct i2c_adapter_quirks *quirks;
-
- void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
- int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
- void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -597,7 +608,21 @@ int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
static inline void
i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
- adapter->lock_bus(adapter, flags);
+ adapter->lock_ops->lock_bus(adapter, flags);
+}
+
+/**
+ * i2c_trylock_bus - Try to get exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER tries to lock the root i2c adapter,
+ * I2C_LOCK_SEGMENT tries to lock only this branch in the adapter tree
+ *
+ * Return: true if the I2C bus segment is locked, false otherwise
+ */
+static inline int
+i2c_trylock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ return adapter->lock_ops->trylock_bus(adapter, flags);
}
/**
@@ -609,7 +634,7 @@ i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
static inline void
i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
- adapter->unlock_bus(adapter, flags);
+ adapter->lock_ops->unlock_bus(adapter, flags);
}
static inline void
@@ -673,6 +698,7 @@ extern void i2c_clients_command(struct i2c_adapter *adap,
extern struct i2c_adapter *i2c_get_adapter(int nr);
extern void i2c_put_adapter(struct i2c_adapter *adap);
+extern unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);
void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults);
@@ -766,4 +792,13 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
}
#endif /* CONFIG_OF */
+#if IS_ENABLED(CONFIG_ACPI)
+u32 i2c_acpi_find_bus_speed(struct device *dev);
+#else
+static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI */
+
#endif /* _LINUX_I2C_H */
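To show the shape of the conversion forced by the const lock_ops table, here is a sketch of an adapter that simply reimplements root-adapter locking with its own bus_lock; the foo_* callbacks are illustrative and not taken from this patch:

static void foo_lock_bus(struct i2c_adapter *adap, unsigned int flags)
{
	rt_mutex_lock(&adap->bus_lock);
}

static int foo_trylock_bus(struct i2c_adapter *adap, unsigned int flags)
{
	return rt_mutex_trylock(&adap->bus_lock);
}

static void foo_unlock_bus(struct i2c_adapter *adap, unsigned int flags)
{
	rt_mutex_unlock(&adap->bus_lock);
}

static const struct i2c_lock_operations foo_lock_ops = {
	.lock_bus	= foo_lock_bus,
	.trylock_bus	= foo_trylock_bus,
	.unlock_bus	= foo_unlock_bus,
};

The adapter sets adap->lock_ops = &foo_lock_ops before registration, and callers keep going through the wrappers, e.g. i2c_trylock_bus(adap, I2C_LOCK_SEGMENT) paired with i2c_unlock_bus(adap, I2C_LOCK_SEGMENT).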
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index dcb89e3515db..c6587c01d951 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -45,6 +45,7 @@ struct br_ip_list {
#define BR_PROXYARP BIT(8)
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
+#define BR_MCAST_FLOOD BIT(11)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index f923d15b432c..0b17c585b5cd 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -25,5 +25,6 @@ struct ifla_vf_info {
__u32 max_tx_rate;
__u32 rss_query_en;
__u32 trusted;
+ __be16 vlan_proto;
};
#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 174f43f43aff..c05216a8fbac 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -245,7 +245,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
return NULL;
}
-static inline int team_num_to_port_index(struct team *team, int num)
+static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
int en_port_count = ACCESS_ONCE(team->en_port_count);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index a5f6ce6b578c..3319d97d789d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -81,6 +81,7 @@ static inline bool is_vlan_dev(const struct net_device *dev)
#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
+#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
/**
* struct vlan_pcpu_stats - VLAN percpu rx/tx stats
@@ -271,6 +272,23 @@ static inline int vlan_get_encap_level(struct net_device *dev)
}
#endif
+/**
+ * eth_type_vlan - check for valid vlan ether type.
+ * @ethertype: ether type to check
+ *
+ * Returns true if the ether type is a vlan ether type.
+ */
+static inline bool eth_type_vlan(__be16 ethertype)
+{
+ switch (ethertype) {
+ case htons(ETH_P_8021Q):
+ case htons(ETH_P_8021AD):
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline bool vlan_hw_offload_capable(netdev_features_t features,
__be16 proto)
{
@@ -424,8 +442,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
- if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
- veth->h_vlan_proto != htons(ETH_P_8021AD))
+ if (!eth_type_vlan(veth->h_vlan_proto))
return -EINVAL;
*vlan_tci = ntohs(veth->h_vlan_TCI);
@@ -487,7 +504,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
* present at mac_len - VLAN_HLEN (if mac_len > 0), or at
* ETH_HLEN otherwise
*/
- if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+ if (eth_type_vlan(type)) {
if (vlan_depth) {
if (WARN_ON(vlan_depth < VLAN_HLEN))
return 0;
@@ -505,8 +522,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
vh = (struct vlan_hdr *)(skb->data + vlan_depth);
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
- } while (type == htons(ETH_P_8021Q) ||
- type == htons(ETH_P_8021AD));
+ } while (eth_type_vlan(type));
}
if (depth)
@@ -571,8 +587,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
if (!skb_vlan_tag_present(skb) &&
- likely(skb->protocol != htons(ETH_P_8021Q) &&
- skb->protocol != htons(ETH_P_8021AD)))
+ likely(!eth_type_vlan(skb->protocol)))
return false;
return true;
@@ -592,15 +607,14 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
if (!skb_vlan_tag_present(skb)) {
struct vlan_ethhdr *veh;
- if (likely(protocol != htons(ETH_P_8021Q) &&
- protocol != htons(ETH_P_8021AD)))
+ if (likely(!eth_type_vlan(protocol)))
return false;
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
- if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+ if (!eth_type_vlan(protocol))
return false;
return true;
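eth_type_vlan() is only a convenience helper, but the conversions above show how it collapses the repeated 802.1Q/802.1ad checks. A hypothetical caller skipping an outer VLAN header could now be written as:

static inline unsigned int foo_outer_vlan_hlen(const struct sk_buff *skb)
{
	/* VLAN_HLEN only when the outer protocol is 802.1Q or 802.1ad. */
	return eth_type_vlan(skb->protocol) ? VLAN_HLEN : 0;
}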
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 3d672f72e7ec..9edccfba1ffb 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -165,6 +165,18 @@ struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer);
/**
+ * iio_channel_cb_get_iio_dev() - get access to the underlying device.
+ * @cb_buffer: The callback buffer from whom we want the device
+ * information.
+ *
+ * This function allows one to obtain information about the device.
+ * The primary aim is to allow drivers that are consuming a device to query
+ * things like current trigger.
+ */
+struct iio_dev
+*iio_channel_cb_get_iio_dev(const struct iio_cb_buffer *cb_buffer);
+
+/**
* iio_read_channel_raw() - read from a given channel
* @chan: The channel being queried.
* @val: Value read back.
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 854e2dad1e0d..b4a0679e4a49 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -483,6 +483,7 @@ struct iio_buffer_setup_ops {
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
* @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
+ * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
@@ -523,6 +524,7 @@ struct iio_dev {
bool scan_timestamp;
unsigned scan_index_timestamp;
struct iio_trigger *trig;
+ bool trig_readonly;
struct iio_poll_func *pollfunc;
struct iio_poll_func *pollfunc_event;
@@ -642,6 +644,7 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv)
}
void iio_device_free(struct iio_dev *indio_dev);
+int devm_iio_device_match(struct device *dev, void *res, void *data);
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 1c9e028e0d4a..4f1154f7a33c 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -56,6 +56,9 @@ struct iio_trigger_ops {
* @subirqs: [INTERN] information about the 'child' irqs.
* @pool: [INTERN] bitmap of irqs currently in use.
* @pool_lock: [INTERN] protection of the irq pool.
+ * @attached_own_device:[INTERN] if we are using our own device as trigger,
+ * i.e. if we registered a poll function to the same
+ * device as the one providing the trigger.
**/
struct iio_trigger {
const struct iio_trigger_ops *ops;
@@ -73,6 +76,7 @@ struct iio_trigger {
struct iio_subirq subirqs[CONFIG_IIO_CONSUMERS_PER_TRIGGER];
unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
struct mutex pool_lock;
+ bool attached_own_device;
};
@@ -125,12 +129,27 @@ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig)
**/
int iio_trigger_register(struct iio_trigger *trig_info);
+int devm_iio_trigger_register(struct device *dev,
+ struct iio_trigger *trig_info);
+
/**
* iio_trigger_unregister() - unregister a trigger from the core
* @trig_info: trigger to be unregistered
**/
void iio_trigger_unregister(struct iio_trigger *trig_info);
+void devm_iio_trigger_unregister(struct device *dev,
+ struct iio_trigger *trig_info);
+
+/**
+ * iio_trigger_set_immutable() - set an immutable trigger on destination
+ *
+ * @indio_dev: IIO device structure containing the device
+ * @trig: trigger to assign to device
+ *
+ **/
+int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
+
/**
* iio_trigger_poll() - called on a trigger occurring
* @trig: trigger which occurred
@@ -145,6 +164,13 @@ irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...);
void iio_trigger_free(struct iio_trigger *trig);
+/**
+ * iio_trigger_using_own() - tells us if we use our own HW trigger ourselves
+ * @indio_dev: device to check
+ */
+bool iio_trigger_using_own(struct iio_dev *indio_dev);
+
+
#else
struct iio_trigger;
struct iio_trigger_ops;
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index f72f70d5a97b..30145616773d 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -12,4 +12,12 @@ int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
const struct iio_buffer_setup_ops *setup_ops);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
+int devm_iio_triggered_buffer_setup(struct device *dev,
+ struct iio_dev *indio_dev,
+ irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ const struct iio_buffer_setup_ops *ops);
+void devm_iio_triggered_buffer_cleanup(struct device *dev,
+ struct iio_dev *indio_dev);
+
#endif
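The devm_ variants above remove the need for explicit iio_triggered_buffer_cleanup() calls on the error and remove paths. A sketch of the intended usage; foo_trigger_handler() and the probe fragment are hypothetical:

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;

	/* ... read the hardware and push samples to the buffer here ... */

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

In probe():

	ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
					      iio_pollfunc_store_time,
					      foo_trigger_handler, NULL);
	if (ret)
		return ret;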
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index feb04ea20f11..65da430e260f 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -37,7 +37,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh);
+ const struct nlmsghdr *unlh, bool net_admin);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
@@ -56,7 +56,7 @@ void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
- struct user_namespace *user_ns);
+ struct user_namespace *user_ns, bool net_admin);
extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
diff --git a/include/linux/init.h b/include/linux/init.h
index 6935d02474aa..e30104ceb86d 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -39,24 +39,13 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold notrace
+#define __init __section(.init.text) __cold notrace __latent_entropy
#define __initdata __section(.init.data)
-#define __initconst __constsection(.init.rodata)
+#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
#define __exit_call __used __section(.exitcall.exit)
/*
- * Some architecture have tool chains which do not handle rodata attributes
- * correctly. For those disable special sections for const, so that other
- * architectures can annotate correctly.
- */
-#ifdef CONFIG_BROKEN_RODATA
-#define __constsection(x)
-#else
-#define __constsection(x) __section(x)
-#endif
-
-/*
* modpost check for section mismatches during the kernel build.
* A section mismatch happens when there are references from a
* code or data section to an init section (both code or data).
@@ -75,7 +64,7 @@
*/
#define __ref __section(.ref.text) noinline
#define __refdata __section(.ref.data)
-#define __refconst __constsection(.ref.rodata)
+#define __refconst __section(.ref.rodata)
#ifdef MODULE
#define __exitused
@@ -86,12 +75,13 @@
#define __exit __section(.exit.text) __exitused __cold notrace
/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(.meminit.text) __cold notrace
+#define __meminit __section(.meminit.text) __cold notrace \
+ __latent_entropy
#define __meminitdata __section(.meminit.data)
-#define __meminitconst __constsection(.meminit.rodata)
+#define __meminitconst __section(.meminit.rodata)
#define __memexit __section(.memexit.text) __exitused __cold notrace
#define __memexitdata __section(.memexit.data)
-#define __memexitconst __constsection(.memexit.rodata)
+#define __memexitconst __section(.memexit.rodata)
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
@@ -150,24 +140,8 @@ extern bool initcall_debug;
#ifndef __ASSEMBLY__
-#ifdef CONFIG_LTO
-/* Work around a LTO gcc problem: when there is no reference to a variable
- * in a module it will be moved to the end of the program. This causes
- * reordering of initcalls which the kernel does not like.
- * Add a dummy reference function to avoid this. The function is
- * deleted by the linker.
- */
-#define LTO_REFERENCE_INITCALL(x) \
- ; /* yes this is needed */ \
- static __used __exit void *reference_##x(void) \
- { \
- return &x; \
- }
-#else
-#define LTO_REFERENCE_INITCALL(x)
-#endif
-
-/* initcalls are now grouped by functionality into separate
+/*
+ * initcalls are now grouped by functionality into separate
* subsections. Ordering inside the subsections is determined
* by link order.
* For backwards compatibility, initcall() puts the call in
@@ -175,12 +149,16 @@ extern bool initcall_debug;
*
* The `id' arg to __define_initcall() is needed so that multiple initcalls
* can point at the same handler without causing duplicate-symbol build errors.
+ *
+ * Initcalls are run by placing pointers in initcall sections that the
+ * kernel iterates at runtime. The linker can do dead code / data elimination
+ * and remove that completely, so the initcall sections have to be marked
+ * as KEEP() in the linker script.
*/
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(".initcall" #id ".init"))) = fn; \
- LTO_REFERENCE_INITCALL(__initcall_##fn##id)
+ __attribute__((__section__(".initcall" #id ".init"))) = fn;
/*
* Early initcalls run before initializing SMP.
@@ -216,15 +194,15 @@ extern bool initcall_debug;
#define __initcall(fn) device_initcall(fn)
-#define __exitcall(fn) \
+#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
-#define console_initcall(fn) \
- static initcall_t __initcall_##fn \
+#define console_initcall(fn) \
+ static initcall_t __initcall_##fn \
__used __section(.con_initcall.init) = fn
-#define security_initcall(fn) \
- static initcall_t __initcall_##fn \
+#define security_initcall(fn) \
+ static initcall_t __initcall_##fn \
__used __section(.security_initcall.init) = fn
struct obs_kernel_param {
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index f8834f820ec2..325f649d77ff 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,6 +15,8 @@
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
+#include <asm/thread_info.h>
+
#ifdef CONFIG_SMP
# define INIT_PUSHABLE_TASKS(tsk) \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
@@ -183,12 +185,21 @@ extern struct task_group root_task_group;
# define INIT_KASAN(tsk)
#endif
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+# define INIT_TASK_TI(tsk) \
+ .thread_info = INIT_THREAD_INFO(tsk), \
+ .stack_refcount = ATOMIC_INIT(1),
+#else
+# define INIT_TASK_TI(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
#define INIT_TASK(tsk) \
{ \
+ INIT_TASK_TI(tsk) \
.state = 0, \
.stack = init_stack, \
.usage = ATOMIC_INIT(2), \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index b6683f0ffc9f..72f0721f75e7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -278,7 +278,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs);
+struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
+int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);
#else /* CONFIG_SMP */
@@ -311,11 +312,18 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
return 0;
}
-static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
+static inline struct cpumask *
+irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
{
- *nr_vecs = 1;
return NULL;
}
+
+static inline int
+irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+{
+ return maxvec;
+}
+
#endif /* CONFIG_SMP */
/*
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 645ad06b5d52..58df02bd93c9 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -31,16 +31,16 @@
* See Documentation/io-mapping.txt
*/
-#ifdef CONFIG_HAVE_ATOMIC_IOMAP
-
-#include <asm/iomap.h>
-
struct io_mapping {
resource_size_t base;
unsigned long size;
pgprot_t prot;
+ void __iomem *iomem;
};
+#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+
+#include <asm/iomap.h>
/*
* For small address space machines, mapping large objects
* into the kernel virtual space isn't practical. Where
@@ -49,34 +49,25 @@ struct io_mapping {
*/
static inline struct io_mapping *
-io_mapping_create_wc(resource_size_t base, unsigned long size)
+io_mapping_init_wc(struct io_mapping *iomap,
+ resource_size_t base,
+ unsigned long size)
{
- struct io_mapping *iomap;
pgprot_t prot;
- iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
- if (!iomap)
- goto out_err;
-
if (iomap_create_wc(base, size, &prot))
- goto out_free;
+ return NULL;
iomap->base = base;
iomap->size = size;
iomap->prot = prot;
return iomap;
-
-out_free:
- kfree(iomap);
-out_err:
- return NULL;
}
static inline void
-io_mapping_free(struct io_mapping *mapping)
+io_mapping_fini(struct io_mapping *mapping)
{
iomap_free(mapping->base, mapping->size);
- kfree(mapping);
}
/* Atomic map/unmap */
@@ -121,21 +112,46 @@ io_mapping_unmap(void __iomem *vaddr)
#else
#include <linux/uaccess.h>
-
-/* this struct isn't actually defined anywhere */
-struct io_mapping;
+#include <asm/pgtable.h>
/* Create the io_mapping object*/
static inline struct io_mapping *
-io_mapping_create_wc(resource_size_t base, unsigned long size)
+io_mapping_init_wc(struct io_mapping *iomap,
+ resource_size_t base,
+ unsigned long size)
{
- return (struct io_mapping __force *) ioremap_wc(base, size);
+ iomap->base = base;
+ iomap->size = size;
+ iomap->iomem = ioremap_wc(base, size);
+#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
+ iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
+#elif defined(pgprot_writecombine)
+ iomap->prot = pgprot_writecombine(PAGE_KERNEL);
+#else
+ iomap->prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+
+ return iomap;
}
static inline void
-io_mapping_free(struct io_mapping *mapping)
+io_mapping_fini(struct io_mapping *mapping)
+{
+ iounmap(mapping->iomem);
+}
+
+/* Non-atomic map/unmap */
+static inline void __iomem *
+io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ unsigned long size)
+{
+ return mapping->iomem + offset;
+}
+
+static inline void
+io_mapping_unmap(void __iomem *vaddr)
{
- iounmap((void __force __iomem *) mapping);
}
/* Atomic map/unmap */
@@ -145,30 +161,42 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
{
preempt_disable();
pagefault_disable();
- return ((char __force __iomem *) mapping) + offset;
+ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
+ io_mapping_unmap(vaddr);
pagefault_enable();
preempt_enable();
}
-/* Non-atomic map/unmap */
-static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping,
- unsigned long offset,
- unsigned long size)
+#endif /* HAVE_ATOMIC_IOMAP */
+
+static inline struct io_mapping *
+io_mapping_create_wc(resource_size_t base,
+ unsigned long size)
{
- return ((char __force __iomem *) mapping) + offset;
+ struct io_mapping *iomap;
+
+ iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
+ if (!iomap)
+ return NULL;
+
+ if (!io_mapping_init_wc(iomap, base, size)) {
+ kfree(iomap);
+ return NULL;
+ }
+
+ return iomap;
}
static inline void
-io_mapping_unmap(void __iomem *vaddr)
+io_mapping_free(struct io_mapping *iomap)
{
+ io_mapping_fini(iomap);
+ kfree(iomap);
}
-#endif /* HAVE_ATOMIC_IOMAP */
-
#endif /* _LINUX_IO_MAPPING_H */
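Since io_mapping_create_wc() is now a thin kmalloc wrapper around io_mapping_init_wc(), a caller can instead embed the mapping in its own device structure. A sketch under that assumption; struct foo_device and its helpers are illustrative:

struct foo_device {
	struct io_mapping iomap;
	/* ... */
};

static int foo_init_mapping(struct foo_device *foo, resource_size_t base,
			    unsigned long size)
{
	if (!io_mapping_init_wc(&foo->iomap, base, size))
		return -ENOMEM;
	return 0;
}

static void foo_fini_mapping(struct foo_device *foo)
{
	io_mapping_fini(&foo->iomap);
}

Access is unchanged from the create/free variant:

	void __iomem *vaddr;

	vaddr = io_mapping_map_wc(&foo->iomap, offset, PAGE_SIZE);
	/* ... */
	io_mapping_unmap(vaddr);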
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 3d70ece10313..e63e288dee83 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -22,6 +22,8 @@ struct vm_fault;
* Flags for iomap mappings:
*/
#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */
+#define IOMAP_F_SHARED 0x02 /* block shared with another file */
+#define IOMAP_F_NEW 0x04 /* blocks have been newly allocated */
/*
* Magic value for blkno:
@@ -64,6 +66,8 @@ struct iomap_ops {
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
struct iomap_ops *ops);
+int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+ struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a35fb8b42e1a..436dc21318af 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -331,10 +331,32 @@ extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
+/**
+ * struct iommu_fwspec - per-device IOMMU instance data
+ * @ops: ops for this device's IOMMU
+ * @iommu_fwnode: firmware handle for this device's IOMMU
+ * @iommu_priv: IOMMU driver private data for this device
+ * @num_ids: number of associated device IDs
+ * @ids: IDs which this device may present to the IOMMU
+ */
+struct iommu_fwspec {
+ const struct iommu_ops *ops;
+ struct fwnode_handle *iommu_fwnode;
+ void *iommu_priv;
+ unsigned int num_ids;
+ u32 ids[1];
+};
+
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
+ const struct iommu_ops *ops);
+void iommu_fwspec_free(struct device *dev);
+int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
+
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
struct iommu_group {};
+struct iommu_fwspec {};
static inline bool iommu_present(struct bus_type *bus)
{
@@ -541,6 +563,23 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}
+static inline int iommu_fwspec_init(struct device *dev,
+ struct fwnode_handle *iommu_fwnode,
+ const struct iommu_ops *ops)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_fwspec_free(struct device *dev)
+{
+}
+
+static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
+ int num_ids)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */
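A sketch of how firmware parsing code might use the new iommu_fwspec helpers to record the ID a device presents to its IOMMU; foo_attach_iommu() and the single stream ID are assumptions, not part of this patch:

static int foo_attach_iommu(struct device *dev,
			    struct fwnode_handle *iommu_fwnode,
			    const struct iommu_ops *ops, u32 sid)
{
	int ret;

	/* Allocate and attach the per-device fwspec. */
	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;

	/* Record the ID this device presents to the IOMMU. */
	ret = iommu_fwspec_add_ids(dev, &sid, 1);
	if (ret)
		iommu_fwspec_free(dev);

	return ret;
}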
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index beb9ce1c2c23..8c1239020d79 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -7,7 +7,6 @@
/*
* Gives us 8 prio classes with 13-bits of data for each class
*/
-#define IOPRIO_BITS (16)
#define IOPRIO_CLASS_SHIFT (13)
#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index d10e54f03c09..848e5796400e 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -58,6 +58,7 @@ struct ipc_namespace {
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
+ struct ucounts *ucounts;
struct ns_common ns;
};
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index c6dbcd84a2c7..7e9a789be5e0 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -18,6 +18,7 @@ struct ipv6_devconf {
__s32 dad_transmits;
__s32 rtr_solicits;
__s32 rtr_solicit_interval;
+ __s32 rtr_solicit_max_interval;
__s32 rtr_solicit_delay;
__s32 force_mld_version;
__s32 mldv1_unsolicited_report_interval;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b52424eaa0ed..e79875574b39 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -916,12 +916,20 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
unsigned int clr, unsigned int set);
struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
-int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
- int num_ct, const char *name,
- irq_flow_handler_t handler,
- unsigned int clr, unsigned int set,
- enum irq_gc_flags flags);
+int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
+ int num_ct, const char *name,
+ irq_flow_handler_t handler,
+ unsigned int clr, unsigned int set,
+ enum irq_gc_flags flags);
+
+#define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name, \
+ handler, clr, set, flags) \
+({ \
+ MAYBE_BUILD_BUG_ON(irqs_per_chip > 32); \
+ __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\
+ handler, clr, set, flags); \
+})
static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
@@ -945,6 +953,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
+/*
+ * The irqsave variants are for use in non-interrupt code. Do not use

+ * them in irq_chip callbacks. Use irq_gc_lock() instead.
+ */
+#define irq_gc_lock_irqsave(gc, flags) \
+ raw_spin_lock_irqsave(&(gc)->lock, flags)
+
+#define irq_gc_unlock_irqrestore(gc, flags) \
+ raw_spin_unlock_irqrestore(&(gc)->lock, flags)
+
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 99ac022edc60..8361c8d3edd1 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -430,9 +430,9 @@ struct rdists {
};
struct irq_domain;
-struct device_node;
+struct fwnode_handle;
int its_cpu_init(void);
-int its_init(struct device_node *node, struct rdists *rdists,
+int its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *domain);
static inline bool gic_enable_sre(void)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index b51beebf9804..c9be57931b58 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -2,6 +2,7 @@
#define _LINUX_IRQDESC_H
#include <linux/rcupdate.h>
+#include <linux/kobject.h>
/*
* Core internal functions to deal with irq descriptors
@@ -43,6 +44,7 @@ struct pt_regs;
* @force_resume_depth: number of irqactions on a irq descriptor with
* IRQF_FORCE_RESUME set
* @rcu: rcu head for delayed free
+ * @kobj: kobject used to represent this struct in sysfs
* @dir: /proc/irq/ procfs entry
* @name: flow handler name for /proc/interrupts output
*/
@@ -88,6 +90,7 @@ struct irq_desc {
#endif
#ifdef CONFIG_SPARSE_IRQ
struct rcu_head rcu;
+ struct kobject kobj;
#endif
int parent_irq;
struct module *owner;
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 5fdc55312334..589d14e970ad 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -150,15 +150,19 @@ static inline u64 get_jiffies_64(void)
/* time_is_before_jiffies(a) return true if a is before jiffies */
#define time_is_before_jiffies(a) time_after(jiffies, a)
+#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
/* time_is_after_jiffies(a) return true if a is after jiffies */
#define time_is_after_jiffies(a) time_before(jiffies, a)
+#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a)
/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/
#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
+#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a)
/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/
#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
+#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a)
/*
* Have the 32 bit jiffies value wrap 5 minutes after boot
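/*
 * Illustrative use of the new 64-bit comparison helpers above (not from
 * the patch): a stored u64 deadline can be checked without worrying about
 * 32-bit jiffies wraparound.  "my_deadline" is a made-up variable.
 */
#include <linux/jiffies.h>

static u64 my_deadline;

static void my_arm_timeout(void)
{
	my_deadline = get_jiffies_64() + 10 * HZ;	/* roughly 10 seconds */
}

static bool my_timed_out(void)
{
	/* True once the stored deadline lies in the past. */
	return time_is_before_jiffies64(my_deadline);
}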
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 661af564fae8..a0547c571800 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -21,6 +21,8 @@
*
* DEFINE_STATIC_KEY_TRUE(key);
* DEFINE_STATIC_KEY_FALSE(key);
+ * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
+ * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
* static_branch_likely()
* static_branch_unlikely()
*
@@ -267,9 +269,25 @@ struct static_key_false {
#define DEFINE_STATIC_KEY_TRUE(name) \
struct static_key_true name = STATIC_KEY_TRUE_INIT
+#define DECLARE_STATIC_KEY_TRUE(name) \
+ extern struct static_key_true name
+
#define DEFINE_STATIC_KEY_FALSE(name) \
struct static_key_false name = STATIC_KEY_FALSE_INIT
+#define DECLARE_STATIC_KEY_FALSE(name) \
+ extern struct static_key_false name
+
+#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count) \
+ struct static_key_true name[count] = { \
+ [0 ... (count) - 1] = STATIC_KEY_TRUE_INIT, \
+ }
+
+#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count) \
+ struct static_key_false name[count] = { \
+ [0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \
+ }
+
extern bool ____wrong_branch_error(void);
#define static_key_enabled(x) \
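/*
 * Sketch (not part of the patch) of the new static-key array helpers: a
 * driver-defined array of keys, say one per hypothetical feature bit, can
 * be declared in one go and used with the usual static_branch_*()
 * accessors.  All "my_*" names are illustrative.
 */
#include <linux/jump_label.h>

#define MY_NR_FEATURES	4		/* made-up count */

static DEFINE_STATIC_KEY_ARRAY_FALSE(my_feature_keys, MY_NR_FEATURES);

static inline bool my_feature_enabled(int i)
{
	return static_branch_unlikely(&my_feature_keys[i]);
}

static void my_enable_feature(int i)
{
	static_branch_enable(&my_feature_keys[i]);
}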
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
index c2ce155d83cc..f282d4e87258 100644
--- a/include/linux/kern_levels.h
+++ b/include/linux/kern_levels.h
@@ -20,7 +20,7 @@
* line that had no enclosing \n). Only to be used by core/arch code
* during early bootup (a continued line is not SMP-safe otherwise).
*/
-#define KERN_CONT ""
+#define KERN_CONT KERN_SOH "c"
/* integer equivalents of KERN_<LEVEL> */
#define LOGLEVEL_SCHED -2 /* Deferred messages from sched code
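/*
 * Example (not from the patch) of what the KERN_CONT change means for
 * callers: the macro now expands to an explicit KERN_SOH "c" marker rather
 * than "", so fragments printed piecewise are still tagged as
 * continuations for printk.  The calling convention is unchanged.
 */
#include <linux/kernel.h>
#include <linux/printk.h>

static void my_print_list(const int *vals, int n)
{
	int i;

	printk(KERN_INFO "values:");
	for (i = 0; i < n; i++)
		printk(KERN_CONT " %d", vals[i]);
	printk(KERN_CONT "\n");
}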
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d96a6118d26a..bc6ed52a39b9 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -259,17 +259,14 @@ static inline void might_fault(void) { }
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(int state);
__printf(1, 2)
-void panic(const char *fmt, ...)
- __noreturn __cold;
+void panic(const char *fmt, ...) __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
extern int oops_may_print(void);
-void do_exit(long error_code)
- __noreturn;
-void complete_and_exit(struct completion *, long)
- __noreturn;
+void do_exit(long error_code) __noreturn;
+void complete_and_exit(struct completion *, long) __noreturn;
/* Internal, do not use. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
@@ -736,17 +733,25 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* strict type-checking.. See the
* "unnecessary" pointer comparison.
*/
-#define min(x, y) ({ \
- typeof(x) _min1 = (x); \
- typeof(y) _min2 = (y); \
- (void) (&_min1 == &_min2); \
- _min1 < _min2 ? _min1 : _min2; })
-
-#define max(x, y) ({ \
- typeof(x) _max1 = (x); \
- typeof(y) _max2 = (y); \
- (void) (&_max1 == &_max2); \
- _max1 > _max2 ? _max1 : _max2; })
+#define __min(t1, t2, min1, min2, x, y) ({ \
+ t1 min1 = (x); \
+ t2 min2 = (y); \
+ (void) (&min1 == &min2); \
+ min1 < min2 ? min1 : min2; })
+#define min(x, y) \
+ __min(typeof(x), typeof(y), \
+ __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
+ x, y)
+
+#define __max(t1, t2, max1, max2, x, y) ({ \
+ t1 max1 = (x); \
+ t2 max2 = (y); \
+ (void) (&max1 == &max2); \
+ max1 > max2 ? max1 : max2; })
+#define max(x, y) \
+ __max(typeof(x), typeof(y), \
+ __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
+ x, y)
#define min3(x, y, z) min((typeof(x))min(x, y), z)
#define max3(x, y, z) max((typeof(x))max(x, y), z)
@@ -778,15 +783,15 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
*
* Or not use min/max/clamp at all, of course.
*/
-#define min_t(type, x, y) ({ \
- type __min1 = (x); \
- type __min2 = (y); \
- __min1 < __min2 ? __min1: __min2; })
-
-#define max_t(type, x, y) ({ \
- type __max1 = (x); \
- type __max2 = (y); \
- __max1 > __max2 ? __max1: __max2; })
+#define min_t(type, x, y) \
+ __min(type, type, \
+ __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
+ x, y)
+
+#define max_t(type, x, y) \
+ __max(type, type, \
+ __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
+ x, y)
/**
* clamp_t - return a value clamped to a given range using a given type
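/*
 * Why the min()/max() rewrite above matters (illustration, not from the
 * patch): the temporaries now get compiler-unique names via __UNIQUE_ID(),
 * so nested invocations no longer declare two locals with the same name
 * inside one another and no longer trip -Wshadow.
 */
#include <linux/kernel.h>

static unsigned int my_clamp_len(unsigned int want, unsigned int hw_max,
				 unsigned int cfg_max)
{
	/* Nested min(): previously both expansions used a "_min1" local. */
	return min(want, min(hw_max, cfg_max));
}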
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 96356ef012de..7056238fd9f5 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -269,10 +269,8 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
}
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
-size_t kernfs_path_len(struct kernfs_node *kn);
int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
char *buf, size_t buflen);
-char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
@@ -341,12 +339,10 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{ return -ENOSYS; }
-static inline size_t kernfs_path_len(struct kernfs_node *kn)
-{ return 0; }
-
-static inline char *kernfs_path(struct kernfs_node *kn, char *buf,
- size_t buflen)
-{ return NULL; }
+static inline int kernfs_path_from_node(struct kernfs_node *root_kn,
+ struct kernfs_node *kn,
+ char *buf, size_t buflen)
+{ return -ENOSYS; }
static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { }
static inline void pr_cont_kernfs_path(struct kernfs_node *kn) { }
@@ -436,6 +432,22 @@ static inline void kernfs_init(void) { }
#endif /* CONFIG_KERNFS */
+/**
+ * kernfs_path - build full path of a given node
+ * @kn: kernfs_node of interest
+ * @buf: buffer to copy @kn's name into
+ * @buflen: size of @buf
+ *
+ * Builds the full path of @kn in @buf of @buflen bytes by calling
+ * kernfs_path_from_node() with a NULL root and returns its integer
+ * result; unlike the old kernfs_path(), no pointer into @buf is
+ * returned.
+ */
+static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
+{
+ return kernfs_path_from_node(kn, NULL, buf, buflen);
+}
+
static inline struct kernfs_node *
kernfs_find_and_get(struct kernfs_node *kn, const char *name)
{
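/*
 * Usage sketch for the kernfs_path() wrapper above (not part of the
 * patch): build the path of a node into a caller-supplied buffer.  The
 * wrapper forwards to kernfs_path_from_node() and returns its int result,
 * so a negative value is treated as an error here.
 */
#include <linux/kernfs.h>
#include <linux/printk.h>

static void my_report_node(struct kernfs_node *kn)
{
	char buf[256];

	if (kernfs_path(kn, buf, sizeof(buf)) >= 0)
		pr_info("kernfs node: %s\n", buf);
}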
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d7437777baaa..406c33dcae13 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -259,6 +259,12 @@ phys_addr_t paddr_vmcoreinfo_note(void);
vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
#define VMCOREINFO_CONFIG(name) \
vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+#define VMCOREINFO_PAGE_OFFSET(value) \
+ vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value)
+#define VMCOREINFO_VMALLOC_START(value) \
+ vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value)
+#define VMCOREINFO_VMEMMAP_START(value) \
+ vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value)
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
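/*
 * Sketch (not from the patch) of how architecture code might use the new
 * VMCOREINFO_* helpers above when filling in the vmcore note, so dump
 * tools can locate the kernel's virtual memory layout.  PAGE_OFFSET,
 * VMALLOC_START and VMEMMAP_START are architecture-specific symbols and
 * this override is only a sketch.
 */
#include <linux/kexec.h>

void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
	VMCOREINFO_VMALLOC_START(VMALLOC_START);
	VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
}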
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 4894c6888bc6..1c2a32829620 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -38,6 +38,11 @@ extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+ gfp_t gfp) __ref;
+extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
+extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
+extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
int min_count, unsigned long flags,
@@ -106,6 +111,19 @@ static inline void kmemleak_erase(void **ptr)
static inline void kmemleak_no_scan(const void *ptr)
{
}
+static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
+ int min_count, gfp_t gfp)
+{
+}
+static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+{
+}
+static inline void kmemleak_not_leak_phys(phys_addr_t phys)
+{
+}
+static inline void kmemleak_ignore_phys(phys_addr_t phys)
+{
+}
#endif /* CONFIG_DEBUG_KMEMLEAK */
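/*
 * Illustration (not from the patch) of the new *_phys kmemleak hooks:
 * early allocators that only have a physical address can inform kmemleak
 * directly instead of converting to a virtual address first.  The
 * memblock-based helper below is a sketch assuming the 4.9-era
 * memblock_alloc() signature.
 */
#include <linux/cache.h>
#include <linux/kmemleak.h>
#include <linux/memblock.h>

static phys_addr_t __init my_early_alloc(phys_addr_t size)
{
	phys_addr_t phys = memblock_alloc(size, SMP_CACHE_BYTES);

	/* Track the region by physical address, never report it as a leak. */
	if (phys)
		kmemleak_alloc_phys(phys, size, 0, 0);

	return phys;
}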
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index e691b6a23f72..a6e82a69c363 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -10,6 +10,17 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
int node,
const char namefmt[], ...);
+/**
+ * kthread_create - create a kthread on the current node
+ * @threadfn: the function to run in the thread
+ * @data: data pointer for @threadfn()
+ * @namefmt: printf-style format string for the thread name
+ * @...: arguments for @namefmt.
+ *
+ * This macro will create a kthread on the current node, leaving it in
+ * the stopped state. This is just a helper for kthread_create_on_node();
+ * see the documentation there for more details.
+ */
#define kthread_create(threadfn, data, namefmt, arg...) \
kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
@@ -44,7 +55,7 @@ bool kthread_should_stop(void);
bool kthread_should_park(void);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_data(struct task_struct *k);
-void *probe_kthread_data(struct task_struct *k);
+void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
@@ -57,16 +68,23 @@ extern int tsk_fork_get_node(struct task_struct *tsk);
* Simple work processor based on kthread.
*
* This provides easier way to make use of kthreads. A kthread_work
- * can be queued and flushed using queue/flush_kthread_work()
+ * can be queued and flushed using queue/kthread_flush_work()
* respectively. Queued kthread_works are processed by a kthread
* running kthread_worker_fn().
*/
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
+void kthread_delayed_work_timer_fn(unsigned long __data);
+
+enum {
+ KTW_FREEZABLE = 1 << 0, /* freeze during suspend */
+};
struct kthread_worker {
+ unsigned int flags;
spinlock_t lock;
struct list_head work_list;
+ struct list_head delayed_work_list;
struct task_struct *task;
struct kthread_work *current_work;
};
@@ -75,11 +93,19 @@ struct kthread_work {
struct list_head node;
kthread_work_func_t func;
struct kthread_worker *worker;
+ /* Number of canceling calls that are running at the moment. */
+ int canceling;
+};
+
+struct kthread_delayed_work {
+ struct kthread_work work;
+ struct timer_list timer;
};
#define KTHREAD_WORKER_INIT(worker) { \
.lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
.work_list = LIST_HEAD_INIT((worker).work_list), \
+ .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
}
#define KTHREAD_WORK_INIT(work, fn) { \
@@ -87,46 +113,88 @@ struct kthread_work {
.func = (fn), \
}
+#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \
+ .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
+ .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn, \
+ 0, (unsigned long)&(dwork), \
+ TIMER_IRQSAFE), \
+ }
+
#define DEFINE_KTHREAD_WORKER(worker) \
struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
#define DEFINE_KTHREAD_WORK(work, fn) \
struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
+#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn) \
+ struct kthread_delayed_work dwork = \
+ KTHREAD_DELAYED_WORK_INIT(dwork, fn)
+
/*
* kthread_worker.lock needs its own lockdep class key when defined on
* stack with lockdep enabled. Use the following macros in such cases.
*/
#ifdef CONFIG_LOCKDEP
# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
- ({ init_kthread_worker(&worker); worker; })
+ ({ kthread_init_worker(&worker); worker; })
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
#else
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
#endif
-extern void __init_kthread_worker(struct kthread_worker *worker,
+extern void __kthread_init_worker(struct kthread_worker *worker,
const char *name, struct lock_class_key *key);
-#define init_kthread_worker(worker) \
+#define kthread_init_worker(worker) \
do { \
static struct lock_class_key __key; \
- __init_kthread_worker((worker), "("#worker")->lock", &__key); \
+ __kthread_init_worker((worker), "("#worker")->lock", &__key); \
} while (0)
-#define init_kthread_work(work, fn) \
+#define kthread_init_work(work, fn) \
do { \
memset((work), 0, sizeof(struct kthread_work)); \
INIT_LIST_HEAD(&(work)->node); \
(work)->func = (fn); \
} while (0)
+#define kthread_init_delayed_work(dwork, fn) \
+ do { \
+ kthread_init_work(&(dwork)->work, (fn)); \
+ __setup_timer(&(dwork)->timer, \
+ kthread_delayed_work_timer_fn, \
+ (unsigned long)(dwork), \
+ TIMER_IRQSAFE); \
+ } while (0)
+
int kthread_worker_fn(void *worker_ptr);
-bool queue_kthread_work(struct kthread_worker *worker,
+__printf(2, 3)
+struct kthread_worker *
+kthread_create_worker(unsigned int flags, const char namefmt[], ...);
+
+struct kthread_worker *
+kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+ const char namefmt[], ...);
+
+bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work);
-void flush_kthread_work(struct kthread_work *work);
-void flush_kthread_worker(struct kthread_worker *worker);
+
+bool kthread_queue_delayed_work(struct kthread_worker *worker,
+ struct kthread_delayed_work *dwork,
+ unsigned long delay);
+
+bool kthread_mod_delayed_work(struct kthread_worker *worker,
+ struct kthread_delayed_work *dwork,
+ unsigned long delay);
+
+void kthread_flush_work(struct kthread_work *work);
+void kthread_flush_worker(struct kthread_worker *worker);
+
+bool kthread_cancel_work_sync(struct kthread_work *work);
+bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
+
+void kthread_destroy_worker(struct kthread_worker *worker);
#endif /* _LINUX_KTHREAD_H */
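/*
 * Usage sketch (not part of the patch) for the extended kthread worker API
 * above: create a dedicated worker, queue delayed work onto it, and tear
 * everything down again.  All "my_*" names are hypothetical.
 */
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>

static struct kthread_worker *my_worker;
static struct kthread_delayed_work my_dwork;

static void my_work_fn(struct kthread_work *work)
{
	/* Runs in the context of my_worker's kthread. */
}

static int my_start(void)
{
	my_worker = kthread_create_worker(0, "my_worker");
	if (IS_ERR(my_worker))
		return PTR_ERR(my_worker);

	kthread_init_delayed_work(&my_dwork, my_work_fn);
	kthread_queue_delayed_work(my_worker, &my_dwork, HZ);	/* ~1 second */
	return 0;
}

static void my_stop(void)
{
	kthread_cancel_delayed_work_sync(&my_dwork);
	kthread_destroy_worker(my_worker);
}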
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 2b6a204bd8d4..0fb7ffb1775f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -64,6 +64,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
/*
+ * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
+ * this means that you must check the result for overflow yourself.
+ */
+#define ktime_add_unsafe(lhs, rhs) \
+ ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
+
+/*
* Add a ktime_t variable and a scalar nanosecond value.
* res = kt + nsval:
*/
@@ -231,6 +238,11 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}
+static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
+{
+ return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
+}
+
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
/**
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9c28b4d4c90b..01c0b9cc3915 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -265,6 +265,7 @@ struct kvm_vcpu {
#endif
bool preempted;
struct kvm_vcpu_arch arch;
+ struct dentry *debugfs_dentry;
};
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
@@ -749,6 +750,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
+bool kvm_arch_has_vcpu_debugfs(void);
+int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+
int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 8a3b5d29602f..ddfcb2df3656 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -359,6 +359,11 @@ struct led_platform_data {
struct led_info *leds;
};
+struct gpio_desc;
+typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+
/* For the leds-gpio driver */
struct gpio_led {
const char *name;
@@ -382,9 +387,7 @@ struct gpio_led_platform_data {
#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */
#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */
#define GPIO_LED_BLINK 2 /* Please, blink */
- int (*gpio_blink_set)(struct gpio_desc *desc, int state,
- unsigned long *delay_on,
- unsigned long *delay_off);
+ gpio_blink_set_t gpio_blink_set;
};
#ifdef CONFIG_NEW_LEDS
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
deleted file mode 100644
index c92ebd100d9b..000000000000
--- a/include/linux/lglock.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Specialised local-global spinlock. Can only be declared as global variables
- * to avoid overhead and keep things simple (and we don't want to start using
- * these inside dynamically allocated structures).
- *
- * "local/global locks" (lglocks) can be used to:
- *
- * - Provide fast exclusive access to per-CPU data, with exclusive access to
- * another CPU's data allowed but possibly subject to contention, and to
- * provide very slow exclusive access to all per-CPU data.
- * - Or to provide very fast and scalable read serialisation, and to provide
- * very slow exclusive serialisation of data (not necessarily per-CPU data).
- *
- * Brlocks are also implemented as a short-hand notation for the latter use
- * case.
- *
- * Copyright 2009, 2010, Nick Piggin, Novell Inc.
- */
-#ifndef __LINUX_LGLOCK_H
-#define __LINUX_LGLOCK_H
-
-#include <linux/spinlock.h>
-#include <linux/lockdep.h>
-#include <linux/percpu.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-
-#ifdef CONFIG_SMP
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define LOCKDEP_INIT_MAP lockdep_init_map
-#else
-#define LOCKDEP_INIT_MAP(a, b, c, d)
-#endif
-
-struct lglock {
- arch_spinlock_t __percpu *lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lock_key;
- struct lockdep_map lock_dep_map;
-#endif
-};
-
-#define DEFINE_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
- struct lglock name = { .lock = &name ## _lock }
-
-#define DEFINE_STATIC_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
- static struct lglock name = { .lock = &name ## _lock }
-
-void lg_lock_init(struct lglock *lg, char *name);
-
-void lg_local_lock(struct lglock *lg);
-void lg_local_unlock(struct lglock *lg);
-void lg_local_lock_cpu(struct lglock *lg, int cpu);
-void lg_local_unlock_cpu(struct lglock *lg, int cpu);
-
-void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
-void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
-
-void lg_global_lock(struct lglock *lg);
-void lg_global_unlock(struct lglock *lg);
-
-#else
-/* When !CONFIG_SMP, map lglock to spinlock */
-#define lglock spinlock
-#define DEFINE_LGLOCK(name) DEFINE_SPINLOCK(name)
-#define DEFINE_STATIC_LGLOCK(name) static DEFINE_SPINLOCK(name)
-#define lg_lock_init(lg, name) spin_lock_init(lg)
-#define lg_local_lock spin_lock
-#define lg_local_unlock spin_unlock
-#define lg_local_lock_cpu(lg, cpu) spin_lock(lg)
-#define lg_local_unlock_cpu(lg, cpu) spin_unlock(lg)
-#define lg_global_lock spin_lock
-#define lg_global_unlock spin_unlock
-#endif
-
-#endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e37d4f99f510..616eef4d81ea 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -46,7 +46,8 @@
#ifdef CONFIG_ATA_NONSTANDARD
#include <asm/libata-portmap.h>
#else
-#include <asm-generic/libata-portmap.h>
+#define ATA_PRIMARY_IRQ(dev) 14
+#define ATA_SECONDARY_IRQ(dev) 15
#endif
/*
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index b519e137b9b7..f4947fda11e7 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -50,23 +50,6 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc);
-struct nd_namespace_label;
-struct nvdimm_drvdata;
-
-struct nd_mapping {
- struct nvdimm *nvdimm;
- struct nd_namespace_label **labels;
- u64 start;
- u64 size;
- /*
- * @ndd is for private use at region enable / disable time for
- * get_ndd() + put_ndd(), all other nd_mapping to ndd
- * conversions use to_ndd() which respects enabled state of the
- * nvdimm.
- */
- struct nvdimm_drvdata *ndd;
-};
-
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
unsigned long cmd_mask;
@@ -89,9 +72,15 @@ struct nd_interleave_set {
u64 cookie;
};
+struct nd_mapping_desc {
+ struct nvdimm *nvdimm;
+ u64 start;
+ u64 size;
+};
+
struct nd_region_desc {
struct resource *res;
- struct nd_mapping *nd_mapping;
+ struct nd_mapping_desc *mapping;
u16 num_mappings;
const struct attribute_group **attr_groups;
struct nd_interleave_set *nd_set;
@@ -129,6 +118,8 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
}
int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
+void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
+ phys_addr_t start, unsigned int len);
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
struct nvdimm_bus_descriptor *nfit_desc);
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
@@ -139,6 +130,7 @@ struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
+struct kobject *nvdimm_kobj(struct nvdimm *nvdimm);
unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
void *nvdimm_provider_data(struct nvdimm *nvdimm);
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index ba78b8306674..d190786e4ad8 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -352,7 +352,10 @@ struct nvm_dev {
/* Backend device */
struct request_queue *q;
+ struct device dev;
+ struct device *parent_dev;
char name[DISK_NAME_LEN];
+ void *private_data;
struct mutex mlock;
spinlock_t lock;
@@ -524,9 +527,9 @@ extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
-extern int nvm_register(struct request_queue *, char *,
- struct nvm_dev_ops *);
-extern void nvm_unregister(char *);
+extern struct nvm_dev *nvm_alloc_dev(int);
+extern int nvm_register(struct nvm_dev *);
+extern void nvm_unregister(struct nvm_dev *);
void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type);
@@ -575,11 +578,14 @@ extern int nvm_dev_factory(struct nvm_dev *, int flags);
#else /* CONFIG_NVM */
struct nvm_dev_ops;
-static inline int nvm_register(struct request_queue *q, char *disk_name,
- struct nvm_dev_ops *ops)
+static inline struct nvm_dev *nvm_alloc_dev(int node)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline int nvm_register(struct nvm_dev *dev)
{
return -EINVAL;
}
-static inline void nvm_unregister(char *disk_name) {}
+static inline void nvm_unregister(struct nvm_dev *dev) {}
#endif /* CONFIG_NVM */
#endif /* LIGHTNVM.H */
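/*
 * Sketch (not from the patch) of the new registration flow: the host
 * driver now allocates a struct nvm_dev itself, fills it in and passes the
 * whole structure around instead of a disk name string.  "my_ops", "q",
 * "priv" and the NUMA node 0 are placeholders; error handling is trimmed.
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/lightnvm.h>
#include <linux/string.h>

static int my_register_lightnvm(struct nvm_dev_ops *my_ops,
				struct request_queue *q,
				const char *disk_name, void *priv)
{
	struct nvm_dev *ndev = nvm_alloc_dev(0 /* NUMA node */);

	if (IS_ERR_OR_NULL(ndev))
		return -ENOMEM;

	ndev->q = q;
	ndev->ops = my_ops;
	ndev->private_data = priv;
	strlcpy(ndev->name, disk_name, DISK_NAME_LEN);

	return nvm_register(ndev);
}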
diff --git a/include/linux/list.h b/include/linux/list.h
index 5183138aa932..5809e9a2de5b 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -381,8 +381,11 @@ static inline void list_splice_tail_init(struct list_head *list,
*
* Note that if the list is empty, it returns NULL.
*/
-#define list_first_entry_or_null(ptr, type, member) \
- (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#define list_first_entry_or_null(ptr, type, member) ({ \
+ struct list_head *head__ = (ptr); \
+ struct list_head *pos__ = READ_ONCE(head__->next); \
+ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
+})
/**
* list_next_entry - get the next element in list
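/*
 * Illustration (not from the patch) of why the READ_ONCE() rewrite of
 * list_first_entry_or_null() above matters: a lockless peek at the head of
 * a list now reads ->next exactly once, so the emptiness test and the
 * entry computation cannot observe two different values.  "struct my_req"
 * and its fields are hypothetical.
 */
#include <linux/list.h>

struct my_req {
	struct list_head node;
	int id;
};

static int my_peek_first_id(struct list_head *queue)
{
	struct my_req *req = list_first_entry_or_null(queue, struct my_req, node);

	return req ? req->id : -1;
}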
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index a93a0b23dc8d..9072f04db616 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -116,6 +116,9 @@ int klp_unregister_patch(struct klp_patch *);
int klp_enable_patch(struct klp_patch *);
int klp_disable_patch(struct klp_patch *);
+void arch_klp_init_object_loaded(struct klp_patch *patch,
+ struct klp_object *obj);
+
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index eabe0138eb06..c1458fede1f9 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -16,6 +16,8 @@ struct lockdep_map;
extern int prove_locking;
extern int lock_stat;
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
@@ -29,8 +31,6 @@ extern int lock_stat;
*/
#define XXX_LOCK_USAGE_STATES (1+3*4)
-#define MAX_LOCKDEP_SUBCLASSES 8UL
-
/*
* NR_LOCKDEP_CACHING_CLASSES ... Number of classes
* cached in the instance of lockdep_map
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index ffb9c9da4f39..e58e577117b6 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -59,6 +59,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_INODE 9
#define LSM_AUDIT_DATA_DENTRY 10
#define LSM_AUDIT_DATA_IOCTL_OP 11
+#define LSM_AUDIT_DATA_FILE 12
union {
struct path path;
struct dentry *dentry;
@@ -75,6 +76,7 @@ struct common_audit_data {
#endif
char *kmod_name;
struct lsm_ioctlop_audit *op;
+ struct file *file;
} u;
/* this union contains LSM specific data */
union {
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 101bf19c0f41..558adfa5c8a8 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -151,6 +151,16 @@
* @name name of the last path component used to create file
* @ctx pointer to place the pointer to the resulting context in.
* @ctxlen point to place the length of the resulting context.
+ * @dentry_create_files_as:
+ * Compute a context for a dentry as the inode is not yet available
+ * and set that context in passed in creds so that new files are
+ * created using that context. Context is calculated using the
+ * passed in creds and not the creds of the caller.
+ * @dentry dentry to use in calculating the context.
+ * @mode mode used to determine resource type.
+ * @name name of the last path component used to create file
+ * @old creds which should be used for context calculation
+ * @new creds to modify
*
*
* Security hooks for inode operations.
@@ -401,6 +411,23 @@
* @inode contains a pointer to the inode.
* @secid contains a pointer to the location where result will be saved.
* In case of failure, @secid will be set to zero.
+ * @inode_copy_up:
+ * A file is about to be copied up from lower layer to upper layer of
+ * overlay filesystem. Security module can prepare a set of new creds
+ * and modify as need be and return new creds. Caller will switch to
+ * new creds temporarily to create new file and release newly allocated
+ * creds.
+ * @src indicates the union dentry of file that is being copied up.
+ * @new pointer to pointer to return newly allocated creds.
+ * Returns 0 on success or a negative error code on error.
+ * @inode_copy_up_xattr:
+ * Filter the xattrs being copied up when a unioned file is copied
+ * up from a lower layer to the union/overlay layer.
+ * @name indicates the name of the xattr.
+ * Returns 0 to accept the xattr, 1 to discard the xattr, -EOPNOTSUPP if
+ * security module does not know about attribute or a negative error code
+ * to abort the copy up. Note that the caller is responsible for reading
+ * and writing the xattrs as this hook is merely a filter.
*
* Security hooks for file operations
*
@@ -1358,6 +1385,10 @@ union security_list_options {
int (*dentry_init_security)(struct dentry *dentry, int mode,
const struct qstr *name, void **ctx,
u32 *ctxlen);
+ int (*dentry_create_files_as)(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old,
+ struct cred *new);
#ifdef CONFIG_SECURITY_PATH
@@ -1425,6 +1456,8 @@ union security_list_options {
int (*inode_listsecurity)(struct inode *inode, char *buffer,
size_t buffer_size);
void (*inode_getsecid)(struct inode *inode, u32 *secid);
+ int (*inode_copy_up)(struct dentry *src, struct cred **new);
+ int (*inode_copy_up_xattr)(const char *name);
int (*file_permission)(struct file *file, int mask);
int (*file_alloc_security)(struct file *file);
@@ -1455,7 +1488,6 @@ union security_list_options {
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*kernel_module_request)(char *kmod_name);
- int (*kernel_module_from_file)(struct file *file);
int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
@@ -1656,6 +1688,7 @@ struct security_hook_heads {
struct list_head sb_clone_mnt_opts;
struct list_head sb_parse_opts_str;
struct list_head dentry_init_security;
+ struct list_head dentry_create_files_as;
#ifdef CONFIG_SECURITY_PATH
struct list_head path_unlink;
struct list_head path_mkdir;
@@ -1696,6 +1729,8 @@ struct security_hook_heads {
struct list_head inode_setsecurity;
struct list_head inode_listsecurity;
struct list_head inode_getsecid;
+ struct list_head inode_copy_up;
+ struct list_head inode_copy_up_xattr;
struct list_head file_permission;
struct list_head file_alloc_security;
struct list_head file_free_security;
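/*
 * Sketch (not from the patch) of an inode_copy_up_xattr hook following the
 * documentation above: drop a hypothetical module-private xattr so it is
 * recomputed on the upper inode rather than copied verbatim, and return
 * -EOPNOTSUPP for attributes this module does not know about.
 */
#include <linux/errno.h>
#include <linux/lsm_hooks.h>
#include <linux/string.h>

#define MY_XATTR_NAME	"security.my_module"	/* made-up name */

static int my_inode_copy_up_xattr(const char *name)
{
	if (!strcmp(name, MY_XATTR_NAME))
		return 1;		/* discard: do not copy this xattr up */
	return -EOPNOTSUPP;		/* not ours: leave the decision to others */
}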
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index d610232762e3..2931aa43dab1 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -11,6 +11,8 @@
#ifndef __LINUX_MBUS_H
#define __LINUX_MBUS_H
+#include <linux/errno.h>
+
struct resource;
struct mbus_dram_target_info
@@ -55,6 +57,8 @@ struct mbus_dram_target_info
#ifdef CONFIG_PLAT_ORION
extern const struct mbus_dram_target_info *mv_mbus_dram_info(void);
extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void);
+int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
+ u8 *attr);
#else
static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
{
@@ -64,14 +68,24 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo
{
return NULL;
}
+static inline int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size,
+ u8 *target, u8 *attr)
+{
+ /*
+ * On all ARM32 MVEBU platforms with MBus support, this stub
+ * function will not get called. The real function from the
+ * MBus driver is called instead. ARM64 MVEBU platforms like
+ * the Armada 3700 could use the mv_xor device driver which calls
+ * into this function
+ */
+ return -EINVAL;
+}
#endif
int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr);
-int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
- u8 *attr);
int mvebu_mbus_add_window_remap_by_id(unsigned int target,
unsigned int attribute,
phys_addr_t base, size_t size,
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index ead13d233a97..4097ac9ea13a 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -41,15 +41,17 @@ struct mcb_bus {
char name[CHAMELEON_FILENAME_LEN + 1];
int (*get_irq)(struct mcb_device *dev);
};
-#define to_mcb_bus(b) container_of((b), struct mcb_bus, dev)
+
+static inline struct mcb_bus *to_mcb_bus(struct device *dev)
+{
+ return container_of(dev, struct mcb_bus, dev);
+}
/**
* struct mcb_device - MEN Chameleon Bus device
*
- * @bus_list: internal list handling for bus code
* @dev: device in kernel representation
* @bus: mcb bus the device is plugged to
- * @subordinate: subordinate MCBus in case of bridge
* @is_added: flag to check if device is added to bus
* @driver: associated mcb_driver
* @id: mcb device id
@@ -62,10 +64,8 @@ struct mcb_bus {
* @memory: memory resource
*/
struct mcb_device {
- struct list_head bus_list;
struct device dev;
struct mcb_bus *bus;
- struct mcb_bus *subordinate;
bool is_added;
struct mcb_driver *driver;
u16 id;
@@ -76,8 +76,13 @@ struct mcb_device {
int rev;
struct resource irq;
struct resource mem;
+ struct device *dma_dev;
};
-#define to_mcb_device(x) container_of((x), struct mcb_device, dev)
+
+static inline struct mcb_device *to_mcb_device(struct device *dev)
+{
+ return container_of(dev, struct mcb_device, dev);
+}
/**
* struct mcb_driver - MEN Chameleon Bus device driver
@@ -95,7 +100,11 @@ struct mcb_driver {
void (*remove)(struct mcb_device *mdev);
void (*shutdown)(struct mcb_device *mdev);
};
-#define to_mcb_driver(x) container_of((x), struct mcb_driver, driver)
+
+static inline struct mcb_driver *to_mcb_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct mcb_driver, driver);
+}
static inline void *mcb_get_drvdata(struct mcb_device *dev)
{
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 2925da23505d..5b759c9acf97 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -328,6 +328,7 @@ phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
+phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5d8ca6e02e39..61d20c17f3b7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -366,6 +366,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup *,
struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+int mem_cgroup_scan_tasks(struct mem_cgroup *,
+ int (*)(struct task_struct *, void *), void *);
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
@@ -446,6 +448,8 @@ unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
void mem_cgroup_handle_over_high(void);
+unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg);
+
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
@@ -639,6 +643,12 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
{
}
+static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ int (*fn)(struct task_struct *, void *), void *arg)
+{
+ return 0;
+}
+
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
return 0;
@@ -669,6 +679,11 @@ mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
return 0;
}
+static inline unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
@@ -758,13 +773,13 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-void sock_update_memcg(struct sock *sk);
-void sock_release_memcg(struct sock *sk);
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+void mem_cgroup_sk_alloc(struct sock *sk);
+void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
@@ -777,6 +792,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
}
#else
#define mem_cgroup_sockets_enabled 0
+static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
+static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
return false;
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index d409ceb2231e..c118a7ec94d6 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -350,7 +350,7 @@ static inline int pm80x_dev_suspend(struct device *dev)
int irq = platform_get_irq(pdev, 0);
if (device_may_wakeup(dev))
- set_bit((1 << irq), &chip->wu_flag);
+ set_bit(irq, &chip->wu_flag);
return 0;
}
@@ -362,7 +362,7 @@ static inline int pm80x_dev_resume(struct device *dev)
int irq = platform_get_irq(pdev, 0);
if (device_may_wakeup(dev))
- clear_bit((1 << irq), &chip->wu_flag);
+ clear_bit(irq, &chip->wu_flag);
return 0;
}
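/*
 * Note on the fix above (illustration only): set_bit()/clear_bit() take a
 * bit *number*, not a mask.  Passing (1 << irq) therefore operated on bit
 * number 2^irq rather than bit 'irq', walking off the flag word for all
 * but the smallest IRQ numbers.
 */
#include <linux/bitops.h>

static unsigned long my_wu_flag;

static void my_mark_wakeup(int irq)
{
	set_bit(irq, &my_wu_flag);	/* correct: pass the bit number */
	/* set_bit(1 << irq, &my_wu_flag) would set bit (1 << irq) instead */
}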
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 9475fee2bfc5..d33c245e75ca 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -63,6 +63,8 @@ enum ab8500_version {
#define AB8500_STE_TEST 0x14
#define AB8500_OTP_EMUL 0x15
+#define AB8500_DEBUG_FIELD_LAST 0x16
+
/*
* Interrupts
* Values used to index into array ab8500_irq_regoffset[] defined in
diff --git a/include/linux/mfd/ac100.h b/include/linux/mfd/ac100.h
new file mode 100644
index 000000000000..3c148f196b9f
--- /dev/null
+++ b/include/linux/mfd/ac100.h
@@ -0,0 +1,178 @@
+/*
+ * Functions and registers to access AC100 codec / RTC combo IC.
+ *
+ * Copyright (C) 2016 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_AC100_H
+#define __LINUX_MFD_AC100_H
+
+#include <linux/regmap.h>
+
+struct ac100_dev {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+/* Audio codec related registers */
+#define AC100_CHIP_AUDIO_RST 0x00
+#define AC100_PLL_CTRL1 0x01
+#define AC100_PLL_CTRL2 0x02
+#define AC100_SYSCLK_CTRL 0x03
+#define AC100_MOD_CLK_ENA 0x04
+#define AC100_MOD_RST_CTRL 0x05
+#define AC100_I2S_SR_CTRL 0x06
+
+/* I2S1 interface */
+#define AC100_I2S1_CLK_CTRL 0x10
+#define AC100_I2S1_SND_OUT_CTRL 0x11
+#define AC100_I2S1_SND_IN_CTRL 0x12
+#define AC100_I2S1_MXR_SRC 0x13
+#define AC100_I2S1_VOL_CTRL1 0x14
+#define AC100_I2S1_VOL_CTRL2 0x15
+#define AC100_I2S1_VOL_CTRL3 0x16
+#define AC100_I2S1_VOL_CTRL4 0x17
+#define AC100_I2S1_MXR_GAIN 0x18
+
+/* I2S2 interface */
+#define AC100_I2S2_CLK_CTRL 0x20
+#define AC100_I2S2_SND_OUT_CTRL 0x21
+#define AC100_I2S2_SND_IN_CTRL 0x22
+#define AC100_I2S2_MXR_SRC 0x23
+#define AC100_I2S2_VOL_CTRL1 0x24
+#define AC100_I2S2_VOL_CTRL2 0x25
+#define AC100_I2S2_VOL_CTRL3 0x26
+#define AC100_I2S2_VOL_CTRL4 0x27
+#define AC100_I2S2_MXR_GAIN 0x28
+
+/* I2S3 interface */
+#define AC100_I2S3_CLK_CTRL 0x30
+#define AC100_I2S3_SND_OUT_CTRL 0x31
+#define AC100_I2S3_SND_IN_CTRL 0x32
+#define AC100_I2S3_SIG_PATH_CTRL 0x33
+
+/* ADC digital controls */
+#define AC100_ADC_DIG_CTRL 0x40
+#define AC100_ADC_VOL_CTRL 0x41
+
+/* HMIC plug sensing / key detection */
+#define AC100_HMIC_CTRL1 0x44
+#define AC100_HMIC_CTRL2 0x45
+#define AC100_HMIC_STATUS 0x46
+
+/* DAC digital controls */
+#define AC100_DAC_DIG_CTRL 0x48
+#define AC100_DAC_VOL_CTRL 0x49
+#define AC100_DAC_MXR_SRC 0x4c
+#define AC100_DAC_MXR_GAIN 0x4d
+
+/* Analog controls */
+#define AC100_ADC_APC_CTRL 0x50
+#define AC100_ADC_SRC 0x51
+#define AC100_ADC_SRC_BST_CTRL 0x52
+#define AC100_OUT_MXR_DAC_A_CTRL 0x53
+#define AC100_OUT_MXR_SRC 0x54
+#define AC100_OUT_MXR_SRC_BST 0x55
+#define AC100_HPOUT_CTRL 0x56
+#define AC100_ERPOUT_CTRL 0x57
+#define AC100_SPKOUT_CTRL 0x58
+#define AC100_LINEOUT_CTRL 0x59
+
+/* ADC digital audio processing (high pass filter & auto gain control) */
+#define AC100_ADC_DAP_L_STA 0x80
+#define AC100_ADC_DAP_R_STA 0x81
+#define AC100_ADC_DAP_L_CTRL 0x82
+#define AC100_ADC_DAP_R_CTRL 0x83
+#define AC100_ADC_DAP_L_T_L 0x84 /* Left Target Level */
+#define AC100_ADC_DAP_R_T_L 0x85 /* Right Target Level */
+#define AC100_ADC_DAP_L_H_A_C 0x86 /* Left High Avg. Coef */
+#define AC100_ADC_DAP_L_L_A_C 0x87 /* Left Low Avg. Coef */
+#define AC100_ADC_DAP_R_H_A_C 0x88 /* Right High Avg. Coef */
+#define AC100_ADC_DAP_R_L_A_C 0x89 /* Right Low Avg. Coef */
+#define AC100_ADC_DAP_L_D_T 0x8a /* Left Decay Time */
+#define AC100_ADC_DAP_L_A_T 0x8b /* Left Attack Time */
+#define AC100_ADC_DAP_R_D_T 0x8c /* Right Decay Time */
+#define AC100_ADC_DAP_R_A_T 0x8d /* Right Attack Time */
+#define AC100_ADC_DAP_N_TH 0x8e /* Noise Threshold */
+#define AC100_ADC_DAP_L_H_N_A_C 0x8f /* Left High Noise Avg. Coef */
+#define AC100_ADC_DAP_L_L_N_A_C 0x90 /* Left Low Noise Avg. Coef */
+#define AC100_ADC_DAP_R_H_N_A_C 0x91 /* Right High Noise Avg. Coef */
+#define AC100_ADC_DAP_R_L_N_A_C 0x92 /* Right Low Noise Avg. Coef */
+#define AC100_ADC_DAP_H_HPF_C 0x93 /* High High-Pass-Filter Coef */
+#define AC100_ADC_DAP_L_HPF_C 0x94 /* Low High-Pass-Filter Coef */
+#define AC100_ADC_DAP_OPT 0x95 /* AGC Optimum */
+
+/* DAC digital audio processing (high pass filter & dynamic range control) */
+#define AC100_DAC_DAP_CTRL 0xa0
+#define AC100_DAC_DAP_H_HPF_C 0xa1 /* High High-Pass-Filter Coef */
+#define AC100_DAC_DAP_L_HPF_C 0xa2 /* Low High-Pass-Filter Coef */
+#define AC100_DAC_DAP_L_H_E_A_C 0xa3 /* Left High Energy Avg Coef */
+#define AC100_DAC_DAP_L_L_E_A_C 0xa4 /* Left Low Energy Avg Coef */
+#define AC100_DAC_DAP_R_H_E_A_C 0xa5 /* Right High Energy Avg Coef */
+#define AC100_DAC_DAP_R_L_E_A_C 0xa6 /* Right Low Energy Avg Coef */
+#define AC100_DAC_DAP_H_G_D_T_C 0xa7 /* High Gain Delay Time Coef */
+#define AC100_DAC_DAP_L_G_D_T_C 0xa8 /* Low Gain Delay Time Coef */
+#define AC100_DAC_DAP_H_G_A_T_C 0xa9 /* High Gain Attack Time Coef */
+#define AC100_DAC_DAP_L_G_A_T_C 0xaa /* Low Gain Attack Time Coef */
+#define AC100_DAC_DAP_H_E_TH 0xab /* High Energy Threshold */
+#define AC100_DAC_DAP_L_E_TH 0xac /* Low Energy Threshold */
+#define AC100_DAC_DAP_H_G_K 0xad /* High Gain K parameter */
+#define AC100_DAC_DAP_L_G_K 0xae /* Low Gain K parameter */
+#define AC100_DAC_DAP_H_G_OFF 0xaf /* High Gain offset */
+#define AC100_DAC_DAP_L_G_OFF 0xb0 /* Low Gain offset */
+#define AC100_DAC_DAP_OPT 0xb1 /* DRC optimum */
+
+/* Digital audio processing enable */
+#define AC100_ADC_DAP_ENA 0xb4
+#define AC100_DAC_DAP_ENA 0xb5
+
+/* SRC control */
+#define AC100_SRC1_CTRL1 0xb8
+#define AC100_SRC1_CTRL2 0xb9
+#define AC100_SRC1_CTRL3 0xba
+#define AC100_SRC1_CTRL4 0xbb
+#define AC100_SRC2_CTRL1 0xbc
+#define AC100_SRC2_CTRL2 0xbd
+#define AC100_SRC2_CTRL3 0xbe
+#define AC100_SRC2_CTRL4 0xbf
+
+/* RTC clk control */
+#define AC100_CLK32K_ANALOG_CTRL 0xc0
+#define AC100_CLKOUT_CTRL1 0xc1
+#define AC100_CLKOUT_CTRL2 0xc2
+#define AC100_CLKOUT_CTRL3 0xc3
+
+/* RTC module */
+#define AC100_RTC_RST 0xc6
+#define AC100_RTC_CTRL 0xc7
+#define AC100_RTC_SEC 0xc8 /* second */
+#define AC100_RTC_MIN 0xc9 /* minute */
+#define AC100_RTC_HOU 0xca /* hour */
+#define AC100_RTC_WEE 0xcb /* weekday */
+#define AC100_RTC_DAY 0xcc /* day */
+#define AC100_RTC_MON 0xcd /* month */
+#define AC100_RTC_YEA 0xce /* year */
+#define AC100_RTC_UPD 0xcf /* update trigger */
+
+/* RTC alarm */
+#define AC100_ALM_INT_ENA 0xd0
+#define AC100_ALM_INT_STA 0xd1
+#define AC100_ALM_SEC 0xd8
+#define AC100_ALM_MIN 0xd9
+#define AC100_ALM_HOU 0xda
+#define AC100_ALM_WEE 0xdb
+#define AC100_ALM_DAY 0xdc
+#define AC100_ALM_MON 0xdd
+#define AC100_ALM_YEA 0xde
+#define AC100_ALM_UPD 0xdf
+
+/* RTC general purpose register 0 ~ 15 */
+#define AC100_RTC_GP(x) (0xe0 + (x))
+
+#endif /* __LINUX_MFD_AC100_H */
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 58ab4c0fe761..b31b3be7f8c9 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -13,6 +13,7 @@
#ifndef _WM_ARIZONA_CORE_H
#define _WM_ARIZONA_CORE_H
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/regmap.h>
@@ -21,6 +22,12 @@
#define ARIZONA_MAX_CORE_SUPPLIES 2
+enum {
+ ARIZONA_MCLK1,
+ ARIZONA_MCLK2,
+ ARIZONA_NUM_MCLK
+};
+
enum arizona_type {
WM5102 = 1,
WM5110 = 2,
@@ -139,6 +146,8 @@ struct arizona {
struct mutex clk_lock;
int clk32k_ref;
+ struct clk *mclk[ARIZONA_NUM_MCLK];
+
bool ctrlif_error;
struct snd_soc_dapm_context *dapm;
@@ -182,7 +191,4 @@ int cs47l24_patch(struct arizona *arizona);
int wm8997_patch(struct arizona *arizona);
int wm8998_patch(struct arizona *arizona);
-extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
- bool mandatory);
-
#endif
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 0be4982f08fe..fec597fb34cb 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -20,6 +20,7 @@ enum {
AXP221_ID,
AXP223_ID,
AXP288_ID,
+ AXP806_ID,
AXP809_ID,
NR_AXP20X_VARIANTS,
};
@@ -91,6 +92,30 @@ enum {
#define AXP22X_ALDO3_V_OUT 0x2a
#define AXP22X_CHRG_CTRL3 0x35
+#define AXP806_STARTUP_SRC 0x00
+#define AXP806_CHIP_ID 0x03
+#define AXP806_PWR_OUT_CTRL1 0x10
+#define AXP806_PWR_OUT_CTRL2 0x11
+#define AXP806_DCDCA_V_CTRL 0x12
+#define AXP806_DCDCB_V_CTRL 0x13
+#define AXP806_DCDCC_V_CTRL 0x14
+#define AXP806_DCDCD_V_CTRL 0x15
+#define AXP806_DCDCE_V_CTRL 0x16
+#define AXP806_ALDO1_V_CTRL 0x17
+#define AXP806_ALDO2_V_CTRL 0x18
+#define AXP806_ALDO3_V_CTRL 0x19
+#define AXP806_DCDC_MODE_CTRL1 0x1a
+#define AXP806_DCDC_MODE_CTRL2 0x1b
+#define AXP806_DCDC_FREQ_CTRL 0x1c
+#define AXP806_BLDO1_V_CTRL 0x20
+#define AXP806_BLDO2_V_CTRL 0x21
+#define AXP806_BLDO3_V_CTRL 0x22
+#define AXP806_BLDO4_V_CTRL 0x23
+#define AXP806_CLDO1_V_CTRL 0x24
+#define AXP806_CLDO2_V_CTRL 0x25
+#define AXP806_CLDO3_V_CTRL 0x26
+#define AXP806_VREF_TEMP_WARN_L 0xf3
+
/* Interrupt */
#define AXP152_IRQ1_EN 0x40
#define AXP152_IRQ2_EN 0x41
@@ -266,6 +291,26 @@ enum {
};
enum {
+ AXP806_DCDCA = 0,
+ AXP806_DCDCB,
+ AXP806_DCDCC,
+ AXP806_DCDCD,
+ AXP806_DCDCE,
+ AXP806_ALDO1,
+ AXP806_ALDO2,
+ AXP806_ALDO3,
+ AXP806_BLDO1,
+ AXP806_BLDO2,
+ AXP806_BLDO3,
+ AXP806_BLDO4,
+ AXP806_CLDO1,
+ AXP806_CLDO2,
+ AXP806_CLDO3,
+ AXP806_SW,
+ AXP806_REG_ID_MAX,
+};
+
+enum {
AXP809_DCDC1 = 0,
AXP809_DCDC2,
AXP809_DCDC3,
@@ -414,6 +459,21 @@ enum axp288_irqs {
AXP288_IRQ_BC_USB_CHNG,
};
+enum axp806_irqs {
+ AXP806_IRQ_DIE_TEMP_HIGH_LV1,
+ AXP806_IRQ_DIE_TEMP_HIGH_LV2,
+ AXP806_IRQ_DCDCA_V_LOW,
+ AXP806_IRQ_DCDCB_V_LOW,
+ AXP806_IRQ_DCDCC_V_LOW,
+ AXP806_IRQ_DCDCD_V_LOW,
+ AXP806_IRQ_DCDCE_V_LOW,
+ AXP806_IRQ_PWROK_LONG,
+ AXP806_IRQ_PWROK_SHORT,
+ AXP806_IRQ_WAKEUP,
+ AXP806_IRQ_PWROK_FALL,
+ AXP806_IRQ_PWROK_RISE,
+};
+
enum axp809_irqs {
AXP809_IRQ_ACIN_OVER_V = 1,
AXP809_IRQ_ACIN_PLUGIN,
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index d641a18abacb..76f7ef4d3a0d 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -109,6 +109,10 @@ struct cros_ec_command {
* should check msg.result for the EC's result code.
* @pkt_xfer: send packet to EC and get response
* @lock: one transaction at a time
+ * @mkbp_event_supported: true if this EC supports the MKBP event protocol.
+ * @event_notifier: interrupt event notifier for transport devices.
+ * @event_data: raw payload transferred with the MKBP event.
+ * @event_size: size in bytes of the event data.
*/
struct cros_ec_device {
@@ -137,6 +141,11 @@ struct cros_ec_device {
int (*pkt_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
struct mutex lock;
+ bool mkbp_event_supported;
+ struct blocking_notifier_head event_notifier;
+
+ struct ec_response_get_next_event event_data;
+ int event_size;
};
/* struct cros_ec_platform - ChromeOS EC platform information
@@ -269,6 +278,15 @@ int cros_ec_register(struct cros_ec_device *ec_dev);
*/
int cros_ec_query_all(struct cros_ec_device *ec_dev);
+/**
+ * cros_ec_get_next_event - Fetch next event from the ChromeOS EC
+ *
+ * @ec_dev: Device to fetch event from
+ *
+ * Returns: 0 on success, Linux error number on failure
+ */
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev);
+
/* sysfs stuff */
extern struct attribute_group cros_ec_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 7e7a8d4b4551..76728ff37d01 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -1793,6 +1793,40 @@ struct ec_result_keyscan_seq_ctrl {
};
} __packed;
+/*
+ * Command for retrieving the next pending MKBP event from the EC device
+ *
+ * The device replies with UNAVAILABLE if there aren't any pending events.
+ */
+#define EC_CMD_GET_NEXT_EVENT 0x67
+
+enum ec_mkbp_event {
+ /* Keyboard matrix changed. The event data is the new matrix state. */
+ EC_MKBP_EVENT_KEY_MATRIX = 0,
+
+ /* New host event. The event data is 4 bytes of host event flags. */
+ EC_MKBP_EVENT_HOST_EVENT = 1,
+
+ /* New Sensor FIFO data. The event data is fifo_info structure. */
+ EC_MKBP_EVENT_SENSOR_FIFO = 2,
+
+ /* Number of MKBP events */
+ EC_MKBP_EVENT_COUNT,
+};
+
+union ec_response_get_next_data {
+ uint8_t key_matrix[13];
+
+ /* Unaligned */
+ uint32_t host_event;
+} __packed;
+
+struct ec_response_get_next_event {
+ uint8_t event_type;
+ /* Followed by event data if any */
+ union ec_response_get_next_data data;
+} __packed;
+
/*****************************************************************************/
/* Temperature sensor commands */
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 621af82123c6..f3ae65db4c86 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -3,8 +3,8 @@
*
* Copyright 2012 Dialog Semiconductor Ltd.
*
- * Author: Michal Hajduk <michal.hajduk@diasemi.com>
- * Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ * Author: Michal Hajduk, Dialog Semiconductor
+ * Author: Krystian Garbaciak, Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 612383bd80ae..8a125701ef7b 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -3,8 +3,8 @@
*
* Copyright 2012 Dialog Semiconductor Ltd.
*
- * Author: Michal Hajduk <michal.hajduk@diasemi.com>
- * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ * Author: Michal Hajduk, Dialog Semiconductor
+ * Author: Krystian Garbaciak, Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index 2e0ba6d5fbc3..5d42859cb441 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -3,8 +3,8 @@
*
* Copyright 2012 Dialog Semiconductor Ltd.
*
- * Author: Michal Hajduk <michal.hajduk@diasemi.com>
- * Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ * Author: Michal Hajduk, Dialog Semiconductor
+ * Author: Krystian Garbaciak, Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 0bd69446bb05..7ba67b55b312 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -538,7 +538,6 @@ int db8500_prcmu_get_arm_opp(void);
int db8500_prcmu_set_ape_opp(u8 opp);
int db8500_prcmu_get_ape_opp(void);
int db8500_prcmu_request_ape_opp_100_voltage(bool enable);
-int db8500_prcmu_set_ddr_opp(u8 opp);
int db8500_prcmu_get_ddr_opp(void);
u32 db8500_prcmu_read(unsigned int reg);
@@ -594,11 +593,6 @@ static inline int prcmu_release_usb_wakeup_state(void)
return 0;
}
-static inline int db8500_prcmu_set_ddr_opp(u8 opp)
-{
- return 0;
-}
-
static inline int db8500_prcmu_get_ddr_opp(void)
{
return DDR_100_OPP;
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index 5d374601404c..2e2c6a63a065 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -269,10 +269,6 @@ unsigned long prcmu_clock_rate(u8 clock);
long prcmu_round_clock_rate(u8 clock, unsigned long rate);
int prcmu_set_clock_rate(u8 clock, unsigned long rate);
-static inline int prcmu_set_ddr_opp(u8 opp)
-{
- return db8500_prcmu_set_ddr_opp(opp);
-}
static inline int prcmu_get_ddr_opp(void)
{
return db8500_prcmu_get_ddr_opp();
@@ -489,11 +485,6 @@ static inline int prcmu_get_arm_opp(void)
return ARM_100_OPP;
}
-static inline int prcmu_set_ddr_opp(u8 opp)
-{
- return 0;
-}
-
static inline int prcmu_get_ddr_opp(void)
{
return DDR_100_OPP;
diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h
new file mode 100644
index 000000000000..edbec8350a49
--- /dev/null
+++ b/include/linux/mfd/lp873x.h
@@ -0,0 +1,268 @@
+/*
+ * Functions to access LP873X power management chip.
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_LP873X_H
+#define __LINUX_MFD_LP873X_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+/* LP873x chip id list */
+#define LP873X 0x00
+
+/* All register addresses */
+#define LP873X_REG_DEV_REV 0X00
+#define LP873X_REG_OTP_REV 0X01
+#define LP873X_REG_BUCK0_CTRL_1 0X02
+#define LP873X_REG_BUCK0_CTRL_2 0X03
+#define LP873X_REG_BUCK1_CTRL_1 0X04
+#define LP873X_REG_BUCK1_CTRL_2 0X05
+#define LP873X_REG_BUCK0_VOUT 0X06
+#define LP873X_REG_BUCK1_VOUT 0X07
+#define LP873X_REG_LDO0_CTRL 0X08
+#define LP873X_REG_LDO1_CTRL 0X09
+#define LP873X_REG_LDO0_VOUT 0X0A
+#define LP873X_REG_LDO1_VOUT 0X0B
+#define LP873X_REG_BUCK0_DELAY 0X0C
+#define LP873X_REG_BUCK1_DELAY 0X0D
+#define LP873X_REG_LDO0_DELAY 0X0E
+#define LP873X_REG_LDO1_DELAY 0X0F
+#define LP873X_REG_GPO_DELAY 0X10
+#define LP873X_REG_GPO2_DELAY 0X11
+#define LP873X_REG_GPO_CTRL 0X12
+#define LP873X_REG_CONFIG 0X13
+#define LP873X_REG_PLL_CTRL 0X14
+#define LP873X_REG_PGOOD_CTRL1 0X15
+#define LP873X_REG_PGOOD_CTRL2 0X16
+#define LP873X_REG_PG_FAULT 0X17
+#define LP873X_REG_RESET 0X18
+#define LP873X_REG_INT_TOP_1 0X19
+#define LP873X_REG_INT_TOP_2 0X1A
+#define LP873X_REG_INT_BUCK 0X1B
+#define LP873X_REG_INT_LDO 0X1C
+#define LP873X_REG_TOP_STAT 0X1D
+#define LP873X_REG_BUCK_STAT 0X1E
+#define LP873X_REG_LDO_STAT 0x1F
+#define LP873X_REG_TOP_MASK_1 0x20
+#define LP873X_REG_TOP_MASK_2 0x21
+#define LP873X_REG_BUCK_MASK 0x22
+#define LP873X_REG_LDO_MASK 0x23
+#define LP873X_REG_SEL_I_LOAD 0x24
+#define LP873X_REG_I_LOAD_2 0x25
+#define LP873X_REG_I_LOAD_1 0x26
+
+#define LP873X_REG_MAX LP873X_REG_I_LOAD_1
+
+/* Register field definitions */
+#define LP873X_DEV_REV_DEV_ID 0xC0
+#define LP873X_DEV_REV_ALL_LAYER 0x30
+#define LP873X_DEV_REV_METAL_LAYER 0x0F
+
+#define LP873X_OTP_REV_OTP_ID 0xFF
+
+#define LP873X_BUCK0_CTRL_1_BUCK0_FPWM BIT(3)
+#define LP873X_BUCK0_CTRL_1_BUCK0_RDIS_EN BIT(2)
+#define LP873X_BUCK0_CTRL_1_BUCK0_EN_PIN_CTRL BIT(1)
+#define LP873X_BUCK0_CTRL_1_BUCK0_EN BIT(0)
+
+#define LP873X_BUCK0_CTRL_2_BUCK0_ILIM 0x38
+#define LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE 0x07
+
+#define LP873X_BUCK1_CTRL_1_BUCK1_FPWM BIT(3)
+#define LP873X_BUCK1_CTRL_1_BUCK1_RDIS_EN BIT(2)
+#define LP873X_BUCK1_CTRL_1_BUCK1_EN_PIN_CTRL BIT(1)
+#define LP873X_BUCK1_CTRL_1_BUCK1_EN BIT(0)
+
+#define LP873X_BUCK1_CTRL_2_BUCK1_ILIM 0x38
+#define LP873X_BUCK1_CTRL_2_BUCK1_SLEW_RATE 0x07
+
+#define LP873X_BUCK0_VOUT_BUCK0_VSET 0xFF
+
+#define LP873X_BUCK1_VOUT_BUCK1_VSET 0xFF
+
+#define LP873X_LDO0_CTRL_LDO0_RDIS_EN BIT(2)
+#define LP873X_LDO0_CTRL_LDO0_EN_PIN_CTRL BIT(1)
+#define LP873X_LDO0_CTRL_LDO0_EN BIT(0)
+
+#define LP873X_LDO1_CTRL_LDO1_RDIS_EN BIT(2)
+#define LP873X_LDO1_CTRL_LDO1_EN_PIN_CTRL BIT(1)
+#define LP873X_LDO1_CTRL_LDO1_EN BIT(0)
+
+#define LP873X_LDO0_VOUT_LDO0_VSET 0x1F
+
+#define LP873X_LDO1_VOUT_LDO1_VSET 0x1F
+
+#define LP873X_BUCK0_DELAY_BUCK0_SD_DELAY 0xF0
+#define LP873X_BUCK0_DELAY_BUCK0_SU_DELAY 0x0F
+
+#define LP873X_BUCK1_DELAY_BUCK1_SD_DELAY 0xF0
+#define LP873X_BUCK1_DELAY_BUCK1_SU_DELAY 0x0F
+
+#define LP873X_LDO0_DELAY_LDO0_SD_DELAY 0xF0
+#define LP873X_LDO0_DELAY_LDO0_SU_DELAY 0x0F
+
+#define LP873X_LDO1_DELAY_LDO1_SD_DELAY 0xF0
+#define LP873X_LDO1_DELAY_LDO1_SU_DELAY 0x0F
+
+#define LP873X_GPO_DELAY_GPO_SD_DELAY 0xF0
+#define LP873X_GPO_DELAY_GPO_SU_DELAY 0x0F
+
+#define LP873X_GPO2_DELAY_GPO2_SD_DELAY 0xF0
+#define LP873X_GPO2_DELAY_GPO2_SU_DELAY 0x0F
+
+#define LP873X_GPO_CTRL_GPO2_OD BIT(6)
+#define LP873X_GPO_CTRL_GPO2_EN_PIN_CTRL BIT(5)
+#define LP873X_GPO_CTRL_GPO2_EN BIT(4)
+#define LP873X_GPO_CTRL_GPO_OD BIT(2)
+#define LP873X_GPO_CTRL_GPO_EN_PIN_CTRL BIT(1)
+#define LP873X_GPO_CTRL_GPO_EN BIT(0)
+
+#define LP873X_CONFIG_SU_DELAY_SEL BIT(6)
+#define LP873X_CONFIG_SD_DELAY_SEL BIT(5)
+#define LP873X_CONFIG_CLKIN_PIN_SEL BIT(4)
+#define LP873X_CONFIG_CLKIN_PD BIT(3)
+#define LP873X_CONFIG_EN_PD BIT(2)
+#define LP873X_CONFIG_TDIE_WARN_LEVEL BIT(1)
+#define LP873X_EN_SPREAD_SPEC BIT(0)
+
+#define LP873X_PLL_CTRL_EN_PLL BIT(6)
+#define LP873X_EXT_CLK_FREQ 0x1F
+
+#define LP873X_PGOOD_CTRL1_PGOOD_POL BIT(7)
+#define LP873X_PGOOD_CTRL1_PGOOD_OD BIT(6)
+#define LP873X_PGOOD_CTRL1_PGOOD_WINDOW_LDO BIT(5)
+#define LP873X_PGOOD_CTRL1_PGOOD_WINDOWN_BUCK BIT(4)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_LDO1 BIT(3)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_LDO0 BIT(2)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_BUCK1 BIT(1)
+#define LP873X_PGOOD_CTRL1_PGOOD_EN_PGOOD_BUCK0 BIT(0)
+
+#define LP873X_PGOOD_CTRL2_EN_PGOOD_TWARN BIT(2)
+#define LP873X_PGOOD_CTRL2_EN_PG_FAULT_GATE BIT(1)
+#define LP873X_PGOOD_CTRL2_PGOOD_MODE BIT(0)
+
+#define LP873X_PG_FAULT_PG_FAULT_LDO1 BIT(3)
+#define LP873X_PG_FAULT_PG_FAULT_LDO0 BIT(2)
+#define LP873X_PG_FAULT_PG_FAULT_BUCK1 BIT(1)
+#define LP873X_PG_FAULT_PG_FAULT_BUCK0 BIT(0)
+
+#define LP873X_RESET_SW_RESET BIT(0)
+
+#define LP873X_INT_TOP_1_PGOOD_INT BIT(7)
+#define LP873X_INT_TOP_1_LDO_INT BIT(6)
+#define LP873X_INT_TOP_1_BUCK_INT BIT(5)
+#define LP873X_INT_TOP_1_SYNC_CLK_INT BIT(4)
+#define LP873X_INT_TOP_1_TDIE_SD_INT BIT(3)
+#define LP873X_INT_TOP_1_TDIE_WARN_INT BIT(2)
+#define LP873X_INT_TOP_1_OVP_INT BIT(1)
+#define LP873X_INT_TOP_1_I_MEAS_INT BIT(0)
+
+#define LP873X_INT_TOP_2_RESET_REG_INT BIT(0)
+
+#define LP873X_INT_BUCK_BUCK1_PG_INT BIT(6)
+#define LP873X_INT_BUCK_BUCK1_SC_INT BIT(5)
+#define LP873X_INT_BUCK_BUCK1_ILIM_INT BIT(4)
+#define LP873X_INT_BUCK_BUCK0_PG_INT BIT(2)
+#define LP873X_INT_BUCK_BUCK0_SC_INT BIT(1)
+#define LP873X_INT_BUCK_BUCK0_ILIM_INT BIT(0)
+
+#define LP873X_INT_LDO_LDO1_PG_INT BIT(6)
+#define LP873X_INT_LDO_LDO1_SC_INT BIT(5)
+#define LP873X_INT_LDO_LDO1_ILIM_INT BIT(4)
+#define LP873X_INT_LDO_LDO0_PG_INT BIT(2)
+#define LP873X_INT_LDO_LDO0_SC_INT BIT(1)
+#define LP873X_INT_LDO_LDO0_ILIM_INT BIT(0)
+
+#define LP873X_TOP_STAT_PGOOD_STAT BIT(7)
+#define LP873X_TOP_STAT_SYNC_CLK_STAT BIT(4)
+#define LP873X_TOP_STAT_TDIE_SD_STAT BIT(3)
+#define LP873X_TOP_STAT_TDIE_WARN_STAT BIT(2)
+#define LP873X_TOP_STAT_OVP_STAT BIT(1)
+
+#define LP873X_BUCK_STAT_BUCK1_STAT BIT(7)
+#define LP873X_BUCK_STAT_BUCK1_PG_STAT BIT(6)
+#define LP873X_BUCK_STAT_BUCK1_ILIM_STAT BIT(4)
+#define LP873X_BUCK_STAT_BUCK0_STAT BIT(3)
+#define LP873X_BUCK_STAT_BUCK0_PG_STAT BIT(2)
+#define LP873X_BUCK_STAT_BUCK0_ILIM_STAT BIT(0)
+
+#define LP873X_LDO_STAT_LDO1_STAT BIT(7)
+#define LP873X_LDO_STAT_LDO1_PG_STAT BIT(6)
+#define LP873X_LDO_STAT_LDO1_ILIM_STAT BIT(4)
+#define LP873X_LDO_STAT_LDO0_STAT BIT(3)
+#define LP873X_LDO_STAT_LDO0_PG_STAT BIT(2)
+#define LP873X_LDO_STAT_LDO0_ILIM_STAT BIT(0)
+
+#define LP873X_TOP_MASK_1_PGOOD_INT_MASK BIT(7)
+#define LP873X_TOP_MASK_1_SYNC_CLK_MASK BIT(4)
+#define LP873X_TOP_MASK_1_TDIE_WARN_MASK BIT(2)
+#define LP873X_TOP_MASK_1_I_MEAS_MASK BIT(0)
+
+#define LP873X_TOP_MASK_2_RESET_REG_MASK BIT(0)
+
+#define LP873X_BUCK_MASK_BUCK1_PGF_MASK BIT(7)
+#define LP873X_BUCK_MASK_BUCK1_PGR_MASK BIT(6)
+#define LP873X_BUCK_MASK_BUCK1_ILIM_MASK BIT(4)
+#define LP873X_BUCK_MASK_BUCK0_PGF_MASK BIT(3)
+#define LP873X_BUCK_MASK_BUCK0_PGR_MASK BIT(2)
+#define LP873X_BUCK_MASK_BUCK0_ILIM_MASK BIT(0)
+
+#define LP873X_LDO_MASK_LDO1_PGF_MASK BIT(7)
+#define LP873X_LDO_MASK_LDO1_PGR_MASK BIT(6)
+#define LP873X_LDO_MASK_LDO1_ILIM_MASK BIT(4)
+#define LP873X_LDO_MASK_LDO0_PGF_MASK BIT(3)
+#define LP873X_LDO_MASK_LDO0_PGR_MASK BIT(2)
+#define LP873X_LDO_MASK_LDO0_ILIM_MASK BIT(0)
+
+#define LP873X_SEL_I_LOAD_CURRENT_BUCK_SELECT BIT(0)
+
+#define LP873X_I_LOAD_2_BUCK_LOAD_CURRENT BIT(0)
+
+#define LP873X_I_LOAD_1_BUCK_LOAD_CURRENT 0xFF
+
+#define LP873X_MAX_REG_ID LP873X_LDO_1
+
+/* Number of step-down converters available */
+#define LP873X_NUM_BUCK 2
+/* Number of LDO voltage regulators available */
+#define LP873X_NUM_LDO 2
+/* Number of total regulators available */
+#define LP873X_NUM_REGULATOR (LP873X_NUM_BUCK + LP873X_NUM_LDO)
+
+enum lp873x_regulator_id {
+ /* BUCK's */
+ LP873X_BUCK_0,
+ LP873X_BUCK_1,
+ /* LDOs */
+ LP873X_LDO_0,
+ LP873X_LDO_1,
+};
+
+/**
+ * struct lp873x - state holder for the lp873x driver
+ * @dev: struct device pointer for MFD device
+ * @rev: revision of the lp873x
+ * @lock: lock guarding the data structure
+ * @regmap: register map of the lp873x PMIC
+ *
+ * Device data may be used to access the LP873X chip
+ */
+struct lp873x {
+ struct device *dev;
+ u8 rev;
+ struct regmap *regmap;
+};
+#endif /* __LINUX_MFD_LP873X_H */
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index f01c1fae4d84..df75234f979d 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index ccfaf952c31b..d81b52bb8bee 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 441b6ee72691..6d435a3c06bc 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -1,11 +1,15 @@
/*
- * rk808.h for Rockchip RK808
+ * Register definitions for Rockchip's RK808/RK818 PMIC
*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
*
* Author: Chris Zhong <zyw@rock-chips.com>
* Author: Zhang Qing <zhangqing@rock-chips.com>
*
+ * Copyright (C) 2016 PHYTEC Messtechnik GmbH
+ *
+ * Author: Wadim Egorov <w.egorov@phytec.de>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -16,8 +20,8 @@
* more details.
*/
-#ifndef __LINUX_REGULATOR_rk808_H
-#define __LINUX_REGULATOR_rk808_H
+#ifndef __LINUX_REGULATOR_RK808_H
+#define __LINUX_REGULATOR_RK808_H
#include <linux/regulator/machine.h>
#include <linux/regmap.h>
@@ -28,7 +32,7 @@
#define RK808_DCDC1 0 /* (0+RK808_START) */
#define RK808_LDO1 4 /* (4+RK808_START) */
-#define RK808_NUM_REGULATORS 14
+#define RK808_NUM_REGULATORS 14
enum rk808_reg {
RK808_ID_DCDC1,
@@ -65,6 +69,8 @@ enum rk808_reg {
#define RK808_RTC_INT_REG 0x12
#define RK808_RTC_COMP_LSB_REG 0x13
#define RK808_RTC_COMP_MSB_REG 0x14
+#define RK808_ID_MSB 0x17
+#define RK808_ID_LSB 0x18
#define RK808_CLK32OUT_REG 0x20
#define RK808_VB_MON_REG 0x21
#define RK808_THERMAL_REG 0x22
@@ -115,7 +121,92 @@ enum rk808_reg {
#define RK808_INT_STS_MSK_REG2 0x4f
#define RK808_IO_POL_REG 0x50
-/* IRQ Definitions */
+/* RK818 */
+#define RK818_DCDC1 0
+#define RK818_LDO1 4
+#define RK818_NUM_REGULATORS 17
+
+enum rk818_reg {
+ RK818_ID_DCDC1,
+ RK818_ID_DCDC2,
+ RK818_ID_DCDC3,
+ RK818_ID_DCDC4,
+ RK818_ID_BOOST,
+ RK818_ID_LDO1,
+ RK818_ID_LDO2,
+ RK818_ID_LDO3,
+ RK818_ID_LDO4,
+ RK818_ID_LDO5,
+ RK818_ID_LDO6,
+ RK818_ID_LDO7,
+ RK818_ID_LDO8,
+ RK818_ID_LDO9,
+ RK818_ID_SWITCH,
+ RK818_ID_HDMI_SWITCH,
+ RK818_ID_OTG_SWITCH,
+};
+
+#define RK818_DCDC_EN_REG 0x23
+#define RK818_LDO_EN_REG 0x24
+#define RK818_SLEEP_SET_OFF_REG1 0x25
+#define RK818_SLEEP_SET_OFF_REG2 0x26
+#define RK818_DCDC_UV_STS_REG 0x27
+#define RK818_DCDC_UV_ACT_REG 0x28
+#define RK818_LDO_UV_STS_REG 0x29
+#define RK818_LDO_UV_ACT_REG 0x2a
+#define RK818_DCDC_PG_REG 0x2b
+#define RK818_LDO_PG_REG 0x2c
+#define RK818_VOUT_MON_TDB_REG 0x2d
+#define RK818_BUCK1_CONFIG_REG 0x2e
+#define RK818_BUCK1_ON_VSEL_REG 0x2f
+#define RK818_BUCK1_SLP_VSEL_REG 0x30
+#define RK818_BUCK2_CONFIG_REG 0x32
+#define RK818_BUCK2_ON_VSEL_REG 0x33
+#define RK818_BUCK2_SLP_VSEL_REG 0x34
+#define RK818_BUCK3_CONFIG_REG 0x36
+#define RK818_BUCK4_CONFIG_REG 0x37
+#define RK818_BUCK4_ON_VSEL_REG 0x38
+#define RK818_BUCK4_SLP_VSEL_REG 0x39
+#define RK818_BOOST_CONFIG_REG 0x3a
+#define RK818_LDO1_ON_VSEL_REG 0x3b
+#define RK818_LDO1_SLP_VSEL_REG 0x3c
+#define RK818_LDO2_ON_VSEL_REG 0x3d
+#define RK818_LDO2_SLP_VSEL_REG 0x3e
+#define RK818_LDO3_ON_VSEL_REG 0x3f
+#define RK818_LDO3_SLP_VSEL_REG 0x40
+#define RK818_LDO4_ON_VSEL_REG 0x41
+#define RK818_LDO4_SLP_VSEL_REG 0x42
+#define RK818_LDO5_ON_VSEL_REG 0x43
+#define RK818_LDO5_SLP_VSEL_REG 0x44
+#define RK818_LDO6_ON_VSEL_REG 0x45
+#define RK818_LDO6_SLP_VSEL_REG 0x46
+#define RK818_LDO7_ON_VSEL_REG 0x47
+#define RK818_LDO7_SLP_VSEL_REG 0x48
+#define RK818_LDO8_ON_VSEL_REG 0x49
+#define RK818_LDO8_SLP_VSEL_REG 0x4a
+#define RK818_BOOST_LDO9_ON_VSEL_REG 0x54
+#define RK818_BOOST_LDO9_SLP_VSEL_REG 0x55
+#define RK818_DEVCTRL_REG 0x4b
+#define RK818_INT_STS_REG1 0X4c
+#define RK818_INT_STS_MSK_REG1 0x4d
+#define RK818_INT_STS_REG2 0x4e
+#define RK818_INT_STS_MSK_REG2 0x4f
+#define RK818_IO_POL_REG 0x50
+#define RK818_H5V_EN_REG 0x52
+#define RK818_SLEEP_SET_OFF_REG3 0x53
+#define RK818_BOOST_LDO9_ON_VSEL_REG 0x54
+#define RK818_BOOST_LDO9_SLP_VSEL_REG 0x55
+#define RK818_BOOST_CTRL_REG 0x56
+#define RK818_DCDC_ILMAX 0x90
+#define RK818_USB_CTRL_REG 0xa1
+
+#define RK818_H5V_EN BIT(0)
+#define RK818_REF_RDY_CTRL BIT(1)
+#define RK818_USB_ILIM_SEL_MASK 0xf
+#define RK818_USB_ILMIN_2000MA 0x7
+#define RK818_USB_CHG_SD_VSEL_MASK 0x70
+
+/* RK808 IRQ Definitions */
#define RK808_IRQ_VOUT_LO 0
#define RK808_IRQ_VB_LO 1
#define RK808_IRQ_PWRON 2
@@ -137,6 +228,43 @@ enum rk808_reg {
#define RK808_IRQ_PLUG_IN_INT_MSK BIT(0)
#define RK808_IRQ_PLUG_OUT_INT_MSK BIT(1)
+/* RK818 IRQ Definitions */
+#define RK818_IRQ_VOUT_LO 0
+#define RK818_IRQ_VB_LO 1
+#define RK818_IRQ_PWRON 2
+#define RK818_IRQ_PWRON_LP 3
+#define RK818_IRQ_HOTDIE 4
+#define RK818_IRQ_RTC_ALARM 5
+#define RK818_IRQ_RTC_PERIOD 6
+#define RK818_IRQ_USB_OV 7
+#define RK818_IRQ_PLUG_IN 8
+#define RK818_IRQ_PLUG_OUT 9
+#define RK818_IRQ_CHG_OK 10
+#define RK818_IRQ_CHG_TE 11
+#define RK818_IRQ_CHG_TS1 12
+#define RK818_IRQ_TS2 13
+#define RK818_IRQ_CHG_CVTLIM 14
+#define RK818_IRQ_DISCHG_ILIM 15
+
+#define RK818_IRQ_VOUT_LO_MSK BIT(0)
+#define RK818_IRQ_VB_LO_MSK BIT(1)
+#define RK818_IRQ_PWRON_MSK BIT(2)
+#define RK818_IRQ_PWRON_LP_MSK BIT(3)
+#define RK818_IRQ_HOTDIE_MSK BIT(4)
+#define RK818_IRQ_RTC_ALARM_MSK BIT(5)
+#define RK818_IRQ_RTC_PERIOD_MSK BIT(6)
+#define RK818_IRQ_USB_OV_MSK BIT(7)
+#define RK818_IRQ_PLUG_IN_MSK BIT(0)
+#define RK818_IRQ_PLUG_OUT_MSK BIT(1)
+#define RK818_IRQ_CHG_OK_MSK BIT(2)
+#define RK818_IRQ_CHG_TE_MSK BIT(3)
+#define RK818_IRQ_CHG_TS1_MSK BIT(4)
+#define RK818_IRQ_TS2_MSK BIT(5)
+#define RK818_IRQ_CHG_CVTLIM_MSK BIT(6)
+#define RK818_IRQ_DISCHG_ILIM_MSK BIT(7)
+
+#define RK818_NUM_IRQ 16
+
#define RK808_VBAT_LOW_2V8 0x00
#define RK808_VBAT_LOW_2V9 0x01
#define RK808_VBAT_LOW_3V0 0x02
@@ -191,9 +319,17 @@ enum {
BOOST_ILMIN_250MA,
};
+enum {
+ RK808_ID = 0x0000,
+ RK818_ID = 0x8181,
+};
+
struct rk808 {
- struct i2c_client *i2c;
- struct regmap_irq_chip_data *irq_data;
- struct regmap *regmap;
+ struct i2c_client *i2c;
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+ long variant;
+ const struct regmap_config *regmap_cfg;
+ const struct regmap_irq_chip *regmap_irq_chip;
};
-#endif /* __LINUX_REGULATOR_rk808_H */
+#endif /* __LINUX_REGULATOR_RK808_H */
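
The new variant field and the RK808_ID/RK818_ID values let one driver serve both PMICs by selecting per-chip tables at probe time. A rough sketch of that selection; the *_regmap_config and *_irq_chip symbols are assumed to exist in the driver and are not part of this patch:

	switch (rk808->variant) {
	case RK808_ID:
		rk808->regmap_cfg = &rk808_regmap_config;
		rk808->regmap_irq_chip = &rk808_irq_chip;
		break;
	case RK818_ID:
		rk808->regmap_cfg = &rk818_regmap_config;
		rk808->regmap_irq_chip = &rk818_irq_chip;
		break;
	default:
		return -EINVAL;	/* unknown chip ID read from RK808_ID_MSB/LSB */
	}
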
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index de748bc7525e..4a827af17e59 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -26,6 +26,7 @@ enum stmpe_partnum {
STMPE610,
STMPE801,
STMPE811,
+ STMPE1600,
STMPE1601,
STMPE1801,
STMPE2401,
@@ -39,22 +40,42 @@ enum stmpe_partnum {
*/
enum {
STMPE_IDX_CHIP_ID,
+ STMPE_IDX_SYS_CTRL,
+ STMPE_IDX_SYS_CTRL2,
STMPE_IDX_ICR_LSB,
STMPE_IDX_IER_LSB,
+ STMPE_IDX_IER_MSB,
STMPE_IDX_ISR_LSB,
STMPE_IDX_ISR_MSB,
STMPE_IDX_GPMR_LSB,
+ STMPE_IDX_GPMR_CSB,
+ STMPE_IDX_GPMR_MSB,
STMPE_IDX_GPSR_LSB,
+ STMPE_IDX_GPSR_CSB,
+ STMPE_IDX_GPSR_MSB,
STMPE_IDX_GPCR_LSB,
+ STMPE_IDX_GPCR_CSB,
+ STMPE_IDX_GPCR_MSB,
STMPE_IDX_GPDR_LSB,
+ STMPE_IDX_GPDR_CSB,
+ STMPE_IDX_GPDR_MSB,
+ STMPE_IDX_GPEDR_LSB,
+ STMPE_IDX_GPEDR_CSB,
STMPE_IDX_GPEDR_MSB,
STMPE_IDX_GPRER_LSB,
+ STMPE_IDX_GPRER_CSB,
+ STMPE_IDX_GPRER_MSB,
STMPE_IDX_GPFER_LSB,
+ STMPE_IDX_GPFER_CSB,
+ STMPE_IDX_GPFER_MSB,
STMPE_IDX_GPPUR_LSB,
STMPE_IDX_GPPDR_LSB,
STMPE_IDX_GPAFR_U_MSB,
STMPE_IDX_IEGPIOR_LSB,
+ STMPE_IDX_IEGPIOR_CSB,
+ STMPE_IDX_IEGPIOR_MSB,
STMPE_IDX_ISGPIOR_LSB,
+ STMPE_IDX_ISGPIOR_CSB,
STMPE_IDX_ISGPIOR_MSB,
STMPE_IDX_MAX,
};
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
index 76f30f940c70..c28ff21ca4d2 100644
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ b/include/linux/mfd/syscon/exynos5-pmu.h
@@ -43,8 +43,10 @@
#define EXYNOS5433_MIPI_PHY2_CONTROL (0x718)
#define EXYNOS5_PHY_ENABLE BIT(0)
-
#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1)
#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2)
+#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
+#define EXYNOS5433_PAD_INITIATE_WAKEUP_FROM_LOWPWR BIT(28)
+
#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ */
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index 1c88231496d3..4ccda8969639 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -73,6 +73,7 @@
#define TPS65217_PPATH_AC_CURRENT_MASK 0x0C
#define TPS65217_PPATH_USB_CURRENT_MASK 0x03
+#define TPS65217_INT_RESERVEDM BIT(7)
#define TPS65217_INT_PBM BIT(6)
#define TPS65217_INT_ACM BIT(5)
#define TPS65217_INT_USBM BIT(4)
@@ -233,6 +234,13 @@ struct tps65217_bl_pdata {
int dft_brightness;
};
+enum tps65217_irq_type {
+ TPS65217_IRQ_PB,
+ TPS65217_IRQ_AC,
+ TPS65217_IRQ_USB,
+ TPS65217_NUM_IRQ
+};
+
/**
* struct tps65217_board - packages regulator init data
* @tps65217_regulator_data: regulator initialization values
@@ -258,6 +266,10 @@ struct tps65217 {
struct regulator_desc desc[TPS65217_NUM_REGULATOR];
struct regmap *regmap;
u8 *strobes;
+ struct irq_domain *irq_domain;
+ struct mutex irq_lock;
+ u8 irq_mask;
+ int irq;
};
static inline struct tps65217 *dev_to_tps65217(struct device *dev)
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index 7fdf5326f34e..d1db9527fab5 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -63,6 +63,11 @@
#define TPS65218_CHIPID_CHIP_MASK 0xF8
#define TPS65218_CHIPID_REV_MASK 0x07
+#define TPS65218_REV_1_0 0x0
+#define TPS65218_REV_1_1 0x1
+#define TPS65218_REV_2_0 0x2
+#define TPS65218_REV_2_1 0x3
+
#define TPS65218_INT1_VPRG BIT(5)
#define TPS65218_INT1_AC BIT(4)
#define TPS65218_INT1_PB BIT(3)
@@ -267,6 +272,7 @@ struct tps_info {
struct tps65218 {
struct device *dev;
unsigned int id;
+ u8 rev;
struct mutex tps_lock; /* lock guarding the data structure */
/* IRQ Data */
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 36795a1be479..a2e88761c09f 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -168,7 +168,7 @@
#define TWL6040_VIBROCDET 0x20
#define TWL6040_TSHUTDET 0x40
-#define TWL6040_CELLS 3
+#define TWL6040_CELLS 4
#define TWL6040_REV_ES1_0 0x00
#define TWL6040_REV_ES1_1 0x01 /* Rev ES1.1 and ES1.2 */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 543037465973..722698a43d79 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -3,6 +3,7 @@
#include <linux/major.h>
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/device.h>
/*
* These allocations are managed by device@lanana.org. If you use an
@@ -70,6 +71,13 @@ struct miscdevice {
extern int misc_register(struct miscdevice *misc);
extern void misc_deregister(struct miscdevice *misc);
+/*
+ * Helper macro for drivers that don't do anything special in module init / exit
+ * call. This helps in eliminating boilerplate code.
+ */
+#define module_misc_device(__misc_device) \
+ module_driver(__misc_device, misc_register, misc_deregister)
+
#define MODULE_ALIAS_MISCDEV(minor) \
MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \
"-" __stringify(minor))
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 116b284bc4ce..1f3568694a57 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -309,7 +309,8 @@ int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
struct ifla_vf_stats *vf_stats);
u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
-int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan,
+ u8 qos, __be16 proto);
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
int max_tx_rate);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
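
The extra __be16 proto argument to mlx4_set_vf_vlan() lets the caller pick the VLAN protocol instead of assuming 802.1Q. A hedged example of the updated call; the port/VF/VLAN numbers are illustrative and "dev" is assumed to be a valid struct mlx4_dev pointer:

	/* VLAN 100, QoS 0 for VF 0 on port 1; callers may now also pass
	 * htons(ETH_P_8021AD) to request an S-VLAN tag instead. */
	err = mlx4_set_vf_vlan(dev, 1, 0, 100, 0, htons(ETH_P_8021Q));
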
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 42da3552f7cb..f6a164297358 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -71,7 +71,8 @@ enum {
MLX4_FLAG_SLAVE = 1 << 3,
MLX4_FLAG_SRIOV = 1 << 4,
MLX4_FLAG_OLD_REG_MAC = 1 << 6,
- MLX4_FLAG_BONDED = 1 << 7
+ MLX4_FLAG_BONDED = 1 << 7,
+ MLX4_FLAG_SECURE_HOST = 1 << 8,
};
enum {
@@ -221,6 +222,8 @@ enum {
MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
+ MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
+ MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
};
enum {
@@ -448,6 +451,7 @@ enum {
MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
+ MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP = 0x17,
};
/* Port mgmt change event handling */
@@ -459,6 +463,11 @@ enum {
MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
};
+union sl2vl_tbl_to_u64 {
+ u8 sl8[8];
+ u64 sl64;
+};
+
enum {
MLX4_DEVICE_STATE_UP = 1 << 0,
MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1,
@@ -945,6 +954,9 @@ struct mlx4_eqe {
__be32 block_ptr;
__be32 tbl_entries_mask;
} __packed tbl_change_info;
+ struct {
+ u8 sl2vl_table[8];
+ } __packed sl2vl_tbl_change_info;
} params;
} __packed port_mgmt_change;
struct {
@@ -1371,6 +1383,8 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
+int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
+ bool *vlan_offload_disabled);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
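
The new sl2vl_tbl_to_u64 union pairs with the sl2vl_tbl_change_info port-management event added above; a sketch, not from the patch, of folding the 8-entry SL-to-VL table into one 64-bit value, assuming "eqe" points at the event entry being handled:

	union sl2vl_tbl_to_u64 conv;

	memcpy(conv.sl8,
	       eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table,
	       sizeof(conv.sl8));
	/* conv.sl64 now carries the whole table as a single value */
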
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index deaa2217214d..b4ee8f62ce8d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -160,6 +160,7 @@ struct mlx4_qp_path {
enum { /* fl */
MLX4_FL_CV = 1 << 6,
+ MLX4_FL_SV = 1 << 5,
MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
@@ -267,6 +268,7 @@ enum {
MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
+ MLX4_UPD_QP_PATH_MASK_SV = 22 + 32,
};
enum { /* param3 */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2566f6d6444f..7c3c0d3aca37 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -170,12 +170,12 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_create_cq_mbox_in *in, int inlen);
+ u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_query_cq_mbox_out *out);
+ u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- struct mlx5_modify_cq_mbox_in *in, int in_sz);
+ u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);
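
The CQ commands now take raw mlx5_ifc-style mailboxes (u32 arrays) instead of the dedicated mbox structs removed from device.h below. A hedged sketch of the new calling convention; "eqn", "npages" and the surrounding error handling are illustrative:

	int inlen = MLX5_ST_SZ_BYTES(create_cq_in) + sizeof(u64) * npages;
	u32 *in = kvzalloc(inlen, GFP_KERNEL);
	void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, c_eqn, eqn);	/* fields filled via the ifc accessors */
	err = mlx5_core_create_cq(dev, cq, in, inlen);
	kvfree(in);
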
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 0b6d15cddb2f..58276144ba81 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -92,12 +92,21 @@ __mlx5_mask(typ, fld))
___t; \
})
-#define MLX5_SET64(typ, p, fld, v) do { \
+#define __MLX5_SET64(typ, p, fld, v) do { \
BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
- BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)
+#define MLX5_SET64(typ, p, fld, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+ __MLX5_SET64(typ, p, fld, v); \
+} while (0)
+
+#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+ __MLX5_SET64(typ, p, fld[idx], v); \
+} while (0)
+
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
#define MLX5_GET64_PR(typ, p, fld) ({ \
@@ -198,19 +207,6 @@ enum {
};
enum {
- MLX5_ACCESS_MODE_PA = 0,
- MLX5_ACCESS_MODE_MTT = 1,
- MLX5_ACCESS_MODE_KLM = 2
-};
-
-enum {
- MLX5_MKEY_REMOTE_INVAL = 1 << 24,
- MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
- MLX5_MKEY_BSF_EN = 1 << 30,
- MLX5_MKEY_LEN64 = 1 << 31,
-};
-
-enum {
MLX5_EN_RD = (u64)1,
MLX5_EN_WR = (u64)2
};
@@ -411,33 +407,6 @@ enum {
MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
};
-struct mlx5_inbox_hdr {
- __be16 opcode;
- u8 rsvd[4];
- __be16 opmod;
-};
-
-struct mlx5_outbox_hdr {
- u8 status;
- u8 rsvd[3];
- __be32 syndrome;
-};
-
-struct mlx5_cmd_query_adapter_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cmd_query_adapter_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[24];
- u8 intapin;
- u8 rsvd1[13];
- __be16 vsd_vendor_id;
- u8 vsd[208];
- u8 vsd_psid[16];
-};
-
enum mlx5_odp_transport_cap_bits {
MLX5_ODP_SUPPORT_SEND = 1 << 31,
MLX5_ODP_SUPPORT_RECV = 1 << 30,
@@ -455,30 +424,6 @@ struct mlx5_odp_caps {
char reserved2[0xe4];
};
-struct mlx5_cmd_init_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 profile;
- u8 rsvd1[4];
-};
-
-struct mlx5_cmd_init_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 profile;
- u8 rsvd1[4];
-};
-
-struct mlx5_cmd_teardown_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
struct mlx5_cmd_layout {
u8 type;
u8 rsvd0[3];
@@ -494,7 +439,6 @@ struct mlx5_cmd_layout {
u8 status_own;
};
-
struct health_buffer {
__be32 assert_var[5];
__be32 rsvd0[3];
@@ -856,245 +800,15 @@ struct mlx5_cqe128 {
struct mlx5_cqe64 cqe64;
};
-struct mlx5_srq_ctx {
- u8 state_log_sz;
- u8 rsvd0[3];
- __be32 flags_xrcd;
- __be32 pgoff_cqn;
- u8 rsvd1[4];
- u8 log_pg_sz;
- u8 rsvd2[7];
- __be32 pd;
- __be16 lwm;
- __be16 wqe_cnt;
- u8 rsvd3[8];
- __be64 db_record;
-};
-
-struct mlx5_create_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_srqn;
- u8 rsvd0[4];
- struct mlx5_srq_ctx ctx;
- u8 rsvd1[208];
- __be64 pas[0];
-};
-
-struct mlx5_create_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 srqn;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_query_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
- struct mlx5_srq_ctx ctx;
- u8 rsvd1[32];
- __be64 pas[0];
-};
-
-struct mlx5_arm_srq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 srqn;
- __be16 rsvd;
- __be16 lwm;
-};
-
-struct mlx5_arm_srq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_cq_context {
- u8 status;
- u8 cqe_sz_flags;
- u8 st;
- u8 rsvd3;
- u8 rsvd4[6];
- __be16 page_offset;
- __be32 log_sz_usr_page;
- __be16 cq_period;
- __be16 cq_max_count;
- __be16 rsvd20;
- __be16 c_eqn;
- u8 log_pg_sz;
- u8 rsvd25[7];
- __be32 last_notified_index;
- __be32 solicit_producer_index;
- __be32 consumer_counter;
- __be32 producer_counter;
- u8 rsvd48[8];
- __be64 db_record_addr;
-};
-
-struct mlx5_create_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_cqn;
- u8 rsvdx[4];
- struct mlx5_cq_context ctx;
- u8 rsvd6[192];
- __be64 pas[0];
-};
-
-struct mlx5_create_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_query_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- u8 rsvd0[4];
-};
-
-struct mlx5_query_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
- struct mlx5_cq_context ctx;
- u8 rsvd6[16];
- __be64 pas[0];
-};
-
-struct mlx5_modify_cq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 cqn;
- __be32 field_select;
- struct mlx5_cq_context ctx;
- u8 rsvd[192];
- __be64 pas[0];
-};
-
-struct mlx5_modify_cq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_enable_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_disable_hca_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_eq_context {
- u8 status;
- u8 ec_oi;
- u8 st;
- u8 rsvd2[7];
- __be16 page_pffset;
- __be32 log_sz_usr_page;
- u8 rsvd3[7];
- u8 intr;
- u8 log_page_size;
- u8 rsvd4[15];
- __be32 consumer_counter;
- __be32 produser_counter;
- u8 rsvd5[16];
-};
-
-struct mlx5_create_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 input_eqn;
- u8 rsvd1[4];
- struct mlx5_eq_context ctx;
- u8 rsvd2[8];
- __be64 events_mask;
- u8 rsvd3[176];
- __be64 pas[0];
-};
-
-struct mlx5_create_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[3];
- u8 eq_number;
- u8 rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 eqn;
- u8 rsvd1[4];
-};
-
-struct mlx5_destroy_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_map_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be64 mask;
- u8 mu;
- u8 rsvd0[2];
- u8 eqn;
- u8 rsvd1[24];
-};
-
-struct mlx5_map_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_eq_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[3];
- u8 eqn;
- u8 rsvd1[4];
-};
-
-struct mlx5_query_eq_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- struct mlx5_eq_context ctx;
+enum {
+ MLX5_MKEY_STATUS_FREE = 1 << 6,
};
enum {
- MLX5_MKEY_STATUS_FREE = 1 << 6,
+ MLX5_MKEY_REMOTE_INVAL = 1 << 24,
+ MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
+ MLX5_MKEY_BSF_EN = 1 << 30,
+ MLX5_MKEY_LEN64 = 1 << 31,
};
struct mlx5_mkey_seg {
@@ -1119,134 +833,12 @@ struct mlx5_mkey_seg {
u8 rsvd4[4];
};
-struct mlx5_query_special_ctxs_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_special_ctxs_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 dump_fill_mkey;
- __be32 reserved_lkey;
-};
-
-struct mlx5_create_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_mkey_index;
- __be32 flags;
- struct mlx5_mkey_seg seg;
- u8 rsvd1[16];
- __be32 xlat_oct_act_size;
- __be32 rsvd2;
- u8 rsvd3[168];
- __be64 pas[0];
-};
-
-struct mlx5_create_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 mkey;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_query_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
-};
-
-struct mlx5_query_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be64 pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 mkey;
- __be64 pas[0];
-};
-
-struct mlx5_modify_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_dump_mkey_mbox_in {
- struct mlx5_inbox_hdr hdr;
-};
-
-struct mlx5_dump_mkey_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 mkey;
-};
-
-struct mlx5_mad_ifc_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be16 remote_lid;
- u8 rsvd0;
- u8 port;
- u8 rsvd1[4];
- u8 data[256];
-};
-
-struct mlx5_mad_ifc_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- u8 data[256];
-};
-
-struct mlx5_access_reg_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd0[2];
- __be16 register_id;
- __be32 arg;
- __be32 data[0];
-};
-
-struct mlx5_access_reg_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- __be32 data[0];
-};
-
#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
enum {
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
};
-struct mlx5_allocate_psv_in {
- struct mlx5_inbox_hdr hdr;
- __be32 npsv_pd;
- __be32 rsvd_psv0;
-};
-
-struct mlx5_allocate_psv_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
- __be32 psv_idx[4];
-};
-
-struct mlx5_destroy_psv_in {
- struct mlx5_inbox_hdr hdr;
- __be32 psv_number;
- u8 rsvd[4];
-};
-
-struct mlx5_destroy_psv_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
enum {
VPORT_STATE_DOWN = 0x0,
VPORT_STATE_UP = 0x1,
@@ -1381,6 +973,18 @@ enum mlx5_cap_type {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
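
On the MLX5_SET64 split near the top of this header: MLX5_SET64 keeps the compile-time check that the field sits on a 64-bit boundary, while MLX5_ARRAY_SET64 applies that check to the array base and then writes one element at a runtime index, which BUILD_BUG_ON could not express for fld[idx]. A hedged illustration; the loop variables are made up and "pas" stands for the usual page-address array of a command inbox:

	for (i = 0; i < npages; i++)
		MLX5_ARRAY_SET64(create_cq_in, in, pas, i, page_addrs[i]);
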
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ccea6fb16482..85c4786427e4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -49,10 +49,6 @@
#include <linux/mlx5/srq.h>
enum {
- MLX5_RQ_BITMASK_VSD = 1 << 1,
-};
-
-enum {
MLX5_BOARD_ID_LEN = 64,
MLX5_MAX_NAME_LEN = 16,
};
@@ -481,6 +477,7 @@ struct mlx5_fc_stats {
};
struct mlx5_eswitch;
+struct mlx5_lag;
struct mlx5_rl_entry {
u32 rate;
@@ -554,6 +551,7 @@ struct mlx5_priv {
struct mlx5_flow_steering *steering;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
+ struct mlx5_lag *lag;
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
@@ -771,14 +769,15 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
-int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size, mlx5_cmd_cbk_t callback,
void *context);
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -807,15 +806,18 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen,
+ u32 *out, int outlen,
+ mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
- struct mlx5_create_mkey_mbox_in *in, int inlen,
- mlx5_cmd_cbk_t callback, void *context,
- struct mlx5_create_mkey_mbox_out *out);
+ u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- struct mlx5_query_mkey_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
@@ -826,8 +828,6 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
-int mlx5_sriov_init(struct mlx5_core_dev *dev);
-int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -865,7 +865,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- struct mlx5_query_eq_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
@@ -930,6 +930,8 @@ enum {
struct mlx5_interface {
void * (*add)(struct mlx5_core_dev *dev);
void (*remove)(struct mlx5_core_dev *dev, void *context);
+ int (*attach)(struct mlx5_core_dev *dev, void *context);
+ void (*detach)(struct mlx5_core_dev *dev, void *context);
void (*event)(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param);
void * (*get_dev)(void *context);
@@ -942,6 +944,11 @@ int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
+int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
+int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+
struct mlx5_profile {
u64 mask;
u8 log_max_qp;
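
The new attach/detach hooks in struct mlx5_interface let a client re-bind to the core device (for example around PCI error recovery) without a full add/remove cycle. A sketch only; the example_* callbacks are hypothetical and their bodies are omitted:

	static struct mlx5_interface example_intf = {
		.add      = example_add,
		.remove   = example_remove,
		.attach   = example_attach,
		.detach   = example_detach,
		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	};

	/* registered once at module init; the return value is checked there */
	mlx5_register_interface(&example_intf);
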
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index e036d6030867..93ebc5e21334 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -54,6 +54,7 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
@@ -62,6 +63,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ MLX5_FLOW_NAMESPACE_SNIFFER_RX,
+ MLX5_FLOW_NAMESPACE_SNIFFER_TX,
};
struct mlx5_flow_table;
@@ -106,6 +109,9 @@ mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
u32 level, u16 vport);
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+ struct mlx5_flow_namespace *ns,
+ int prio, u32 level);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
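
The two sniffer namespaces and mlx5_create_lag_demux_flow_table() added above are meant to be used together; a sketch under assumed context, where "dev" is the core device and the priority/level of 0 are illustrative:

	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_SNIFFER_RX);
	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
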
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 21bc4557b67a..6045d4d58065 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -152,7 +152,7 @@ enum {
MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
MLX5_CMD_OP_ACCESS_REG = 0x805,
MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
- MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
MLX5_CMD_OP_MAD_IFC = 0x50d,
MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -174,6 +174,12 @@ enum {
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
MLX5_CMD_OP_SET_WOL_ROL = 0x830,
MLX5_CMD_OP_QUERY_WOL_ROL = 0x831,
+ MLX5_CMD_OP_CREATE_LAG = 0x840,
+ MLX5_CMD_OP_MODIFY_LAG = 0x841,
+ MLX5_CMD_OP_QUERY_LAG = 0x842,
+ MLX5_CMD_OP_DESTROY_LAG = 0x843,
+ MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844,
+ MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845,
MLX5_CMD_OP_CREATE_TIR = 0x900,
MLX5_CMD_OP_MODIFY_TIR = 0x901,
MLX5_CMD_OP_DESTROY_TIR = 0x902,
@@ -212,6 +218,8 @@ enum {
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
MLX5_CMD_OP_MAX
};
@@ -281,7 +289,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
- u8 reserved_at_7[0x19];
+ u8 encap[0x1];
+ u8 decap[0x1];
+ u8 reserved_at_9[0x17];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
@@ -473,7 +483,9 @@ struct mlx5_ifc_ads_bits {
struct mlx5_ifc_flow_table_nic_cap_bits {
u8 nic_rx_multi_path_tirs[0x1];
- u8 reserved_at_1[0x1ff];
+ u8 nic_rx_multi_path_tirs_fts[0x1];
+ u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
+ u8 reserved_at_3[0x1fd];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
@@ -512,7 +524,15 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 nic_vport_node_guid_modify[0x1];
u8 nic_vport_port_guid_modify[0x1];
- u8 reserved_at_20[0x7e0];
+ u8 vxlan_encap_decap[0x1];
+ u8 nvgre_encap_decap[0x1];
+ u8 reserved_at_22[0x9];
+ u8 log_max_encap_headers[0x5];
+ u8 reserved_2b[0x6];
+ u8 max_encap_header_size[0xa];
+
+ u8 reserved_40[0x7c0];
+
};
struct mlx5_ifc_qos_cap_bits {
@@ -767,7 +787,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
u8 retransmission_q_counters[0x1];
- u8 reserved_at_183[0x3];
+ u8 reserved_at_183[0x1];
+ u8 modify_rq_counter_set_id[0x1];
+ u8 reserved_at_185[0x1];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@@ -870,7 +892,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pad_tx_eth_packet[0x1];
u8 reserved_at_263[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0x10];
+
+ u8 reserved_at_270[0xb];
+ u8 lag_master[0x1];
+ u8 num_lag_ports[0x4];
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
@@ -1904,7 +1929,7 @@ enum {
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
- u8 reserved_at_4[0x4];
+ u8 lag_tx_port_affinity[0x4];
u8 st[0x8];
u8 reserved_at_10[0x3];
u8 pm_state[0x2];
@@ -1966,7 +1991,10 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_3e0[0x8];
u8 cqn_snd[0x18];
- u8 reserved_at_400[0x40];
+ u8 reserved_at_400[0x8];
+ u8 deth_sqpn[0x18];
+
+ u8 reserved_at_420[0x20];
u8 reserved_at_440[0x8];
u8 last_acked_psn[0x18];
@@ -2064,6 +2092,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
+ MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
+ MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
};
struct mlx5_ifc_flow_context_bits {
@@ -2083,7 +2113,9 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
- u8 reserved_at_c0[0x140];
+ u8 encap_id[0x20];
+
+ u8 reserved_at_e0[0x120];
struct mlx5_ifc_fte_match_param_bits match_value;
@@ -2146,7 +2178,11 @@ struct mlx5_ifc_traffic_counter_bits {
};
struct mlx5_ifc_tisc_bits {
- u8 reserved_at_0[0xc];
+ u8 strict_lag_tx_port_affinity[0x1];
+ u8 reserved_at_1[0x3];
+ u8 lag_tx_port_affinity[0x04];
+
+ u8 reserved_at_8[0x4];
u8 prio[0x4];
u8 reserved_at_10[0x10];
@@ -2808,7 +2844,7 @@ struct mlx5_ifc_xrqc_bits {
struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
- u8 reserved_at_180[0x180];
+ u8 reserved_at_180[0x200];
struct mlx5_ifc_wq_bits wq;
};
@@ -3489,7 +3525,7 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x20];
+ u8 dump_fill_mkey[0x20];
u8 resd_lkey[0x20];
};
@@ -4213,6 +4249,85 @@ struct mlx5_ifc_query_eq_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_encap_header_in_bits {
+ u8 reserved_at_0[0x5];
+ u8 header_type[0x3];
+ u8 reserved_at_8[0xe];
+ u8 encap_header_size[0xa];
+
+ u8 reserved_at_20[0x10];
+ u8 encap_header[2][0x8];
+
+ u8 more_encap_header[0][0x8];
+};
+
+struct mlx5_ifc_query_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0xa0];
+
+ struct mlx5_ifc_encap_header_in_bits encap_header[0];
+};
+
+struct mlx5_ifc_query_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_at_60[0xa0];
+};
+
+struct mlx5_ifc_alloc_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xa0];
+
+ struct mlx5_ifc_encap_header_in_bits encap_header;
+};
+
+struct mlx5_ifc_dealloc_encap_header_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_encap_header_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 encap_id[0x20];
+
+ u8 reserved_60[0x20];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4517,7 +4632,9 @@ struct mlx5_ifc_modify_tis_out_bits {
struct mlx5_ifc_modify_tis_bitmask_bits {
u8 reserved_at_0[0x20];
- u8 reserved_at_20[0x1f];
+ u8 reserved_at_20[0x1d];
+ u8 lag_tx_port_affinity[0x1];
+ u8 strict_lag_tx_port_affinity[0x1];
u8 prio[0x1];
};
@@ -4652,6 +4769,11 @@ struct mlx5_ifc_modify_rq_out_bits {
u8 reserved_at_40[0x40];
};
+enum {
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3,
+};
+
struct mlx5_ifc_modify_rq_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -4721,7 +4843,7 @@ struct mlx5_ifc_modify_nic_vport_field_select_bits {
u8 reserved_at_0[0x16];
u8 node_guid[0x1];
u8 port_guid[0x1];
- u8 reserved_at_18[0x1];
+ u8 min_inline[0x1];
u8 mtu[0x1];
u8 change_event[0x1];
u8 promisc[0x1];
@@ -6099,7 +6221,9 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_at_a0[0x20];
- u8 reserved_at_c0[0x4];
+ u8 encap_en[0x1];
+ u8 decap_en[0x1];
+ u8 reserved_at_c2[0x2];
u8 table_miss_mode[0x4];
u8 level[0x8];
u8 reserved_at_d0[0x8];
@@ -6108,7 +6232,10 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_at_e0[0x8];
u8 table_miss_id[0x18];
- u8 reserved_at_100[0x100];
+ u8 reserved_at_100[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_120[0x80];
};
struct mlx5_ifc_create_flow_group_out_bits {
@@ -6710,9 +6837,10 @@ struct mlx5_ifc_pude_reg_bits {
};
struct mlx5_ifc_ptys_reg_bits {
- u8 an_disable_cap[0x1];
+ u8 reserved_at_0[0x1];
u8 an_disable_admin[0x1];
- u8 reserved_at_2[0x6];
+ u8 an_disable_cap[0x1];
+ u8 reserved_at_3[0x5];
u8 local_port[0x8];
u8 reserved_at_10[0xd];
u8 proto_mask[0x3];
@@ -7562,7 +7690,8 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
};
enum {
- MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1,
+ MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = (1UL << 0),
+ MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
};
struct mlx5_ifc_modify_flow_table_out_bits {
@@ -7601,7 +7730,10 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 reserved_at_e0[0x8];
u8 table_miss_id[0x18];
- u8 reserved_at_100[0x100];
+ u8 reserved_at_100[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_120[0x80];
};
struct mlx5_ifc_ets_tcn_config_reg_bits {
@@ -7709,4 +7841,134 @@ struct mlx5_ifc_dcbx_param_bits {
u8 error[0x8];
u8 reserved_at_a0[0x160];
};
+
+struct mlx5_ifc_lagc_bits {
+ u8 reserved_at_0[0x1d];
+ u8 lag_state[0x3];
+
+ u8 reserved_at_20[0x14];
+ u8 tx_remap_affinity_2[0x4];
+ u8 reserved_at_38[0x4];
+ u8 tx_remap_affinity_1[0x4];
+};
+
+struct mlx5_ifc_create_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_modify_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+ u8 field_select[0x20];
+
+ struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index e3012cc64b8a..b3065acd20b4 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -61,6 +61,39 @@ enum mlx5_an_status {
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256
+enum mlx5e_link_mode {
+ MLX5E_1000BASE_CX_SGMII = 0,
+ MLX5E_1000BASE_KX = 1,
+ MLX5E_10GBASE_CX4 = 2,
+ MLX5E_10GBASE_KX4 = 3,
+ MLX5E_10GBASE_KR = 4,
+ MLX5E_20GBASE_KR2 = 5,
+ MLX5E_40GBASE_CR4 = 6,
+ MLX5E_40GBASE_KR4 = 7,
+ MLX5E_56GBASE_R4 = 8,
+ MLX5E_10GBASE_CR = 12,
+ MLX5E_10GBASE_SR = 13,
+ MLX5E_10GBASE_ER = 14,
+ MLX5E_40GBASE_SR4 = 15,
+ MLX5E_40GBASE_LR4 = 16,
+ MLX5E_50GBASE_SR2 = 18,
+ MLX5E_100GBASE_CR4 = 20,
+ MLX5E_100GBASE_SR4 = 21,
+ MLX5E_100GBASE_KR4 = 22,
+ MLX5E_100GBASE_LR4 = 23,
+ MLX5E_100BASE_TX = 24,
+ MLX5E_1000BASE_T = 25,
+ MLX5E_10GBASE_T = 26,
+ MLX5E_25GBASE_CR = 27,
+ MLX5E_25GBASE_KR = 28,
+ MLX5E_25GBASE_SR = 29,
+ MLX5E_50GBASE_CR2 = 30,
+ MLX5E_50GBASE_KR2 = 31,
+ MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
@@ -70,9 +103,10 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, int proto_mask,
- u8 local_port);
+int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, u8 local_port);
+int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
+ u32 *proto_oper, u8 local_port);
int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, int proto_mask);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
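
The split into mlx5_query_port_ib_proto_oper()/mlx5_query_port_eth_proto_oper() pairs naturally with the MLX5E_PROT_MASK() helper above. A hedged example; "mdev" and port 1 are illustrative:

	bool has_25g_cr;
	u32 eth_proto_oper;
	int err;

	err = mlx5_query_port_eth_proto_oper(mdev, &eth_proto_oper, 1);
	has_25g_cr = !err && (eth_proto_oper & MLX5E_PROT_MASK(MLX5E_25GBASE_CR));
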
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 7879bf411891..0aacb2a7480d 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -123,12 +123,13 @@ enum {
};
enum {
- MLX5_NON_ZERO_RQ = 0 << 24,
- MLX5_SRQ_RQ = 1 << 24,
- MLX5_CRQ_RQ = 2 << 24,
- MLX5_ZERO_LEN_RQ = 3 << 24
+ MLX5_NON_ZERO_RQ = 0x0,
+ MLX5_SRQ_RQ = 0x1,
+ MLX5_CRQ_RQ = 0x2,
+ MLX5_ZERO_LEN_RQ = 0x3
};
+/* TODO REM */
enum {
/* params1 */
MLX5_QP_BIT_SRE = 1 << 15,
@@ -178,12 +179,6 @@ enum {
};
enum {
- MLX5_QP_LAT_SENSITIVE = 1 << 28,
- MLX5_QP_BLOCK_MCAST = 1 << 30,
- MLX5_QP_ENABLE_SIG = 1 << 31,
-};
-
-enum {
MLX5_RCV_DBR = 0,
MLX5_SND_DBR = 1,
};
@@ -484,6 +479,7 @@ struct mlx5_qp_path {
u8 rmac[6];
};
+/* FIXME: use mlx5_ifc.h qpc */
struct mlx5_qp_context {
__be32 flags;
__be32 flags_pd;
@@ -525,99 +521,6 @@ struct mlx5_qp_context {
u8 rsvd1[24];
};
-struct mlx5_create_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 input_qpn;
- u8 rsvd0[4];
- __be32 opt_param_mask;
- u8 rsvd1[4];
- struct mlx5_qp_context ctx;
- u8 rsvd3[16];
- __be64 pas[0];
-};
-
-struct mlx5_create_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
-};
-
-struct mlx5_destroy_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_modify_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd0[4];
- __be32 optparam;
- u8 rsvd1[4];
- struct mlx5_qp_context ctx;
- u8 rsvd2[16];
-};
-
-struct mlx5_modify_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd0[8];
-};
-
-struct mlx5_query_qp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd[4];
-};
-
-struct mlx5_query_qp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd1[8];
- __be32 optparam;
- u8 rsvd0[4];
- struct mlx5_qp_context ctx;
- u8 rsvd2[16];
- __be64 pas[0];
-};
-
-struct mlx5_conf_sqp_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 qpn;
- u8 rsvd[3];
- u8 type;
-};
-
-struct mlx5_conf_sqp_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_xrcd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- u8 rsvd[8];
-};
-
-struct mlx5_alloc_xrcd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- __be32 xrcdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_xrcd_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 xrcdn;
- u8 rsvd[4];
-};
-
-struct mlx5_dealloc_xrcd_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
@@ -628,28 +531,17 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
-struct mlx5_page_fault_resume_mbox_in {
- struct mlx5_inbox_hdr hdr;
- __be32 flags_qpn;
- u8 reserved[4];
-};
-
-struct mlx5_page_fault_resume_mbox_out {
- struct mlx5_outbox_hdr hdr;
- u8 rsvd[8];
-};
-
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
- struct mlx5_create_qp_mbox_in *in,
+ u32 *in,
int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
- struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+ u32 opt_param_mask, void *qpc,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
- struct mlx5_query_qp_mbox_out *out, int outlen);
+ u32 *out, int outlen);
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
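
With the mailbox structures gone, callers of mlx5_core_qp_modify() pass the command opcode, the optional-parameter mask and a raw qpc buffer built with the mlx5_ifc accessors. A minimal sketch of the new calling convention follows; the opcode, the optional-parameter bits and the qpc fields chosen here are illustrative assumptions, not values required by this patch.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>

/* Sketch only: enable remote read/write access on an existing QP. */
static int example_enable_rdma_access(struct mlx5_core_dev *dev,
                                      struct mlx5_core_qp *qp)
{
        u32 qpc[MLX5_ST_SZ_DW(qpc)] = {};

        MLX5_SET(qpc, qpc, rre, 1);     /* allow remote reads */
        MLX5_SET(qpc, qpc, rwe, 1);     /* allow remote writes */

        return mlx5_core_qp_modify(dev, MLX5_CMD_OP_RTR2RTS_QP,
                                   MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RWE,
                                   qpc, qp);
}
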
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index e087b7d047ac..451b0bde9083 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline);
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
u16 vport, u8 *addr);
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ef815b9cd426..e9caec6a51e9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -126,7 +126,7 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
-#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
+#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
/*
* Linux kernel virtual memory manager primitives.
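
The extra parentheses matter whenever the macro argument is an expression rather than a plain variable: previously the cast to unsigned long bound only to the first operand, so pointer arithmetic inside the argument silently became byte arithmetic. A small sketch of the difference, with made-up names:

#include <linux/mm.h>

/* Illustrative only: 'ring' and 'idx' are hypothetical names. */
static bool ring_slot_page_aligned(u32 *ring, unsigned int idx)
{
        /*
         * Old expansion: IS_ALIGNED((unsigned long)ring + idx, PAGE_SIZE)
         *   - the cast binds to 'ring' alone, so 'idx' is added as bytes.
         * New expansion: IS_ALIGNED((unsigned long)(ring + idx), PAGE_SIZE)
         *   - pointer arithmetic happens first, so 'idx' scales by
         *     sizeof(u32) as intended.
         */
        return PAGE_ALIGNED(ring + idx);
}
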
@@ -1048,28 +1048,16 @@ struct address_space *page_file_mapping(struct page *page)
return page->mapping;
}
-/*
- * Return the pagecache index of the passed page. Regular pagecache pages
- * use ->index whereas swapcache pages use ->private
- */
-static inline pgoff_t page_index(struct page *page)
-{
- if (unlikely(PageSwapCache(page)))
- return page_private(page);
- return page->index;
-}
-
extern pgoff_t __page_file_index(struct page *page);
/*
- * Return the file index of the page. Regular pagecache pages use ->index
- * whereas swapcache pages use swp_offset(->private)
+ * Return the pagecache index of the passed page. Regular pagecache pages
+ * use ->index whereas swapcache pages use swp_offset(->private)
*/
-static inline pgoff_t page_file_index(struct page *page)
+static inline pgoff_t page_index(struct page *page)
{
if (unlikely(PageSwapCache(page)))
return __page_file_index(page);
-
return page->index;
}
@@ -1197,10 +1185,10 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
* @pte_hole: if set, called for each hole at all levels
* @hugetlb_entry: if set, called for each hugetlb entry
* @test_walk: caller specific callback function to determine whether
- * we walk over the current vma or not. A positive returned
+ * we walk over the current vma or not. Returning 0
* value means "do page table walk over the current vma,"
* and a negative one means "abort current page table walk
- * right now." 0 means "skip the current vma."
+ * right now." 1 means "skip the current vma."
* @mm: mm_struct representing the target process of page table walk
* @vma: vma currently walked (NULL if walking outside vmas)
* @private: private data for callbacks' usage
@@ -1529,7 +1517,7 @@ static inline int pte_devmap(pte_t pte)
}
#endif
-int vma_wants_writenotify(struct vm_area_struct *vma);
+int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
@@ -1924,10 +1912,12 @@ extern void show_mem(unsigned int flags);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
+#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
+extern unsigned long arch_reserved_kernel_pages(void);
+#endif
-extern __printf(3, 4)
-void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
- const char *fmt, ...);
+extern __printf(2, 3)
+void warn_alloc(gfp_t gfp_mask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
@@ -1977,8 +1967,14 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
+extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
+ struct vm_area_struct *expand);
+static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
+{
+ return __vma_adjust(vma, start, end, pgoff, insert, NULL);
+}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
@@ -2019,6 +2015,8 @@ extern struct file *get_task_exe_file(struct task_struct *task);
extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
+extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
+ const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 903200f4ec41..4a8acedf4b7d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -515,9 +515,7 @@ struct mm_struct {
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
-#ifdef CONFIG_MMU
struct work_struct async_put_work;
-#endif
};
static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d8673ca968ba..73fad83acbcb 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -292,6 +292,7 @@ struct mmc_card {
u32 raw_cid[4]; /* raw card CID */
u32 raw_csd[4]; /* raw card CSD */
u32 raw_scr[2]; /* raw card SCR */
+ u32 raw_ssr[16]; /* raw card SSR */
struct mmc_cid cid; /* card identification */
struct mmc_csd csd; /* card specific */
struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b01e77de1a74..2b953eb8ceae 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -55,6 +55,9 @@ struct mmc_command {
#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+/* Can be used by core to poll after switch to MMC HS mode */
+#define MMC_RSP_R1_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE)
+
#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
/*
@@ -133,8 +136,12 @@ struct mmc_request {
struct mmc_command *stop;
struct completion completion;
+ struct completion cmd_completion;
void (*done)(struct mmc_request *);/* completion function */
struct mmc_host *host;
+
+ /* Allow other commands during this ongoing data transfer or busy wait */
+ bool cap_cmd_during_tfr;
};
struct mmc_card;
@@ -146,6 +153,9 @@ extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
struct mmc_async_req *, int *);
extern int mmc_interrupt_hpi(struct mmc_card *);
extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
+extern void mmc_wait_for_req_done(struct mmc_host *host,
+ struct mmc_request *mrq);
+extern bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
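
The cap_cmd_during_tfr flag and the two new waiters fit together as follows, seen from the requester's side. This is a simplified sketch (error handling, request setup and the status command itself are omitted), not the authoritative usage.

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

static void example_cmd_during_tfr(struct mmc_host *host,
                                   struct mmc_request *mrq,
                                   struct mmc_command *status_cmd)
{
        if (!(host->caps & MMC_CAP_CMD_DURING_TFR))
                return;                 /* controller cannot interleave */

        mrq->cap_cmd_during_tfr = true;

        /*
         * For such requests the core no longer blocks here until the data
         * transfer finishes; once the host driver has called
         * mmc_command_done(), the command line is free again.
         */
        mmc_wait_for_req(host, mrq);

        /* Other commands may now be sent while the data phase continues. */
        while (!mmc_is_req_done(host, mrq))
                mmc_wait_for_cmd(host, status_cmd, 0);

        /* Finally wait for the whole request to complete. */
        mmc_wait_for_req_done(host, mrq);
}
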
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 83b0edfce471..f5af2bd35e7f 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -17,6 +17,7 @@
#include <linux/scatterlist.h>
#include <linux/mmc/core.h>
#include <linux/dmaengine.h>
+#include <linux/reset.h>
#define MAX_MCI_SLOTS 2
@@ -259,6 +260,7 @@ struct dw_mci_board {
/* delay in mS before detecting cards after interrupt */
u32 detect_delay_ms;
+ struct reset_control *rstc;
struct dw_mci_dma_ops *dma_ops;
struct dma_pdata *data;
};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index aa4bfbf129e4..0b2439441cc8 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -281,6 +281,7 @@ struct mmc_host {
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
+#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */
@@ -382,6 +383,9 @@ struct mmc_host {
struct mmc_async_req *areq; /* active async req */
struct mmc_context_info context_info; /* async synchronization info */
+ /* Ongoing data transfer that allows commands during transfer */
+ struct mmc_request *ongoing_mrq;
+
#ifdef CONFIG_FAIL_MMC_REQUEST
struct fault_attr fail_mmc_request;
#endif
@@ -418,6 +422,7 @@ int mmc_power_restore_host(struct mmc_host *host);
void mmc_detect_change(struct mmc_host *, unsigned long delay);
void mmc_request_done(struct mmc_host *, struct mmc_request *);
+void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0d126aeb3ec0..d43ef96bf075 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -32,6 +32,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 54a594d49733..1172cce949a4 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -96,4 +96,6 @@ extern void mark_mounts_for_expiry(struct list_head *mounts);
extern dev_t name_to_dev_t(const char *name);
+extern unsigned int sysctl_mount_max;
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index d351fd3e1049..e5fb81376e92 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -120,5 +120,5 @@ struct mfc_cache {
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
- struct rtmsg *rtm, int nowait);
+ struct rtmsg *rtm, int nowait, u32 portid);
#endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 3987b64040c5..19a1c0c2993b 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -116,7 +116,7 @@ struct mfc6_cache {
struct rtmsg;
extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
- struct rtmsg *rtm, int nowait);
+ struct rtmsg *rtm, int nowait, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index e8c81fbd5f9c..0db320b7bb15 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -68,7 +68,7 @@ struct msi_desc {
unsigned int nvec_used;
struct device *dev;
struct msi_msg msg;
- const struct cpumask *affinity;
+ struct cpumask *affinity;
union {
/* PCI MSI/X specific data */
@@ -123,7 +123,8 @@ static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
}
#endif /* CONFIG_PCI_MSI */
-struct msi_desc *alloc_msi_entry(struct device *dev);
+struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
+ const struct cpumask *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 29a170612203..13f8052b9ff9 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -127,6 +127,82 @@ struct mtd_ooblayout_ops {
struct mtd_oob_region *oobfree);
};
+/**
+ * struct mtd_pairing_info - page pairing information
+ *
+ * @pair: pair id
+ * @group: group id
+ *
+ * The term "pair" is used here, even though TLC NANDs might group pages by 3
+ * (3 bits in a single cell). A pair groups all the pages that share the
+ * same cell. Pairs are then indexed in ascending order.
+ *
+ * @group defines the position of a page in a given pair. It can also be
+ * seen as the bit position in the cell: page attached to bit 0 belongs to
+ * group 0, page attached to bit 1 belongs to group 1, etc.
+ *
+ * Example:
+ * The H27UCG8T2BTR-BC datasheet describes the following pairing scheme:
+ *
+ * group-0 group-1
+ *
+ * pair-0 page-0 page-4
+ * pair-1 page-1 page-5
+ * pair-2 page-2 page-8
+ * ...
+ * pair-127 page-251 page-255
+ *
+ *
+ * Note that the "group" and "pair" terms were extracted from Samsung and
+ * Hynix datasheets, and might be referenced under other names in other
+ * datasheets (Micron is describing this concept as "shared pages").
+ */
+struct mtd_pairing_info {
+ int pair;
+ int group;
+};
+
+/**
+ * struct mtd_pairing_scheme - page pairing scheme description
+ *
+ * @ngroups: number of groups. Should be related to the number of bits
+ * per cell.
+ * @get_info: converts a write-unit (page number within an erase block) into
+ * mtd_pairing information (pair + group). This function should
+ * fill the info parameter based on the wunit index or return
+ * -EINVAL if the wunit parameter is invalid.
+ * @get_wunit: converts pairing information into a write-unit (page) number.
+ * This function should return the wunit index pointed by the
+ * pairing information described in the info argument. It should
+ * return -EINVAL, if there's no wunit corresponding to the
+ * passed pairing information.
+ *
+ * See mtd_pairing_info documentation for a detailed explanation of the
+ * pair and group concepts.
+ *
+ * The mtd_pairing_scheme structure provides a generic solution to represent
+ * NAND page pairing scheme. Instead of exposing two big tables to do the
+ * write-unit <-> (pair + group) conversions, we ask the MTD drivers to
+ * implement the ->get_info() and ->get_wunit() functions.
+ *
+ * MTD users will then be able to query this information by using the
+ * mtd_pairing_info_to_wunit() and mtd_wunit_to_pairing_info() helpers.
+ *
+ * @ngroups is here to help MTD users iterate over all the pages in a
+ * given pair. This value can be retrieved by MTD users using the
+ * mtd_pairing_groups() helper.
+ *
+ * Examples are given in the mtd_pairing_info_to_wunit() and
+ * mtd_wunit_to_pairing_info() documentation.
+ */
+struct mtd_pairing_scheme {
+ int ngroups;
+ int (*get_info)(struct mtd_info *mtd, int wunit,
+ struct mtd_pairing_info *info);
+ int (*get_wunit)(struct mtd_info *mtd,
+ const struct mtd_pairing_info *info);
+};
+
struct module; /* only needed for owner field in mtd_info */
struct mtd_info {
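
To make the ->get_info()/->get_wunit() contract concrete, here is a sketch of a deliberately simple, hypothetical scheme in which consecutive pages share a cell (pages 0/1 form pair 0, pages 2/3 pair 1, and so on); real MLC/TLC layouts such as the H27UCG8T2BTR example above are more irregular.

#include <linux/mtd/mtd.h>

static int example_get_info(struct mtd_info *mtd, int wunit,
                            struct mtd_pairing_info *info)
{
        if (wunit < 0 || wunit >= mtd_wunit_per_eb(mtd))
                return -EINVAL;

        info->pair = wunit / 2;
        info->group = wunit % 2;
        return 0;
}

static int example_get_wunit(struct mtd_info *mtd,
                             const struct mtd_pairing_info *info)
{
        if (info->group < 0 || info->group > 1 ||
            info->pair < 0 || info->pair >= mtd_wunit_per_eb(mtd) / 2)
                return -EINVAL;

        return info->pair * 2 + info->group;
}

static const struct mtd_pairing_scheme example_pairing = {
        .ngroups   = 2,
        .get_info  = example_get_info,
        .get_wunit = example_get_wunit,
};

/* A NAND driver would install this with mtd_set_pairing_scheme(). */
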
@@ -188,6 +264,9 @@ struct mtd_info {
/* OOB layout description */
const struct mtd_ooblayout_ops *ooblayout;
+ /* NAND pairing scheme, only provided for MLC/TLC NANDs */
+ const struct mtd_pairing_scheme *pairing;
+
/* the ecc step size. */
unsigned int ecc_step_size;
@@ -296,6 +375,12 @@ static inline void mtd_set_ooblayout(struct mtd_info *mtd,
mtd->ooblayout = ooblayout;
}
+static inline void mtd_set_pairing_scheme(struct mtd_info *mtd,
+ const struct mtd_pairing_scheme *pairing)
+{
+ mtd->pairing = pairing;
+}
+
static inline void mtd_set_of_node(struct mtd_info *mtd,
struct device_node *np)
{
@@ -312,6 +397,11 @@ static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}
+int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
+ struct mtd_pairing_info *info);
+int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
+ const struct mtd_pairing_info *info);
+int mtd_pairing_groups(struct mtd_info *mtd);
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
void **virt, resource_size_t *phys);
@@ -397,6 +487,23 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->writesize);
}
+static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
+{
+ return mtd->erasesize / mtd->writesize;
+}
+
+static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
+{
+ return mtd_div_by_ws(mtd_mod_by_eb(offs, mtd), mtd);
+}
+
+static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base,
+ int wunit)
+{
+ return base + (wunit * mtd->writesize);
+}
+
+
static inline int mtd_has_oob(const struct mtd_info *mtd)
{
return mtd->_read_oob && mtd->_write_oob;
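
From the consumer side, the new helpers combine as in the rough sketch below to find every page that shares a cell with the page at a given offset; error handling is kept minimal.

#include <linux/mtd/mtd.h>
#include <linux/printk.h>

static void example_walk_paired_pages(struct mtd_info *mtd, loff_t offs)
{
        struct mtd_pairing_info info;
        loff_t eb_base = offs - mtd_mod_by_eb(offs, mtd);
        int group, wunit;

        if (mtd_wunit_to_pairing_info(mtd, mtd_offset_to_wunit(mtd, offs),
                                      &info))
                return;

        for (group = 0; group < mtd_pairing_groups(mtd); group++) {
                info.group = group;
                wunit = mtd_pairing_info_to_wunit(mtd, &info);
                if (wunit < 0)
                        continue;

                pr_info("offset %llx shares a cell with page at %llx\n",
                        (unsigned long long)offs,
                        (unsigned long long)mtd_wunit_to_offset(mtd, eb_base,
                                                                wunit));
        }
}
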
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 8dd6e01f45c0..c5d3d5024fc8 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -29,26 +29,26 @@ struct nand_flash_dev;
struct device_node;
/* Scan and identify a NAND device */
-extern int nand_scan(struct mtd_info *mtd, int max_chips);
+int nand_scan(struct mtd_info *mtd, int max_chips);
/*
* Separate phases of nand_scan(), allowing board driver to intervene
* and override command or ECC setup according to flash type.
*/
-extern int nand_scan_ident(struct mtd_info *mtd, int max_chips,
+int nand_scan_ident(struct mtd_info *mtd, int max_chips,
struct nand_flash_dev *table);
-extern int nand_scan_tail(struct mtd_info *mtd);
+int nand_scan_tail(struct mtd_info *mtd);
-/* Free resources held by the NAND device */
-extern void nand_release(struct mtd_info *mtd);
+/* Unregister the MTD device and free resources held by the NAND device */
+void nand_release(struct mtd_info *mtd);
/* Internal helper for board drivers which need to override command function */
-extern void nand_wait_ready(struct mtd_info *mtd);
+void nand_wait_ready(struct mtd_info *mtd);
/* locks all blocks present in the device */
-extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
/* unlocks specified locked blocks */
-extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8
@@ -141,6 +141,7 @@ enum nand_ecc_algo {
* pages and you want to rely on the default implementation.
*/
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
+#define NAND_ECC_MAXIMIZE BIT(1)
/* Bit mask for flags passed to do_nand_read_ecc */
#define NAND_GET_DEVICE 0x80
@@ -460,6 +461,13 @@ struct nand_hw_control {
wait_queue_head_t wq;
};
+static inline void nand_hw_control_init(struct nand_hw_control *nfc)
+{
+ nfc->active = NULL;
+ spin_lock_init(&nfc->lock);
+ init_waitqueue_head(&nfc->wq);
+}
+
/**
* struct nand_ecc_ctrl - Control structure for ECC
* @mode: ECC mode
@@ -566,6 +574,123 @@ struct nand_buffers {
};
/**
+ * struct nand_sdr_timings - SDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an SDR NAND chip.
+ * This information can be found in every NAND datasheet, and the meaning of
+ * the timings is described in the ONFI specification:
+ * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
+ * Parameters)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tALH_min: ALE hold time
+ * @tADL_min: ALE to data loading time
+ * @tALS_min: ALE setup time
+ * @tAR_min: ALE to RE# delay
+ * @tCEA_max: CE# access time
+ * @tCEH_min: CE# high hold time
+ * @tCH_min: CE# hold time
+ * @tCHZ_max: CE# high to output hi-Z
+ * @tCLH_min: CLE hold time
+ * @tCLR_min: CLE to RE# delay
+ * @tCLS_min: CLE setup time
+ * @tCOH_min: CE# high to output hold
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDS_min: Data setup time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tIR_min: Output hi-Z to RE# low
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tRC_min: RE# cycle time
+ * @tREA_max: RE# access time
+ * @tREH_min: RE# high hold time
+ * @tRHOH_min: RE# high to output hold
+ * @tRHW_min: RE# high to WE# low
+ * @tRHZ_max: RE# high to output hi-Z
+ * @tRLOH_min: RE# low to output hold
+ * @tRP_min: RE# pulse width
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ * rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWC_min: WE# cycle time
+ * @tWH_min: WE# high hold time
+ * @tWHR_min: WE# high to RE# low
+ * @tWP_min: WE# pulse width
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_sdr_timings {
+ u32 tALH_min;
+ u32 tADL_min;
+ u32 tALS_min;
+ u32 tAR_min;
+ u32 tCEA_max;
+ u32 tCEH_min;
+ u32 tCH_min;
+ u32 tCHZ_max;
+ u32 tCLH_min;
+ u32 tCLR_min;
+ u32 tCLS_min;
+ u32 tCOH_min;
+ u32 tCS_min;
+ u32 tDH_min;
+ u32 tDS_min;
+ u32 tFEAT_max;
+ u32 tIR_min;
+ u32 tITC_max;
+ u32 tRC_min;
+ u32 tREA_max;
+ u32 tREH_min;
+ u32 tRHOH_min;
+ u32 tRHW_min;
+ u32 tRHZ_max;
+ u32 tRLOH_min;
+ u32 tRP_min;
+ u32 tRR_min;
+ u64 tRST_max;
+ u32 tWB_max;
+ u32 tWC_min;
+ u32 tWH_min;
+ u32 tWHR_min;
+ u32 tWP_min;
+ u32 tWW_min;
+};
+
+/**
+ * enum nand_data_interface_type - NAND interface timing type
+ * @NAND_SDR_IFACE: Single Data Rate interface
+ */
+enum nand_data_interface_type {
+ NAND_SDR_IFACE,
+};
+
+/**
+ * struct nand_data_interface - NAND interface timing
+ * @type: type of the timing
+ * @timings: The timing, type according to @type
+ */
+struct nand_data_interface {
+ enum nand_data_interface_type type;
+ union {
+ struct nand_sdr_timings sdr;
+ } timings;
+};
+
+/**
+ * nand_get_sdr_timings - get SDR timing from data interface
+ * @conf: The data interface
+ */
+static inline const struct nand_sdr_timings *
+nand_get_sdr_timings(const struct nand_data_interface *conf)
+{
+ if (conf->type != NAND_SDR_IFACE)
+ return ERR_PTR(-EINVAL);
+
+ return &conf->timings.sdr;
+}
+
+/**
* struct nand_chip - NAND Private Flash Chip Data
* @mtd: MTD device registered to the MTD framework
* @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
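
A controller driver's ->setup_data_interface() implementation is expected to extract the SDR timings, refuse configurations it cannot meet, and only touch hardware when check_only is false. The sketch below assumes a made-up register-programming helper and an arbitrary tRC limit; only the use of nand_get_sdr_timings() and the check_only contract come from this header.

#include <linux/err.h>
#include <linux/mtd/nand.h>

static int example_setup_data_interface(struct mtd_info *mtd,
                                        const struct nand_data_interface *conf,
                                        bool check_only)
{
        const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

        if (IS_ERR(sdr))
                return PTR_ERR(sdr);

        /* Reject timings this controller cannot meet (values are in ps). */
        if (sdr->tRC_min < 30000)
                return -ENOTSUPP;

        if (check_only)
                return 0;

        /* example_program_timing_regs(mtd, sdr); -- hypothetical helper */
        return 0;
}
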
@@ -627,10 +752,9 @@ struct nand_buffers {
* also from the datasheet. It is the recommended ECC step
* size, if known; if unknown, set to zero.
* @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
- * either deduced from the datasheet if the NAND
- * chip is not ONFI compliant or set to 0 if it is
- * (an ONFI chip is always configured in mode 0
- * after a NAND reset)
+ * set to the actually used ONFI mode if the chip is
+ * ONFI compliant or deduced from the datasheet if
+ * the NAND chip is not ONFI compliant.
* @numchips: [INTERN] number of physical chips
* @chipsize: [INTERN] the size of one chip for multichip arrays
* @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
@@ -650,6 +774,7 @@ struct nand_buffers {
* @read_retries: [INTERN] the number of read retry modes supported
* @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
* @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
+ * @setup_data_interface: [OPTIONAL] setup the data interface and timing
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -696,6 +821,10 @@ struct nand_chip {
int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
+ int (*setup_data_interface)(struct mtd_info *mtd,
+ const struct nand_data_interface *conf,
+ bool check_only);
+
int chip_delay;
unsigned int options;
@@ -725,6 +854,8 @@ struct nand_chip {
struct nand_jedec_params jedec_params;
};
+ struct nand_data_interface *data_interface;
+
int read_retries;
flstate_t state;
@@ -893,14 +1024,14 @@ struct nand_manufacturers {
extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
-extern int nand_default_bbt(struct mtd_info *mtd);
-extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
-extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
-extern int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
-extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
- int allowbbt);
-extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf);
+int nand_default_bbt(struct mtd_info *mtd);
+int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
+int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
+int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
+int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+ int allowbbt);
+int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, uint8_t *buf);
/**
* struct platform_nand_chip - chip level device structure
@@ -988,6 +1119,11 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
}
+int onfi_init_data_interface(struct nand_chip *chip,
+ struct nand_data_interface *iface,
+ enum nand_data_interface_type type,
+ int timing_mode);
+
/*
* Check if it is a SLC nand.
* The !nand_is_slc() can be used to check the MLC/TLC nand chips.
@@ -1023,57 +1159,10 @@ static inline int jedec_feature(struct nand_chip *chip)
: 0;
}
-/*
- * struct nand_sdr_timings - SDR NAND chip timings
- *
- * This struct defines the timing requirements of a SDR NAND chip.
- * These informations can be found in every NAND datasheets and the timings
- * meaning are described in the ONFI specifications:
- * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
- * Parameters)
- *
- * All these timings are expressed in picoseconds.
- */
-
-struct nand_sdr_timings {
- u32 tALH_min;
- u32 tADL_min;
- u32 tALS_min;
- u32 tAR_min;
- u32 tCEA_max;
- u32 tCEH_min;
- u32 tCH_min;
- u32 tCHZ_max;
- u32 tCLH_min;
- u32 tCLR_min;
- u32 tCLS_min;
- u32 tCOH_min;
- u32 tCS_min;
- u32 tDH_min;
- u32 tDS_min;
- u32 tFEAT_max;
- u32 tIR_min;
- u32 tITC_max;
- u32 tRC_min;
- u32 tREA_max;
- u32 tREH_min;
- u32 tRHOH_min;
- u32 tRHW_min;
- u32 tRHZ_max;
- u32 tRLOH_min;
- u32 tRP_min;
- u32 tRR_min;
- u64 tRST_max;
- u32 tWB_max;
- u32 tWC_min;
- u32 tWH_min;
- u32 tWHR_min;
- u32 tWP_min;
- u32 tWW_min;
-};
-
/* get timing characteristics from ONFI timing mode. */
const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
+/* get data interface from ONFI timing mode 0, used after reset. */
+const struct nand_data_interface *nand_get_default_data_interface(void);
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
@@ -1093,4 +1182,11 @@ int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
/* Default read_oob syndrome implementation */
int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int page);
+
+/* Reset and initialize a NAND device */
+int nand_reset(struct nand_chip *chip);
+
+/* Free resources held by the NAND device */
+void nand_cleanup(struct nand_chip *chip);
+
#endif /* __LINUX_MTD_NAND_H */
diff --git a/include/linux/nd.h b/include/linux/nd.h
index f1ea426d6a5e..fa66aeed441a 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -77,11 +77,13 @@ struct nd_namespace_io {
* @nsio: device and system physical address range to drive
* @alt_name: namespace name supplied in the dimm label
* @uuid: namespace name supplied in the dimm label
+ * @id: ida allocated id
*/
struct nd_namespace_pmem {
struct nd_namespace_io nsio;
char *alt_name;
u8 *uuid;
+ int id;
};
/**
@@ -105,19 +107,19 @@ struct nd_namespace_blk {
struct resource **res;
};
-static inline struct nd_namespace_io *to_nd_namespace_io(struct device *dev)
+static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
{
return container_of(dev, struct nd_namespace_io, common.dev);
}
-static inline struct nd_namespace_pmem *to_nd_namespace_pmem(struct device *dev)
+static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device *dev)
{
struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
return container_of(nsio, struct nd_namespace_pmem, nsio);
}
-static inline struct nd_namespace_blk *to_nd_namespace_blk(struct device *dev)
+static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev)
{
return container_of(dev, struct nd_namespace_blk, common.dev);
}
diff --git a/include/linux/net.h b/include/linux/net.h
index b9f0ff4d489c..cd0c8bd0a1de 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -25,6 +25,7 @@
#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
#include <linux/once.h>
+#include <linux/fs.h>
#include <uapi/linux/net.h>
@@ -128,6 +129,9 @@ struct page;
struct sockaddr;
struct msghdr;
struct module;
+struct sk_buff;
+typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
+ unsigned int, size_t);
struct proto_ops {
int family;
@@ -186,6 +190,8 @@ struct proto_ops {
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
int (*set_peek_off)(struct sock *sk, int val);
int (*peek_len)(struct socket *sock);
+ int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
};
#define DECLARE_SOCKADDR(type, dst, src) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3a788bf0affd..136ae6bbe81e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -52,6 +52,7 @@
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
+#include <linux/hashtable.h>
struct netpoll_info;
struct device;
@@ -788,6 +789,7 @@ enum {
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
TC_SETUP_MATCHALL,
+ TC_SETUP_CLSBPF,
};
struct tc_cls_u32_offload;
@@ -799,6 +801,7 @@ struct tc_to_netdev {
struct tc_cls_u32_offload *cls_u32;
struct tc_cls_flower_offload *cls_flower;
struct tc_cls_matchall_offload *cls_mall;
+ struct tc_cls_bpf_offload *cls_bpf;
};
};
@@ -923,6 +926,14 @@ struct netdev_xdp {
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
+ * bool (*ndo_has_offload_stats)(int attr_id)
+ * Return true if this device supports offload stats of this attr_id.
+ *
+ * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
+ * void *attr_data)
+ * Get statistics for offload operations by attr_id. Write them into the
+ * attr_data pointer.
+ *
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device supports VLAN filtering this function is called when a
* VLAN id is registered.
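
A driver-side sketch of the two new callbacks; IFLA_OFFLOAD_XSTATS_CPU_HIT with an rtnl_link_stats64 payload is the expected first attr_id, but treat those details as assumptions rather than part of this header change.

#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/string.h>

static bool example_has_offload_stats(int attr_id)
{
        return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int example_get_offload_stats(int attr_id,
                                     const struct net_device *dev,
                                     void *attr_data)
{
        struct rtnl_link_stats64 *stats = attr_data;

        if (attr_id != IFLA_OFFLOAD_XSTATS_CPU_HIT)
                return -EINVAL;

        memset(stats, 0, sizeof(*stats));
        /* Fill *stats with the packets/bytes that hit the CPU here. */
        return 0;
}
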
@@ -935,7 +946,8 @@ struct netdev_xdp {
*
* SR-IOV management functions.
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
- * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
+ * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
+ * u8 qos, __be16 proto);
* int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
* int max_tx_rate);
* int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
@@ -1030,7 +1042,7 @@ struct netdev_xdp {
* Deletes the FDB entry from dev coresponding to addr.
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
- * int idx)
+ * int *idx)
* Used to add FDB entries to dump requests. Implementers should add
* entries to skb and update idx with the number of entries.
*
@@ -1154,6 +1166,10 @@ struct net_device_ops {
struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
+ bool (*ndo_has_offload_stats)(int attr_id);
+ int (*ndo_get_offload_stats)(int attr_id,
+ const struct net_device *dev,
+ void *attr_data);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
@@ -1172,7 +1188,8 @@ struct net_device_ops {
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
int (*ndo_set_vf_vlan)(struct net_device *dev,
- int queue, u16 vlan, u8 qos);
+ int queue, u16 vlan,
+ u8 qos, __be16 proto);
int (*ndo_set_vf_rate)(struct net_device *dev,
int vf, int min_tx_rate,
int max_tx_rate);
@@ -1262,7 +1279,7 @@ struct net_device_ops {
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx);
+ int *idx);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
@@ -1561,8 +1578,6 @@ enum netdev_priv_flags {
*
* @xps_maps: XXX: need comments on this one
*
- * @offload_fwd_mark: Offload device fwding mark
- *
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
@@ -1784,7 +1799,7 @@ struct net_device {
#endif
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
- struct list_head nf_hooks_ingress;
+ struct nf_hook_entry __rcu *nf_hooks_ingress;
#endif
unsigned char broadcast[MAX_ADDR_LEN];
@@ -1800,6 +1815,9 @@ struct net_device {
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
struct Qdisc *qdisc;
+#ifdef CONFIG_NET_SCHED
+ DECLARE_HASHTABLE (qdisc_hash, 4);
+#endif
unsigned long tx_queue_len;
spinlock_t tx_global_lock;
int watchdog_timeo;
@@ -1810,9 +1828,6 @@ struct net_device {
#ifdef CONFIG_NET_CLS_ACT
struct tcf_proto __rcu *egress_cl_list;
#endif
-#ifdef CONFIG_NET_SWITCHDEV
- u32 offload_fwd_mark;
-#endif
/* These may be needed for future network-power-down code. */
struct timer_list watchdog_timer;
@@ -3267,6 +3282,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
napi->skb = NULL;
}
+bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 9230f9aee896..abc7fdcb9eb1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -55,12 +55,34 @@ struct nf_hook_state {
struct net_device *out;
struct sock *sk;
struct net *net;
- struct list_head *hook_list;
+ struct nf_hook_entry __rcu *hook_entries;
int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};
+typedef unsigned int nf_hookfn(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
+struct nf_hook_ops {
+ struct list_head list;
+
+ /* User fills in from here down. */
+ nf_hookfn *hook;
+ struct net_device *dev;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
+ /* Hooks are ordered in ascending priority. */
+ int priority;
+};
+
+struct nf_hook_entry {
+ struct nf_hook_entry __rcu *next;
+ struct nf_hook_ops ops;
+ const struct nf_hook_ops *orig_ops;
+};
+
static inline void nf_hook_state_init(struct nf_hook_state *p,
- struct list_head *hook_list,
+ struct nf_hook_entry *hook_entry,
unsigned int hook,
int thresh, u_int8_t pf,
struct net_device *indev,
@@ -76,26 +98,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
p->out = outdev;
p->sk = sk;
p->net = net;
- p->hook_list = hook_list;
+ RCU_INIT_POINTER(p->hook_entries, hook_entry);
p->okfn = okfn;
}
-typedef unsigned int nf_hookfn(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state);
-
-struct nf_hook_ops {
- struct list_head list;
- /* User fills in from here down. */
- nf_hookfn *hook;
- struct net_device *dev;
- void *priv;
- u_int8_t pf;
- unsigned int hooknum;
- /* Hooks are ordered in ascending priority. */
- int priority;
-};
struct nf_sockopt_ops {
struct list_head list;
@@ -133,6 +140,8 @@ int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
+int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
+void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
/* Functions to register get/setsockopt ranges (non-inclusive). You
need to check permissions yourself! */
@@ -161,7 +170,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct net *, struct sock *, struct sk_buff *),
int thresh)
{
- struct list_head *hook_list;
+ struct nf_hook_entry *hook_head;
+ int ret = 1;
#ifdef HAVE_JUMP_LABEL
if (__builtin_constant_p(pf) &&
@@ -170,16 +180,19 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
return 1;
#endif
- hook_list = &net->nf.hooks[pf][hook];
-
- if (!list_empty(hook_list)) {
+ rcu_read_lock();
+ hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
+ if (hook_head) {
struct nf_hook_state state;
- nf_hook_state_init(&state, hook_list, hook, thresh,
+ nf_hook_state_init(&state, hook_head, hook, thresh,
pf, indev, outdev, sk, net, okfn);
- return nf_hook_slow(skb, &state);
+
+ ret = nf_hook_slow(skb, &state);
}
- return 1;
+ rcu_read_unlock();
+
+ return ret;
}
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
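
The per-hook list_head is replaced by an RCU-protected singly linked list of nf_hook_entry objects. Under rcu_read_lock(), a chain is walked roughly as in the sketch below; this mirrors what nf_hook_slow() is expected to do and is not a copy of that function.

#include <linux/netfilter.h>
#include <linux/rcupdate.h>

static unsigned int example_run_hooks(struct sk_buff *skb,
                                      struct nf_hook_state *state)
{
        struct nf_hook_entry *entry;
        unsigned int verdict = NF_ACCEPT;

        for (entry = rcu_dereference(state->hook_entries);
             entry;
             entry = rcu_dereference(entry->next)) {
                verdict = entry->ops.hook(entry->ops.priv, skb, state);
                if (verdict != NF_ACCEPT)
                        break;          /* drop, queue, stolen, ... */
        }

        return verdict;
}
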
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 275505792664..1d1ef4e20512 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -4,13 +4,9 @@
#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct ip_conntrack_stat {
- unsigned int searched;
unsigned int found;
- unsigned int new;
unsigned int invalid;
unsigned int ignore;
- unsigned int delete;
- unsigned int delete_list;
unsigned int insert;
unsigned int insert_failed;
unsigned int drop;
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index df78dc2b5524..dee0acd0dd31 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -1,68 +1,8 @@
#ifndef _CONNTRACK_PROTO_GRE_H
#define _CONNTRACK_PROTO_GRE_H
#include <asm/byteorder.h>
-
-/* GRE PROTOCOL HEADER */
-
-/* GRE Version field */
-#define GRE_VERSION_1701 0x0
-#define GRE_VERSION_PPTP 0x1
-
-/* GRE Protocol field */
-#define GRE_PROTOCOL_PPTP 0x880B
-
-/* GRE Flags */
-#define GRE_FLAG_C 0x80
-#define GRE_FLAG_R 0x40
-#define GRE_FLAG_K 0x20
-#define GRE_FLAG_S 0x10
-#define GRE_FLAG_A 0x80
-
-#define GRE_IS_C(f) ((f)&GRE_FLAG_C)
-#define GRE_IS_R(f) ((f)&GRE_FLAG_R)
-#define GRE_IS_K(f) ((f)&GRE_FLAG_K)
-#define GRE_IS_S(f) ((f)&GRE_FLAG_S)
-#define GRE_IS_A(f) ((f)&GRE_FLAG_A)
-
-/* GRE is a mess: Four different standards */
-struct gre_hdr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u16 rec:3,
- srr:1,
- seq:1,
- key:1,
- routing:1,
- csum:1,
- version:3,
- reserved:4,
- ack:1;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u16 csum:1,
- routing:1,
- key:1,
- seq:1,
- srr:1,
- rec:3,
- ack:1,
- reserved:4,
- version:3;
-#else
-#error "Adjust your <asm/byteorder.h> defines"
-#endif
- __be16 protocol;
-};
-
-/* modified GRE header for PPTP */
-struct gre_hdr_pptp {
- __u8 flags; /* bitfield */
- __u8 version; /* should be GRE_VERSION_PPTP */
- __be16 protocol; /* should be GRE_PROTOCOL_PPTP */
- __be16 payload_len; /* size of ppp payload, not inc. gre header */
- __be16 call_id; /* peer's call_id for this session */
- __be32 seq; /* sequence number. Present if S==1 */
- __be32 ack; /* seq number of highest packet received by */
- /* sender in this session */
-};
+#include <net/gre.h>
+#include <net/pptp.h>
struct nf_ct_gre {
unsigned int stream_timeout;
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 5fcd375ef175..33e37fb41d5d 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -11,22 +11,30 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
return false;
#endif
- return !list_empty(&skb->dev->nf_hooks_ingress);
+ return rcu_access_pointer(skb->dev->nf_hooks_ingress);
}
+/* caller must hold rcu_read_lock */
static inline int nf_hook_ingress(struct sk_buff *skb)
{
+ struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
struct nf_hook_state state;
- nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
- NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
- skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
+ /* Must recheck the ingress hook head, in the event it became NULL
+ * after the check in nf_hook_ingress_active evaluated to true.
+ */
+ if (unlikely(!e))
+ return 0;
+
+ nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN,
+ NFPROTO_NETDEV, skb->dev, NULL, NULL,
+ dev_net(skb->dev), NULL);
return nf_hook_slow(skb, &state);
}
static inline void nf_hook_ingress_init(struct net_device *dev)
{
- INIT_LIST_HEAD(&dev->nf_hooks_ingress);
+ RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
}
#else /* CONFIG_NETFILTER_INGRESS */
static inline int nf_hook_ingress_active(struct sk_buff *skb)
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c6564ada9beb..9094faf0699d 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -67,6 +67,7 @@ struct nfs4_stateid_struct {
NFS4_DELEGATION_STATEID_TYPE,
NFS4_LAYOUT_STATEID_TYPE,
NFS4_PNFS_DS_STATEID_TYPE,
+ NFS4_REVOKED_STATEID_TYPE,
} type;
};
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 14a762d2734d..b34097c67848 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -103,6 +103,9 @@ struct nfs_client {
#define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */
#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */
#define NFS_SP4_MACH_CRED_PNFS_CLEANUP 7 /* LAYOUTRETURN */
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+ wait_queue_head_t cl_lock_waitq;
+#endif /* CONFIG_NFS_V4_1 */
#endif /* CONFIG_NFS_V4 */
/* Our own IP address, as a null-terminated string.
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7cc0deee5bde..beb1e10f446e 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -125,6 +125,11 @@ struct nfs_fattr {
| NFS_ATTR_FATTR_V4_SECURITY_LABEL)
/*
+ * Maximal number of supported layout drivers.
+ */
+#define NFS_MAX_LAYOUT_TYPES 8
+
+/*
* Info on the file system
*/
struct nfs_fsinfo {
@@ -139,7 +144,8 @@ struct nfs_fsinfo {
__u64 maxfilesize;
struct timespec time_delta; /* server time granularity */
__u32 lease_time; /* in seconds */
- __u32 layouttype; /* supported pnfs layout driver */
+ __u32 nlayouttypes; /* number of layouttypes */
+ __u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */
__u32 blksize; /* preferred pnfs io block size */
__u32 clone_blksize; /* granularity of a CLONE operation */
};
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 4630eeae18e0..a78c35cff1ae 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -35,21 +35,34 @@ static inline void hardlockup_detector_disable(void) {}
* base function. Return whether such support was available,
* to allow calling code to fall back to some other mechanism:
*/
-#ifdef arch_trigger_all_cpu_backtrace
+#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
- arch_trigger_all_cpu_backtrace(true);
-
+ arch_trigger_cpumask_backtrace(cpu_online_mask, false);
return true;
}
+
static inline bool trigger_allbutself_cpu_backtrace(void)
{
- arch_trigger_all_cpu_backtrace(false);
+ arch_trigger_cpumask_backtrace(cpu_online_mask, true);
+ return true;
+}
+
+static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
+{
+ arch_trigger_cpumask_backtrace(mask, false);
+ return true;
+}
+
+static inline bool trigger_single_cpu_backtrace(int cpu)
+{
+ arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
return true;
}
/* generic implementation */
-void nmi_trigger_all_cpu_backtrace(bool include_self,
+void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+ bool exclude_self,
void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);
@@ -62,6 +75,14 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
{
return false;
}
+static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
+{
+ return false;
+}
+static inline bool trigger_single_cpu_backtrace(int cpu)
+{
+ return false;
+}
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
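
The finer-grained wrappers make it possible to dump a single CPU instead of all of them; a short illustration (the surrounding stall detection is hypothetical):

#include <linux/nmi.h>
#include <linux/printk.h>

static void example_report_stuck_cpu(int cpu)
{
        if (!trigger_single_cpu_backtrace(cpu))
                pr_warn("CPU%d: no NMI backtrace support on this arch\n", cpu);
}
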
diff --git a/include/linux/of.h b/include/linux/of.h
index 3d9ff8e9d803..299aeb192727 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -291,20 +291,24 @@ extern int of_property_count_elems_of_size(const struct device_node *np,
extern int of_property_read_u32_index(const struct device_node *np,
const char *propname,
u32 index, u32 *out_value);
-extern int of_property_read_u8_array(const struct device_node *np,
- const char *propname, u8 *out_values, size_t sz);
-extern int of_property_read_u16_array(const struct device_node *np,
- const char *propname, u16 *out_values, size_t sz);
-extern int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values,
- size_t sz);
+extern int of_property_read_variable_u8_array(const struct device_node *np,
+ const char *propname, u8 *out_values,
+ size_t sz_min, size_t sz_max);
+extern int of_property_read_variable_u16_array(const struct device_node *np,
+ const char *propname, u16 *out_values,
+ size_t sz_min, size_t sz_max);
+extern int of_property_read_variable_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values,
+ size_t sz_min,
+ size_t sz_max);
extern int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value);
-extern int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values,
- size_t sz);
+extern int of_property_read_variable_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values,
+ size_t sz_min,
+ size_t sz_max);
extern int of_property_read_string(const struct device_node *np,
const char *propname,
@@ -380,6 +384,122 @@ extern int of_detach_node(struct device_node *);
#define of_match_ptr(_ptr) (_ptr)
+/**
+ * of_property_read_u8_array - Find and read an array of u8 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 8-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The dts entry for the array should look like:
+ * property = /bits/ 8 <0x50 0x60 0x70>;
+ *
+ * The out_values is modified only if a valid u8 value can be decoded.
+ */
+static inline int of_property_read_u8_array(const struct device_node *np,
+ const char *propname,
+ u8 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u8_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u16_array - Find and read an array of u16 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 16-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The dts entry for the array should look like:
+ * property = /bits/ 16 <0x5000 0x6000 0x7000>;
+ *
+ * The out_values is modified only if a valid u16 value can be decoded.
+ */
+static inline int of_property_read_u16_array(const struct device_node *np,
+ const char *propname,
+ u16 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u16_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u32_array - Find and read an array of 32 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+static inline int of_property_read_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u32_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u64_array - Find and read an array of 64 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 64-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values is modified only if a valid u64 value can be decoded.
+ */
+static inline int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u64_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
/*
* struct property *prop;
* const __be32 *p;
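
Fixed-size reads keep their existing semantics because the old entry points are now wrappers; the variable-length variants return how many elements were read. A small usage sketch, with a made-up property name:

#include <linux/of.h>

static int example_read_arrays(const struct device_node *np)
{
        u32 vals[4];
        int ret;

        /* Exactly 4 elements, exactly as before. */
        ret = of_property_read_u32_array(np, "example-prop", vals, 4);
        if (ret)
                return ret;

        /* Variable length: accept anywhere from 1 to 4 elements. */
        ret = of_property_read_variable_u32_array(np, "example-prop",
                                                  vals, 1, 4);
        return ret < 0 ? ret : 0;   /* >= 0 is the element count */
}
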
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 26c3302ae58f..4341f32516d8 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/errno.h>
/* Definitions used by the flattened device tree */
#define OF_DT_HEADER 0xd00dfeed /* marker */
@@ -66,6 +67,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
int depth, void *data);
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
+extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
@@ -94,6 +96,7 @@ extern void early_get_first_memblock_info(void *, phys_addr_t *);
extern u64 of_flat_dt_translate_address(unsigned long node);
extern void of_fdt_limit_memory(int limit);
#else /* CONFIG_OF_FLATTREE */
+static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline void early_init_fdt_reserve_self(void) {}
static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 092186c62ff4..3f87ea5b8bee 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -61,8 +61,6 @@ static inline int of_mm_gpiochip_add(struct device_node *np,
}
extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
-extern int of_gpiochip_add(struct gpio_chip *gc);
-extern void of_gpiochip_remove(struct gpio_chip *gc);
extern int of_gpio_simple_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec,
u32 *flags);
@@ -86,9 +84,6 @@ static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
return -ENOSYS;
}
-static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
-static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
-
#endif /* CONFIG_OF_GPIO */
/**
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index b969e9443962..7fd5cfce9140 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -17,6 +17,9 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
void of_pci_check_probe_only(void);
+int of_pci_map_rid(struct device_node *np, u32 rid,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out);
#else
static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
@@ -52,6 +55,13 @@ of_get_pci_domain_nr(struct device_node *node)
return -1;
}
+static inline int of_pci_map_rid(struct device_node *np, u32 rid,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out)
+{
+ return -EINVAL;
+}
+
static inline void of_pci_check_probe_only(void) { }
#endif
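
of_pci_map_rid() walks a map property such as "msi-map"/"msi-map-mask" to translate a requester ID into another ID space. A usage sketch, with reference handling simplified:

#include <linux/of.h>
#include <linux/of_pci.h>

static int example_map_rid_to_msi(struct device_node *bridge_np, u32 rid)
{
        struct device_node *msi_np = NULL;
        u32 msi_rid;
        int err;

        err = of_pci_map_rid(bridge_np, rid, "msi-map", "msi-map-mask",
                             &msi_np, &msi_rid);
        if (err)
                return err;

        /* msi_np is the MSI controller, msi_rid the ID in its space. */
        of_node_put(msi_np);
        return 0;
}
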
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 1d99b61adc65..290081620b3e 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -297,6 +297,7 @@ struct omap_system_dma_plat_info {
#define dma_omap15xx() __dma_omap15xx(d)
#define dma_omap16xx() __dma_omap16xx(d)
+#if defined(CONFIG_ARCH_OMAP)
extern struct omap_system_dma_plat_info *omap_get_plat_info(void);
extern void omap_set_dma_priority(int lch, int dst_port, int priority);
@@ -355,4 +356,22 @@ static inline int omap_lcd_dma_running(void)
}
#endif
+#else /* CONFIG_ARCH_OMAP */
+
+static inline struct omap_system_dma_plat_info *omap_get_plat_info(void)
+{
+ return NULL;
+}
+
+static inline int omap_request_dma(int dev_id, const char *dev_name,
+ void (*callback)(int lch, u16 ch_status, void *data),
+ void *data, int *dma_ch)
+{
+ return -ENODEV;
+}
+
+static inline void omap_free_dma(int ch) { }
+
+#endif /* CONFIG_ARCH_OMAP */
+
#endif /* __LINUX_OMAP_DMA_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 9e9d79e8efa5..35d0fd7a4948 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -29,8 +29,8 @@ struct gpmc_nand_regs;
struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs);
#else
-static inline gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
- int cs)
+static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
+ int cs)
{
return NULL;
}
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 5bc0457ee3a8..b4e36e92bc87 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -34,23 +34,11 @@ struct oom_control {
* for display purposes.
*/
const int order;
-};
-
-/*
- * Types of limitations to the nodes from which allocations may occur
- */
-enum oom_constraint {
- CONSTRAINT_NONE,
- CONSTRAINT_CPUSET,
- CONSTRAINT_MEMORY_POLICY,
- CONSTRAINT_MEMCG,
-};
-enum oom_scan_t {
- OOM_SCAN_OK, /* scan thread and find its badness */
- OOM_SCAN_CONTINUE, /* do not consider thread for oom kill */
- OOM_SCAN_ABORT, /* abort the iteration and return */
- OOM_SCAN_SELECT, /* always select this thread first */
+ /* Used by oom implementation, do not set */
+ unsigned long totalpages;
+ struct task_struct *chosen;
+ unsigned long chosen_points;
};
extern struct mutex oom_lock;
@@ -70,45 +58,27 @@ static inline bool oom_task_origin(const struct task_struct *p)
return p->signal->oom_flag_origin;
}
-extern void mark_oom_victim(struct task_struct *tsk);
-
-#ifdef CONFIG_MMU
-extern void wake_oom_reaper(struct task_struct *tsk);
-#else
-static inline void wake_oom_reaper(struct task_struct *tsk)
+static inline bool tsk_is_oom_victim(struct task_struct * tsk)
{
+ return tsk->signal->oom_mm;
}
-#endif
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
-extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
- unsigned int points, unsigned long totalpages,
- const char *message);
-
-extern void check_panic_on_oom(struct oom_control *oc,
- enum oom_constraint constraint);
-
-extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
- struct task_struct *task);
-
extern bool out_of_memory(struct oom_control *oc);
-extern void exit_oom_victim(struct task_struct *tsk);
+extern void exit_oom_victim(void);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
-extern bool oom_killer_disabled;
-extern bool oom_killer_disable(void);
+extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
-bool task_will_free_mem(struct task_struct *task);
-
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
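
A minimal sketch of the new tsk_is_oom_victim() helper introduced above; the surrounding retry policy is illustrative only, not the actual page allocator logic.

#include <linux/oom.h>
#include <linux/sched.h>

static bool example_should_stop_retrying(void)
{
	/* A task already selected by the OOM killer has signal->oom_mm set,
	 * so tsk_is_oom_victim() is true and further retries are pointless. */
	return tsk_is_oom_victim(current);
}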
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 113ee626a4dc..0f9e567d5e15 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -151,7 +151,7 @@ struct parallel_data {
* @flags: padata flags.
*/
struct padata_instance {
- struct notifier_block cpu_notifier;
+ struct hlist_node node;
struct workqueue_struct *wq;
struct parallel_data *pd;
struct padata_cpumask cpumask;
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 03f2a3e7d76d..9298c393ddaa 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -7,6 +7,8 @@
struct pglist_data;
struct page_ext_operations {
+ size_t offset;
+ size_t size;
bool (*need)(void);
void (*init)(void);
};
@@ -42,12 +44,6 @@ enum page_ext_flags {
*/
struct page_ext {
unsigned long flags;
-#ifdef CONFIG_PAGE_OWNER
- unsigned int order;
- gfp_t gfp_mask;
- int last_migrate_reason;
- depot_stack_handle_t handle;
-#endif
};
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
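
With the new size/offset members in struct page_ext_operations, clients stop hard-coding fields in struct page_ext and instead declare how much per-page space they need. A hedged sketch, assuming the core computes .offset from the accumulated sizes at init time and the client reaches its data via that offset (the client and helper names are made up):

#include <linux/page_ext.h>

struct example_page_data {
	unsigned long last_touched;
};

static bool example_need(void)
{
	return true;	/* e.g. gated on a boot parameter in real code */
}

static void example_init(void)
{
	/* one-time setup */
}

struct page_ext_operations example_page_ops = {
	.size = sizeof(struct example_page_data),
	.need = example_need,
	.init = example_init,
};

static struct example_page_data *example_get_data(struct page_ext *page_ext)
{
	/* .offset is assumed to be filled in by the page_ext core */
	return (void *)page_ext + example_page_ops.offset;
}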
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 30583ab0ffb1..2be728d156b5 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -14,6 +14,8 @@ extern void __split_page_owner(struct page *page, unsigned int order);
extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
extern void __dump_page_owner(struct page *page);
+extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
+ pg_data_t *pgdat, struct zone *zone);
static inline void reset_page_owner(struct page *page, unsigned int order)
{
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 66a1260b33de..dd15d39e1985 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -16,15 +16,16 @@
#include <linux/hugetlb_inline.h>
/*
- * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
- * allocation mode flags.
+ * Bits in mapping->flags.
*/
enum mapping_flags {
- AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
- AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
- AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
- AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
- AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
+ AS_EIO = 0, /* IO error on async write */
+ AS_ENOSPC = 1, /* ENOSPC on async write */
+ AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */
+ AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */
+ AS_EXITING = 4, /* final truncate in progress */
+ /* writeback related tags are not used */
+ AS_NO_WRITEBACK_TAGS = 5,
};
static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -64,9 +65,19 @@ static inline int mapping_exiting(struct address_space *mapping)
return test_bit(AS_EXITING, &mapping->flags);
}
+static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
+{
+ set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
+}
+
+static inline int mapping_use_writeback_tags(struct address_space *mapping)
+{
+ return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
+}
+
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
- return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
+ return mapping->gfp_mask;
}
/* Restricts the given gfp_mask to what the mapping allows. */
@@ -82,8 +93,7 @@ static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
*/
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
- m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
- (__force unsigned long)mask;
+ m->gfp_mask = mask;
}
void release_pages(struct page **pages, int nr, bool cold);
@@ -396,7 +406,7 @@ static inline loff_t page_offset(struct page *page)
static inline loff_t page_file_offset(struct page *page)
{
- return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
+ return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -518,109 +528,60 @@ void page_endio(struct page *page, bool is_write, int err);
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
/*
- * Fault one or two userspace pages into pagetables.
- * Return -EINVAL if more than two pages would be needed.
- * Return non-zero on a fault.
+ * Fault in everything in the given userspace address range.
*/
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
- int span, ret;
-
- if (unlikely(size == 0))
- return 0;
-
- span = offset_in_page(uaddr) + size;
- if (span > 2 * PAGE_SIZE)
- return -EINVAL;
- /*
- * Writing zeroes into userspace here is OK, because we know that if
- * the zero gets there, we'll be overwriting it.
- */
- ret = __put_user(0, uaddr);
- if (ret == 0 && span > PAGE_SIZE)
- ret = __put_user(0, uaddr + size - 1);
- return ret;
-}
-
-static inline int fault_in_pages_readable(const char __user *uaddr, int size)
-{
- volatile char c;
- int ret;
-
- if (unlikely(size == 0))
- return 0;
-
- ret = __get_user(c, uaddr);
- if (ret == 0) {
- const char __user *end = uaddr + size - 1;
-
- if (((unsigned long)uaddr & PAGE_MASK) !=
- ((unsigned long)end & PAGE_MASK)) {
- ret = __get_user(c, end);
- (void)c;
- }
- }
- return ret;
-}
-
-/*
- * Multipage variants of the above prefault helpers, useful if more than
- * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
- * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
- * filemap.c hotpaths.
- */
-static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
-{
- int ret = 0;
char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return ret;
+ return 0;
+ if (unlikely(uaddr > end))
+ return -EFAULT;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
- while (uaddr <= end) {
- ret = __put_user(0, uaddr);
- if (ret != 0)
- return ret;
+ do {
+ if (unlikely(__put_user(0, uaddr) != 0))
+ return -EFAULT;
uaddr += PAGE_SIZE;
- }
+ } while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK))
- ret = __put_user(0, end);
+ return __put_user(0, end);
- return ret;
+ return 0;
}
-static inline int fault_in_multipages_readable(const char __user *uaddr,
- int size)
+static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
volatile char c;
- int ret = 0;
const char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return ret;
+ return 0;
+
+ if (unlikely(uaddr > end))
+ return -EFAULT;
- while (uaddr <= end) {
- ret = __get_user(c, uaddr);
- if (ret != 0)
- return ret;
+ do {
+ if (unlikely(__get_user(c, uaddr) != 0))
+ return -EFAULT;
uaddr += PAGE_SIZE;
- }
+ } while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK)) {
- ret = __get_user(c, end);
- (void)c;
+ return __get_user(c, end);
}
- return ret;
+ (void)c;
+ return 0;
}
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
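
The reworked fault_in_pages_writeable()/fault_in_pages_readable() now walk an arbitrarily long range, so the old multipage variants go away. A minimal sketch of the usual prefault-then-copy pattern; the buffer handling around it is illustrative only.

#include <linux/pagemap.h>
#include <linux/uaccess.h>

static ssize_t example_write(char *dst, const char __user *src, size_t len)
{
	/* Touch every page of the source up front so a later copy, possibly
	 * done under locks that forbid page faults, is unlikely to fault.
	 * -EFAULT here simply means the range is bad. */
	if (fault_in_pages_readable(src, len))
		return -EFAULT;

	if (copy_from_user(dst, src, len))
		return -EFAULT;

	return len;
}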
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0ab835965669..0e49f70dbd9b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -187,8 +187,9 @@ enum pci_irq_reroute_variant {
typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
- PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
- PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
+ PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
+ PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
+ PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
};
/* These values come from the PCI Express Spec */
@@ -268,6 +269,9 @@ struct pci_dev {
unsigned int class; /* 3 bytes: (base,sub,prog-if) */
u8 revision; /* PCI revision, low byte of class word */
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
+#ifdef CONFIG_PCIEAER
+ u16 aer_cap; /* AER capability offset */
+#endif
u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
@@ -308,6 +312,9 @@ struct pci_dev {
powered on/off by the
corresponding bridge */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
+ unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
+ controlled exclusively by
+ user sysfs */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
@@ -367,6 +374,12 @@ struct pci_dev {
int rom_attr_enabled; /* has display of the rom attribute been enabled? */
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
+
+#ifdef CONFIG_PCIE_PTM
+ unsigned int ptm_root:1;
+ unsigned int ptm_enabled:1;
+ u8 ptm_granularity;
+#endif
#ifdef CONFIG_PCI_MSI
const struct attribute_group **msi_irq_groups;
#endif
@@ -1126,6 +1139,7 @@ void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
int (*)(const struct pci_dev *, u8, u8));
+struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
#define HAVE_PCI_REQ_REGIONS 2
int __must_check pci_request_regions(struct pci_dev *, const char *);
int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
@@ -1300,6 +1314,7 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags);
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
+const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
@@ -1342,6 +1357,11 @@ static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
return -EINVAL;
return dev->irq;
}
+static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
+ int vec)
+{
+ return cpu_possible_mask;
+}
#endif
#ifdef CONFIG_PCIEPORTBUS
@@ -1361,9 +1381,11 @@ static inline bool pcie_aspm_support_enabled(void) { return false; }
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
bool pci_aer_available(void);
+int pci_aer_init(struct pci_dev *dev);
#else
static inline void pci_no_aer(void) { }
static inline bool pci_aer_available(void) { return false; }
+static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -1395,6 +1417,13 @@ static inline void pci_disable_ats(struct pci_dev *d) { }
static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
#endif
+#ifdef CONFIG_PCIE_PTM
+int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
+#else
+static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+{ return -EINVAL; }
+#endif
+
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -1542,6 +1571,9 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
int enable)
{ return 0; }
+static inline struct resource *pci_find_resource(struct pci_dev *dev,
+ struct resource *res)
+{ return NULL; }
static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
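
A hedged sketch pairing pci_alloc_irq_vectors() with the new pci_irq_get_affinity() helper declared above; the PCI_IRQ_MSIX flag and the vector count of 8 are assumptions for illustration, not requirements of the API.

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/pci.h>

static int example_setup_irqs(struct pci_dev *pdev)
{
	const struct cpumask *mask;
	int nvec, i;

	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		mask = pci_irq_get_affinity(pdev, i);
		if (mask)
			pr_info("vector %d -> irq %d, %u possible CPUs\n",
				i, pci_irq_vector(pdev, i),
				cpumask_weight(mask));
	}
	return 0;
}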
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index c2fa3ecb0dce..5b2e6159b744 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -10,32 +10,122 @@
struct percpu_rw_semaphore {
struct rcu_sync rss;
- unsigned int __percpu *fast_read_ctr;
+ unsigned int __percpu *read_count;
struct rw_semaphore rw_sem;
- atomic_t slow_read_ctr;
- wait_queue_head_t write_waitq;
+ wait_queue_head_t writer;
+ int readers_block;
};
-extern void percpu_down_read(struct percpu_rw_semaphore *);
-extern int percpu_down_read_trylock(struct percpu_rw_semaphore *);
-extern void percpu_up_read(struct percpu_rw_semaphore *);
+#define DEFINE_STATIC_PERCPU_RWSEM(name) \
+static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \
+static struct percpu_rw_semaphore name = { \
+ .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \
+ .read_count = &__percpu_rwsem_rc_##name, \
+ .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
+ .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \
+}
+
+extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+extern void __percpu_up_read(struct percpu_rw_semaphore *);
+
+static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
+{
+ might_sleep();
+
+ rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+ preempt_disable();
+ /*
+ * We are in an RCU-sched read-side critical section, so the writer
+ * cannot both change sem->state from readers_fast and start checking
+ * counters while we are here. So if we see !sem->state, we know that
+ * the writer won't be checking until we're past the preempt_enable()
+ * and that once the synchronize_sched() is done, the writer will see
+ * anything we did within this RCU-sched read-side critical section.
+ */
+ __this_cpu_inc(*sem->read_count);
+ if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ __percpu_down_read(sem, false); /* Unconditional memory barrier */
+ barrier();
+ /*
+ * The barrier() prevents the compiler from
+ * bleeding the critical section out.
+ */
+}
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+ percpu_down_read_preempt_disable(sem);
+ preempt_enable();
+}
+
+static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+{
+ int ret = 1;
+
+ preempt_disable();
+ /*
+ * Same as in percpu_down_read().
+ */
+ __this_cpu_inc(*sem->read_count);
+ if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+ preempt_enable();
+ /*
+ * The barrier() from preempt_enable() prevents the compiler from
+ * bleeding the critical section out.
+ */
+
+ if (ret)
+ rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+
+ return ret;
+}
+
+static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
+{
+ /*
+ * The barrier() prevents the compiler from
+ * bleeding the critical section out.
+ */
+ barrier();
+ /*
+ * Same as in percpu_down_read().
+ */
+ if (likely(rcu_sync_is_idle(&sem->rss)))
+ __this_cpu_dec(*sem->read_count);
+ else
+ __percpu_up_read(sem); /* Unconditional memory barrier */
+ preempt_enable();
+
+ rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+ preempt_disable();
+ percpu_up_read_preempt_enable(sem);
+}
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
const char *, struct lock_class_key *);
+
extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
-#define percpu_init_rwsem(brw) \
+#define percpu_init_rwsem(sem) \
({ \
static struct lock_class_key rwsem_key; \
- __percpu_init_rwsem(brw, #brw, &rwsem_key); \
+ __percpu_init_rwsem(sem, #sem, &rwsem_key); \
})
-
#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
+#define percpu_rwsem_assert_held(sem) \
+ lockdep_assert_held(&(sem)->rw_sem)
+
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
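
A minimal sketch of the reader/writer pairing for the reworked per-CPU rwsem, using the new DEFINE_STATIC_PERCPU_RWSEM() initializer from this header; the protected state is illustrative.

#include <linux/percpu-rwsem.h>

DEFINE_STATIC_PERCPU_RWSEM(example_rwsem);
static unsigned long example_generation;

static unsigned long example_read_generation(void)
{
	unsigned long gen;

	percpu_down_read(&example_rwsem);	/* fast path: per-CPU increment */
	gen = example_generation;
	percpu_up_read(&example_rwsem);
	return gen;
}

static void example_bump_generation(void)
{
	percpu_down_write(&example_rwsem);	/* slow path: waits for readers */
	example_generation++;
	percpu_up_write(&example_rwsem);
}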
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index e18843809eec..8462da266089 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/perf_event.h>
-
+#include <linux/sysfs.h>
#include <asm/cputype.h>
/*
@@ -77,6 +77,13 @@ struct pmu_hw_events {
struct arm_pmu *percpu_pmu;
};
+enum armpmu_attr_groups {
+ ARMPMU_ATTR_GROUP_COMMON,
+ ARMPMU_ATTR_GROUP_EVENTS,
+ ARMPMU_ATTR_GROUP_FORMATS,
+ ARMPMU_NR_ATTR_GROUPS
+};
+
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
@@ -109,8 +116,10 @@ struct arm_pmu {
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
- struct list_head entry;
+ struct hlist_node node;
struct notifier_block cpu_pm_nb;
+ /* the attr_groups array must be NULL-terminated */
+ const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
@@ -151,6 +160,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table);
+#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
+
#endif /* CONFIG_ARM_PMU */
#endif /* __ARM_PMU_H__ */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2b6b43cc0dd5..060d0ede88df 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -510,9 +510,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
struct perf_sample_data *,
struct pt_regs *regs);
-enum perf_group_flag {
- PERF_GROUP_SOFTWARE = 0x1,
-};
+/*
+ * Event capabilities. For event_caps and group_caps.
+ *
+ * PERF_EV_CAP_SOFTWARE: Is a software event.
+ * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
+ * from any CPU in the package where it is active.
+ */
+#define PERF_EV_CAP_SOFTWARE BIT(0)
+#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -568,7 +574,12 @@ struct perf_event {
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
- int group_flags;
+
+ /* Not serialized. Only written during event initialization. */
+ int event_caps;
+ /* The cumulative AND of all event_caps for events in this group. */
+ int group_caps;
+
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
@@ -679,6 +690,10 @@ struct perf_event {
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
+#ifdef CONFIG_BPF_SYSCALL
+ perf_overflow_handler_t orig_overflow_handler;
+ struct bpf_prog *prog;
+#endif
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
@@ -774,6 +789,9 @@ struct perf_cpu_context {
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
#endif
+
+ struct list_head sched_cb_entry;
+ int sched_cb_usage;
};
struct perf_output_handle {
@@ -788,6 +806,11 @@ struct perf_output_handle {
int page;
};
+struct bpf_perf_event_data_kern {
+ struct pt_regs *regs;
+ struct perf_sample_data *data;
+};
+
#ifdef CONFIG_CGROUP_PERF
/*
@@ -985,7 +1008,7 @@ static inline bool is_sampling_event(struct perf_event *event)
*/
static inline int is_software_event(struct perf_event *event)
{
- return event->pmu->task_ctx_nr == perf_sw_context;
+ return event->event_caps & PERF_EV_CAP_SOFTWARE;
}
extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
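
With group_flags replaced by capability bits, a software PMU advertises itself through event_caps, which is exactly what the updated is_software_event() checks. A hedged sketch of an event_init callback setting the bit; the PMU type test is illustrative.

#include <linux/errno.h>
#include <linux/perf_event.h>

static int example_sw_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	/* Written only during initialization, per the field's comment. */
	event->event_caps |= PERF_EV_CAP_SOFTWARE;
	return 0;
}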
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2d24b283aa2d..e25f1830fbcf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -80,6 +80,7 @@ typedef enum {
PHY_INTERFACE_MODE_XGMII,
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_TRGMII,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -123,6 +124,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "moca";
case PHY_INTERFACE_MODE_QSGMII:
return "qsgmii";
+ case PHY_INTERFACE_MODE_TRGMII:
+ return "trgmii";
default:
return "unknown";
}
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index f08b67238b58..ee1bed7dbfc6 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -36,6 +36,7 @@ enum phy_mode {
* @power_on: powering on the phy
* @power_off: powering off the phy
* @set_mode: set the mode of the phy
+ * @reset: resetting the phy
* @owner: the module owner containing the ops
*/
struct phy_ops {
@@ -44,6 +45,7 @@ struct phy_ops {
int (*power_on)(struct phy *phy);
int (*power_off)(struct phy *phy);
int (*set_mode)(struct phy *phy, enum phy_mode mode);
+ int (*reset)(struct phy *phy);
struct module *owner;
};
@@ -136,6 +138,7 @@ int phy_exit(struct phy *phy);
int phy_power_on(struct phy *phy);
int phy_power_off(struct phy *phy);
int phy_set_mode(struct phy *phy, enum phy_mode mode);
+int phy_reset(struct phy *phy);
static inline int phy_get_bus_width(struct phy *phy)
{
return phy->attrs.bus_width;
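
A minimal sketch of a consumer exercising the new phy_reset() entry point alongside the existing init/power-on calls; the ordering and error unwinding shown are one plausible pattern, not a requirement of the framework.

#include <linux/phy/phy.h>

static int example_bring_up_phy(struct phy *phy)
{
	int ret;

	ret = phy_init(phy);
	if (ret)
		return ret;

	ret = phy_power_on(phy);
	if (ret)
		goto err_exit;

	/* New in this series: invokes the provider's .reset op if implemented */
	ret = phy_reset(phy);
	if (ret)
		goto err_power_off;
	return 0;

err_power_off:
	phy_power_off(phy);
err_exit:
	phy_exit(phy);
	return ret;
}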
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 918b117a7cd3..34cce96741bc 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -40,6 +40,7 @@ struct pid_namespace {
struct fs_pin *bacct;
#endif
struct user_namespace *user_ns;
+ struct ucounts *ucounts;
struct work_struct proc_work;
kgid_t pid_gid;
int hide_pid;
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 24f5470d3944..e7497c9dde7f 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -66,15 +66,10 @@ struct pipe_inode_info {
*
* ->confirm()
* ->steal()
- * ...
- * ->map()
- * ...
- * ->unmap()
*
- * That is, ->map() must be called on a confirmed buffer,
- * same goes for ->steal(). See below for the meaning of each
- * operation. Also see kerneldoc in fs/pipe.c for the pipe
- * and generic variants of these hooks.
+ * That is, ->steal() must be called on a confirmed buffer.
+ * See below for the meaning of each operation. Also see kerneldoc
+ * in fs/pipe.c for the pipe and generic variants of these hooks.
*/
struct pipe_buf_operations {
/*
@@ -115,6 +110,53 @@ struct pipe_buf_operations {
void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
+/**
+ * pipe_buf_get - get a reference to a pipe_buffer
+ * @pipe: the pipe that the buffer belongs to
+ * @buf: the buffer to get a reference to
+ */
+static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ buf->ops->get(pipe, buf);
+}
+
+/**
+ * pipe_buf_release - put a reference to a pipe_buffer
+ * @pipe: the pipe that the buffer belongs to
+ * @buf: the buffer to put a reference to
+ */
+static inline void pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ const struct pipe_buf_operations *ops = buf->ops;
+
+ buf->ops = NULL;
+ ops->release(pipe, buf);
+}
+
+/**
+ * pipe_buf_confirm - verify contents of the pipe buffer
+ * @pipe: the pipe that the buffer belongs to
+ * @buf: the buffer to confirm
+ */
+static inline int pipe_buf_confirm(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ return buf->ops->confirm(pipe, buf);
+}
+
+/**
+ * pipe_buf_steal - attempt to take ownership of a pipe_buffer
+ * @pipe: the pipe that the buffer belongs to
+ * @buf: the buffer to attempt to steal
+ */
+static inline int pipe_buf_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ return buf->ops->steal(pipe, buf);
+}
+
/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE
@@ -129,7 +171,6 @@ extern unsigned long pipe_user_pages_hard;
extern unsigned long pipe_user_pages_soft;
int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe);
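
A minimal sketch of a pipe consumer using the new pipe_buf_confirm()/pipe_buf_release() wrappers instead of dereferencing buf->ops directly; the copy step in the middle is elided.

#include <linux/pipe_fs_i.h>

static int example_consume_buf(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	int ret;

	ret = pipe_buf_confirm(pipe, buf);	/* make sure the data is there */
	if (ret)
		return ret;

	/* ... copy the contents of buf->page out here ... */

	pipe_buf_release(pipe, buf);		/* clears buf->ops and drops the ref */
	return 0;
}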
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index 1d405a2b7272..a1bacf1150b2 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -4,11 +4,6 @@
#include <linux/mm_types.h>
#include <asm/mmu_context.h>
-#define PKEY_DISABLE_ACCESS 0x1
-#define PKEY_DISABLE_WRITE 0x2
-#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
- PKEY_DISABLE_WRITE)
-
#ifdef CONFIG_ARCH_HAS_PKEYS
#include <asm/pkeys.h>
#else /* ! CONFIG_ARCH_HAS_PKEYS */
@@ -16,18 +11,33 @@
#define execute_only_pkey(mm) (0)
#define arch_override_mprotect_pkey(vma, prot, pkey) (0)
#define PKEY_DEDICATED_EXECUTE_ONLY 0
-#endif /* ! CONFIG_ARCH_HAS_PKEYS */
+#define ARCH_VM_PKEY_FLAGS 0
+
+static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+{
+ return (pkey == 0);
+}
+
+static inline int mm_pkey_alloc(struct mm_struct *mm)
+{
+ return -1;
+}
-/*
- * This is called from mprotect_pkey().
- *
- * Returns true if the protection keys is valid.
- */
-static inline bool validate_pkey(int pkey)
+static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
{
- if (pkey < 0)
- return false;
- return (pkey < arch_max_pkey());
+ return -EINVAL;
}
+static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+ unsigned long init_val)
+{
+ return 0;
+}
+
+static inline void copy_init_pkru_to_fpregs(void)
+{
+}
+
+#endif /* ! CONFIG_ARCH_HAS_PKEYS */
+
#endif /* _LINUX_PKEYS_H */
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index d15d8ba8cc24..5f0e11e7354c 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -23,6 +23,7 @@
* @dst_id: dst request line
* @m_master: memory master for transfers on allocated channel
* @p_master: peripheral master for transfers on allocated channel
+ * @hs_polarity: set active-low polarity of the handshake interface
*/
struct dw_dma_slave {
struct device *dma_dev;
@@ -30,6 +31,7 @@ struct dw_dma_slave {
u8 dst_id;
u8 m_master;
u8 p_master;
+ bool hs_polarity;
};
/**
@@ -38,6 +40,7 @@ struct dw_dma_slave {
* @is_private: The device channels should be marked as private and not for use
* by the general purpose DMA channel allocator.
* @is_memcpy: The device channels do support memory-to-memory transfers.
+ * @is_nollp: The device channels do not support multi-block transfers.
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
@@ -49,6 +52,7 @@ struct dw_dma_platform_data {
unsigned int nr_channels;
bool is_private;
bool is_memcpy;
+ bool is_nollp;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
unsigned char chan_allocation_order;
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
index 0c72886030ef..422d4504dbac 100644
--- a/include/linux/platform_data/dma-mmp_tdma.h
+++ b/include/linux/platform_data/dma-mmp_tdma.h
@@ -28,7 +28,7 @@ struct sram_platdata {
int granularity;
};
-#ifdef CONFIG_ARM
+#ifdef CONFIG_MMP_SRAM
extern struct gen_pool *sram_get_gpool(char *pool_name);
#else
static inline struct gen_pool *sram_get_gpool(char *pool_name)
diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h
index 89ba1b0c90e4..4f9aba405e96 100644
--- a/include/linux/platform_data/dma-s3c24xx.h
+++ b/include/linux/platform_data/dma-s3c24xx.h
@@ -30,16 +30,22 @@ struct s3c24xx_dma_channel {
u16 chansel;
};
+struct dma_slave_map;
+
/**
* struct s3c24xx_dma_platdata - platform specific settings
* @num_phy_channels: number of physical channels
* @channels: array of virtual channel descriptions
* @num_channels: number of virtual channels
+ * @slave_map: dma slave map matching table
+ * @slavecnt: number of elements in slave_map
*/
struct s3c24xx_dma_platdata {
int num_phy_channels;
struct s3c24xx_dma_channel *channels;
int num_channels;
+ const struct dma_slave_map *slave_map;
+ int slavecnt;
};
struct dma_chan;
diff --git a/include/linux/mfd/htc-egpio.h b/include/linux/platform_data/gpio-htc-egpio.h
index b4201c971367..b4201c971367 100644
--- a/include/linux/mfd/htc-egpio.h
+++ b/include/linux/platform_data/gpio-htc-egpio.h
diff --git a/include/linux/platform_data/gpio-lpc32xx.h b/include/linux/platform_data/gpio-lpc32xx.h
deleted file mode 100644
index a544e962a818..000000000000
--- a/include/linux/platform_data/gpio-lpc32xx.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MACH_GPIO_LPC32XX_H
-#define __MACH_GPIO_LPC32XX_H
-
-/*
- * Note!
- * Muxed GP pins need to be setup to the GP state in the board level
- * code prior to using this driver.
- * GPI pins : 28xP3 group
- * GPO pins : 24xP3 group
- * GPIO pins: 8xP0 group, 24xP1 group, 13xP2 group, 6xP3 group
- */
-
-#define LPC32XX_GPIO_P0_MAX 8
-#define LPC32XX_GPIO_P1_MAX 24
-#define LPC32XX_GPIO_P2_MAX 13
-#define LPC32XX_GPIO_P3_MAX 6
-#define LPC32XX_GPI_P3_MAX 29
-#define LPC32XX_GPO_P3_MAX 24
-
-#define LPC32XX_GPIO_P0_GRP 0
-#define LPC32XX_GPIO_P1_GRP (LPC32XX_GPIO_P0_GRP + LPC32XX_GPIO_P0_MAX)
-#define LPC32XX_GPIO_P2_GRP (LPC32XX_GPIO_P1_GRP + LPC32XX_GPIO_P1_MAX)
-#define LPC32XX_GPIO_P3_GRP (LPC32XX_GPIO_P2_GRP + LPC32XX_GPIO_P2_MAX)
-#define LPC32XX_GPI_P3_GRP (LPC32XX_GPIO_P3_GRP + LPC32XX_GPIO_P3_MAX)
-#define LPC32XX_GPO_P3_GRP (LPC32XX_GPI_P3_GRP + LPC32XX_GPI_P3_MAX)
-
-/*
- * A specific GPIO can be selected with this macro
- * ie, GPIO_05 can be selected with LPC32XX_GPIO(LPC32XX_GPIO_P3_GRP, 5)
- * See the LPC32x0 User's guide for GPIO group numbers
- */
-#define LPC32XX_GPIO(x, y) ((x) + (y))
-
-#endif /* __MACH_GPIO_LPC32XX_H */
diff --git a/include/linux/platform_data/media/camera-pxa.h b/include/linux/platform_data/media/camera-pxa.h
index 6709b1cd7c77..ce5d90e1a6e4 100644
--- a/include/linux/platform_data/media/camera-pxa.h
+++ b/include/linux/platform_data/media/camera-pxa.h
@@ -37,6 +37,8 @@
struct pxacamera_platform_data {
unsigned long flags;
unsigned long mclk_10khz;
+ int sensor_i2c_adapter_id;
+ int sensor_i2c_address;
};
extern void pxa_set_camera_info(struct pxacamera_platform_data *);
diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h
index bfbd12b41162..71a1b2399c48 100644
--- a/include/linux/platform_data/remoteproc-omap.h
+++ b/include/linux/platform_data/remoteproc-omap.h
@@ -39,9 +39,9 @@ struct omap_rproc_pdata {
const char *firmware;
const char *mbox_name;
const struct rproc_ops *ops;
- int (*device_enable) (struct platform_device *pdev);
- int (*device_shutdown) (struct platform_device *pdev);
- void(*set_bootaddr)(u32);
+ int (*device_enable)(struct platform_device *pdev);
+ int (*device_shutdown)(struct platform_device *pdev);
+ void (*set_bootaddr)(u32);
};
#if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE)
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 31fec858088c..a09fe5c009c8 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -51,6 +51,8 @@ struct generic_pm_domain {
struct mutex lock;
struct dev_power_governor *gov;
struct work_struct power_off_work;
+ struct fwnode_handle *provider; /* Identity of the domain provider */
+ bool has_provider;
const char *name;
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
@@ -116,7 +118,6 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
return to_gpd_data(dev->power.subsys_data->domain_data);
}
-extern struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev);
extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
struct gpd_timing_data *td);
@@ -129,6 +130,7 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target);
extern int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
+extern int pm_genpd_remove(struct generic_pm_domain *genpd);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -138,10 +140,6 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
-{
- return NULL;
-}
static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
struct gpd_timing_data *td)
@@ -168,6 +166,10 @@ static inline int pm_genpd_init(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
+static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
+{
+ return -ENOTSUPP;
+}
#endif
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
@@ -192,57 +194,57 @@ struct genpd_onecell_data {
unsigned int num_domains;
};
-typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
- void *data);
-
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
- void *data);
+int of_genpd_add_provider_simple(struct device_node *np,
+ struct generic_pm_domain *genpd);
+int of_genpd_add_provider_onecell(struct device_node *np,
+ struct genpd_onecell_data *data);
void of_genpd_del_provider(struct device_node *np);
-struct generic_pm_domain *of_genpd_get_from_provider(
- struct of_phandle_args *genpdspec);
-
-struct generic_pm_domain *__of_genpd_xlate_simple(
- struct of_phandle_args *genpdspec,
- void *data);
-struct generic_pm_domain *__of_genpd_xlate_onecell(
- struct of_phandle_args *genpdspec,
- void *data);
+extern int of_genpd_add_device(struct of_phandle_args *args,
+ struct device *dev);
+extern int of_genpd_add_subdomain(struct of_phandle_args *parent,
+ struct of_phandle_args *new_subdomain);
+extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int genpd_dev_pm_attach(struct device *dev);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
-static inline int __of_genpd_add_provider(struct device_node *np,
- genpd_xlate_t xlate, void *data)
+static inline int of_genpd_add_provider_simple(struct device_node *np,
+ struct generic_pm_domain *genpd)
{
- return 0;
+ return -ENOTSUPP;
}
-static inline void of_genpd_del_provider(struct device_node *np) {}
-static inline struct generic_pm_domain *of_genpd_get_from_provider(
- struct of_phandle_args *genpdspec)
+static inline int of_genpd_add_provider_onecell(struct device_node *np,
+ struct genpd_onecell_data *data)
{
- return NULL;
+ return -ENOTSUPP;
}
-#define __of_genpd_xlate_simple NULL
-#define __of_genpd_xlate_onecell NULL
+static inline void of_genpd_del_provider(struct device_node *np) {}
-static inline int genpd_dev_pm_attach(struct device *dev)
+static inline int of_genpd_add_device(struct of_phandle_args *args,
+ struct device *dev)
{
return -ENODEV;
}
-#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
-static inline int of_genpd_add_provider_simple(struct device_node *np,
- struct generic_pm_domain *genpd)
+static inline int of_genpd_add_subdomain(struct of_phandle_args *parent,
+ struct of_phandle_args *new_subdomain)
{
- return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd);
+ return -ENODEV;
}
-static inline int of_genpd_add_provider_onecell(struct device_node *np,
- struct genpd_onecell_data *data)
+
+static inline int genpd_dev_pm_attach(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline
+struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
- return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data);
+ return ERR_PTR(-ENOTSUPP);
}
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
#ifdef CONFIG_PM
extern int dev_pm_domain_attach(struct device *dev, bool power_on);
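
A hedged sketch of registering a single generic PM domain with the new simple-provider helper that replaces the old __of_genpd_add_provider()/xlate pairing; the power callbacks are stubs and the domain name is made up.

#include <linux/of.h>
#include <linux/pm_domain.h>

static int example_power_on(struct generic_pm_domain *gpd)  { return 0; }
static int example_power_off(struct generic_pm_domain *gpd) { return 0; }

static struct generic_pm_domain example_pd = {
	.name		= "example-pd",
	.power_on	= example_power_on,
	.power_off	= example_power_off,
};

static int example_register_pd(struct device_node *np)
{
	int ret;

	ret = pm_genpd_init(&example_pd, NULL, false);
	if (ret)
		return ret;

	ret = of_genpd_add_provider_simple(np, &example_pd);
	if (ret)
		pm_genpd_remove(&example_pd);	/* also new in this series */
	return ret;
}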
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index d5d3d741f028..5a9a739acdd5 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -11,27 +11,7 @@
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
-
-#define ACL_UNDEFINED_ID (-1)
-
-/* a_type field in acl_user_posix_entry_t */
-#define ACL_TYPE_ACCESS (0x8000)
-#define ACL_TYPE_DEFAULT (0x4000)
-
-/* e_tag entry in struct posix_acl_entry */
-#define ACL_USER_OBJ (0x01)
-#define ACL_USER (0x02)
-#define ACL_GROUP_OBJ (0x04)
-#define ACL_GROUP (0x08)
-#define ACL_MASK (0x10)
-#define ACL_OTHER (0x20)
-
-/* permissions in the e_perm field */
-#define ACL_READ (0x04)
-#define ACL_WRITE (0x02)
-#define ACL_EXECUTE (0x01)
-//#define ACL_ADD (0x08)
-//#define ACL_DELETE (0x10)
+#include <uapi/linux/posix_acl.h>
struct posix_acl_entry {
short e_tag;
@@ -93,6 +73,7 @@ extern int set_posix_acl(struct inode *, int, struct posix_acl *);
extern int posix_acl_chmod(struct inode *, umode_t);
extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
struct posix_acl **);
+extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
extern int simple_set_acl(struct inode *, struct posix_acl *, int);
extern int simple_acl_create(struct inode *, struct inode *);
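
A minimal sketch of the call pattern the new posix_acl_update_mode() helper is meant for in a filesystem's ->set_acl(): it may clear the ACL pointer and strip setgid bits from the mode before anything is written back. The on-disk update step is a placeholder.

#include <linux/fs.h>
#include <linux/posix_acl.h>

static int example_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	umode_t mode = inode->i_mode;
	int error;

	if (type == ACL_TYPE_ACCESS && acl) {
		/* May clear @acl and/or adjust @mode. */
		error = posix_acl_update_mode(inode, &mode, &acl);
		if (error)
			return error;
		inode->i_mode = mode;
	}

	return 0; /* example_write_acl_to_disk(inode, acl, type); placeholder */
}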
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index e5e8ec40278d..8b867e3bf3aa 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -10,42 +10,25 @@
#define _POSIX_ACL_XATTR_H
#include <uapi/linux/xattr.h>
+#include <uapi/linux/posix_acl_xattr.h>
#include <linux/posix_acl.h>
-/* Supported ACL a_version fields */
-#define POSIX_ACL_XATTR_VERSION 0x0002
-
-/* An undefined entry e_id value */
-#define ACL_UNDEFINED_ID (-1)
-
-typedef struct {
- __le16 e_tag;
- __le16 e_perm;
- __le32 e_id;
-} posix_acl_xattr_entry;
-
-typedef struct {
- __le32 a_version;
- posix_acl_xattr_entry a_entries[0];
-} posix_acl_xattr_header;
-
-
static inline size_t
posix_acl_xattr_size(int count)
{
- return (sizeof(posix_acl_xattr_header) +
- (count * sizeof(posix_acl_xattr_entry)));
+ return (sizeof(struct posix_acl_xattr_header) +
+ (count * sizeof(struct posix_acl_xattr_entry)));
}
static inline int
posix_acl_xattr_count(size_t size)
{
- if (size < sizeof(posix_acl_xattr_header))
+ if (size < sizeof(struct posix_acl_xattr_header))
return -1;
- size -= sizeof(posix_acl_xattr_header);
- if (size % sizeof(posix_acl_xattr_entry))
+ size -= sizeof(struct posix_acl_xattr_header);
+ if (size % sizeof(struct posix_acl_xattr_entry))
return -1;
- return size / sizeof(posix_acl_xattr_entry);
+ return size / sizeof(struct posix_acl_xattr_entry);
}
#ifdef CONFIG_FS_POSIX_ACL
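
A minimal sketch of sizing and validating an ACL xattr blob with the helpers above, now spelled in terms of the uapi struct names; the wrapper function is illustrative.

#include <linux/errno.h>
#include <linux/posix_acl_xattr.h>

static int example_check_acl_blob(size_t value_len, size_t *entries)
{
	int count = posix_acl_xattr_count(value_len);

	if (count < 0)
		return -EINVAL;		/* truncated or misaligned blob */

	*entries = count;
	/* round trip: posix_acl_xattr_size(count) == value_len here */
	return 0;
}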
diff --git a/include/linux/power/bq24735-charger.h b/include/linux/power/bq24735-charger.h
index 6b750c1a45fa..b04be59f914c 100644
--- a/include/linux/power/bq24735-charger.h
+++ b/include/linux/power/bq24735-charger.h
@@ -28,10 +28,6 @@ struct bq24735_platform {
const char *name;
- int status_gpio;
- int status_gpio_active_low;
- bool status_gpio_valid;
-
bool ext_control;
char **supplied_to;
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index b50c0492629d..e30deb046156 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -58,6 +58,7 @@ struct bq27xxx_device_info {
unsigned long last_update;
struct delayed_work work;
struct power_supply *bat;
+ struct list_head list;
struct mutex lock;
u8 *regs;
};
diff --git a/include/linux/power/sbs-battery.h b/include/linux/power/sbs-battery.h
index 2b0a9d9ff57e..519b8b43239a 100644
--- a/include/linux/power/sbs-battery.h
+++ b/include/linux/power/sbs-battery.h
@@ -26,17 +26,13 @@
/**
* struct sbs_platform_data - platform data for sbs devices
- * @battery_detect: GPIO which is used to detect battery presence
- * @battery_detect_present: gpio state when battery is present (0 / 1)
* @i2c_retry_count: # of times to retry on i2c IO failure
* @poll_retry_count: # of times to retry looking for new status after
* external change notification
*/
struct sbs_platform_data {
- int battery_detect;
- int battery_detect_present;
- int i2c_retry_count;
- int poll_retry_count;
+ u32 i2c_retry_count;
+ u32 poll_retry_count;
};
#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 696a56be7d3e..eac1af8502bb 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -16,6 +16,7 @@ static inline int printk_get_level(const char *buffer)
switch (buffer[1]) {
case '0' ... '7':
case 'd': /* KERN_DEFAULT */
+ case 'c': /* KERN_CONT */
return buffer[1];
}
}
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index de0e7719d4c5..12cb8bd81d2d 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -18,6 +18,8 @@ struct proc_ns_operations {
struct ns_common *(*get)(struct task_struct *task);
void (*put)(struct ns_common *ns);
int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
+ struct user_namespace *(*owner)(struct ns_common *ns);
+ struct ns_common *(*get_parent)(struct ns_common *ns);
};
extern const struct proc_ns_operations netns_operations;
diff --git a/include/linux/property.h b/include/linux/property.h
index 3a2f9ae25c86..856e50b2140c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -190,7 +190,7 @@ struct property_entry {
.length = ARRAY_SIZE(_val_) * sizeof(_type_), \
.is_array = true, \
.is_string = false, \
- { .pointer = { _type_##_data = _val_ } }, \
+ { .pointer = { ._type_##_data = _val_ } }, \
}
#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 899e95e84400..92013cc9cc8c 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -22,12 +22,13 @@
#ifndef _LINUX_PSTORE_H
#define _LINUX_PSTORE_H
-#include <linux/time.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/mutex.h>
-#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/errno.h>
+#include <linux/time.h>
+#include <linux/types.h>
/* types */
enum pstore_type_id {
@@ -68,13 +69,21 @@ struct pstore_info {
enum kmsg_dump_reason reason, u64 *id,
unsigned int part, const char *buf, bool compressed,
size_t size, struct pstore_info *psi);
+ int (*write_buf_user)(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
+ unsigned int part, const char __user *buf,
+ bool compressed, size_t size, struct pstore_info *psi);
int (*erase)(enum pstore_type_id type, u64 id,
int count, struct timespec time,
struct pstore_info *psi);
void *data;
};
-#define PSTORE_FLAGS_FRAGILE 1
+#define PSTORE_FLAGS_DMESG (1 << 0)
+#define PSTORE_FLAGS_FRAGILE PSTORE_FLAGS_DMESG
+#define PSTORE_FLAGS_CONSOLE (1 << 1)
+#define PSTORE_FLAGS_FTRACE (1 << 2)
+#define PSTORE_FLAGS_PMSG (1 << 3)
extern int pstore_register(struct pstore_info *);
extern void pstore_unregister(struct pstore_info *);
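
A hedged sketch of a backend advertising which record types it handles with the new PSTORE_FLAGS_* bits before registering. This assumes the pstore_info structure exposes a flags field for these bits (as the ramoops backend uses); the callbacks a real backend must fill in are elided.

#include <linux/pstore.h>

static struct pstore_info example_psinfo; /* .name/.open/.read/.write etc. elided */

static int example_register_backend(void)
{
	/* assumption: struct pstore_info carries a flags member for these bits */
	example_psinfo.flags = PSTORE_FLAGS_DMESG | PSTORE_FLAGS_CONSOLE;
	return pstore_register(&example_psinfo);
}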
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 4660aaa3195e..c668c861c96c 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -17,11 +17,12 @@
#ifndef __LINUX_PSTORE_RAM_H__
#define __LINUX_PSTORE_RAM_H__
+#include <linux/compiler.h>
#include <linux/device.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/types.h>
-#include <linux/init.h>
struct persistent_ram_buffer;
struct rs_control;
@@ -59,7 +60,9 @@ void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
- unsigned int count);
+ unsigned int count);
+int persistent_ram_write_user(struct persistent_ram_zone *prz,
+ const void __user *s, unsigned int count);
void persistent_ram_save_old(struct persistent_ram_zone *prz);
size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 6b15e168148a..5ad54fc66cf0 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -127,6 +127,11 @@ struct ptp_clock;
*
* @info: Structure describing the new clock.
* @parent: Pointer to the parent device of the new clock.
+ *
+ * Returns a valid pointer on success or PTR_ERR on failure. If PHC
+ * support is missing at the configuration level, this function
+ * returns NULL, and drivers are expected to gracefully handle that
+ * case separately.
*/
extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index f1bbae014889..2c6c5114c089 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -641,6 +641,7 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
#ifdef CONFIG_PWM_SYSFS
void pwmchip_sysfs_export(struct pwm_chip *chip);
void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
#else
static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
{
@@ -649,6 +650,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
}
+
+static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+}
#endif /* CONFIG_PWM_SYSFS */
#endif /* __LINUX_PWM_H */
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 2a097d176ba9..2d6f0c39ed68 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -83,7 +83,6 @@
#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */
#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */
-#ifdef CONFIG_ARCH_PXA
#define RX_THRESH_DFLT 8
#define TX_THRESH_DFLT 8
@@ -95,19 +94,16 @@
#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
-#else
-
-#define RX_THRESH_DFLT 2
-#define TX_THRESH_DFLT 2
+#define RX_THRESH_CE4100_DFLT 2
+#define TX_THRESH_CE4100_DFLT 2
-#define SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */
-#define SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */
+#define CE4100_SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */
+#define CE4100_SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */
-#define SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */
-#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */
-#define SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */
-#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
-#endif
+#define CE4100_SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */
+#define CE4100_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */
+#define CE4100_SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */
+#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
/* QUARK_X1000 SSCR0 bit definition */
#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 40c0ada01806..734deb094618 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -5,28 +5,77 @@
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
+#ifndef _COMMON_HSI_H
+#define _COMMON_HSI_H
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+/* dma_addr_t manip */
+#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
+#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
+#define DMA_REGPAIR_LE(x, val) do { \
+ (x).hi = DMA_HI_LE((val)); \
+ (x).lo = DMA_LO_LE((val)); \
+ } while (0)
+
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
+#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
+#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
-#define CORE_SPQE_PAGE_SIZE_BYTES 4096
#define X_FINAL_CLEANUP_AGG_INT 1
+
+#define EVENT_RING_PAGE_SIZE_BYTES 4096
+
#define NUM_OF_GLOBAL_QUEUES 128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
+
+#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define RDMA_CDU_TASK_SEG_TYPE 1
+
+#define FW_ASSERT_GENERAL_ATTN_IDX 32
+
+#define MAX_PINNED_CCFC 32
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 0
+#define MSTORM_QZONE_SIZE 16
#define USTORM_QZONE_SIZE 8
#define XSTORM_QZONE_SIZE 8
#define YSTORM_QZONE_SIZE 0
#define PSTORM_QZONE_SIZE 0
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF 16
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+
+/********************************/
+/* CORE (LIGHT L2) FW CONSTANTS */
+/********************************/
+
+#define CORE_LL2_MAX_RAMROD_PER_CON 8
+#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1
+
+#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12
+
+#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+
+#define MAX_NUM_LL2_RX_QUEUES 32
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 10
-#define FW_REVISION_VERSION 5
+#define FW_REVISION_VERSION 10
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -83,6 +132,20 @@
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
+/* Clock values */
+#define MASTER_CLK_FREQ_E4 (375e6)
+#define STORM_CLK_FREQ_E4 (1000e6)
+#define CLK25M_CLK_FREQ_E4 (25e6)
+
+/* Global PXP windows (GTT) */
+#define NUM_OF_GTT 19
+#define GTT_DWORD_SIZE_BITS 10
+#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
+#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
+
+/* Tools Version */
+#define TOOLS_VERSION 10
+
/*****************/
/* CDU CONSTANTS */
/*****************/
@@ -90,6 +153,8 @@
#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
/*****************/
/* DQ CONSTANTS */
/*****************/
@@ -115,6 +180,11 @@
#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -159,13 +229,16 @@
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
/* XCM agg counter flag selection */
-#define DQ_XCM_CORE_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_DQ_CF_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
/* UCM agg counter flag selection (HW) */
#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -178,9 +251,45 @@
#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7
/* UCM agg counter flag selection (FW) */
-#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
-#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
-
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+/* TCM agg counter flag selection (HW) */
+#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_TCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_TCM_AGG_FLG_SHIFT_CF2 2
+#define DQ_TCM_AGG_FLG_SHIFT_CF3 3
+#define DQ_TCM_AGG_FLG_SHIFT_CF4 4
+#define DQ_TCM_AGG_FLG_SHIFT_CF5 5
+#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
+#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
+/* TCM agg counter flag selection (FW) */
+#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+
+/* PWM address mapping */
+#define DQ_PWM_OFFSET_DPM_BASE 0x0
+#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM16_BASE 0x40
+#define DQ_PWM_OFFSET_XCM32_BASE 0x44
+#define DQ_PWM_OFFSET_UCM16_BASE 0x48
+#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
+#define DQ_PWM_OFFSET_UCM16_4 0x50
+#define DQ_PWM_OFFSET_TCM16_BASE 0x58
+#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
+#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
+
+#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4)
+#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
+#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
+#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -214,15 +323,17 @@
*/
#define CM_TX_PQ_BASE 0x200
+/* number of global Vport/QCN rate limiters */
+#define MAX_QM_GLOBAL_RLS 256
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
-#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
#define QM_BYTE_CRD_REG_WIDTH 24
-#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_SIGN_BIT BIT((QM_BYTE_CRD_REG_WIDTH - 1))
#define QM_WFQ_CRD_REG_WIDTH 32
-#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_SIGN_BIT BIT((QM_WFQ_CRD_REG_WIDTH - 1))
#define QM_RL_CRD_REG_WIDTH 32
-#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_SIGN_BIT BIT((QM_RL_CRD_REG_WIDTH - 1))
/*****************/
/* CAU CONSTANTS */
@@ -287,6 +398,17 @@
/* PXP CONSTANTS */
/*****************/
+/* Bars for Blocks */
+#define PXP_BAR_GRC 0
+#define PXP_BAR_TSDM 0
+#define PXP_BAR_USDM 0
+#define PXP_BAR_XSDM 0
+#define PXP_BAR_MSDM 0
+#define PXP_BAR_YSDM 0
+#define PXP_BAR_PSDM 0
+#define PXP_BAR_IGU 0
+#define PXP_BAR_DQ 1
+
/* PTT and GTT */
#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
@@ -334,6 +456,52 @@
(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+/* PF BAR */
+#define PXP_BAR0_START_GRC 0x0000
+#define PXP_BAR0_GRC_LENGTH 0x1C00000
+#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \
+ PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU 0x1C00000
+#define PXP_BAR0_IGU_LENGTH 0x10000
+#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \
+ PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM 0x1C80000
+#define PXP_BAR0_SDM_LENGTH 0x40000
+#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
+#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_MSDM 0x1D00000
+#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_USDM 0x1D80000
+#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_XSDM 0x1E00000
+#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_YSDM 0x1E80000
+#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_PSDM 0x1F00000
+#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \
+ PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
+
+/* VF BAR */
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
+ PXP_VF_BAR0_GRC_LENGTH - 1)
#define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
@@ -399,6 +567,20 @@
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+#define PXP_QUEUES_ZONE_MAX_NUM 320
+/*****************/
+/* PRM CONSTANTS */
+/*****************/
+#define PRM_DMA_PAD_BYTES_NUM 2
+/******************/
+/* SDMs CONSTANTS */
+/******************/
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
+#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
#define SDM_COMP_TYPE_NONE 0
#define SDM_COMP_TYPE_WAKE_THREAD 1
@@ -424,6 +606,8 @@
/* PRS CONSTANTS */
/*****************/
+#define PRS_GFT_CAM_LINES_NO_MATCH 31
+
/* Async data KCQ CQE */
struct async_data {
__le32 cid;
@@ -440,20 +624,6 @@ struct coalescing_timeset {
#define COALESCING_TIMESET_VALID_SHIFT 7
};
-struct common_prs_pf_msg_info {
- __le32 value;
-#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_NPAR_DEFAULT_PF_SHIFT 0
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_1_SHIFT 1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_2_SHIFT 2
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_MASK 0x1
-#define COMMON_PRS_PF_MSG_INFO_FW_DEBUG_3_SHIFT 3
-#define COMMON_PRS_PF_MSG_INFO_RESERVED_MASK 0xFFFFFFF
-#define COMMON_PRS_PF_MSG_INFO_RESERVED_SHIFT 4
-};
-
struct common_queue_zone {
__le16 ring_drv_data_consumer;
__le16 reserved;
@@ -473,6 +643,19 @@ struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
+struct iscsi_eqe_data {
+ __le32 cid;
+ __le16 conn_id;
+ u8 error_code;
+ u8 error_pdu_opcode_reserved;
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1
+#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
+};
+
struct malicious_vf_eqe_data {
u8 vf_id;
u8 err_id;
@@ -488,8 +671,10 @@ struct initial_cleanup_eqe_data {
union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
+ struct iscsi_eqe_data iscsi_info;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
+ struct regpair roce_handle;
};
/* Event Ring Entry */
@@ -616,6 +801,52 @@ enum db_dest {
MAX_DB_DEST
};
+/* Enum of doorbell DPM types */
+enum db_dpm_type {
+ DPM_LEGACY,
+ DPM_ROCE,
+ DPM_L2_INLINE,
+ DPM_L2_BD,
+ MAX_DB_DPM_TYPE
+};
+
+/* Structure for doorbell data, in L2 DPM mode, for 1st db in a DPM burst */
+struct db_l2_dpm_data {
+ __le16 icid;
+ __le16 bd_prod;
+ __le32 params;
+#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
+#define DB_L2_DPM_DATA_SIZE_SHIFT 0
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
+#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
+#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
+#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+};
+
+/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
+struct db_l2_dpm_sge {
+ struct regpair addr;
+ __le16 nbytes;
+ __le16 bitfields;
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
+#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
+#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
+#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
+ __le32 reserved2;
+};
+
/* Structure for doorbell address, in legacy mode */
struct db_legacy_addr {
__le32 addr;
@@ -627,6 +858,49 @@ struct db_legacy_addr {
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
+/* Structure for doorbell address, in PWM mode */
+struct db_pwm_addr {
+ __le32 addr;
+#define DB_PWM_ADDR_RESERVED0_MASK 0x7
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+#define DB_PWM_ADDR_OFFSET_MASK 0x7F
+#define DB_PWM_ADDR_OFFSET_SHIFT 3
+#define DB_PWM_ADDR_WID_MASK 0x3
+#define DB_PWM_ADDR_WID_SHIFT 10
+#define DB_PWM_ADDR_DPI_MASK 0xFFFF
+#define DB_PWM_ADDR_DPI_SHIFT 12
+#define DB_PWM_ADDR_RESERVED1_MASK 0xF
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+};
+
+/* Parameters to RoCE firmware, passed in EDPM doorbell */
+struct db_roce_dpm_params {
+ __le32 params;
+#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
+};
+
+/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
+struct db_roce_dpm_data {
+ __le16 icid;
+ __le16 prod_val;
+ struct db_roce_dpm_params params;
+};
+
/* Igu interrupt command */
enum igu_int_cmd {
IGU_INT_ENABLE = 0,
@@ -764,6 +1038,19 @@ struct pxp_ptt_entry {
struct pxp_pretend_cmd pretend;
};
+/* VF Zone A Permission Register. */
+struct pxp_vf_zone_a_permission {
+ __le32 control;
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+};
+
/* RSS hash type */
struct rdif_task_context {
__le32 initial_ref_tag;
@@ -831,6 +1118,7 @@ struct rdif_task_context {
__le32 reserved2;
};
+/* RSS hash type */
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
RSS_HASH_TYPE_IPV4 = 1,
@@ -942,7 +1230,7 @@ struct tdif_task_context {
};
struct timers_context {
- __le32 logical_client0;
+ __le32 logical_client_0;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
@@ -951,7 +1239,7 @@ struct timers_context {
#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
- __le32 logical_client1;
+ __le32 logical_client_1;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
@@ -960,7 +1248,7 @@ struct timers_context {
#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
- __le32 logical_client2;
+ __le32 logical_client_2;
#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
@@ -978,3 +1266,4 @@ struct timers_context {
#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
};
#endif /* __COMMON_HSI__ */
+#endif
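
/* Editor's note (illustrative sketch, not part of the patch): the *_MASK /
 * *_SHIFT pairs added above are meant to be combined with token-pasting
 * field helpers; the qed driver keeps GET_FIELD/SET_FIELD-style macros for
 * this internally. The helper and function below are hypothetical and only
 * show the intended usage pattern, using the iscsi_eqe_data layout defined
 * in this header.
 */
#define EXAMPLE_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

/* Pull the offending PDU opcode out of an iscsi_eqe_data entry. */
static inline u8 example_iscsi_eqe_pdu_opcode(const struct iscsi_eqe_data *data)
{
	return EXAMPLE_GET_FIELD(data->error_pdu_opcode_reserved,
				 ISCSI_EQE_DATA_ERROR_PDU_OPCODE);
}
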
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index b5ebc697d05f..1aa0727c4136 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -13,9 +13,12 @@
/* ETH FW CONSTANTS */
/********************/
#define ETH_HSI_VER_MAJOR 3
-#define ETH_HSI_VER_MINOR 0
-#define ETH_CACHE_LINE_SIZE 64
+#define ETH_HSI_VER_MINOR 10
+
+#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
+#define ETH_CACHE_LINE_SIZE 64
+#define ETH_RX_CQE_GAP 32
#define ETH_MAX_RAMROD_PER_CON 8
#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
@@ -24,15 +27,25 @@
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
#define ETH_TX_MAX_LSO_HDR_NBD 4
#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
+#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
+#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
+#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
+#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
@@ -59,6 +72,8 @@
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+/* Control frame check constants */
+#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
struct eth_tx_1st_bd_flags {
u8 bitfields;
@@ -82,10 +97,10 @@ struct eth_tx_1st_bd_flags {
/* The parsing information data for the first tx bd of a given packet. */
struct eth_tx_data_1st_bd {
- __le16 vlan;
- u8 nbds;
- struct eth_tx_1st_bd_flags bd_flags;
- __le16 bitfields;
+ __le16 vlan;
+ u8 nbds;
+ struct eth_tx_1st_bd_flags bd_flags;
+ __le16 bitfields;
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
@@ -96,7 +111,7 @@ struct eth_tx_data_1st_bd {
/* The parsing information data for the second tx bd of a given packet. */
struct eth_tx_data_2nd_bd {
- __le16 tunn_ip_size;
+ __le16 tunn_ip_size;
__le16 bitfields1;
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
@@ -125,9 +140,14 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
+/* Firmware data for L2-EDPM packet. */
+struct eth_edpm_fw_data {
+ struct eth_tx_data_1st_bd data_1st_bd;
+ struct eth_tx_data_2nd_bd data_2nd_bd;
+ __le32 reserved;
+};
+
struct eth_fast_path_cqe_fw_debug {
- u8 reserved0;
- u8 reserved1;
__le16 reserved2;
};
@@ -148,6 +168,17 @@ struct eth_tunnel_parsing_flags {
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
};
+/* PMD flow control bits */
+struct eth_pmd_flow_flags {
+ u8 flags;
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+};
+
/* Regular ETH Rx FP CQE. */
struct eth_fast_path_rx_reg_cqe {
u8 type;
@@ -166,64 +197,63 @@ struct eth_fast_path_rx_reg_cqe {
u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
- u8 reserved[7];
+ u8 reserved[9];
struct eth_fast_path_cqe_fw_debug fw_debug;
u8 reserved1[3];
- u8 flags;
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
+ struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-continue ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_cont_cqe {
- u8 type;
- u8 tpa_agg_index;
- __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
- u8 reserved[5];
- u8 reserved1;
- __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 type;
+ u8 tpa_agg_index;
+ __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved;
+ u8 reserved1;
+ __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved3[3];
+ struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-end ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_end_cqe {
- u8 type;
- u8 tpa_agg_index;
- __le16 total_packet_len;
- u8 num_of_bds;
- u8 end_reason;
- __le16 num_of_coalesced_segs;
- __le32 ts_delta;
- __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
- u8 reserved1[3];
- u8 reserved2;
- __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ u8 type;
+ u8 tpa_agg_index;
+ __le16 total_packet_len;
+ u8 num_of_bds;
+ u8 end_reason;
+ __le16 num_of_coalesced_segs;
+ __le32 ts_delta;
+ __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ __le16 reserved1;
+ u8 reserved2;
+ struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-start ETH Rx FP CQE. */
struct eth_fast_path_rx_tpa_start_cqe {
- u8 type;
- u8 bitfields;
+ u8 type;
+ u8 bitfields;
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
- __le16 seg_len;
+ __le16 seg_len;
struct parsing_and_err_flags pars_flags;
- __le16 vlan_tag;
- __le32 rss_hash;
- __le16 len_on_first_bd;
- u8 placement_offset;
+ __le16 vlan_tag;
+ __le32 rss_hash;
+ __le16 len_on_first_bd;
+ u8 placement_offset;
struct eth_tunnel_parsing_flags tunnel_pars_flags;
- u8 tpa_agg_index;
- u8 header_len;
- __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+ u8 tpa_agg_index;
+ u8 header_len;
+ __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
struct eth_fast_path_cqe_fw_debug fw_debug;
+ u8 reserved;
+ struct eth_pmd_flow_flags pmd_flags;
};
/* The L4 pseudo checksum mode for Ethernet */
@@ -245,15 +275,7 @@ struct eth_slow_path_rx_cqe {
u8 reserved[25];
__le16 echo;
u8 reserved1;
- u8 flags;
-/* for PMD mode - valid indication */
-#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
-#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
-/* for PMD mode - valid toggle indication */
-#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
-#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
-#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
-#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
+ struct eth_pmd_flow_flags pmd_flags;
};
/* union for all ETH Rx CQE types */
@@ -276,6 +298,11 @@ enum eth_rx_cqe_type {
MAX_ETH_RX_CQE_TYPE
};
+struct eth_rx_pmd_cqe {
+ union eth_rx_cqe cqe;
+ u8 reserved[ETH_RX_CQE_GAP];
+};
+
enum eth_rx_tunn_type {
ETH_RX_NO_TUNN,
ETH_RX_TUNN_GENEVE,
@@ -313,8 +340,8 @@ struct eth_tx_2nd_bd {
/* The parsing information data for the third tx bd of a given packet. */
struct eth_tx_data_3rd_bd {
- __le16 lso_mss;
- __le16 bitfields;
+ __le16 lso_mss;
+ __le16 bitfields;
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
@@ -323,8 +350,8 @@ struct eth_tx_data_3rd_bd {
#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
- u8 tunn_l4_hdr_start_offset_w;
- u8 tunn_hdr_size_w;
+ u8 tunn_l4_hdr_start_offset_w;
+ u8 tunn_hdr_size_w;
};
/* The third tx bd of a given packet */
@@ -355,10 +382,10 @@ struct eth_tx_bd {
};
union eth_tx_bd_types {
- struct eth_tx_1st_bd first_bd;
- struct eth_tx_2nd_bd second_bd;
- struct eth_tx_3rd_bd third_bd;
- struct eth_tx_bd reg_bd;
+ struct eth_tx_1st_bd first_bd;
+ struct eth_tx_2nd_bd second_bd;
+ struct eth_tx_3rd_bd third_bd;
+ struct eth_tx_bd reg_bd;
};
/* Mstorm Queue Zone */
@@ -389,8 +416,8 @@ struct eth_db_data {
#define ETH_DB_DATA_RESERVED_SHIFT 5
#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
- u8 agg_flags;
- __le16 bd_prod;
+ u8 agg_flags;
+ __le16 bd_prod;
};
#endif /* __ETH_COMMON__ */
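
/* Editor's note (illustrative sketch, not part of the patch): the new
 * eth_pmd_flow_flags replaces the open-coded VALID/VALID_TOGGLE bits removed
 * from the CQE structures above. Below is one plausible way a poll-mode
 * consumer checks it; the expected toggle value and the helper name are
 * assumptions of this sketch.
 */
static inline bool example_pmd_cqe_is_new(struct eth_pmd_flow_flags flags,
					  u8 expected_toggle)
{
	u8 valid = (flags.flags >> ETH_PMD_FLOW_FLAGS_VALID_SHIFT) &
		   ETH_PMD_FLOW_FLAGS_VALID_MASK;
	u8 toggle = (flags.flags >> ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT) &
		    ETH_PMD_FLOW_FLAGS_TOGGLE_MASK;

	return valid && toggle == expected_toggle;
}
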
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index b3c0feb15ae9..8f64b1223c2f 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -311,7 +311,7 @@ struct iscsi_login_req_hdr {
#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
- __le32 isid_TABC;
+ __le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
__le32 itt;
@@ -464,7 +464,7 @@ struct iscsi_login_response_hdr {
#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
- __le32 isid_TABC;
+ __le32 isid_tabc;
__le16 tsih;
__le16 isid_d;
__le32 itt;
@@ -688,8 +688,7 @@ union iscsi_cqe {
enum iscsi_cqes_type {
ISCSI_CQE_TYPE_SOLICITED = 1,
ISCSI_CQE_TYPE_UNSOLICITED,
- ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE
- ,
+ ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE,
ISCSI_CQE_TYPE_TASK_CLEANUP,
ISCSI_CQE_TYPE_DUMMY,
MAX_ISCSI_CQES_TYPE
@@ -769,9 +768,9 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_UPDATE_CONN,
ISCSI_EVENT_TYPE_CLEAR_SQ,
ISCSI_EVENT_TYPE_TERMINATE_CONN,
+ ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
- RESERVED8,
RESERVED9,
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
@@ -867,6 +866,7 @@ enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UPDATE_CONN = 4,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
+ ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
MAX_ISCSI_RAMROD_CMD_ID
};
@@ -883,6 +883,16 @@ union iscsi_seq_num {
__le16 r2t_sn;
};
+struct iscsi_spe_conn_mac_update {
+ struct iscsi_slow_path_hdr hdr;
+ __le16 conn_id;
+ __le32 fw_cid;
+ __le16 remote_mac_addr_lo;
+ __le16 remote_mac_addr_mid;
+ __le16 remote_mac_addr_hi;
+ u8 reserved0[2];
+};
+
struct iscsi_spe_conn_offload {
struct iscsi_slow_path_hdr hdr;
__le16 conn_id;
@@ -1302,14 +1312,6 @@ struct mstorm_iscsi_stats_drv {
struct regpair iscsi_rx_dropped_pdus_task_not_valid;
};
-struct ooo_opaque {
- __le32 cid;
- u8 drop_isle;
- u8 drop_size;
- u8 ooo_opcode;
- u8 ooo_isle;
-};
-
struct pstorm_iscsi_stats_drv {
struct regpair iscsi_tx_bytes_cnt;
struct regpair iscsi_tx_packet_cnt;
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 7e441bdeabdc..72d88cf3ca25 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -16,19 +16,6 @@
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
-/* dma_addr_t manip */
-#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
-#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
-#define DMA_REGPAIR_LE(x, val) do { \
- (x).hi = DMA_HI_LE((val)); \
- (x).lo = DMA_LO_LE((val)); \
- } while (0)
-
-#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
-#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
-#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
-
enum qed_chain_mode {
/* Each Page contains a next pointer at its end */
QED_CHAIN_MODE_NEXT_PTR,
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 4475a9d8ae15..33c24ebc9b7f 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -23,6 +23,9 @@ struct qed_dev_eth_info {
u8 port_mac[ETH_ALEN];
u8 num_vlan_filters;
+
+ /* Legacy VF - this affects the datapath, so qede has to know */
+ bool is_legacy;
};
struct qed_update_vport_rss_params {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index d6c4177df7cb..f9ae903bbb84 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -34,6 +34,8 @@ enum dcbx_protocol_type {
DCBX_MAX_PROTOCOL_TYPE
};
+#define QED_ROCE_PROTOCOL_INDEX (3)
+
#ifdef CONFIG_DCB
#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
@@ -260,15 +262,15 @@ struct qed_dev_info {
/* MFW version */
u32 mfw_rev;
- bool rdma_supported;
-
u32 flash_size;
u8 mf_mode;
bool tx_switching;
+ bool rdma_supported;
};
enum qed_sb_type {
QED_SB_TYPE_L2_QUEUE,
+ QED_SB_TYPE_CNQ,
};
enum qed_protocol {
@@ -276,6 +278,21 @@ enum qed_protocol {
QED_PROTOCOL_ISCSI,
};
+enum qed_link_mode_bits {
+ QED_LM_FIBRE_BIT = BIT(0),
+ QED_LM_Autoneg_BIT = BIT(1),
+ QED_LM_Asym_Pause_BIT = BIT(2),
+ QED_LM_Pause_BIT = BIT(3),
+ QED_LM_1000baseT_Half_BIT = BIT(4),
+ QED_LM_1000baseT_Full_BIT = BIT(5),
+ QED_LM_10000baseKR_Full_BIT = BIT(6),
+ QED_LM_25000baseKR_Full_BIT = BIT(7),
+ QED_LM_40000baseLR4_Full_BIT = BIT(8),
+ QED_LM_50000baseKR2_Full_BIT = BIT(9),
+ QED_LM_100000baseKR4_Full_BIT = BIT(10),
+ QED_LM_COUNT = 11
+};
+
struct qed_link_params {
bool link_up;
@@ -303,9 +320,11 @@ struct qed_link_params {
struct qed_link_output {
bool link_up;
- u32 supported_caps; /* In SUPPORTED defs */
- u32 advertised_caps; /* In ADVERTISED defs */
- u32 lp_caps; /* In ADVERTISED defs */
+ /* In QED_LM_* defs */
+ u32 supported_caps;
+ u32 advertised_caps;
+ u32 lp_caps;
+
u32 speed; /* In Mb/s */
u8 duplex; /* In DUPLEX defs */
u8 port; /* In PORT defs */
@@ -438,6 +457,10 @@ struct qed_common_ops {
void (*simd_handler_clean)(struct qed_dev *cdev,
int index);
+ int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
+
+ int (*dbg_all_data_size) (struct qed_dev *cdev);
+
/**
* @brief can_link_change - can the instance change the link or not
*
@@ -606,8 +629,9 @@ enum DP_MODULE {
QED_MSG_SP = 0x100000,
QED_MSG_STORAGE = 0x200000,
QED_MSG_CXT = 0x800000,
+ QED_MSG_LL2 = 0x1000000,
QED_MSG_ILT = 0x2000000,
- QED_MSG_ROCE = 0x4000000,
+ QED_MSG_RDMA = 0x4000000,
QED_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
new file mode 100644
index 000000000000..fd75c265dba3
--- /dev/null
+++ b/include/linux/qed/qed_ll2_if.h
@@ -0,0 +1,139 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_LL2_IF_H
+#define _QED_LL2_IF_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+
+struct qed_ll2_stats {
+ u64 gsi_invalid_hdr;
+ u64 gsi_invalid_pkt_length;
+ u64 gsi_unsupported_pkt_typ;
+ u64 gsi_crcchksm_error;
+
+ u64 packet_too_big_discard;
+ u64 no_buff_discard;
+
+ u64 rcv_ucast_bytes;
+ u64 rcv_mcast_bytes;
+ u64 rcv_bcast_bytes;
+ u64 rcv_ucast_pkts;
+ u64 rcv_mcast_pkts;
+ u64 rcv_bcast_pkts;
+
+ u64 sent_ucast_bytes;
+ u64 sent_mcast_bytes;
+ u64 sent_bcast_bytes;
+ u64 sent_ucast_pkts;
+ u64 sent_mcast_pkts;
+ u64 sent_bcast_pkts;
+};
+
+#define QED_LL2_UNUSED_HANDLE (0xff)
+
+struct qed_ll2_cb_ops {
+ int (*rx_cb)(void *, struct sk_buff *, u32, u32);
+ int (*tx_cb)(void *, struct sk_buff *, bool);
+};
+
+struct qed_ll2_params {
+ u16 mtu;
+ bool drop_ttl0_packets;
+ bool rx_vlan_stripping;
+ u8 tx_tc;
+ bool frags_mapped;
+ u8 ll2_mac_address[ETH_ALEN];
+};
+
+struct qed_ll2_ops {
+/**
+ * @brief start - initializes ll2
+ *
+ * @param cdev
+ * @param params - protocol driver configuration for the ll2.
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
+
+/**
+ * @brief stop - stops the ll2
+ *
+ * @param cdev
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*stop)(struct qed_dev *cdev);
+
+/**
+ * @brief start_xmit - transmits an skb over the ll2 interface
+ *
+ * @param cdev
+ * @param skb
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
+
+/**
+ * @brief register_cb_ops - register the protocol driver's Rx/Tx packet
+ * callbacks. Should be called before `start'.
+ *
+ * @param cdev
+ * @param cookie - to be passed to the callback functions.
+ * @param ops - the callback functions to register for Rx / Tx.
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ void (*register_cb_ops)(struct qed_dev *cdev,
+ const struct qed_ll2_cb_ops *ops,
+ void *cookie);
+
+/**
+ * @brief get LL2 related statistics
+ *
+ * @param cdev
+ * @param stats - pointer to a struct that will be filled with the stats
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
+};
+
+#ifdef CONFIG_QED_LL2
+int qed_ll2_alloc_if(struct qed_dev *);
+void qed_ll2_dealloc_if(struct qed_dev *);
+#else
+static const struct qed_ll2_ops qed_ll2_ops_pass = {
+ .start = NULL,
+ .stop = NULL,
+ .start_xmit = NULL,
+ .register_cb_ops = NULL,
+ .get_stats = NULL,
+};
+
+static inline int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+ return 0;
+}
+
+static inline void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+}
+#endif
+#endif
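
/* Editor's note (illustrative sketch, not part of the patch): a minimal
 * light-L2 bring-up sequence for a protocol driver. How the qed_ll2_ops
 * pointer, the qed_dev and the cookie are obtained is outside this header
 * and assumed here; per the kernel-doc above, callbacks are registered
 * before start(). All "example_*" names are hypothetical.
 */
#include <linux/qed/qed_ll2_if.h>

static int example_ll2_rx_cb(void *cookie, struct sk_buff *skb, u32 arg0, u32 arg1)
{
	/* Hand the received skb to the protocol layer; here it is just dropped. */
	dev_kfree_skb_any(skb);
	return 0;
}

static int example_ll2_tx_cb(void *cookie, struct sk_buff *skb, bool success)
{
	/* Assumption of this sketch: the protocol driver owns the skb at completion. */
	dev_kfree_skb_any(skb);
	return 0;
}

static const struct qed_ll2_cb_ops example_ll2_cbs = {
	.rx_cb = example_ll2_rx_cb,
	.tx_cb = example_ll2_tx_cb,
};

static int example_ll2_bring_up(const struct qed_ll2_ops *ops,
				struct qed_dev *cdev, void *cookie)
{
	struct qed_ll2_params params = {
		.mtu			= 1500,
		.drop_ttl0_packets	= true,
		.rx_vlan_stripping	= true,
		.tx_tc			= 0,
	};

	/* Register callbacks first, then start the light-L2 connection. */
	ops->register_cb_ops(cdev, &example_ll2_cbs, cookie);
	return ops->start(cdev, &params);
}
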
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
new file mode 100644
index 000000000000..53047d3fa678
--- /dev/null
+++ b/include/linux/qed/qed_roce_if.h
@@ -0,0 +1,604 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_ROCE_IF_H
+#define _QED_ROCE_IF_H
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include <linux/qed/rdma_common.h>
+
+enum qed_roce_ll2_tx_dest {
+ /* Light L2 TX Destination to the Network */
+ QED_ROCE_LL2_TX_DEST_NW,
+
+ /* Light L2 TX Destination to the Loopback */
+ QED_ROCE_LL2_TX_DEST_LB,
+ QED_ROCE_LL2_TX_DEST_MAX
+};
+
+#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
+
+/* rdma interface */
+
+enum qed_roce_qp_state {
+ QED_ROCE_QP_STATE_RESET,
+ QED_ROCE_QP_STATE_INIT,
+ QED_ROCE_QP_STATE_RTR,
+ QED_ROCE_QP_STATE_RTS,
+ QED_ROCE_QP_STATE_SQD,
+ QED_ROCE_QP_STATE_ERR,
+ QED_ROCE_QP_STATE_SQE
+};
+
+enum qed_rdma_tid_type {
+ QED_RDMA_TID_REGISTERED_MR,
+ QED_RDMA_TID_FMR,
+ QED_RDMA_TID_MW_TYPE1,
+ QED_RDMA_TID_MW_TYPE2A
+};
+
+struct qed_rdma_events {
+ void *context;
+ void (*affiliated_event)(void *context, u8 fw_event_code,
+ void *fw_handle);
+ void (*unaffiliated_event)(void *context, u8 event_code);
+};
+
+struct qed_rdma_device {
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u64 fw_ver;
+
+ u64 node_guid;
+ u64 sys_image_guid;
+
+ u8 max_cnq;
+ u8 max_sge;
+ u8 max_srq_sge;
+ u16 max_inline;
+ u32 max_wqe;
+ u32 max_srq_wqe;
+ u8 max_qp_resp_rd_atomic_resc;
+ u8 max_qp_req_rd_atomic_resc;
+ u64 max_dev_resp_rd_atomic_resc;
+ u32 max_cq;
+ u32 max_qp;
+ u32 max_srq;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_cqe;
+ u32 max_mw;
+ u32 max_fmr;
+ u32 max_mr_mw_fmr_pbl;
+ u64 max_mr_mw_fmr_size;
+ u32 max_pd;
+ u32 max_ah;
+ u8 max_pkey;
+ u16 max_srq_wr;
+ u8 max_stats_queues;
+ u32 dev_caps;
+
+	/* Ability to support RNR-NAK generation */
+
+#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
+#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
+	/* Ability to support shutdown port */
+#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
+#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
+	/* Ability to support port active event */
+#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
+#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
+	/* Ability to support port change event */
+#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
+#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
+	/* Ability to support system image GUID */
+#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
+#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
+	/* Ability to support a bad P_Key counter */
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
+	/* Ability to support atomic operations */
+#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
+#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
+#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
+#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
+	/* Ability to support modifying the maximum number of
+ * outstanding work requests per QP
+ */
+#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
+#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
+	/* Ability to support automatic path migration */
+#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
+#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
+	/* Ability to support the base memory management extensions */
+#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
+#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
+	/* Ability to support multiple page sizes per memory region */
+#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
+	/* Ability to support block list physical buffer list */
+#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
+#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
+	/* Ability to support zero based virtual addresses */
+#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1
+#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14
+	/* Ability to support local invalidate fencing */
+#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
+#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
+	/* Ability to support Loopback on QP */
+#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
+#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
+ u64 page_size_caps;
+ u8 dev_ack_delay;
+ u32 reserved_lkey;
+ u32 bad_pkey_counter;
+ struct qed_rdma_events events;
+};
+
+enum qed_port_state {
+ QED_RDMA_PORT_UP,
+ QED_RDMA_PORT_DOWN,
+};
+
+enum qed_roce_capability {
+ QED_ROCE_V1 = 1 << 0,
+ QED_ROCE_V2 = 1 << 1,
+};
+
+struct qed_rdma_port {
+ enum qed_port_state port_state;
+ int link_speed;
+ u64 max_msg_size;
+ u8 source_gid_table_len;
+ void *source_gid_table_ptr;
+ u8 pkey_table_len;
+ void *pkey_table_ptr;
+ u32 pkey_bad_counter;
+ enum qed_roce_capability capability;
+};
+
+struct qed_rdma_cnq_params {
+ u8 num_pbl_pages;
+ u64 pbl_ptr;
+};
+
+/* The CQ Mode affects the CQ doorbell transaction size.
+ * 64/32 bit machines should configure to 32/16 bits respectively.
+ */
+enum qed_rdma_cq_mode {
+ QED_RDMA_CQ_MODE_16_BITS,
+ QED_RDMA_CQ_MODE_32_BITS,
+};
+
+struct qed_roce_dcqcn_params {
+ u8 notification_point;
+ u8 reaction_point;
+
+ /* fields for notification point */
+ u32 cnp_send_timeout;
+
+ /* fields for reaction point */
+ u32 rl_bc_rate;
+ u16 rl_max_rate;
+ u16 rl_r_ai;
+ u16 rl_r_hai;
+ u16 dcqcn_g;
+ u32 dcqcn_k_us;
+ u32 dcqcn_timeout_us;
+};
+
+struct qed_rdma_start_in_params {
+ struct qed_rdma_events *events;
+ struct qed_rdma_cnq_params cnq_pbl_list[128];
+ u8 desired_cnq;
+ enum qed_rdma_cq_mode cq_mode;
+ struct qed_roce_dcqcn_params dcqcn_params;
+ u16 max_mtu;
+ u8 mac_addr[ETH_ALEN];
+ u8 iwarp_flags;
+};
+
+struct qed_rdma_add_user_out_params {
+ u16 dpi;
+ u64 dpi_addr;
+ u64 dpi_phys_addr;
+ u32 dpi_size;
+};
+
+enum roce_mode {
+ ROCE_V1,
+ ROCE_V2_IPV4,
+ ROCE_V2_IPV6,
+ MAX_ROCE_MODE
+};
+
+union qed_gid {
+ u8 bytes[16];
+ u16 words[8];
+ u32 dwords[4];
+ u64 qwords[2];
+ u32 ipv4_addr;
+};
+
+struct qed_rdma_register_tid_in_params {
+ u32 itid;
+ enum qed_rdma_tid_type tid_type;
+ u8 key;
+ u16 pd;
+ bool local_read;
+ bool local_write;
+ bool remote_read;
+ bool remote_write;
+ bool remote_atomic;
+ bool mw_bind;
+ u64 pbl_ptr;
+ bool pbl_two_level;
+ u8 pbl_page_size_log;
+ u8 page_size_log;
+ u32 fbo;
+ u64 length;
+ u64 vaddr;
+ bool zbva;
+ bool phy_mr;
+ bool dma_mr;
+
+ bool dif_enabled;
+ u64 dif_error_addr;
+ u64 dif_runt_addr;
+};
+
+struct qed_rdma_create_cq_in_params {
+ u32 cq_handle_lo;
+ u32 cq_handle_hi;
+ u32 cq_size;
+ u16 dpi;
+ bool pbl_two_level;
+ u64 pbl_ptr;
+ u16 pbl_num_pages;
+ u8 pbl_page_size_log;
+ u8 cnq_id;
+ u16 int_timeout;
+};
+
+struct qed_rdma_create_srq_in_params {
+ u64 pbl_base_addr;
+ u64 prod_pair_addr;
+ u16 num_pages;
+ u16 pd_id;
+ u16 page_size;
+};
+
+struct qed_rdma_destroy_cq_in_params {
+ u16 icid;
+};
+
+struct qed_rdma_destroy_cq_out_params {
+ u16 num_cq_notif;
+};
+
+struct qed_rdma_create_qp_in_params {
+ u32 qp_handle_lo;
+ u32 qp_handle_hi;
+ u32 qp_handle_async_lo;
+ u32 qp_handle_async_hi;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+ u16 pd;
+ u16 dpi;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ u64 sq_pbl_ptr;
+ u8 max_sq_sges;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ u64 rq_pbl_ptr;
+ u16 srq_id;
+ u8 stats_queue;
+};
+
+struct qed_rdma_create_qp_out_params {
+ u32 qp_id;
+ u16 icid;
+ void *rq_pbl_virt;
+ dma_addr_t rq_pbl_phys;
+ void *sq_pbl_virt;
+ dma_addr_t sq_pbl_phys;
+};
+
+struct qed_rdma_modify_qp_in_params {
+ u32 modify_flags;
+#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
+#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
+#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
+#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
+#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
+#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
+#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
+#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
+#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
+#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
+#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
+#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
+#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
+
+ enum qed_roce_qp_state new_state;
+ u16 pkey;
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+ u32 dest_qp;
+ bool lb_indication;
+ u16 mtu;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u32 flow_label;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ u16 udp_src_port;
+
+ u16 vlan_id;
+
+ u32 rq_psn;
+ u32 sq_psn;
+ u8 max_rd_atomic_resp;
+ u8 max_rd_atomic_req;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+ bool use_local_mac;
+ enum roce_mode roce_mode;
+};
+
+struct qed_rdma_query_qp_out_params {
+ enum qed_roce_qp_state state;
+ u32 rq_psn;
+ u32 sq_psn;
+ bool draining;
+ u16 mtu;
+ u32 dest_qp;
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ u32 flow_label;
+ u8 hop_limit_ttl;
+ u8 traffic_class_tos;
+ u32 timeout;
+ u8 rnr_retry;
+ u8 retry_cnt;
+ u8 min_rnr_nak_timer;
+ u16 pkey_index;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ bool sqd_async;
+};
+
+struct qed_rdma_create_srq_out_params {
+ u16 srq_id;
+};
+
+struct qed_rdma_destroy_srq_in_params {
+ u16 srq_id;
+};
+
+struct qed_rdma_modify_srq_in_params {
+ u32 wqe_limit;
+ u16 srq_id;
+};
+
+struct qed_rdma_stats_out_params {
+ u64 sent_bytes;
+ u64 sent_pkts;
+ u64 rcv_bytes;
+ u64 rcv_pkts;
+};
+
+struct qed_rdma_counters_out_params {
+ u64 pd_count;
+ u64 max_pd;
+ u64 dpi_count;
+ u64 max_dpi;
+ u64 cq_count;
+ u64 max_cq;
+ u64 qp_count;
+ u64 max_qp;
+ u64 tid_count;
+ u64 max_tid;
+};
+
+#define QED_ROCE_TX_HEAD_FAILURE (1)
+#define QED_ROCE_TX_FRAG_FAILURE (2)
+
+struct qed_roce_ll2_header {
+ void *vaddr;
+ dma_addr_t baddr;
+ size_t len;
+};
+
+struct qed_roce_ll2_buffer {
+ dma_addr_t baddr;
+ size_t len;
+};
+
+struct qed_roce_ll2_packet {
+ struct qed_roce_ll2_header header;
+ int n_seg;
+ struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
+ int roce_mode;
+ enum qed_roce_ll2_tx_dest tx_dest;
+};
+
+struct qed_roce_ll2_tx_params {
+ int reserved;
+};
+
+struct qed_roce_ll2_rx_params {
+ u16 vlan_id;
+ u8 smac[ETH_ALEN];
+ int rc;
+};
+
+struct qed_roce_ll2_cbs {
+ void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
+
+ void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_rx_params *params);
+};
+
+struct qed_roce_ll2_params {
+ u16 max_rx_buffers;
+ u16 max_tx_buffers;
+ u16 mtu;
+ u8 mac_address[ETH_ALEN];
+ struct qed_roce_ll2_cbs cbs;
+ void *cb_cookie;
+};
+
+struct qed_roce_ll2_info {
+ u8 handle;
+ struct qed_roce_ll2_cbs cbs;
+ u8 mac_address[ETH_ALEN];
+ void *cb_cookie;
+
+ /* Lock to protect ll2 */
+ struct mutex lock;
+};
+
+enum qed_rdma_type {
+ QED_RDMA_TYPE_ROCE,
+};
+
+struct qed_dev_rdma_info {
+ struct qed_dev_info common;
+ enum qed_rdma_type rdma_type;
+};
+
+struct qed_rdma_ops {
+ const struct qed_common_ops *common;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info);
+ void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);
+
+ int (*rdma_init)(struct qed_dev *dev,
+ struct qed_rdma_start_in_params *iparams);
+
+ int (*rdma_add_user)(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *oparams);
+
+ void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
+ int (*rdma_stop)(void *rdma_cxt);
+ struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
+ struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
+ int (*rdma_get_start_sb)(struct qed_dev *cdev);
+ int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
+ void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
+ int (*rdma_get_rdma_int)(struct qed_dev *cdev,
+ struct qed_int_info *info);
+ int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
+ int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
+ void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
+ int (*rdma_create_cq)(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid);
+ int (*rdma_destroy_cq)(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *iparams,
+ struct qed_rdma_destroy_cq_out_params *oparams);
+ struct qed_rdma_qp *
+ (*rdma_create_qp)(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *iparams,
+ struct qed_rdma_create_qp_out_params *oparams);
+
+ int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *iparams);
+
+ int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *oparams);
+ int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
+ int
+ (*rdma_register_tid)(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *iparams);
+ int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
+ int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
+ void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
+ int (*roce_ll2_start)(struct qed_dev *cdev,
+ struct qed_roce_ll2_params *params);
+ int (*roce_ll2_stop)(struct qed_dev *cdev);
+ int (*roce_ll2_tx)(struct qed_dev *cdev,
+ struct qed_roce_ll2_packet *packet,
+ struct qed_roce_ll2_tx_params *params);
+ int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
+ struct qed_roce_ll2_buffer *buf,
+ u64 cookie, u8 notify_fw);
+ int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address);
+ int (*roce_ll2_stats)(struct qed_dev *cdev,
+ struct qed_ll2_stats *stats);
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void);
+
+#endif
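
/* Editor's note (illustrative sketch, not part of the patch): a minimal
 * start-up flow for an upper RoCE driver using the exported ops table.
 * The MTU, CNQ count and error handling are placeholders; cq_mode follows
 * the comment on enum qed_rdma_cq_mode (32-bit CQ doorbells on 64-bit
 * hosts, 16-bit on 32-bit hosts). All "example_*" names are hypothetical.
 */
#include <linux/qed/qed_roce_if.h>

static int example_roce_start(struct qed_dev *cdev,
			      struct qed_rdma_events *events, const u8 *mac)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_rdma_start_in_params *params;
	int rc;

	if (!ops)
		return -ENODEV;

	/* The start parameters are large (128 CNQ PBL slots), so allocate them. */
	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->events = events;
	params->desired_cnq = 1;
	params->cq_mode = IS_ENABLED(CONFIG_64BIT) ? QED_RDMA_CQ_MODE_32_BITS :
						     QED_RDMA_CQ_MODE_16_BITS;
	params->max_mtu = 4096;
	memcpy(params->mac_addr, mac, ETH_ALEN);

	rc = ops->rdma_init(cdev, params);

	kfree(params);
	return rc;
}
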
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
new file mode 100644
index 000000000000..99fbe6d55acb
--- /dev/null
+++ b/include/linux/qed/qede_roce.h
@@ -0,0 +1,88 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef QEDE_ROCE_H
+#define QEDE_ROCE_H
+
+struct qedr_dev;
+struct qed_dev;
+struct qede_dev;
+
+enum qede_roce_event {
+ QEDE_UP,
+ QEDE_DOWN,
+ QEDE_CHANGE_ADDR,
+ QEDE_CLOSE
+};
+
+struct qede_roce_event_work {
+ struct list_head list;
+ struct work_struct work;
+ void *ptr;
+ enum qede_roce_event event;
+};
+
+struct qedr_driver {
+ unsigned char name[32];
+
+ struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *,
+ struct net_device *);
+
+ void (*remove)(struct qedr_dev *);
+ void (*notify)(struct qedr_dev *, enum qede_roce_event);
+};
+
+/* APIs for the RoCE driver to register callback handlers, which will be
+ * invoked when the device is added or removed, or on ifup/ifdown
+ */
+int qede_roce_register_driver(struct qedr_driver *drv);
+void qede_roce_unregister_driver(struct qedr_driver *drv);
+
+bool qede_roce_supported(struct qede_dev *dev);
+
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+int qede_roce_dev_add(struct qede_dev *dev);
+void qede_roce_dev_event_open(struct qede_dev *dev);
+void qede_roce_dev_event_close(struct qede_dev *dev);
+void qede_roce_dev_remove(struct qede_dev *dev);
+void qede_roce_event_changeaddr(struct qede_dev *qedr);
+#else
+static inline int qede_roce_dev_add(struct qede_dev *dev)
+{
+ return 0;
+}
+
+static inline void qede_roce_dev_event_open(struct qede_dev *dev) {}
+static inline void qede_roce_dev_event_close(struct qede_dev *dev) {}
+static inline void qede_roce_dev_remove(struct qede_dev *dev) {}
+static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {}
+#endif
+#endif
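
/* Editor's note (illustrative sketch, not part of the patch): the shape of a
 * RoCE upper driver (such as qedr) hooking into these registration APIs.
 * All "example_*" names are hypothetical and the callback bodies are
 * placeholders.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/qed/qede_roce.h>

static struct qedr_dev *example_add(struct qed_dev *cdev, struct pci_dev *pdev,
				    struct net_device *ndev)
{
	/* Allocate and initialise the RoCE device here. */
	return NULL;
}

static void example_remove(struct qedr_dev *dev)
{
}

static void example_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
	/* React to QEDE_UP / QEDE_DOWN / QEDE_CHANGE_ADDR / QEDE_CLOSE. */
}

static struct qedr_driver example_drv = {
	.name	= "example_qedr",
	.add	= example_add,
	.remove	= example_remove,
	.notify	= example_notify,
};

static int __init example_init(void)
{
	return qede_roce_register_driver(&example_drv);
}

static void __exit example_exit(void)
{
	qede_roce_unregister_driver(&example_drv);
}

module_init(example_init);
module_exit(example_exit);
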
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 187991c1f439..7663725faa94 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -28,6 +28,7 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index accba0e6b704..dc3889d1bbe6 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -11,6 +11,14 @@
#define TCP_INVALID_TIMEOUT_VAL -1
+struct ooo_opaque {
+ __le32 cid;
+ u8 drop_isle;
+ u8 drop_size;
+ u8 ooo_opcode;
+ u8 ooo_isle;
+};
+
enum tcp_connect_mode {
TCP_CONNECT_ACTIVE,
TCP_CONNECT_PASSIVE,
@@ -18,14 +26,10 @@ enum tcp_connect_mode {
};
struct tcp_init_params {
- __le32 max_cwnd;
- __le16 dup_ack_threshold;
+ __le32 two_msl_timer;
__le16 tx_sws_timer;
- __le16 min_rto;
- __le16 min_rto_rt;
- __le16 max_rto;
u8 maxfinrt;
- u8 reserved[1];
+ u8 reserved[9];
};
enum tcp_ip_version {
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 4c45105dece3..af3581b8a451 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -280,9 +280,9 @@ bool __radix_tree_delete_node(struct radix_tree_root *root,
struct radix_tree_node *node);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
-struct radix_tree_node *radix_tree_replace_clear_tags(
- struct radix_tree_root *root,
- unsigned long index, void *entry);
+void radix_tree_clear_tags(struct radix_tree_root *root,
+ struct radix_tree_node *node,
+ void **slot);
unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
void **results, unsigned long first_index,
unsigned int max_items);
@@ -461,6 +461,14 @@ static inline struct radix_tree_node *entry_to_node(void *ptr)
*
* This function updates @iter->index in the case of a successful lookup.
* For tagged lookup it also eats @iter->tags.
+ *
+ * There are several cases where 'slot' can be passed in as NULL to this
+ * function. These cases result from the use of radix_tree_iter_next() or
+ * radix_tree_iter_retry(). In these cases we don't end up dereferencing
+ * 'slot' because either:
+ * a) we are doing tagged iteration and iter->tags has been set to 0, or
+ * b) we are doing non-tagged iteration, and iter->index and iter->next_index
+ * have been set up so that radix_tree_chunk_size() returns 1 or 0.
*/
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
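
/* Editor's note (illustrative sketch, not part of the patch): a lookup loop
 * that exercises the radix_tree_iter_retry() path described in the comment
 * above; 'root' and the per-entry handling are placeholders.
 */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static void example_walk(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		void *entry = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(entry)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		/* Use 'entry' here. */
	}
	rcu_read_unlock();
}
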
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index a0118d5929a9..4d57bbaaa1bf 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -102,7 +102,11 @@ extern const struct raid6_calls raid6_altivec8;
extern const struct raid6_calls raid6_avx2x1;
extern const struct raid6_calls raid6_avx2x2;
extern const struct raid6_calls raid6_avx2x4;
+extern const struct raid6_calls raid6_avx512x1;
+extern const struct raid6_calls raid6_avx512x2;
+extern const struct raid6_calls raid6_avx512x4;
extern const struct raid6_calls raid6_tilegx8;
+extern const struct raid6_calls raid6_s390vx8;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -115,6 +119,8 @@ struct raid6_recov_calls {
extern const struct raid6_recov_calls raid6_recov_intx1;
extern const struct raid6_recov_calls raid6_recov_ssse3;
extern const struct raid6_recov_calls raid6_recov_avx2;
+extern const struct raid6_recov_calls raid6_recov_avx512;
+extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/random.h b/include/linux/random.h
index 3d6e9815cd85..7bd2403e4fef 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -18,9 +18,20 @@ struct random_ready_callback {
};
extern void add_device_randomness(const void *, unsigned int);
+
+#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
+static inline void add_latent_entropy(void)
+{
+ add_device_randomness((const void *)&latent_entropy,
+ sizeof(latent_entropy));
+}
+#else
+static inline void add_latent_entropy(void) {}
+#endif
+
extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+ unsigned int value) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
@@ -34,7 +45,7 @@ extern const struct file_operations random_fops, urandom_fops;
unsigned int get_random_int(void);
unsigned long get_random_long(void);
-unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+unsigned long randomize_page(unsigned long start, unsigned long range);
u32 prandom_u32(void);
void prandom_bytes(void *buf, size_t nbytes);
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index a63a33e6196e..ece7ed9a4a70 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -59,6 +59,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
}
extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_enter_start(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1aa62e1a761b..321f9ed552a9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -334,6 +334,7 @@ void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
+void rcu_cpu_starting(unsigned int cpu);
#ifndef CONFIG_TINY_RCU
void rcu_end_inkernel_boot(void);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 2c12cc5af744..9adc7b21903d 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -241,9 +241,9 @@ typedef void (*regmap_unlock)(void *);
* register cache support).
* @num_reg_defaults: Number of elements in reg_defaults.
*
- * @read_flag_mask: Mask to be set in the top byte of the register when doing
+ * @read_flag_mask: Mask to be set in the top bytes of the register when doing
* a read.
- * @write_flag_mask: Mask to be set in the top byte of the register when doing
+ * @write_flag_mask: Mask to be set in the top bytes of the register when doing
* a write. If both read_flag_mask and write_flag_mask are
* empty the regmap_bus default masks are used.
* @use_single_rw: If set, converts the bulk read and write operations into
@@ -299,8 +299,8 @@ struct regmap_config {
const void *reg_defaults_raw;
unsigned int num_reg_defaults_raw;
- u8 read_flag_mask;
- u8 write_flag_mask;
+ unsigned long read_flag_mask;
+ unsigned long write_flag_mask;
bool use_single_rw;
bool can_multi_write;
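
/* Editor's note (illustrative sketch, not part of the patch): a typical use
 * of read_flag_mask, here for a hypothetical SPI device whose reads are
 * flagged by setting the top bit of the 8-bit register address; the values
 * are made up for illustration.
 */
#include <linux/regmap.h>

static const struct regmap_config example_spi_regmap_cfg = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x7f,
	.read_flag_mask	= 0x80,
	/* write_flag_mask left at 0: writes use the plain register address */
};
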
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index cae500b2c1d7..692108222271 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -140,8 +140,6 @@ struct regulator;
*
* @supply: The name of the supply. Initialised by the user before
* using the bulk regulator APIs.
- * @optional: The supply should be considered optional. Initialised by the user
- * before using the bulk regulator APIs.
* @consumer: The regulator consumer for the supply. This will be managed
* by the bulk API.
*
@@ -151,7 +149,6 @@ struct regulator;
*/
struct regulator_bulk_data {
const char *supply;
- bool optional;
struct regulator *consumer;
/* private: Internal use */
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fcfa40a6692c..37b532410528 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -113,10 +113,14 @@ struct regulator_linear_range {
* stabilise after being enabled, in microseconds.
* @set_ramp_delay: Set the ramp delay for the regulator. The driver should
* select ramp delay equal to or less than(closest) ramp_delay.
+ * @set_voltage_time: Time taken for the regulator output voltage
+ * to stabilise after being set to a new value, in microseconds.
+ * The function receives the from and to voltage as input and
+ * should return the worst case.
* @set_voltage_time_sel: Time taken for the regulator voltage output voltage
* to stabilise after being set to a new value, in microseconds.
- * The function provides the from and to voltage selector, the
- * function should return the worst case.
+ * The function receives the from and to voltage selector as
+ * input and should return the worst case.
* @set_soft_start: Enable soft start for the regulator.
*
* @set_suspend_voltage: Set the voltage for the regulator when the system
@@ -168,6 +172,8 @@ struct regulator_ops {
/* Time taken to enable or set voltage on the regulator */
int (*enable_time) (struct regulator_dev *);
int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay);
+ int (*set_voltage_time) (struct regulator_dev *, int old_uV,
+ int new_uV);
int (*set_voltage_time_sel) (struct regulator_dev *,
unsigned int old_selector,
unsigned int new_selector);
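
/* Editor's note (illustrative sketch, not part of the patch): a possible
 * set_voltage_time implementation for a hypothetical regulator that ramps
 * at 10 mV/us, returning the worst-case settle time in microseconds.
 */
#include <linux/kernel.h>
#include <linux/regulator/driver.h>

#define EXAMPLE_RAMP_UV_PER_US	10000	/* 10 mV per microsecond */

static int example_set_voltage_time(struct regulator_dev *rdev,
				    int old_uV, int new_uV)
{
	return DIV_ROUND_UP(abs(new_uV - old_uV), EXAMPLE_RAMP_UV_PER_US);
}

static const struct regulator_ops example_regulator_ops = {
	.set_voltage_time = example_set_voltage_time,
};
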
diff --git a/include/linux/relay.h b/include/linux/relay.h
index d7c8359693c6..68c1448e56bb 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -15,10 +15,12 @@
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/list.h>
+#include <linux/irq_work.h>
#include <linux/bug.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kref.h>
+#include <linux/percpu.h>
/*
* Tracks changes to rchan/rchan_buf structs
@@ -37,7 +39,7 @@ struct rchan_buf
size_t subbufs_consumed; /* count of sub-buffers consumed */
struct rchan *chan; /* associated channel */
wait_queue_head_t read_wait; /* reader wait queue */
- struct timer_list timer; /* reader wake-up timer */
+ struct irq_work wakeup_work; /* reader wakeup */
struct dentry *dentry; /* channel file dentry */
struct kref kref; /* channel buffer refcount */
struct page **page_array; /* array of current buffer pages */
@@ -63,7 +65,7 @@ struct rchan
struct kref kref; /* channel refcount */
void *private_data; /* for user-defined data */
size_t last_toobig; /* tried to log event > subbuf size */
- struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
+ struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
int is_global; /* One global buffer ? */
struct list_head list; /* for channel list */
struct dentry *parent; /* parent dentry passed to open */
@@ -204,7 +206,7 @@ static inline void relay_write(struct rchan *chan,
struct rchan_buf *buf;
local_irq_save(flags);
- buf = chan->buf[smp_processor_id()];
+ buf = *this_cpu_ptr(chan->buf);
if (unlikely(buf->offset + length > chan->subbuf_size))
length = relay_switch_subbuf(buf, length);
memcpy(buf->data + buf->offset, data, length);
@@ -230,12 +232,12 @@ static inline void __relay_write(struct rchan *chan,
{
struct rchan_buf *buf;
- buf = chan->buf[get_cpu()];
+ buf = *get_cpu_ptr(chan->buf);
if (unlikely(buf->offset + length > buf->chan->subbuf_size))
length = relay_switch_subbuf(buf, length);
memcpy(buf->data + buf->offset, data, length);
buf->offset += length;
- put_cpu();
+ put_cpu_ptr(chan->buf);
}
/**
@@ -251,17 +253,19 @@ static inline void __relay_write(struct rchan *chan,
*/
static inline void *relay_reserve(struct rchan *chan, size_t length)
{
- void *reserved;
- struct rchan_buf *buf = chan->buf[smp_processor_id()];
+ void *reserved = NULL;
+ struct rchan_buf *buf = *get_cpu_ptr(chan->buf);
if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
length = relay_switch_subbuf(buf, length);
if (!length)
- return NULL;
+ goto end;
}
reserved = buf->data + buf->offset;
buf->offset += length;
+end:
+ put_cpu_ptr(chan->buf);
return reserved;
}
@@ -285,5 +289,11 @@ static inline void subbuf_start_reserve(struct rchan_buf *buf,
*/
extern const struct file_operations relay_file_operations;
+#ifdef CONFIG_RELAY
+int relay_prepare_cpu(unsigned int cpu);
+#else
+#define relay_prepare_cpu NULL
+#endif
+
#endif /* _LINUX_RELAY_H */
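
With the buffers now truly per-cpu, relay exports relay_prepare_cpu so a channel buffer can be allocated before a CPU starts logging. A rough sketch of how the core would register it with the cpuhp state machine; the state constant and name used here are an assumption for illustration, not taken from this header:

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/relay.h>

static int __init relay_hotplug_setup(void)
{
	/*
	 * Allocate per-cpu rchan_buf structures while a CPU is being
	 * prepared; buffers go away with the channel, so no teardown.
	 */
	return cpuhp_setup_state(CPUHP_RELAY_PREPARE, "lib/relay:prepare",
				 relay_prepare_cpu, NULL);
}
early_initcall(relay_hotplug_setup);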
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 1c457a8dd5a6..930023b7c825 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -118,7 +118,7 @@ enum fw_resource_type {
RSC_LAST = 4,
};
-#define FW_RSC_ADDR_ANY (0xFFFFFFFFFFFFFFFF)
+#define FW_RSC_ADDR_ANY (-1)
/**
* struct fw_rsc_carveout - physically contiguous memory request
@@ -241,7 +241,7 @@ struct fw_rsc_trace {
* @notifyid is a unique rproc-wide notify index for this vring. This notify
* index is used when kicking a remote processor, to let it know that this
* vring is triggered.
- * @reserved: reserved (must be zero)
+ * @pa: physical address
*
* This descriptor is not a resource entry by itself; it is part of the
* vdev resource type (see below).
@@ -255,7 +255,7 @@ struct fw_rsc_vdev_vring {
u32 align;
u32 num;
u32 notifyid;
- u32 reserved;
+ u32 pa;
} __packed;
/**
@@ -409,7 +409,6 @@ enum rproc_crash_type {
* @max_notifyid: largest allocated notify id.
* @table_ptr: pointer to the resource table in effect
* @cached_table: copy of the resource table
- * @table_csum: checksum of the resource table
* @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
@@ -435,14 +434,14 @@ struct rproc {
struct idr notifyids;
int index;
struct work_struct crash_handler;
- unsigned crash_cnt;
+ unsigned int crash_cnt;
struct completion crash_comp;
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
struct resource_table *cached_table;
- u32 table_csum;
bool has_iommu;
+ bool auto_boot;
};
/* we currently support only two vrings per rvdev */
@@ -489,11 +488,12 @@ struct rproc_vdev {
struct rproc *rproc_get_by_phandle(phandle phandle);
struct rproc *rproc_alloc(struct device *dev, const char *name,
- const struct rproc_ops *ops,
- const char *firmware, int len);
+ const struct rproc_ops *ops,
+ const char *firmware, int len);
void rproc_put(struct rproc *rproc);
int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
+void rproc_free(struct rproc *rproc);
int rproc_boot(struct rproc *rproc);
void rproc_shutdown(struct rproc *rproc);
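
rproc_free() becomes the counterpart to rproc_alloc() for dropping a remote processor that was never added or has been deleted, and auto_boot lets a driver defer firmware boot. A hedged probe sketch; the foo_* identifiers are hypothetical:

#include <linux/platform_device.h>
#include <linux/remoteproc.h>

struct foo_rproc { int dummy; };		/* hypothetical private state */
static const struct rproc_ops foo_rproc_ops;	/* .start/.stop elided */

static int foo_rproc_probe(struct platform_device *pdev)
{
	struct rproc *rproc;
	int ret;

	rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
			    &foo_rproc_ops, "foo-dsp-fw.elf",
			    sizeof(struct foo_rproc));
	if (!rproc)
		return -ENOMEM;

	rproc->auto_boot = false;	/* boot later, e.g. on user request */

	ret = rproc_add(rproc);
	if (ret)
		rproc_free(rproc);	/* balances rproc_alloc() */

	return ret;
}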
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 3eef0802a0cd..5c132d3188be 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,7 +1,7 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
@@ -53,6 +53,11 @@ struct rhash_head {
struct rhash_head __rcu *next;
};
+struct rhlist_head {
+ struct rhash_head rhead;
+ struct rhlist_head __rcu *next;
+};
+
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
@@ -137,6 +142,7 @@ struct rhashtable_params {
* @key_len: Key length for hashfn
* @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
@@ -147,12 +153,21 @@ struct rhashtable {
unsigned int key_len;
unsigned int elasticity;
struct rhashtable_params p;
+ bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
};
/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhashtable
+ */
+struct rhltable {
+ struct rhashtable ht;
+};
+
+/**
* struct rhashtable_walker - Hash table walker
* @list: List entry on list of walkers
* @tbl: The table that we were walking over
@@ -163,9 +178,10 @@ struct rhashtable_walker {
};
/**
- * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * struct rhashtable_iter - Hash table iterator
* @ht: Table to iterate through
* @p: Current pointer
+ * @list: Current hash list pointer
* @walker: Associated rhashtable walker
* @slot: Current slot
* @skip: Number of entries to skip in slot
@@ -173,7 +189,8 @@ struct rhashtable_walker {
struct rhashtable_iter {
struct rhashtable *ht;
struct rhash_head *p;
- struct rhashtable_walker *walker;
+ struct rhlist_head *list;
+ struct rhashtable_walker walker;
unsigned int slot;
unsigned int skip;
};
@@ -339,15 +356,14 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+ const struct rhashtable_params *params);
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
- const void *key,
- struct rhash_head *obj,
- struct bucket_table *old_tbl);
-int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj);
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
- gfp_t gfp);
+void rhashtable_walk_enter(struct rhashtable *ht,
+ struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
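
rhashtable_walk_enter() replaces rhashtable_walk_init(); it allocates nothing and cannot fail, so iterator setup no longer needs error handling. A sketch of the resulting walk loop, with the per-object work elided:

#include <linux/err.h>
#include <linux/rhashtable.h>

static void walk_all(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);	/* may return -EAGAIN; the loop copes */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* a resize raced with the walk */
			break;
		}
		/* ... inspect obj while the walker holds RCU ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}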
@@ -506,6 +522,31 @@ void rhashtable_destroy(struct rhashtable *ht);
rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
tbl, hash, member)
+/**
+ * rhl_for_each_rcu - iterate over rcu hash table list
+ * @pos: the &struct rhlist_head to use as a loop cursor.
+ * @list: the head of the list
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_rcu(pos, list) \
+ for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
+
+/**
+ * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhlist_head to use as a loop cursor.
+ * @list: the head of the list
+ * @member: name of the &struct rhlist_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
+ for (pos = list; pos && rht_entry(tpos, pos, member); \
+ pos = rcu_dereference_raw(pos->next))
+
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
const void *obj)
{
@@ -515,18 +556,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
-/**
- * rhashtable_lookup_fast - search hash table, inlined version
- * @ht: hash table
- * @key: the pointer to the key
- * @params: hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * Returns the first entry on which the compare function returned true.
- */
-static inline void *rhashtable_lookup_fast(
+/* Internal function, do not use. */
+static inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
@@ -538,8 +569,6 @@ static inline void *rhashtable_lookup_fast(
struct rhash_head *he;
unsigned int hash;
- rcu_read_lock();
-
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
@@ -548,8 +577,7 @@ restart:
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
continue;
- rcu_read_unlock();
- return rht_obj(ht, he);
+ return he;
}
/* Ensure we see any new tables. */
@@ -558,89 +586,165 @@ restart:
tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(tbl))
goto restart;
- rcu_read_unlock();
return NULL;
}
-/* Internal function, please use rhashtable_insert_fast() instead */
-static inline int __rhashtable_insert_fast(
- struct rhashtable *ht, const void *key, struct rhash_head *obj,
+/**
+ * rhashtable_lookup - search hash table
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup(
+ struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+
+ return he ? rht_obj(ht, he) : NULL;
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, without RCU read lock
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Only use this function when you have other mechanisms guaranteeing
+ * that the object won't go away after the RCU read lock is released.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ void *obj;
+
+ rcu_read_lock();
+ obj = rhashtable_lookup(ht, key, params);
+ rcu_read_unlock();
+
+ return obj;
+}
+
+/**
+ * rhltable_lookup - search hash list table
+ * @hlt: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. All matching entries are returned
+ * in a list.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the list of entries that match the given key.
+ */
+static inline struct rhlist_head *rhltable_lookup(
+ struct rhltable *hlt, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+
+ return he ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
+/* Internal function, please use rhashtable_insert_fast() instead. This
+ * function returns the existing element already in the hash table if there is a clash,
+ * otherwise it returns an error via ERR_PTR().
+ */
+static inline void *__rhashtable_insert_fast(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params, bool rhlist)
+{
struct rhashtable_compare_arg arg = {
.ht = ht,
.key = key,
};
- struct bucket_table *tbl, *new_tbl;
+ struct rhash_head __rcu **pprev;
+ struct bucket_table *tbl;
struct rhash_head *head;
spinlock_t *lock;
- unsigned int elasticity;
unsigned int hash;
- int err;
+ int elasticity;
+ void *data;
-restart:
rcu_read_lock();
tbl = rht_dereference_rcu(ht->tbl, ht);
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
- /* All insertions must grab the oldest table containing
- * the hashed bucket that is yet to be rehashed.
- */
- for (;;) {
- hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
- spin_lock_bh(lock);
-
- if (tbl->rehash <= hash)
- break;
-
- spin_unlock_bh(lock);
- tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- }
-
- new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (unlikely(new_tbl)) {
- tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
- if (!IS_ERR_OR_NULL(tbl))
- goto slow_path;
-
- err = PTR_ERR(tbl);
- goto out;
- }
-
- err = -E2BIG;
- if (unlikely(rht_grow_above_max(ht, tbl)))
- goto out;
-
- if (unlikely(rht_grow_above_100(ht, tbl))) {
+ if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
slow_path:
spin_unlock_bh(lock);
- err = rhashtable_insert_rehash(ht, tbl);
rcu_read_unlock();
- if (err)
- return err;
-
- goto restart;
+ return rhashtable_insert_slow(ht, key, obj);
}
- err = -EEXIST;
elasticity = ht->elasticity;
+ pprev = &tbl->buckets[hash];
rht_for_each(head, tbl, hash) {
- if (key &&
- unlikely(!(params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head)))))
+ struct rhlist_head *plist;
+ struct rhlist_head *list;
+
+ elasticity--;
+ if (!key ||
+ (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head))))
+ continue;
+
+ data = rht_obj(ht, head);
+
+ if (!rhlist)
goto out;
- if (!--elasticity)
- goto slow_path;
+
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ plist = container_of(head, struct rhlist_head, rhead);
+
+ RCU_INIT_POINTER(list->next, plist);
+ head = rht_dereference_bucket(head->next, tbl, hash);
+ RCU_INIT_POINTER(list->rhead.next, head);
+ rcu_assign_pointer(*pprev, obj);
+
+ goto good;
}
- err = 0;
+ if (elasticity <= 0)
+ goto slow_path;
+
+ data = ERR_PTR(-E2BIG);
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ goto out;
+
+ if (unlikely(rht_grow_above_100(ht, tbl)))
+ goto slow_path;
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
RCU_INIT_POINTER(obj->next, head);
+ if (rhlist) {
+ struct rhlist_head *list;
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ RCU_INIT_POINTER(list->next, NULL);
+ }
rcu_assign_pointer(tbl->buckets[hash], obj);
@@ -648,11 +752,14 @@ slow_path:
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
+good:
+ data = NULL;
+
out:
spin_unlock_bh(lock);
rcu_read_unlock();
- return err;
+ return data;
}
/**
@@ -675,7 +782,65 @@ static inline int rhashtable_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
- return __rhashtable_insert_fast(ht, NULL, obj, params);
+ void *ret;
+
+ ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhltable_insert_key - insert object into hash list table
+ * @hlt: hash list table
+ * @key: the pointer to the key
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert_key(
+ struct rhltable *hlt, const void *key, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
+ params, true));
+}
+
+/**
+ * rhltable_insert - insert object into hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert(
+ struct rhltable *hlt, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(&hlt->ht, &list->rhead);
+
+ key += params.key_offset;
+
+ return rhltable_insert_key(hlt, key, list, params);
}
/**
@@ -704,11 +869,16 @@ static inline int rhashtable_lookup_insert_fast(
const struct rhashtable_params params)
{
const char *key = rht_obj(ht, obj);
+ void *ret;
BUG_ON(ht->p.obj_hashfn);
- return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
- params);
+ ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+ false);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
}
/**
@@ -737,15 +907,42 @@ static inline int rhashtable_lookup_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
+ void *ret;
+
+ BUG_ON(!ht->p.obj_hashfn || !key);
+
+ ret = __rhashtable_insert_fast(ht, key, obj, params, false);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Just like rhashtable_lookup_insert_key(), but this function returns the
+ * existing object if there is one, NULL if the insertion succeeded,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_key(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
BUG_ON(!ht->p.obj_hashfn || !key);
- return __rhashtable_insert_fast(ht, key, obj, params);
+ return __rhashtable_insert_fast(ht, key, obj, params, false);
}
/* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast(
+static inline int __rhashtable_remove_fast_one(
struct rhashtable *ht, struct bucket_table *tbl,
- struct rhash_head *obj, const struct rhashtable_params params)
+ struct rhash_head *obj, const struct rhashtable_params params,
+ bool rhlist)
{
struct rhash_head __rcu **pprev;
struct rhash_head *he;
@@ -760,39 +957,66 @@ static inline int __rhashtable_remove_fast(
pprev = &tbl->buckets[hash];
rht_for_each(he, tbl, hash) {
+ struct rhlist_head *list;
+
+ list = container_of(he, struct rhlist_head, rhead);
+
if (he != obj) {
+ struct rhlist_head __rcu **lpprev;
+
pprev = &he->next;
- continue;
+
+ if (!rhlist)
+ continue;
+
+ do {
+ lpprev = &list->next;
+ list = rht_dereference_bucket(list->next,
+ tbl, hash);
+ } while (list && obj != &list->rhead);
+
+ if (!list)
+ continue;
+
+ list = rht_dereference_bucket(list->next, tbl, hash);
+ RCU_INIT_POINTER(*lpprev, list);
+ err = 0;
+ break;
}
- rcu_assign_pointer(*pprev, obj->next);
- err = 0;
+ obj = rht_dereference_bucket(obj->next, tbl, hash);
+ err = 1;
+
+ if (rhlist) {
+ list = rht_dereference_bucket(list->next, tbl, hash);
+ if (list) {
+ RCU_INIT_POINTER(list->rhead.next, obj);
+ obj = &list->rhead;
+ err = 0;
+ }
+ }
+
+ rcu_assign_pointer(*pprev, obj);
break;
}
spin_unlock_bh(lock);
+ if (err > 0) {
+ atomic_dec(&ht->nelems);
+ if (unlikely(ht->p.automatic_shrinking &&
+ rht_shrink_below_30(ht, tbl)))
+ schedule_work(&ht->run_work);
+ err = 0;
+ }
+
return err;
}
-/**
- * rhashtable_remove_fast - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * Returns zero on success, -ENOENT if the entry could not be found.
- */
-static inline int rhashtable_remove_fast(
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
- const struct rhashtable_params params)
+ const struct rhashtable_params params, bool rhlist)
{
struct bucket_table *tbl;
int err;
@@ -806,24 +1030,60 @@ static inline int rhashtable_remove_fast(
* visible then that guarantees the entry to still be in
* the old tbl if it exists.
*/
- while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+ while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
+ rhlist)) &&
(tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
;
- if (err)
- goto out;
-
- atomic_dec(&ht->nelems);
- if (unlikely(ht->p.automatic_shrinking &&
- rht_shrink_below_30(ht, tbl)))
- schedule_work(&ht->run_work);
-
-out:
rcu_read_unlock();
return err;
}
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is single linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_remove_fast(ht, obj, params, false);
+}
+
+/**
+ * rhltable_remove - remove object from hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is single linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhltable_remove(
+ struct rhltable *hlt, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
+}
+
/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
struct rhashtable *ht, struct bucket_table *tbl,
@@ -906,4 +1166,59 @@ static inline int rhashtable_replace_fast(
return err;
}
+/* Obsolete function, do not use in new code. */
+static inline int rhashtable_walk_init(struct rhashtable *ht,
+ struct rhashtable_iter *iter, gfp_t gfp)
+{
+ rhashtable_walk_enter(ht, iter);
+ return 0;
+}
+
+/**
+ * rhltable_walk_enter - Initialise an iterator
+ * @hlt: Table to walk over
+ * @iter: Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice. Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit after this function returns.
+ */
+static inline void rhltable_walk_enter(struct rhltable *hlt,
+ struct rhashtable_iter *iter)
+{
+ return rhashtable_walk_enter(&hlt->ht, iter);
+}
+
+/**
+ * rhltable_free_and_destroy - free elements and destroy hash list table
+ * @hlt: the hash list table to destroy
+ * @free_fn: callback to release resources of element
+ * @arg: pointer passed to free_fn
+ *
+ * See documentation for rhashtable_free_and_destroy.
+ */
+static inline void rhltable_free_and_destroy(struct rhltable *hlt,
+ void (*free_fn)(void *ptr,
+ void *arg),
+ void *arg)
+{
+ return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+}
+
+static inline void rhltable_destroy(struct rhltable *hlt)
+{
+ return rhltable_free_and_destroy(hlt, NULL, NULL);
+}
+
#endif /* _LINUX_RHASHTABLE_H */
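
Taken together, the rhltable additions give a table that tolerates duplicate keys: inserts chain same-key objects on an rhlist and lookups return the whole chain. A minimal usage sketch; struct item, its fields and the parameters are assumptions made for illustration:

#include <linux/rcupdate.h>
#include <linux/rhashtable.h>

struct item {
	u32			key;
	struct rhlist_head	node;
};

static const struct rhashtable_params item_params = {
	.key_len		= sizeof(u32),
	.key_offset		= offsetof(struct item, key),
	.head_offset		= offsetof(struct item, node),
	.automatic_shrinking	= true,
};

static struct rhltable items;

static int items_setup(void)
{
	return rhltable_init(&items, &item_params);
}

static int items_add(struct item *it)
{
	/* Duplicates of the same key are simply chained on the rhlist. */
	return rhltable_insert(&items, &it->node, item_params);
}

static void items_for_key(u32 key)
{
	struct rhlist_head *list, *pos;
	struct item *it;

	rcu_read_lock();
	list = rhltable_lookup(&items, &key, item_params);
	rhl_for_each_entry_rcu(it, pos, list, node) {
		/* ... every item whose ->key equals key ... */
	}
	rcu_read_unlock();
}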
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index ada50ff36da0..452d393cc8dd 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -41,65 +41,27 @@
#include <linux/kref.h>
#include <linux/mutex.h>
-/* The feature bitmap for virtio rpmsg */
-#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */
+#define RPMSG_ADDR_ANY 0xFFFFFFFF
+
+struct rpmsg_device;
+struct rpmsg_endpoint;
+struct rpmsg_device_ops;
+struct rpmsg_endpoint_ops;
/**
- * struct rpmsg_hdr - common header for all rpmsg messages
- * @src: source address
+ * struct rpmsg_channel_info - channel info representation
+ * @name: name of service
+ * @src: local address
* @dst: destination address
- * @reserved: reserved for future use
- * @len: length of payload (in bytes)
- * @flags: message flags
- * @data: @len bytes of message payload data
- *
- * Every message sent(/received) on the rpmsg bus begins with this header.
*/
-struct rpmsg_hdr {
+struct rpmsg_channel_info {
+ char name[RPMSG_NAME_SIZE];
u32 src;
u32 dst;
- u32 reserved;
- u16 len;
- u16 flags;
- u8 data[0];
-} __packed;
-
-/**
- * struct rpmsg_ns_msg - dynamic name service announcement message
- * @name: name of remote service that is published
- * @addr: address of remote service that is published
- * @flags: indicates whether service is created or destroyed
- *
- * This message is sent across to publish a new service, or announce
- * about its removal. When we receive these messages, an appropriate
- * rpmsg channel (i.e device) is created/destroyed. In turn, the ->probe()
- * or ->remove() handler of the appropriate rpmsg driver will be invoked
- * (if/as-soon-as one is registered).
- */
-struct rpmsg_ns_msg {
- char name[RPMSG_NAME_SIZE];
- u32 addr;
- u32 flags;
-} __packed;
-
-/**
- * enum rpmsg_ns_flags - dynamic name service announcement flags
- *
- * @RPMSG_NS_CREATE: a new remote service was just created
- * @RPMSG_NS_DESTROY: a known remote service was just destroyed
- */
-enum rpmsg_ns_flags {
- RPMSG_NS_CREATE = 0,
- RPMSG_NS_DESTROY = 1,
};
-#define RPMSG_ADDR_ANY 0xFFFFFFFF
-
-struct virtproc_info;
-
/**
- * rpmsg_channel - devices that belong to the rpmsg bus are called channels
- * @vrp: the remote processor this channel belongs to
+ * rpmsg_device - device that belongs to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
* @src: local address
@@ -107,17 +69,18 @@ struct virtproc_info;
* @ept: the rpmsg endpoint of this channel
* @announce: if set, rpmsg will announce the creation/removal of this channel
*/
-struct rpmsg_channel {
- struct virtproc_info *vrp;
+struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
bool announce;
+
+ const struct rpmsg_device_ops *ops;
};
-typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
+typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32);
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
@@ -143,12 +106,14 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
* create additional endpoints by themselves (see rpmsg_create_ept()).
*/
struct rpmsg_endpoint {
- struct rpmsg_channel *rpdev;
+ struct rpmsg_device *rpdev;
struct kref refcount;
rpmsg_rx_cb_t cb;
struct mutex cb_lock;
u32 addr;
void *priv;
+
+ const struct rpmsg_endpoint_ops *ops;
};
/**
@@ -162,20 +127,19 @@ struct rpmsg_endpoint {
struct rpmsg_driver {
struct device_driver drv;
const struct rpmsg_device_id *id_table;
- int (*probe)(struct rpmsg_channel *dev);
- void (*remove)(struct rpmsg_channel *dev);
- void (*callback)(struct rpmsg_channel *, void *, int, void *, u32);
+ int (*probe)(struct rpmsg_device *dev);
+ void (*remove)(struct rpmsg_device *dev);
+ int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
};
-int register_rpmsg_device(struct rpmsg_channel *dev);
-void unregister_rpmsg_device(struct rpmsg_channel *dev);
+int register_rpmsg_device(struct rpmsg_device *dev);
+void unregister_rpmsg_device(struct rpmsg_device *dev);
int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
void unregister_rpmsg_driver(struct rpmsg_driver *drv);
void rpmsg_destroy_ept(struct rpmsg_endpoint *);
-struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *,
- rpmsg_rx_cb_t cb, void *priv, u32 addr);
-int
-rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool);
+struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *,
+ rpmsg_rx_cb_t cb, void *priv,
+ struct rpmsg_channel_info chinfo);
/* use a macro to avoid include chaining to get THIS_MODULE */
#define register_rpmsg_driver(drv) \
@@ -193,156 +157,14 @@ rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool);
module_driver(__rpmsg_driver, register_rpmsg_driver, \
unregister_rpmsg_driver)
-/**
- * rpmsg_send() - send a message across to the remote processor
- * @rpdev: the rpmsg channel
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len on the @rpdev channel.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to, using @rpdev's source and destination addresses.
- * In case there are no TX buffers available, the function will block until
- * one becomes available, or a timeout of 15 seconds elapses. When the latter
- * happens, -ERESTARTSYS is returned.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len)
-{
- u32 src = rpdev->src, dst = rpdev->dst;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
-}
-
-/**
- * rpmsg_sendto() - send a message across to the remote processor, specify dst
- * @rpdev: the rpmsg channel
- * @data: payload of message
- * @len: length of payload
- * @dst: destination address
- *
- * This function sends @data of length @len to the remote @dst address.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to, using @rpdev's source address.
- * In case there are no TX buffers available, the function will block until
- * one becomes available, or a timeout of 15 seconds elapses. When the latter
- * happens, -ERESTARTSYS is returned.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline
-int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst)
-{
- u32 src = rpdev->src;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
-}
-
-/**
- * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
- * @rpdev: the rpmsg channel
- * @src: source address
- * @dst: destination address
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len to the remote @dst address,
- * and uses @src as the source address.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to.
- * In case there are no TX buffers available, the function will block until
- * one becomes available, or a timeout of 15 seconds elapses. When the latter
- * happens, -ERESTARTSYS is returned.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline
-int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
- void *data, int len)
-{
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
-}
-
-/**
- * rpmsg_send() - send a message across to the remote processor
- * @rpdev: the rpmsg channel
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len on the @rpdev channel.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to, using @rpdev's source and destination addresses.
- * In case there are no TX buffers available, the function will immediately
- * return -ENOMEM without waiting until one becomes available.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline
-int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len)
-{
- u32 src = rpdev->src, dst = rpdev->dst;
+int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
+int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len);
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
-}
-
-/**
- * rpmsg_sendto() - send a message across to the remote processor, specify dst
- * @rpdev: the rpmsg channel
- * @data: payload of message
- * @len: length of payload
- * @dst: destination address
- *
- * This function sends @data of length @len to the remote @dst address.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to, using @rpdev's source address.
- * In case there are no TX buffers available, the function will immediately
- * return -ENOMEM without waiting until one becomes available.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline
-int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst)
-{
- u32 src = rpdev->src;
-
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
-}
-
-/**
- * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
- * @rpdev: the rpmsg channel
- * @src: source address
- * @dst: destination address
- * @data: payload of message
- * @len: length of payload
- *
- * This function sends @data of length @len to the remote @dst address,
- * and uses @src as the source address.
- * The message will be sent to the remote processor which the @rpdev
- * channel belongs to.
- * In case there are no TX buffers available, the function will immediately
- * return -ENOMEM without waiting until one becomes available.
- *
- * Can only be called from process context (for now).
- *
- * Returns 0 on success and an appropriate error value on failure.
- */
-static inline
-int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
- void *data, int len)
-{
- return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
-}
+int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
+int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
+int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
+ void *data, int len);
#endif /* _LINUX_RPMSG_H */
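
Under the reworked API a client binds to an rpmsg_device, transmits through an endpoint, and its callback returns an int. A hedged sketch of such a driver; the channel name and payload are made up:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/rpmsg.h>

static int foo_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
			void *priv, u32 src)
{
	dev_info(&rpdev->dev, "received %d bytes from 0x%x\n", len, src);
	return 0;			/* callbacks now report status */
}

static int foo_rpmsg_probe(struct rpmsg_device *rpdev)
{
	static const char ping[] = "ping";

	/* The bus created the default endpoint; send through it. */
	return rpmsg_send(rpdev->ept, (void *)ping, sizeof(ping));
}

static void foo_rpmsg_remove(struct rpmsg_device *rpdev)
{
}

static const struct rpmsg_device_id foo_rpmsg_id_table[] = {
	{ .name = "rpmsg-foo-demo" },
	{ },
};

static struct rpmsg_driver foo_rpmsg_driver = {
	.drv.name	= KBUILD_MODNAME,
	.id_table	= foo_rpmsg_id_table,
	.probe		= foo_rpmsg_probe,
	.remove		= foo_rpmsg_remove,
	.callback	= foo_rpmsg_cb,
};
module_rpmsg_driver(foo_rpmsg_driver);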
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2daece8979f7..57e54847b0b9 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -105,7 +105,7 @@ extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx);
+ int *idx);
extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
new file mode 100644
index 000000000000..f017fd6e69c4
--- /dev/null
+++ b/include/linux/sbitmap.h
@@ -0,0 +1,373 @@
+/*
+ * Fast and scalable bitmaps.
+ *
+ * Copyright (C) 2016 Facebook
+ * Copyright (C) 2013-2014 Jens Axboe
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LINUX_SCALE_BITMAP_H
+#define __LINUX_SCALE_BITMAP_H
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+/**
+ * struct sbitmap_word - Word in a &struct sbitmap.
+ */
+struct sbitmap_word {
+ /**
+ * @word: The bitmap word itself.
+ */
+ unsigned long word;
+
+ /**
+ * @depth: Number of bits being used in @word.
+ */
+ unsigned long depth;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct sbitmap - Scalable bitmap.
+ *
+ * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
+ * trades off higher memory usage for better scalability.
+ */
+struct sbitmap {
+ /**
+ * @depth: Number of bits used in the whole bitmap.
+ */
+ unsigned int depth;
+
+ /**
+ * @shift: log2(number of bits used per word)
+ */
+ unsigned int shift;
+
+ /**
+ * @map_nr: Number of words (cachelines) being used for the bitmap.
+ */
+ unsigned int map_nr;
+
+ /**
+ * @map: Allocated bitmap.
+ */
+ struct sbitmap_word *map;
+};
+
+#define SBQ_WAIT_QUEUES 8
+#define SBQ_WAKE_BATCH 8
+
+/**
+ * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
+ */
+struct sbq_wait_state {
+ /**
+ * @wait_cnt: Number of frees remaining before we wake up.
+ */
+ atomic_t wait_cnt;
+
+ /**
+ * @wait: Wait queue.
+ */
+ wait_queue_head_t wait;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
+ * bits.
+ *
+ * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
+ * avoid contention on the wait queue spinlock. This ensures that we don't hit a
+ * scalability wall when we run out of free bits and have to start putting tasks
+ * to sleep.
+ */
+struct sbitmap_queue {
+ /**
+ * @sb: Scalable bitmap.
+ */
+ struct sbitmap sb;
+
+ /*
+ * @alloc_hint: Cache of last successfully allocated or freed bit.
+ *
+ * This is per-cpu, which allows multiple users to stick to different
+ * cachelines until the map is exhausted.
+ */
+ unsigned int __percpu *alloc_hint;
+
+ /**
+ * @wake_batch: Number of bits which must be freed before we wake up any
+ * waiters.
+ */
+ unsigned int wake_batch;
+
+ /**
+ * @wake_index: Next wait queue in @ws to wake up.
+ */
+ atomic_t wake_index;
+
+ /**
+ * @ws: Wait queues.
+ */
+ struct sbq_wait_state *ws;
+
+ /**
+ * @round_robin: Allocate bits in strict round-robin order.
+ */
+ bool round_robin;
+};
+
+/**
+ * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
+ * @sb: Bitmap to initialize.
+ * @depth: Number of bits to allocate.
+ * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
+ * given, a good default is chosen.
+ * @flags: Allocation flags.
+ * @node: Memory node to allocate on.
+ *
+ * Return: Zero on success or negative errno on failure.
+ */
+int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
+ gfp_t flags, int node);
+
+/**
+ * sbitmap_free() - Free memory used by a &struct sbitmap.
+ * @sb: Bitmap to free.
+ */
+static inline void sbitmap_free(struct sbitmap *sb)
+{
+ kfree(sb->map);
+ sb->map = NULL;
+}
+
+/**
+ * sbitmap_resize() - Resize a &struct sbitmap.
+ * @sb: Bitmap to resize.
+ * @depth: New number of bits to resize to.
+ *
+ * Doesn't reallocate anything. It's up to the caller to ensure that the new
+ * depth doesn't exceed the depth that the sb was initialized with.
+ */
+void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
+
+/**
+ * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
+ * @sb: Bitmap to allocate from.
+ * @alloc_hint: Hint for where to start searching for a free bit.
+ * @round_robin: If true, be stricter about allocation order; always allocate
+ * starting from the last allocated bit. This is less efficient
+ * than the default behavior (false).
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
+
+/**
+ * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
+ * @sb: Bitmap to check.
+ *
+ * Return: true if any bit in the bitmap is set, false otherwise.
+ */
+bool sbitmap_any_bit_set(const struct sbitmap *sb);
+
+/**
+ * sbitmap_any_bit_clear() - Check for an unset bit in a &struct
+ * sbitmap.
+ * @sb: Bitmap to check.
+ *
+ * Return: true if any bit in the bitmap is clear, false otherwise.
+ */
+bool sbitmap_any_bit_clear(const struct sbitmap *sb);
+
+typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);
+
+/**
+ * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
+ * @sb: Bitmap to iterate over.
+ * @fn: Callback. Should return true to continue or false to break early.
+ * @data: Pointer to pass to callback.
+ *
+ * This is inline even though it's non-trivial so that the function calls to the
+ * callback will hopefully get optimized away.
+ */
+static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
+ void *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ struct sbitmap_word *word = &sb->map[i];
+ unsigned int off, nr;
+
+ if (!word->word)
+ continue;
+
+ nr = 0;
+ off = i << sb->shift;
+ while (1) {
+ nr = find_next_bit(&word->word, word->depth, nr);
+ if (nr >= word->depth)
+ break;
+
+ if (!fn(sb, off + nr, data))
+ return;
+
+ nr++;
+ }
+ }
+}
+
+#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
+#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
+
+static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
+ unsigned int bitnr)
+{
+ return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
+}
+
+/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */
+
+static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
+{
+ set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+}
+
+static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
+{
+ clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+}
+
+static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
+{
+ return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
+}
+
+unsigned int sbitmap_weight(const struct sbitmap *sb);
+
+/**
+ * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
+ * memory node.
+ * @sbq: Bitmap queue to initialize.
+ * @depth: See sbitmap_init_node().
+ * @shift: See sbitmap_init_node().
+ * @round_robin: See sbitmap_get().
+ * @flags: Allocation flags.
+ * @node: Memory node to allocate on.
+ *
+ * Return: Zero on success or negative errno on failure.
+ */
+int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
+ int shift, bool round_robin, gfp_t flags, int node);
+
+/**
+ * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
+ *
+ * @sbq: Bitmap queue to free.
+ */
+static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
+{
+ kfree(sbq->ws);
+ free_percpu(sbq->alloc_hint);
+ sbitmap_free(&sbq->sb);
+}
+
+/**
+ * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
+ * @sbq: Bitmap queue to resize.
+ * @depth: New number of bits to resize to.
+ *
+ * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
+ * some extra work on the &struct sbitmap_queue, so it's not safe to just
+ * resize the underlying &struct sbitmap.
+ */
+void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
+
+/**
+ * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
+ * sbitmap_queue with preemption already disabled.
+ * @sbq: Bitmap queue to allocate from.
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int __sbitmap_queue_get(struct sbitmap_queue *sbq);
+
+/**
+ * sbitmap_queue_get() - Try to allocate a free bit from a &struct
+ * sbitmap_queue.
+ * @sbq: Bitmap queue to allocate from.
+ * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
+ * sbitmap_queue_clear()).
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
+ unsigned int *cpu)
+{
+ int nr;
+
+ *cpu = get_cpu();
+ nr = __sbitmap_queue_get(sbq);
+ put_cpu();
+ return nr;
+}
+
+/**
+ * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
+ * &struct sbitmap_queue.
+ * @sbq: Bitmap to free from.
+ * @nr: Bit number to free.
+ * @cpu: CPU the bit was allocated on.
+ */
+void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
+ unsigned int cpu);
+
+static inline int sbq_index_inc(int index)
+{
+ return (index + 1) & (SBQ_WAIT_QUEUES - 1);
+}
+
+static inline void sbq_index_atomic_inc(atomic_t *index)
+{
+ int old = atomic_read(index);
+ int new = sbq_index_inc(old);
+ atomic_cmpxchg(index, old, new);
+}
+
+/**
+ * sbq_wait_ptr() - Get the next wait queue to use for a &struct
+ * sbitmap_queue.
+ * @sbq: Bitmap queue to wait on.
+ * @wait_index: A counter per "user" of @sbq.
+ */
+static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
+ atomic_t *wait_index)
+{
+ struct sbq_wait_state *ws;
+
+ ws = &sbq->ws[atomic_read(wait_index)];
+ sbq_index_atomic_inc(wait_index);
+ return ws;
+}
+
+/**
+ * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
+ * sbitmap_queue.
+ * @sbq: Bitmap queue to wake up.
+ */
+void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
+
+#endif /* __LINUX_SCALE_BITMAP_H */
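
The sbitmap_queue half behaves like a blk-mq style tag allocator: initialise once, grab a free bit per request, clear it on completion. A short sketch; sleeping callers would additionally wait on sbq_wait_ptr(), which is omitted here, and the names are illustrative:

#include <linux/sbitmap.h>

static struct sbitmap_queue tags;

static int tags_setup(unsigned int depth, int node)
{
	/* A negative shift lets the library pick the bits-per-word split. */
	return sbitmap_queue_init_node(&tags, depth, -1, false,
				       GFP_KERNEL, node);
}

static int tag_alloc(unsigned int *cpu)
{
	/* Returns the allocated bit number, or -1 if the map is full. */
	return sbitmap_queue_get(&tags, cpu);
}

static void tag_free(int tag, unsigned int cpu)
{
	sbitmap_queue_clear(&tags, tag, cpu);
}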
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 98fe95fea30c..348f51b0ec92 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -448,6 +448,8 @@ static inline void io_schedule(void)
io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}
+void __noreturn do_task_dead(void);
+
struct nsproxy;
struct user_namespace;
@@ -522,8 +524,9 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
-#define MMF_OOM_REAPED 21 /* mm has been already reaped */
-#define MMF_OOM_NOT_REAPABLE 22 /* mm couldn't be reaped */
+#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */
+#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
+#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
@@ -671,7 +674,6 @@ struct signal_struct {
atomic_t sigcnt;
atomic_t live;
int nr_threads;
- atomic_t oom_victims; /* # of TIF_MEDIE threads in this thread group */
struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
@@ -804,6 +806,8 @@ struct signal_struct {
short oom_score_adj; /* OOM kill score adjustment */
short oom_score_adj_min; /* OOM kill score adjustment min value.
* Only settable by CAP_SYS_RESOURCE. */
+ struct mm_struct *oom_mm; /* recorded mm when the thread group got
+ * killed by the oom killer */
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
@@ -1022,7 +1026,8 @@ extern void wake_up_q(struct wake_q_head *head);
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
-#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */
+#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */
+#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */
#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
@@ -1064,6 +1069,12 @@ extern int sched_domain_level_max;
struct sched_group;
+struct sched_domain_shared {
+ atomic_t ref;
+ atomic_t nr_busy_cpus;
+ int has_idle_cores;
+};
+
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
@@ -1094,6 +1105,8 @@ struct sched_domain {
u64 max_newidle_lb_cost;
unsigned long next_decay_max_lb_cost;
+ u64 avg_scan_cost; /* select_idle_sibling */
+
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -1132,6 +1145,7 @@ struct sched_domain {
void *private; /* used during construction */
struct rcu_head rcu; /* used during destruction */
};
+ struct sched_domain_shared *shared;
unsigned int span_weight;
/*
@@ -1165,6 +1179,7 @@ typedef int (*sched_domain_flags_f)(void);
struct sd_data {
struct sched_domain **__percpu sd;
+ struct sched_domain_shared **__percpu sds;
struct sched_group **__percpu sg;
struct sched_group_capacity **__percpu sgc;
};
@@ -1458,6 +1473,13 @@ struct tlbflush_unmap_batch {
};
struct task_struct {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ /*
+ * For reasons of header soup (see current_thread_info()), this
+ * must be the first element of task_struct.
+ */
+ struct thread_info thread_info;
+#endif
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
atomic_t usage;
@@ -1467,6 +1489,9 @@ struct task_struct {
#ifdef CONFIG_SMP
struct llist_node wake_entry;
int on_cpu;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ unsigned int cpu; /* current CPU */
+#endif
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -1923,6 +1948,13 @@ struct task_struct {
#ifdef CONFIG_MMU
struct task_struct *oom_reaper_list;
#endif
+#ifdef CONFIG_VMAP_STACK
+ struct vm_struct *stack_vm_area;
+#endif
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ /* A live task holds one reference. */
+ atomic_t stack_refcount;
+#endif
/* CPU-specific state of this task */
struct thread_struct thread;
/*
@@ -1939,6 +1971,18 @@ extern int arch_task_struct_size __read_mostly;
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
+#ifdef CONFIG_VMAP_STACK
+static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
+{
+ return t->stack_vm_area;
+}
+#else
+static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
+{
+ return NULL;
+}
+#endif
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
@@ -2568,12 +2612,14 @@ static inline bool is_idle_task(const struct task_struct *p)
return p->pid == 0;
}
extern struct task_struct *curr_task(int cpu);
-extern void set_curr_task(int cpu, struct task_struct *p);
+extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
+#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
+#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
@@ -2832,6 +2878,20 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
+static inline void mmdrop_async_fn(struct work_struct *work)
+{
+ struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+ __mmdrop(mm);
+}
+
+static inline void mmdrop_async(struct mm_struct *mm)
+{
+ if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
+ INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
+ schedule_work(&mm->async_put_work);
+ }
+}
+
static inline bool mmget_not_zero(struct mm_struct *mm)
{
return atomic_inc_not_zero(&mm->mm_users);
@@ -3061,10 +3121,34 @@ static inline void threadgroup_change_end(struct task_struct *tsk)
cgroup_threadgroup_change_end(tsk);
}
-#ifndef __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+
+static inline struct thread_info *task_thread_info(struct task_struct *task)
+{
+ return &task->thread_info;
+}
+
+/*
+ * When accessing the stack of a non-current task that might exit, use
+ * try_get_task_stack() instead. task_stack_page will return a pointer
+ * that could get freed out from under you.
+ */
+static inline void *task_stack_page(const struct task_struct *task)
+{
+ return task->stack;
+}
+
+#define setup_thread_stack(new,old) do { } while(0)
+
+static inline unsigned long *end_of_stack(const struct task_struct *task)
+{
+ return task->stack;
+}
+
+#elif !defined(__HAVE_THREAD_FUNCTIONS)
#define task_thread_info(task) ((struct thread_info *)(task)->stack)
-#define task_stack_page(task) ((task)->stack)
+#define task_stack_page(task) ((void *)(task)->stack)
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
@@ -3091,6 +3175,24 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
}
#endif
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+static inline void *try_get_task_stack(struct task_struct *tsk)
+{
+ return atomic_inc_not_zero(&tsk->stack_refcount) ?
+ task_stack_page(tsk) : NULL;
+}
+
+extern void put_task_stack(struct task_struct *tsk);
+#else
+static inline void *try_get_task_stack(struct task_struct *tsk)
+{
+ return task_stack_page(tsk);
+}
+
+static inline void put_task_stack(struct task_struct *tsk) {}
+#endif
+
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
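
The comment above states the rule: a non-current task may exit and free its stack, so pin the stack before touching it. The intended pattern, sketched with the actual inspection elided:

#include <linux/sched.h>

static void inspect_stack(struct task_struct *tsk)
{
	void *stack = try_get_task_stack(tsk);

	if (!stack)
		return;			/* task exited; stack already freed */

	/* ... safely read the stack, e.g. to print a backtrace ... */

	put_task_stack(tsk);		/* drop the reference taken above */
}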
@@ -3206,7 +3308,11 @@ static inline int signal_pending_state(long state, struct task_struct *p)
* cond_resched_lock() will drop the spinlock before scheduling,
* cond_resched_softirq() will enable bhs before scheduling.
*/
+#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
+#else
+static inline int _cond_resched(void) { return 0; }
+#endif
#define cond_resched() ({ \
___might_sleep(__FILE__, __LINE__, 0); \
@@ -3236,6 +3342,15 @@ static inline void cond_resched_rcu(void)
#endif
}
+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+ return p->preempt_disable_ip;
+#else
+ return 0;
+#endif
+}
+
/*
* Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPT,
@@ -3364,7 +3479,11 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
static inline unsigned int task_cpu(const struct task_struct *p)
{
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ return p->cpu;
+#else
return task_thread_info(p)->cpu;
+#endif
}
static inline int task_node(const struct task_struct *p)
diff --git a/include/linux/security.h b/include/linux/security.h
index 7831cd57bcf7..c2125e9093e8 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -242,6 +242,10 @@ int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
int security_dentry_init_security(struct dentry *dentry, int mode,
const struct qstr *name, void **ctx,
u32 *ctxlen);
+int security_dentry_create_files_as(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old,
+ struct cred *new);
int security_inode_alloc(struct inode *inode);
void security_inode_free(struct inode *inode);
@@ -282,6 +286,8 @@ int security_inode_getsecurity(struct inode *inode, const char *name, void **buf
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
void security_inode_getsecid(struct inode *inode, u32 *secid);
+int security_inode_copy_up(struct dentry *src, struct cred **new);
+int security_inode_copy_up_xattr(const char *name);
int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_free(struct file *file);
@@ -307,7 +313,6 @@ void security_transfer_creds(struct cred *new, const struct cred *old);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
-int security_kernel_module_from_file(struct file *file);
int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
@@ -598,6 +603,14 @@ static inline int security_dentry_init_security(struct dentry *dentry,
return -EOPNOTSUPP;
}
+static inline int security_dentry_create_files_as(struct dentry *dentry,
+ int mode, struct qstr *name,
+ const struct cred *old,
+ struct cred *new)
+{
+ return 0;
+}
+
static inline int security_inode_init_security(struct inode *inode,
struct inode *dir,
@@ -758,6 +771,16 @@ static inline void security_inode_getsecid(struct inode *inode, u32 *secid)
*secid = 0;
}
+static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
+{
+ return 0;
+}
+
+static inline int security_inode_copy_up_xattr(const char *name)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int security_file_permission(struct file *file, int mask)
{
return 0;
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 976ce3a19f1b..d0efd6e6c20a 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -21,6 +21,7 @@ struct sem_array {
struct list_head list_id; /* undo requests on this array */
int sem_nsems; /* no. of semaphores in array */
int complex_count; /* pending complex operations */
+ bool complex_mode; /* no parallel simple ops */
};
#ifdef CONFIG_SYSVIPC
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index f3d45dd42695..e305b66a9fb9 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -117,9 +117,9 @@ __printf(2, 3)
void seq_printf(struct seq_file *m, const char *fmt, ...);
void seq_putc(struct seq_file *m, char c);
void seq_puts(struct seq_file *m, const char *s);
-void seq_put_decimal_ull(struct seq_file *m, char delimiter,
+void seq_put_decimal_ull(struct seq_file *m, const char *delimiter,
unsigned long long num);
-void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num);
+void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num);
void seq_escape(struct seq_file *m, const char *s, const char *esc);
void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
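
A minimal usage sketch of the new string-delimiter prototypes, assuming a
struct seq_file *m from a show() callback; the counter names are illustrative:

	seq_puts(m, "stats:");
	seq_put_decimal_ull(m, " rx=", rx_packets);	/* prints " rx=<value>" */
	seq_put_decimal_ll(m, " drift=", clock_drift);	/* signed variant */
	seq_putc(m, '\n');
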
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 2f44e2013654..344201437017 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -367,14 +367,21 @@ extern const struct earlycon_id __earlycon_table_end[];
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
-extern int setup_earlycon(char *buf);
extern int of_setup_earlycon(const struct earlycon_id *match,
unsigned long node,
const char *options);
+#ifdef CONFIG_SERIAL_EARLYCON
+extern bool earlycon_init_is_deferred __initdata;
+int setup_earlycon(char *buf);
+#else
+static const bool earlycon_init_is_deferred;
+static inline int setup_earlycon(char *buf) { return 0; }
+#endif
+
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
-int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr,
+int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
char **options);
void uart_parse_options(char *options, int *baud, int *parity, int *bits,
int *flow);
@@ -412,7 +419,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
static inline int uart_tx_stopped(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
- if (tty->stopped || port->hw_stopped)
+ if ((tty && tty->stopped) || port->hw_stopped)
return 1;
return 0;
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0f665cb26b50..601258f6e621 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -612,7 +612,6 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
- * @offload_fwd_mark: fwding offload mark
* @mark: Generic packet mark
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
@@ -677,13 +676,23 @@ struct sk_buff {
*/
kmemcheck_bitfield_begin(flags1);
__u16 queue_mapping;
+
+/* if you move cloned around you also must adapt those constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define CLONED_MASK (1 << 7)
+#else
+#define CLONED_MASK 1
+#endif
+#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+
+ __u8 __cloned_offset[0];
__u8 cloned:1,
nohdr:1,
fclone:2,
peeked:1,
head_frag:1,
- xmit_more:1;
- /* one bit hole */
+ xmit_more:1,
+ __unused:1; /* one bit hole */
kmemcheck_bitfield_end(flags1);
/* fields enclosed in headers_start/headers_end are copied
@@ -730,7 +739,10 @@ struct sk_buff {
__u8 ipvs_property:1;
__u8 inner_protocol_type:1;
__u8 remcsum_offload:1;
- /* 3 or 5 bit hole */
+#ifdef CONFIG_NET_SWITCHDEV
+ __u8 offload_fwd_mark:1;
+#endif
+ /* 2, 4 or 5 bit hole */
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -757,14 +769,9 @@ struct sk_buff {
unsigned int sender_cpu;
};
#endif
- union {
#ifdef CONFIG_NETWORK_SECMARK
- __u32 secmark;
-#endif
-#ifdef CONFIG_NET_SWITCHDEV
- __u32 offload_fwd_mark;
+ __u32 secmark;
#endif
- };
union {
__u32 mark;
@@ -2295,7 +2302,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
if (unlikely(skb_is_nonlinear(skb))) {
WARN_ON(1);
@@ -2305,6 +2312,11 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
skb_set_tail_pointer(skb, len);
}
+static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+ __skb_set_length(skb, len);
+}
+
void skb_trim(struct sk_buff *skb, unsigned int len);
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
@@ -2335,6 +2347,20 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
BUG_ON(err);
}
+static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int diff = len - skb->len;
+
+ if (skb_tailroom(skb) < diff) {
+ int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
+ GFP_ATOMIC);
+ if (ret)
+ return ret;
+ }
+ __skb_set_length(skb, len);
+ return 0;
+}
+
/**
* skb_orphan - orphan a buffer
* @skb: buffer to orphan
@@ -2386,6 +2412,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
+void skb_rbtree_purge(struct rb_root *root);
+
void *netdev_alloc_frag(unsigned int fragsz);
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2938,6 +2966,21 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
return __pskb_trim(skb, len);
}
+static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+ __skb_trim(skb, len);
+ return 0;
+}
+
+static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+ return __skb_grow(skb, len);
+}
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
@@ -3021,15 +3064,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len, __wsum csum);
-ssize_t skb_socket_splice(struct sock *sk,
- struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
- unsigned int flags,
- ssize_t (*splice_cb)(struct sock *,
- struct pipe_inode_info *,
- struct splice_pipe_desc *));
+ unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -3042,6 +3079,7 @@ bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
@@ -3726,6 +3764,13 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
+static inline void skb_gso_reset(struct sk_buff *skb)
+{
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_segs = 0;
+ skb_shinfo(skb)->gso_type = 0;
+}
+
void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
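
A short sketch of the new checksum-aware resize helpers, assuming the caller
owns skb and chooses new_len; any CHECKSUM_COMPLETE value is dropped so the
stack recomputes it:

	static int example_resize_rcsum(struct sk_buff *skb, unsigned int new_len)
	{
		/* grow reallocates tailroom via pskb_expand_head() if needed */
		if (new_len > skb->len)
			return __skb_grow_rcsum(skb, new_len);
		return __skb_trim_rcsum(skb, new_len);
	}
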
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 4293808d8cfb..084b12bad198 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -650,4 +650,12 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);
+#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
+int slab_prepare_cpu(unsigned int cpu);
+int slab_dead_cpu(unsigned int cpu);
+#else
+#define slab_prepare_cpu NULL
+#define slab_dead_cpu NULL
+#endif
+
#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index eccae4690f41..8e0cb7a0f836 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -196,6 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void);
void smp_setup_processor_id(void);
+int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
+ bool phys);
+
/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
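
A sketch of the new smp_call_on_cpu() helper, assuming process context; the
CPU number and callback are illustrative:

	/* runs from a kworker pinned to the requested CPU (phys == false) */
	static int example_read_cpu_id(void *unused)
	{
		return raw_smp_processor_id();
	}

	/* returns the callback's return value, here the id of CPU 2 */
	int id = smp_call_on_cpu(2, example_read_cpu_id, NULL, false);
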
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
index cbb0f06c41b2..f148e0ffbec7 100644
--- a/include/linux/soc/qcom/smd.h
+++ b/include/linux/soc/qcom/smd.h
@@ -55,11 +55,16 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel,
const char *name,
qcom_smd_cb_t cb);
+void qcom_smd_close_channel(struct qcom_smd_channel *channel);
void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel);
void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data);
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
+ struct device_node *node);
+int qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
+
#else
static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv)
@@ -83,14 +88,20 @@ qcom_smd_open_channel(struct qcom_smd_channel *channel,
return NULL;
}
-void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
+static inline void qcom_smd_close_channel(struct qcom_smd_channel *channel)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
{
/* This shouldn't be possible */
WARN_ON(1);
return NULL;
}
-void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
+static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
{
/* This shouldn't be possible */
WARN_ON(1);
@@ -104,6 +115,20 @@ static inline int qcom_smd_send(struct qcom_smd_channel *channel,
return -ENXIO;
}
+static inline struct qcom_smd_edge *
+qcom_smd_register_edge(struct device *parent,
+ struct device_node *node)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+ return -ENXIO;
+}
+
#endif
#define module_qcom_smd_driver(__smd_driver) \
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 072cb2aa2413..4b743ac35396 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -312,6 +312,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @flags: other constraints relevant to this driver
* @max_transfer_size: function that returns the max transfer size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
+ * @max_message_size: function that returns the max message size for
+ * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @io_mutex: mutex for physical bus access
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for exclusion of multiple callers
@@ -442,10 +444,11 @@ struct spi_master {
#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */
/*
- * on some hardware transfer size may be constrained
+ * on some hardware transfer / message size may be constrained
* the limit may depend on device transfer settings
*/
size_t (*max_transfer_size)(struct spi_device *spi);
+ size_t (*max_message_size)(struct spi_device *spi);
/* I/O mutex */
struct mutex io_mutex;
@@ -905,12 +908,26 @@ extern int spi_async_locked(struct spi_device *spi,
struct spi_message *message);
static inline size_t
-spi_max_transfer_size(struct spi_device *spi)
+spi_max_message_size(struct spi_device *spi)
{
struct spi_master *master = spi->master;
- if (!master->max_transfer_size)
+ if (!master->max_message_size)
return SIZE_MAX;
- return master->max_transfer_size(spi);
+ return master->max_message_size(spi);
+}
+
+static inline size_t
+spi_max_transfer_size(struct spi_device *spi)
+{
+ struct spi_master *master = spi->master;
+ size_t tr_max = SIZE_MAX;
+ size_t msg_max = spi_max_message_size(spi);
+
+ if (master->max_transfer_size)
+ tr_max = master->max_transfer_size(spi);
+
+ /* transfer size limit must not be greater than message size limit */
+ return min(tr_max, msg_max);
}
/*---------------------------------------------------------------------------*/
@@ -980,6 +997,30 @@ extern int spi_bus_lock(struct spi_master *master);
extern int spi_bus_unlock(struct spi_master *master);
/**
+ * spi_sync_transfer - synchronous SPI data transfer
+ * @spi: device with which data will be exchanged
+ * @xfers: An array of spi_transfers
+ * @num_xfers: Number of items in the xfer array
+ * Context: can sleep
+ *
+ * Does a synchronous SPI data transfer of the given spi_transfer array.
+ *
+ * For more specific semantics see spi_sync().
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static inline int
+spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
+ unsigned int num_xfers)
+{
+ struct spi_message msg;
+
+ spi_message_init_with_transfers(&msg, xfers, num_xfers);
+
+ return spi_sync(spi, &msg);
+}
+
+/**
* spi_write - SPI synchronous write
* @spi: device to which data will be written
* @buf: data buffer
@@ -998,11 +1039,8 @@ spi_write(struct spi_device *spi, const void *buf, size_t len)
.tx_buf = buf,
.len = len,
};
- struct spi_message m;
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
- return spi_sync(spi, &m);
+ return spi_sync_transfer(spi, &t, 1);
}
/**
@@ -1024,35 +1062,8 @@ spi_read(struct spi_device *spi, void *buf, size_t len)
.rx_buf = buf,
.len = len,
};
- struct spi_message m;
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
- return spi_sync(spi, &m);
-}
-/**
- * spi_sync_transfer - synchronous SPI data transfer
- * @spi: device with which data will be exchanged
- * @xfers: An array of spi_transfers
- * @num_xfers: Number of items in the xfer array
- * Context: can sleep
- *
- * Does a synchronous SPI data transfer of the given spi_transfer array.
- *
- * For more specific semantics see spi_sync().
- *
- * Return: Return: zero on success, else a negative error code.
- */
-static inline int
-spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
- unsigned int num_xfers)
-{
- struct spi_message msg;
-
- spi_message_init_with_transfers(&msg, xfers, num_xfers);
-
- return spi_sync(spi, &msg);
+ return spi_sync_transfer(spi, &t, 1);
}
/* this copies txbuf and rxbuf data; for small transfers only! */
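
A sketch of a write-then-read message using the reordered helpers, with the
new size limits checked up front; the buffer names and the -EMSGSIZE policy
are assumptions of this example:

	static int example_cmd_resp(struct spi_device *spi,
				    const void *cmd, size_t cmd_len,
				    void *resp, size_t resp_len)
	{
		struct spi_transfer xfers[2] = {
			{ .tx_buf = cmd,  .len = cmd_len  },
			{ .rx_buf = resp, .len = resp_len },
		};

		if (cmd_len + resp_len > spi_max_message_size(spi) ||
		    max(cmd_len, resp_len) > spi_max_transfer_size(spi))
			return -EMSGSIZE;

		return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
	}
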
diff --git a/include/linux/splice.h b/include/linux/splice.h
index da2751d3b93d..00a21166e268 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -72,6 +72,8 @@ extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
struct splice_desc *, splice_actor *);
extern ssize_t splice_to_pipe(struct pipe_inode_info *,
struct splice_pipe_desc *);
+extern ssize_t add_to_pipe(struct pipe_inode_info *,
+ struct pipe_buffer *);
extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
splice_direct_actor *);
@@ -83,4 +85,5 @@ extern void splice_shrink_spd(struct splice_pipe_desc *);
extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
+extern const struct pipe_buf_operations default_pipe_buf_ops;
#endif
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 4ccf184e971f..b1bc62ba20a2 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -131,6 +131,7 @@ struct rpc_authops {
struct rpc_auth * (*create)(struct rpc_auth_create_args *, struct rpc_clnt *);
void (*destroy)(struct rpc_auth *);
+ int (*hash_cred)(struct auth_cred *, unsigned int);
struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t);
int (*list_pseudoflavors)(rpc_authflavor_t *, int);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 5c02b0691587..85cc819676e8 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -125,6 +125,13 @@ struct rpc_create_args {
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
};
+struct rpc_add_xprt_test {
+ int (*add_xprt_test)(struct rpc_clnt *,
+ struct rpc_xprt *,
+ void *calldata);
+ void *data;
+};
+
/* Values for "flags" field */
#define RPC_CLNT_CREATE_HARDRTRY (1UL << 0)
#define RPC_CLNT_CREATE_AUTOBIND (1UL << 2)
@@ -198,6 +205,16 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *,
void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
unsigned long timeo);
+int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *,
+ struct rpc_xprt_switch *,
+ struct rpc_xprt *,
+ void *);
+
const char *rpc_proc_name(const struct rpc_task *task);
+
+void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
+void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
+bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
+ const struct sockaddr *sap);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 3b1ff38f0c37..cfda6adcf33c 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -41,10 +41,15 @@
#define _LINUX_SUNRPC_RPC_RDMA_H
#include <linux/types.h>
+#include <linux/bitops.h>
#define RPCRDMA_VERSION 1
#define rpcrdma_version cpu_to_be32(RPCRDMA_VERSION)
+enum {
+ RPCRDMA_V1_DEF_INLINE_SIZE = 1024,
+};
+
struct rpcrdma_segment {
__be32 rs_handle; /* Registered memory handle */
__be32 rs_length; /* Length of the chunk in bytes */
@@ -129,4 +134,38 @@ enum rpcrdma_proc {
#define rdma_done cpu_to_be32(RDMA_DONE)
#define rdma_error cpu_to_be32(RDMA_ERROR)
+/*
+ * Private extension to RPC-over-RDMA Version One.
+ * Message passed during RDMA-CM connection set-up.
+ *
+ * Add new fields at the end, and don't permute existing
+ * fields.
+ */
+struct rpcrdma_connect_private {
+ __be32 cp_magic;
+ u8 cp_version;
+ u8 cp_flags;
+ u8 cp_send_size;
+ u8 cp_recv_size;
+} __packed;
+
+#define rpcrdma_cmp_magic __cpu_to_be32(0xf6ab0e18)
+
+enum {
+ RPCRDMA_CMP_VERSION = 1,
+ RPCRDMA_CMP_F_SND_W_INV_OK = BIT(0),
+};
+
+static inline u8
+rpcrdma_encode_buffer_size(unsigned int size)
+{
+ return (size >> 10) - 1;
+}
+
+static inline unsigned int
+rpcrdma_decode_buffer_size(u8 val)
+{
+ return ((unsigned int)val + 1) << 10;
+}
+
#endif /* _LINUX_SUNRPC_RPC_RDMA_H */
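
A tiny sketch of the connect-private size encoding defined above: sizes are
carried on the wire as (size >> 10) - 1, so the round trip is lossless for
1KB multiples:

	u8 wire = rpcrdma_encode_buffer_size(RPCRDMA_V1_DEF_INLINE_SIZE); /* 0 */
	unsigned int size = rpcrdma_decode_buffer_size(wire);		   /* 1024 */
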
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 817af0b4385e..7ba040c797ec 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -239,8 +239,8 @@ struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *,
void *);
void rpc_wake_up_status(struct rpc_wait_queue *, int);
void rpc_delay(struct rpc_task *, unsigned long);
-void * rpc_malloc(struct rpc_task *, size_t);
-void rpc_free(void *);
+int rpc_malloc(struct rpc_task *);
+void rpc_free(struct rpc_task *);
int rpciod_up(void);
void rpciod_down(void);
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index d6917b896d3a..cc3ae16eac68 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -86,6 +86,7 @@ struct svc_rdma_op_ctxt {
unsigned long flags;
enum dma_data_direction direction;
int count;
+ unsigned int mapped_sges;
struct ib_sge sge[RPCSVC_MAXPAGES];
struct page *pages[RPCSVC_MAXPAGES];
};
@@ -136,6 +137,7 @@ struct svcxprt_rdma {
int sc_ord; /* RDMA read limit */
int sc_max_sge;
int sc_max_sge_rd; /* max sge for read target */
+ bool sc_snd_w_inv; /* OK to use Send With Invalidate */
atomic_t sc_sq_count; /* Number of SQ WR on queue */
unsigned int sc_sq_depth; /* Depth of SQ */
@@ -193,6 +195,14 @@ struct svcxprt_rdma {
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+/* Track DMA maps for this transport and context */
+static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
+ struct svc_rdma_op_ctxt *ctxt)
+{
+ ctxt->mapped_sges++;
+ atomic_inc(&rdma->sc_dma_used);
+}
+
/* svc_rdma_backchannel.c */
extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
struct rpcrdma_msg *rmsgp,
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 70c6b92e15a7..56c48c884a24 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -67,6 +67,18 @@ struct xdr_buf {
len; /* Length of XDR encoded message */
};
+static inline void
+xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
+{
+ buf->head[0].iov_base = start;
+ buf->head[0].iov_len = len;
+ buf->tail[0].iov_len = 0;
+ buf->page_len = 0;
+ buf->flags = 0;
+ buf->len = 0;
+ buf->buflen = len;
+}
+
/*
* pre-xdr'ed macros.
*/
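
A minimal sketch of the new xdr_buf_init() helper; the backing storage is an
illustrative local buffer:

	struct xdr_buf buf;
	char scratch[512];	/* assumed scratch memory */

	xdr_buf_init(&buf, scratch, sizeof(scratch));
	/* head[0] now covers scratch; page_len, tail and len are zeroed */
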
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index a16070dd03ee..a5da60b24d83 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -83,9 +83,11 @@ struct rpc_rqst {
void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
struct list_head rq_list;
- __u32 * rq_buffer; /* XDR encode buffer */
- size_t rq_callsize,
- rq_rcvsize;
+ void *rq_xprtdata; /* Per-xprt private data */
+ void *rq_buffer; /* Call XDR encode buffer */
+ size_t rq_callsize;
+ void *rq_rbuffer; /* Reply XDR decode buffer */
+ size_t rq_rcvsize;
size_t rq_xmit_bytes_sent; /* total bytes sent */
size_t rq_reply_bytes_recvd; /* total reply bytes */
/* received */
@@ -127,8 +129,8 @@ struct rpc_xprt_ops {
void (*rpcbind)(struct rpc_task *task);
void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
- void * (*buf_alloc)(struct rpc_task *task, size_t size);
- void (*buf_free)(void *buffer);
+ int (*buf_alloc)(struct rpc_task *task);
+ void (*buf_free)(struct rpc_task *task);
int (*send_request)(struct rpc_task *task);
void (*set_retrans_timeout)(struct rpc_task *task);
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index 5a9acffa41be..507418c1c69e 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -66,4 +66,6 @@ extern struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi);
extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi);
extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
+extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
+ const struct sockaddr *sap);
#endif
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 39267dc3486a..221b7a2e5406 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -53,8 +53,8 @@
#define RPCRDMA_MAX_SLOT_TABLE (256U)
#define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */
-#define RPCRDMA_DEF_INLINE (1024) /* default inline thresh */
-#define RPCRDMA_MAX_INLINE (3068) /* max inline thresh */
+#define RPCRDMA_DEF_INLINE (4096) /* default inline thresh */
+#define RPCRDMA_MAX_INLINE (65536) /* max inline thresh */
/* Memory registration strategies, by number.
* This is part of a kernel / user space API. Do not remove. */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 7693e39b14fe..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -245,6 +245,7 @@ static inline bool idle_should_freeze(void)
return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
}
+extern void __init pm_states_init(void);
extern void freeze_set_ops(const struct platform_freeze_ops *ops);
extern void freeze_wake(void);
@@ -279,6 +280,7 @@ static inline bool pm_resume_via_firmware(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
static inline bool idle_should_freeze(void) { return false; }
+static inline void __init pm_states_init(void) {}
static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
static inline void freeze_wake(void) {}
#endif /* !CONFIG_SUSPEND */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b17cc4830fa6..a56523cefb9b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -191,6 +191,11 @@ struct percpu_cluster {
unsigned int next; /* Likely next allocation offset */
};
+struct swap_cluster_list {
+ struct swap_cluster_info head;
+ struct swap_cluster_info tail;
+};
+
/*
* The in-memory structure used to track swap areas.
*/
@@ -203,8 +208,7 @@ struct swap_info_struct {
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
- struct swap_cluster_info free_cluster_head; /* free cluster list head */
- struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
+ struct swap_cluster_list free_clusters; /* free clusters list */
unsigned int lowest_bit; /* index of first free in swap_map */
unsigned int highest_bit; /* index of last free in swap_map */
unsigned int pages; /* total of usable pages of swap */
@@ -235,8 +239,7 @@ struct swap_info_struct {
* first.
*/
struct work_struct discard_work; /* discard worker */
- struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
- struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
+ struct swap_cluster_list discard_clusters; /* discard clusters list */
};
/* linux/mm/workingset.c */
@@ -257,6 +260,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
static inline void workingset_node_pages_dec(struct radix_tree_node *node)
{
+ VM_WARN_ON_ONCE(!workingset_node_pages(node));
node->count--;
}
@@ -272,6 +276,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
{
+ VM_WARN_ON_ONCE(!workingset_node_shadows(node));
node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
}
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index c6ffe8b0725c..aa17ccfc2f57 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -19,12 +19,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/fence.h>
-
-struct sync_file_cb {
- struct fence_cb cb;
- struct fence *fence;
- struct sync_file *sync_file;
-};
+#include <linux/fence-array.h>
/**
* struct sync_file - sync file to export to the userspace
@@ -32,10 +27,9 @@ struct sync_file_cb {
* @kref: reference count on fence.
* @name: name of sync_file. Useful for debugging
* @sync_file_list: membership in global file list
- * @num_fences: number of sync_pts in the fence
* @wq: wait queue for fence signaling
- * @status: 0: signaled, >0:active, <0: error
- * @cbs: sync_pts callback information
+ * @fence: fence with the fences in the sync_file
+ * @cb: fence callback information
*/
struct sync_file {
struct file *file;
@@ -44,14 +38,16 @@ struct sync_file {
#ifdef CONFIG_DEBUG_FS
struct list_head sync_file_list;
#endif
- int num_fences;
wait_queue_head_t wq;
- atomic_t status;
- struct sync_file_cb cbs[];
+ struct fence *fence;
+ struct fence_cb cb;
};
+#define POLL_ENABLED FENCE_FLAG_USER_BITS
+
struct sync_file *sync_file_create(struct fence *fence);
+struct fence *sync_file_get_fence(int fd);
#endif /* _LINUX_SYNC_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index d02239022bd0..0d7abb8b7315 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -898,4 +898,12 @@ asmlinkage long sys_copy_file_range(int fd_in, loff_t __user *off_in,
asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags);
+asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len,
+ unsigned long prot, int pkey);
+asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
+asmlinkage long sys_pkey_free(int pkey);
+//asmlinkage long sys_pkey_get(int pkey, unsigned long flags);
+//asmlinkage long sys_pkey_set(int pkey, unsigned long access_rights,
+// unsigned long flags);
+
#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index a4f7203a9017..adf4e51cf597 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -25,6 +25,7 @@
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
+#include <linux/uidgid.h>
#include <uapi/linux/sysctl.h>
/* For the /proc/sys support */
@@ -157,8 +158,10 @@ struct ctl_table_set {
struct ctl_table_root {
struct ctl_table_set default_set;
- struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
- struct nsproxy *namespaces);
+ struct ctl_table_set *(*lookup)(struct ctl_table_root *root);
+ void (*set_ownership)(struct ctl_table_header *head,
+ struct ctl_table *table,
+ kuid_t *uid, kgid_t *gid);
int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
};
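
A hedged sketch of a set_ownership hook; reporting root in init_user_ns is
illustrative, and a real implementation would derive the uid/gid from the
owning namespace:

	static void example_set_ownership(struct ctl_table_header *head,
					  struct ctl_table *table,
					  kuid_t *uid, kgid_t *gid)
	{
		*uid = make_kuid(&init_user_ns, 0);
		*gid = make_kgid(&init_user_ns, 0);
	}
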
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index dd8de82cf5b5..9fba9dd33544 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -5,6 +5,26 @@
#include <linux/blkdev.h>
/*
+ * A T10 PI-capable target device can be formatted with different
+ * protection schemes. Currently 0 through 3 are defined:
+ *
+ * Type 0 is regular (unprotected) I/O
+ *
+ * Type 1 defines the contents of the guard and reference tags
+ *
+ * Type 2 defines the contents of the guard and reference tags and
+ * uses 32-byte commands to seed the latter
+ *
+ * Type 3 defines the contents of the guard tag only
+ */
+enum t10_dif_type {
+ T10_PI_TYPE0_PROTECTION = 0x0,
+ T10_PI_TYPE1_PROTECTION = 0x1,
+ T10_PI_TYPE2_PROTECTION = 0x2,
+ T10_PI_TYPE3_PROTECTION = 0x3,
+};
+
+/*
* T10 Protection Information tuple.
*/
struct t10_pi_tuple {
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7be9b1242354..a17ae7b85218 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,6 +19,7 @@
#include <linux/skbuff.h>
+#include <linux/win_minmax.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -212,7 +213,8 @@ struct tcp_sock {
u8 reord; /* reordering detected */
} rack;
u16 advmss; /* Advertised MSS */
- u8 unused;
+ u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
+ unused:7;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
thin_dupack : 1,/* Fast retransmit on first dupack */
@@ -234,9 +236,7 @@ struct tcp_sock {
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 rttvar_us; /* smoothed mdev_max */
u32 rtt_seq; /* sequence number to update rttvar */
- struct rtt_meas {
- u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */
- } rtt_min[3];
+ struct minmax rtt_min;
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
@@ -268,6 +268,12 @@ struct tcp_sock {
* receiver in Recovery. */
u32 prr_out; /* Total number of pkts sent during Recovery. */
u32 delivered; /* Total data packets delivered incl. rexmits */
+ u32 lost; /* Total data packets lost incl. rexmits */
+ u32 app_limited; /* limited until "delivered" reaches this val */
+ struct skb_mstamp first_tx_mstamp; /* start of window send phase */
+ struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
+ u32 rate_delivered; /* saved rate sample: packets delivered */
+ u32 rate_interval_us; /* saved rate sample: time elapsed */
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
@@ -281,10 +287,9 @@ struct tcp_sock {
struct sk_buff* lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
- /* OOO segments go in this list. Note that socket lock must be held,
- * as we do not use sk_buff_head lock.
- */
- struct sk_buff_head out_of_order_queue;
+ /* OOO segments go in this rbtree. Socket lock must be held. */
+ struct rb_root out_of_order_queue;
+ struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
/* SACKs data, these 2 need to be together (see tcp_options_write) */
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index ee517bef0db0..511182a88e76 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -92,12 +92,24 @@ enum thermal_trend {
THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
};
+/* Thermal notification reason */
+enum thermal_notify_event {
+ THERMAL_EVENT_UNSPECIFIED, /* Unspecified event */
+ THERMAL_EVENT_TEMP_SAMPLE, /* New Temperature sample */
+ THERMAL_TRIP_VIOLATED, /* TRIP Point violation */
+ THERMAL_TRIP_CHANGED, /* TRIP Point temperature changed */
+ THERMAL_DEVICE_DOWN, /* Thermal device is down */
+ THERMAL_DEVICE_UP, /* Thermal device is up after a down event */
+ THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */
+};
+
struct thermal_zone_device_ops {
int (*bind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
int (*unbind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
int (*get_temp) (struct thermal_zone_device *, int *);
+ int (*set_trips) (struct thermal_zone_device *, int, int);
int (*get_mode) (struct thermal_zone_device *,
enum thermal_device_mode *);
int (*set_mode) (struct thermal_zone_device *,
@@ -168,6 +180,10 @@ struct thermal_attr {
* @last_temperature: previous temperature read
* @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION
* @passive: 1 if you've crossed a passive trip point, 0 otherwise.
+ * @prev_low_trip: the low temperature of the previously set trip window
+ * @prev_high_trip: the high temperature of the previously set trip window
* @forced_passive: If > 0, temperature at which to switch on all ACPI
* processor cooling devices. Currently only used by the
* step-wise governor.
@@ -182,6 +198,7 @@ struct thermal_attr {
* @lock: lock to protect thermal_instances list
* @node: node in thermal_tz_list (in thermal_core.c)
* @poll_queue: delayed work for polling
+ * @notify_event: Last notification event
*/
struct thermal_zone_device {
int id;
@@ -199,6 +216,8 @@ struct thermal_zone_device {
int last_temperature;
int emul_temperature;
int passive;
+ int prev_low_trip;
+ int prev_high_trip;
unsigned int forced_passive;
atomic_t need_update;
struct thermal_zone_device_ops *ops;
@@ -210,6 +229,7 @@ struct thermal_zone_device {
struct mutex lock;
struct list_head node;
struct delayed_work poll_queue;
+ enum thermal_notify_event notify_event;
};
/**
@@ -333,6 +353,9 @@ struct thermal_genl_event {
*
* Optional:
* @get_trend: a pointer to a function that reads the sensor temperature trend.
+ * @set_trips: a pointer to a function that sets a temperature window. When
+ * the temperature leaves this window, the driver must notify the thermal
+ * core via thermal_zone_device_update().
* @set_emul_temp: a pointer to a function that sets sensor emulated
* temperature.
* @set_trip_temp: a pointer to a function that sets the trip temperature on
@@ -340,7 +363,8 @@ struct thermal_genl_event {
*/
struct thermal_zone_of_device_ops {
int (*get_temp)(void *, int *);
- int (*get_trend)(void *, long *);
+ int (*get_trend)(void *, int, enum thermal_trend *);
+ int (*set_trips)(void *, int, int);
int (*set_emul_temp)(void *, int);
int (*set_trip_temp)(void *, int, int);
};
@@ -425,7 +449,9 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
unsigned int);
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
-void thermal_zone_device_update(struct thermal_zone_device *);
+void thermal_zone_device_update(struct thermal_zone_device *,
+ enum thermal_notify_event);
+void thermal_zone_set_trips(struct thermal_zone_device *);
struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
const struct thermal_cooling_device_ops *);
@@ -435,6 +461,8 @@ thermal_of_cooling_device_register(struct device_node *np, char *, void *,
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
+int thermal_zone_get_slope(struct thermal_zone_device *tz);
+int thermal_zone_get_offset(struct thermal_zone_device *tz);
int get_tz_trend(struct thermal_zone_device *, int);
struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
@@ -473,7 +501,10 @@ static inline int thermal_zone_unbind_cooling_device(
struct thermal_zone_device *tz, int trip,
struct thermal_cooling_device *cdev)
{ return -ENODEV; }
-static inline void thermal_zone_device_update(struct thermal_zone_device *tz)
+static inline void thermal_zone_device_update(struct thermal_zone_device *tz,
+ enum thermal_notify_event event)
+{ }
+static inline void thermal_zone_set_trips(struct thermal_zone_device *tz)
{ }
static inline struct thermal_cooling_device *
thermal_cooling_device_register(char *type, void *devdata,
@@ -492,6 +523,12 @@ static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
static inline int thermal_zone_get_temp(
struct thermal_zone_device *tz, int *temp)
{ return -ENODEV; }
+static inline int thermal_zone_get_slope(
+ struct thermal_zone_device *tz)
+{ return -ENODEV; }
+static inline int thermal_zone_get_offset(
+ struct thermal_zone_device *tz)
+{ return -ENODEV; }
static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
{ return -ENODEV; }
static inline struct thermal_instance *
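
A one-line sketch of the new update signature as a sensor interrupt handler
might use it, assuming tz is the zone the driver registered:

	thermal_zone_device_update(tz, THERMAL_EVENT_TEMP_SAMPLE);
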
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index cbd8990e2e77..45f004e9cc59 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -13,6 +13,21 @@
struct timespec;
struct compat_timespec;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+struct thread_info {
+ unsigned long flags; /* low level flags */
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = 0, \
+}
+#endif
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+#define current_thread_info() ((struct thread_info *)current)
+#endif
+
/*
* System call restart block.
*/
@@ -118,10 +133,11 @@ static inline int arch_within_stack_frames(const void * const stack,
extern void __check_object_size(const void *ptr, unsigned long n,
bool to_user);
-static inline void check_object_size(const void *ptr, unsigned long n,
- bool to_user)
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
{
- __check_object_size(ptr, n, to_user);
+ if (!__builtin_constant_p(n))
+ __check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 7e5d2fa9ac46..980c71b3001a 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -5,6 +5,7 @@
#include <linux/math64.h>
typedef __s64 time64_t;
+typedef __u64 timeu64_t;
/*
* This wants to go into uapi/linux/time.h once we agreed about the
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 816b7543f81b..09168c52ab64 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_TIMEKEEPING_H
#define _LINUX_TIMEKEEPING_H
-#include <asm-generic/errno-base.h>
+#include <linux/errno.h>
/* Included from linux/ktime.h */
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 6685a73736a2..a45702eb3e7b 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -43,7 +43,7 @@
#define TORTURE_FLAG "-torture:"
#define TOROUT_STRING(s) \
- pr_alert("%s" TORTURE_FLAG s "\n", torture_type)
+ pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s)
#define VERBOSE_TOROUT_STRING(s) \
do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
#define VERBOSE_TOROUT_ERRSTRING(s) \
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index d3a2bb712af3..650f3dd6b800 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -103,31 +103,42 @@ static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
#endif
}
-static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_begin(&syncp->seq);
#else
-#if BITS_PER_LONG==32
- preempt_disable();
-#endif
return 0;
#endif
}
-static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+ preempt_disable();
+#endif
+ return __u64_stats_fetch_begin(syncp);
+}
+
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_retry(&syncp->seq, start);
#else
-#if BITS_PER_LONG==32
- preempt_enable();
-#endif
return false;
#endif
}
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+ preempt_enable();
+#endif
+ return __u64_stats_fetch_retry(syncp, start);
+}
+
/*
* In case irq handlers can update u64 counters, readers can use following helpers
* - SMP 32bit arches use seqcount protection, irq safe.
@@ -136,27 +147,19 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
*/
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- return read_seqcount_begin(&syncp->seq);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
local_irq_disable();
#endif
- return 0;
-#endif
+ return __u64_stats_fetch_begin(syncp);
}
static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
- unsigned int start)
+ unsigned int start)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- return read_seqcount_retry(&syncp->seq, start);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
local_irq_enable();
#endif
- return false;
-#endif
+ return __u64_stats_fetch_retry(syncp, start);
}
#endif /* _LINUX_U64_STATS_SYNC_H */
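
The reader side is unchanged for callers after the refactor; a sketch,
assuming a stats structure that pairs a u64_stats_sync with two counters:

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		packets = stats->packets;
		bytes = stats->bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
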
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 1b5d1cd796e2..6e22b544d039 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -13,6 +13,7 @@
#include <uapi/linux/uio.h>
struct page;
+struct pipe_inode_info;
struct kvec {
void *iov_base; /* and that should *never* hold a userland pointer */
@@ -23,6 +24,7 @@ enum {
ITER_IOVEC = 0,
ITER_KVEC = 2,
ITER_BVEC = 4,
+ ITER_PIPE = 8,
};
struct iov_iter {
@@ -33,8 +35,12 @@ struct iov_iter {
const struct iovec *iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
+ struct pipe_inode_info *pipe;
+ };
+ union {
+ unsigned long nr_segs;
+ int idx;
};
- unsigned long nr_segs;
};
/*
@@ -64,7 +70,7 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
}
#define iov_for_each(iov, iter, start) \
- if (!((start).type & ITER_BVEC)) \
+ if (!((start).type & (ITER_BVEC | ITER_PIPE))) \
for (iter = (start); \
(iter).count && \
((iov = iov_iter_iovec(&(iter))), 1); \
@@ -76,7 +82,6 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
@@ -94,6 +99,8 @@ void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
+void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
+ size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
@@ -102,14 +109,14 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
-static inline size_t iov_iter_count(struct iov_iter *i)
+static inline size_t iov_iter_count(const struct iov_iter *i)
{
return i->count;
}
-static inline bool iter_is_iovec(struct iov_iter *i)
+static inline bool iter_is_iovec(const struct iov_iter *i)
{
- return !(i->type & (ITER_BVEC | ITER_KVEC));
+ return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
}
/*
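
A sketch of describing a pipe as an iov_iter destination, as a splice-style
read path might; pipe and len are assumed to come from the caller:

	struct iov_iter to;

	iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len);
	/* the iterator can now be passed to ->read_iter() to fill the pipe */
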
diff --git a/include/linux/ulpi/driver.h b/include/linux/ulpi/driver.h
index 388f6e08b9d4..a7af21a55248 100644
--- a/include/linux/ulpi/driver.h
+++ b/include/linux/ulpi/driver.h
@@ -15,7 +15,7 @@ struct ulpi_ops;
*/
struct ulpi {
struct ulpi_device_id id;
- struct ulpi_ops *ops;
+ const struct ulpi_ops *ops;
struct device dev;
};
@@ -47,7 +47,11 @@ struct ulpi_driver {
#define to_ulpi_driver(d) container_of(d, struct ulpi_driver, driver)
-int ulpi_register_driver(struct ulpi_driver *drv);
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define ulpi_register_driver(drv) __ulpi_register_driver(drv, THIS_MODULE)
+int __ulpi_register_driver(struct ulpi_driver *drv, struct module *module);
void ulpi_unregister_driver(struct ulpi_driver *drv);
#define module_ulpi_driver(__ulpi_driver) \
diff --git a/include/linux/ulpi/interface.h b/include/linux/ulpi/interface.h
index 4de8ab491038..a2011a919eb6 100644
--- a/include/linux/ulpi/interface.h
+++ b/include/linux/ulpi/interface.h
@@ -4,20 +4,19 @@
#include <linux/types.h>
struct ulpi;
+struct device;
/**
* struct ulpi_ops - ULPI register access
- * @dev: the interface provider
* @read: read operation for ULPI register access
* @write: write operation for ULPI register access
*/
struct ulpi_ops {
- struct device *dev;
- int (*read)(struct ulpi_ops *ops, u8 addr);
- int (*write)(struct ulpi_ops *ops, u8 addr, u8 val);
+ int (*read)(struct device *dev, u8 addr);
+ int (*write)(struct device *dev, u8 addr, u8 val);
};
-struct ulpi *ulpi_register_interface(struct device *, struct ulpi_ops *);
+struct ulpi *ulpi_register_interface(struct device *, const struct ulpi_ops *);
void ulpi_unregister_interface(struct ulpi *);
#endif /* __LINUX_ULPI_INTERFACE_H */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 2b81b24eb5aa..4616a49a1c2e 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -220,7 +220,8 @@ struct usb_function {
int (*setup)(struct usb_function *,
const struct usb_ctrlrequest *);
bool (*req_match)(struct usb_function *,
- const struct usb_ctrlrequest *);
+ const struct usb_ctrlrequest *,
+ bool config0);
void (*suspend)(struct usb_function *);
void (*resume)(struct usb_function *);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 612dbdfa388e..8e81f9eb95e4 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -346,6 +346,8 @@ struct usb_gadget_ops {
* or B-Peripheral wants to take host role.
* @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
* MaxPacketSize.
+ * @quirk_avoids_skb_reserve: udc/platform wants to avoid skb_reserve() in
+ * u_ether.c to improve performance.
* @is_selfpowered: if the gadget is self-powered.
* @deactivated: True if gadget is deactivated - in deactivated state it cannot
* be connected.
@@ -398,6 +400,7 @@ struct usb_gadget {
unsigned quirk_altset_not_supp:1;
unsigned quirk_stall_not_supp:1;
unsigned quirk_zlp_not_supp:1;
+ unsigned quirk_avoids_skb_reserve:1;
unsigned is_selfpowered:1;
unsigned deactivated:1;
unsigned connected:1;
@@ -418,8 +421,20 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
/**
+ * usb_ep_align - returns @len aligned to ep's maxpacketsize.
+ * @ep: the endpoint whose maxpacketsize is used to align @len
+ * @len: buffer length to align to @ep's maxpacketsize
+ *
+ * This helper is used to align buffer's size to an ep's maxpacketsize.
+ */
+static inline size_t usb_ep_align(struct usb_ep *ep, size_t len)
+{
+ return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize));
+}
+
+/**
* usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
- * requires quirk_ep_out_aligned_size, otherwise reguens len.
+ * requires quirk_ep_out_aligned_size, otherwise returns len.
* @g: controller to check for quirk
* @ep: the endpoint whose maxpacketsize is used to align @len
* @len: buffer size's length to align to @ep's maxpacketsize
@@ -430,8 +445,7 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
static inline size_t
usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
{
- return !g->quirk_ep_out_aligned_size ? len :
- round_up(len, (size_t)ep->desc->wMaxPacketSize);
+ return g->quirk_ep_out_aligned_size ? usb_ep_align(ep, len) : len;
}
/**
@@ -463,6 +477,16 @@ static inline int gadget_is_zlp_supported(struct usb_gadget *g)
}
/**
+ * gadget_avoids_skb_reserve - return true iff the hardware would like to avoid
+ * skb_reserve to improve performance.
+ * @g: controller to check for quirk
+ */
+static inline int gadget_avoids_skb_reserve(struct usb_gadget *g)
+{
+ return g->quirk_avoids_skb_reserve;
+}
+
+/**
* gadget_is_dualspeed - return true iff the hardware handles high speed
* @g: controller that might support both high and full speeds
*/
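
A sketch of sizing an OUT request buffer with the new alignment helpers; g,
ep, req_len and the GFP flags are assumptions of the caller:

	size_t alloc_len = usb_ep_align_maybe(g, ep, req_len);
	void *buf = kmalloc(alloc_len, GFP_ATOMIC);	/* needs <linux/slab.h> */
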
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 245f57dbbb61..0aae1b2ee931 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -81,6 +81,8 @@
/* Sets max_sectors to 240 */ \
US_FLAG(NO_REPORT_LUNS, 0x10000000) \
/* Cannot handle REPORT_LUNS */ \
+ US_FLAG(ALWAYS_SYNC, 0x20000000) \
+ /* lies about caching, so always sync */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 9217169c64cb..eb209d4523f5 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -22,6 +22,19 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
+struct ucounts;
+
+enum ucount_type {
+ UCOUNT_USER_NAMESPACES,
+ UCOUNT_PID_NAMESPACES,
+ UCOUNT_UTS_NAMESPACES,
+ UCOUNT_IPC_NAMESPACES,
+ UCOUNT_NET_NAMESPACES,
+ UCOUNT_MNT_NAMESPACES,
+ UCOUNT_CGROUP_NAMESPACES,
+ UCOUNT_COUNTS,
+};
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
@@ -39,10 +52,30 @@ struct user_namespace {
struct key *persistent_keyring_register;
struct rw_semaphore persistent_keyring_register_sem;
#endif
+ struct work_struct work;
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_set set;
+ struct ctl_table_header *sysctls;
+#endif
+ struct ucounts *ucounts;
+ int ucount_max[UCOUNT_COUNTS];
+};
+
+struct ucounts {
+ struct hlist_node node;
+ struct user_namespace *ns;
+ kuid_t uid;
+ atomic_t count;
+ atomic_t ucount[UCOUNT_COUNTS];
};
extern struct user_namespace init_user_ns;
+bool setup_userns_sysctls(struct user_namespace *ns);
+void retire_userns_sysctls(struct user_namespace *ns);
+struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
+void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
+
#ifdef CONFIG_USER_NS
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -54,12 +87,12 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
extern int create_user_ns(struct cred *new);
extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
-extern void free_user_ns(struct user_namespace *ns);
+extern void __put_user_ns(struct user_namespace *ns);
static inline void put_user_ns(struct user_namespace *ns)
{
if (ns && atomic_dec_and_test(&ns->count))
- free_user_ns(ns);
+ __put_user_ns(ns);
}
struct seq_operations;
@@ -73,6 +106,8 @@ extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t,
extern int proc_setgroups_show(struct seq_file *m, void *v);
extern bool userns_may_setgroups(const struct user_namespace *ns);
extern bool current_in_userns(const struct user_namespace *target_ns);
+
+struct ns_common *ns_get_owner(struct ns_common *ns);
#else
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -106,6 +141,11 @@ static inline bool current_in_userns(const struct user_namespace *target_ns)
{
return true;
}
+
+static inline struct ns_common *ns_get_owner(struct ns_common *ns)
+{
+ return ERR_PTR(-EPERM);
+}
#endif
#endif /* _LINUX_USER_H */
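
A hedged sketch of charging a namespace against the new ucount limits;
user_ns, the euid and the -ENOSPC policy are illustrative:

	struct ucounts *ucounts;

	ucounts = inc_ucount(user_ns, current_euid(), UCOUNT_UTS_NAMESPACES);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);	/* per-user limit hit */
	/* ... use the namespace ... */
	dec_ucount(ucounts, UCOUNT_UTS_NAMESPACES);
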
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 5093f58ae192..60f0bb83b313 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -24,6 +24,7 @@ struct uts_namespace {
struct kref kref;
struct new_utsname name;
struct user_namespace *user_ns;
+ struct ucounts *ucounts;
struct ns_common ns;
};
extern struct uts_namespace init_uts_ns;
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 8c3b412d84df..ee162e3e879b 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -73,34 +73,6 @@ static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes) { };
#endif
-/**
- * vga_get - acquire & locks VGA resources
- *
- * @pdev: pci device of the VGA card or NULL for the system default
- * @rsrc: bit mask of resources to acquire and lock
- * @interruptible: blocking should be interruptible by signals ?
- *
- * This function acquires VGA resources for the given
- * card and mark those resources locked. If the resource requested
- * are "normal" (and not legacy) resources, the arbiter will first check
- * whether the card is doing legacy decoding for that type of resource. If
- * yes, the lock is "converted" into a legacy resource lock.
- * The arbiter will first look for all VGA cards that might conflict
- * and disable their IOs and/or Memory access, including VGA forwarding
- * on P2P bridges if necessary, so that the requested resources can
- * be used. Then, the card is marked as locking these resources and
- * the IO and/or Memory accesse are enabled on the card (including
- * VGA forwarding on parent P2P bridges if any).
- * This function will block if some conflicting card is already locking
- * one of the required resources (or any resource on a different bus
- * segment, since P2P bridges don't differenciate VGA memory and IO
- * afaik). You can indicate whether this blocking should be interruptible
- * by a signal (for userland interface) or not.
- * Must not be called at interrupt time or in atomic context.
- * If the card already owns the resources, the function succeeds.
- * Nested calls are supported (a per-resource counter is maintained)
- */
-
#if defined(CONFIG_VGA_ARB)
extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
#else
@@ -108,11 +80,14 @@ static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interrupt
#endif
/**
- * vga_get_interruptible
+ * vga_get_interruptible
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
*
- * Shortcut to vga_get
+ * Shortcut to vga_get with interruptible set to true.
+ *
+ * On success, release the VGA resource again with vga_put().
*/
-
static inline int vga_get_interruptible(struct pci_dev *pdev,
unsigned int rsrc)
{
@@ -120,47 +95,26 @@ static inline int vga_get_interruptible(struct pci_dev *pdev,
}
/**
- * vga_get_uninterruptible
+ * vga_get_uninterruptible - shortcut to vga_get()
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
*
- * Shortcut to vga_get
+ * Shortcut to vga_get with interruptible set to false.
+ *
+ * On success, release the VGA resource again with vga_put().
*/
-
static inline int vga_get_uninterruptible(struct pci_dev *pdev,
unsigned int rsrc)
{
return vga_get(pdev, rsrc, 0);
}
-/**
- * vga_tryget - try to acquire & lock legacy VGA resources
- *
- * @pdev: pci devivce of VGA card or NULL for system default
- * @rsrc: bit mask of resources to acquire and lock
- *
- * This function performs the same operation as vga_get(), but
- * will return an error (-EBUSY) instead of blocking if the resources
- * are already locked by another card. It can be called in any context
- */
-
#if defined(CONFIG_VGA_ARB)
extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
#else
static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
#endif
-/**
- * vga_put - release lock on legacy VGA resources
- *
- * @pdev: pci device of VGA card or NULL for system default
- * @rsrc: but mask of resource to release
- *
- * This function releases resources previously locked by vga_get()
- * or vga_tryget(). The resources aren't disabled right away, so
- * that a subsequence vga_get() on the same card will succeed
- * immediately. Resources have a counter, so locks are only
- * released if the counter reaches 0.
- */
-
#if defined(CONFIG_VGA_ARB)
extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
#else
@@ -168,25 +122,6 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
#endif
-/**
- * vga_default_device
- *
- * This can be defined by the platform. The default implementation
- * is rather dumb and will probably only work properly on single
- * vga card setups and/or x86 platforms.
- *
- * If your VGA default device is not PCI, you'll have to return
- * NULL here. In this case, I assume it will not conflict with
- * any PCI card. If this is not true, I'll have to define two archs
- * hooks for enabling/disabling the VGA default device if that is
- * possible. This may be a problem with real _ISA_ VGA cards, in
- * addition to a PCI one. I don't know at this point how to deal
- * with that card. Can theirs IOs be disabled at all ? If not, then
- * I suppose it's a matter of having the proper arch hook telling
- * us about it, so we basically never allow anybody to succeed a
- * vga_get()...
- */
-
#ifdef CONFIG_VGA_ARB
extern struct pci_dev *vga_default_device(void);
extern void vga_set_default_device(struct pci_dev *pdev);
@@ -195,14 +130,11 @@ static inline struct pci_dev *vga_default_device(void) { return NULL; };
static inline void vga_set_default_device(struct pci_dev *pdev) { };
#endif
-/**
- * vga_conflicts
- *
- * Architectures should define this if they have several
- * independent PCI domains that can afford concurrent VGA
- * decoding
+/*
+ * Architectures should define this if they have several
+ * independent PCI domains that can afford concurrent VGA
+ * decoding
*/
-
#ifndef __ARCH_HAS_VGA_CONFLICT
static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
{
@@ -210,34 +142,6 @@ static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
}
#endif
-/**
- * vga_client_register
- *
- * @pdev: pci device of the VGA client
- * @cookie: client cookie to be used in callbacks
- * @irq_set_state: irq state change callback
- * @set_vga_decode: vga decode change callback
- *
- * return value: 0 on success, -1 on failure
- * Register a client with the VGA arbitration logic
- *
- * Clients have two callback mechanisms they can use.
- * irq enable/disable callback -
- * If a client can't disable its GPUs VGA resources, then we
- * need to be able to ask it to turn off its irqs when we
- * turn off its mem and io decoding.
- * set_vga_decode
- * If a client can disable its GPU VGA resource, it will
- * get a callback from this to set the encode/decode state
- *
- * Rationale: we cannot disable VGA decode resources unconditionally
- * some single GPU laptops seem to require ACPI or BIOS access to the
- * VGA registers to control things like backlights etc.
- * Hopefully newer multi-GPU laptops do something saner, and desktops
- * won't have any special ACPI for this.
- * They driver will get a callback when VGA arbitration is first used
- * by userspace since we some older X servers have issues.
- */
#if defined(CONFIG_VGA_ARB)
int vga_client_register(struct pci_dev *pdev, void *cookie,
void (*irq_set_state)(void *cookie, bool state),
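For illustration, a minimal sketch of the acquire/release pairing described in the
vgaarb comments above; the example_probe() function, its device pointer and the
error handling are hypothetical and not part of this patch:

	#include <linux/pci.h>
	#include <linux/vgaarb.h>

	static int example_probe(struct pci_dev *pdev)
	{
		int ret;

		/* block (interruptibly) until the legacy VGA ranges are ours */
		ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO |
						  VGA_RSRC_LEGACY_MEM);
		if (ret)
			return ret;	/* e.g. interrupted by a signal */

		/* ... poke legacy VGA registers here ... */

		/* every successful vga_get*() must be balanced by vga_put() */
		vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
		return 0;
	}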
diff --git a/include/linux/vme.h b/include/linux/vme.h
index 71e4a6dec5ac..ea6095deba20 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -166,7 +166,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *);
int vme_lm_count(struct vme_resource *);
int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
-int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
+int vme_lm_attach(struct vme_resource *, int, void (*callback)(void *), void *);
int vme_lm_detach(struct vme_resource *, int);
void vme_lm_free(struct vme_resource *);
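The location-monitor callback now receives an opaque pointer instead of a plain
monitor index. A hedged sketch of a caller adapting to the new prototype; the
example_lm_callback() name, struct example_dev, lm and dev are hypothetical:

	static void example_lm_callback(void *data)
	{
		struct example_dev *dev = data;	/* driver context passed at attach time */

		/* react to the location-monitor hit using dev */
	}

	/* attach to location monitor slot 0, passing the driver context */
	err = vme_lm_attach(lm, 0, example_lm_callback, dev);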
diff --git a/include/linux/wait.h b/include/linux/wait.h
index c3ff74d764fa..2408e8d5c05c 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -248,6 +248,8 @@ wait_queue_head_t *bit_waitqueue(void *, int);
(!__builtin_constant_p(state) || \
state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
+extern void init_wait_entry(wait_queue_t *__wait, int flags);
+
/*
* The below macro ___wait_event() has an explicit shadow of the __ret
* variable when used from the wait_event_*() macros.
@@ -266,12 +268,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
wait_queue_t __wait; \
long __ret = ret; /* explicit shadow */ \
\
- INIT_LIST_HEAD(&__wait.task_list); \
- if (exclusive) \
- __wait.flags = WQ_FLAG_EXCLUSIVE; \
- else \
- __wait.flags = 0; \
- \
+ init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
for (;;) { \
long __int = prepare_to_wait_event(&wq, &__wait, state);\
\
@@ -280,12 +277,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
\
if (___wait_is_interruptible(state) && __int) { \
__ret = __int; \
- if (exclusive) { \
- abort_exclusive_wait(&wq, &__wait, \
- state, NULL); \
- goto __out; \
- } \
- break; \
+ goto __out; \
} \
\
cmd; \
@@ -989,7 +981,6 @@ void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
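Purely illustrative, showing what the new helper replaces in the ___wait_event()
macro above; the local `exclusive` variable stands in for the macro parameter:

	wait_queue_t wait;
	int exclusive = 1;	/* stands in for the ___wait_event() parameter */

	/* previously open-coded in ___wait_event(): */
	INIT_LIST_HEAD(&wait.task_list);
	wait.flags = exclusive ? WQ_FLAG_EXCLUSIVE : 0;

	/* now a single call: */
	init_wait_entry(&wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0);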
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 7047bc7f8106..35a4d8185b51 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -19,6 +19,7 @@
struct watchdog_ops;
struct watchdog_device;
struct watchdog_core_data;
+struct watchdog_governor;
/** struct watchdog_ops - The watchdog-devices operations
*
@@ -28,6 +29,7 @@ struct watchdog_core_data;
* @ping: The routine that sends a keepalive ping to the watchdog device.
* @status: The routine that shows the status of the watchdog device.
* @set_timeout:The routine for setting the watchdog devices timeout value (in seconds).
+ * @set_pretimeout:The routine for setting the watchdog devices pretimeout.
* @get_timeleft:The routine that gets the time left before a reset (in seconds).
* @restart: The routine for restarting the machine.
* @ioctl: The routines that handles extra ioctl calls.
@@ -46,6 +48,7 @@ struct watchdog_ops {
int (*ping)(struct watchdog_device *);
unsigned int (*status)(struct watchdog_device *);
int (*set_timeout)(struct watchdog_device *, unsigned int);
+ int (*set_pretimeout)(struct watchdog_device *, unsigned int);
unsigned int (*get_timeleft)(struct watchdog_device *);
int (*restart)(struct watchdog_device *, unsigned long, void *);
long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
@@ -59,8 +62,10 @@ struct watchdog_ops {
* watchdog device.
* @info: Pointer to a watchdog_info structure.
* @ops: Pointer to the list of watchdog operations.
+ * @gov: Pointer to watchdog pretimeout governor.
* @bootstatus: Status of the watchdog device at boot.
* @timeout: The watchdog devices timeout value (in seconds).
+ * @pretimeout: The watchdog devices pre_timeout value.
* @min_timeout:The watchdog devices minimum timeout value (in seconds).
* @max_timeout:The watchdog devices maximum timeout value (in seconds)
* as configurable from user space. Only relevant if
@@ -94,8 +99,10 @@ struct watchdog_device {
const struct attribute_group **groups;
const struct watchdog_info *info;
const struct watchdog_ops *ops;
+ const struct watchdog_governor *gov;
unsigned int bootstatus;
unsigned int timeout;
+ unsigned int pretimeout;
unsigned int min_timeout;
unsigned int max_timeout;
unsigned int min_hw_heartbeat_ms;
@@ -163,6 +170,13 @@ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigne
t > wdd->max_timeout);
}
+/* Use the following function to check if a pretimeout value is invalid */
+static inline bool watchdog_pretimeout_invalid(struct watchdog_device *wdd,
+ unsigned int t)
+{
+ return t && wdd->timeout && t >= wdd->timeout;
+}
+
/* Use the following functions to manipulate watchdog driver specific data */
static inline void watchdog_set_drvdata(struct watchdog_device *wdd, void *data)
{
@@ -174,6 +188,16 @@ static inline void *watchdog_get_drvdata(struct watchdog_device *wdd)
return wdd->driver_data;
}
+/* Use the following functions to report watchdog pretimeout event */
+#if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)
+void watchdog_notify_pretimeout(struct watchdog_device *wdd);
+#else
+static inline void watchdog_notify_pretimeout(struct watchdog_device *wdd)
+{
+ pr_alert("watchdog%d: pretimeout event\n", wdd->id);
+}
+#endif
+
/* drivers/watchdog/watchdog_core.c */
void watchdog_set_restart_priority(struct watchdog_device *wdd, int priority);
extern int watchdog_init_timeout(struct watchdog_device *wdd,
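A hedged sketch of how a driver might wire up the new pretimeout support; the
example_wdt_* names and the interrupt-driven hardware are hypothetical, only
the watchdog_device fields and helpers come from this patch:

	#include <linux/interrupt.h>
	#include <linux/watchdog.h>

	static int example_wdt_set_pretimeout(struct watchdog_device *wdd,
					      unsigned int timeout)
	{
		/* value assumed already checked via watchdog_pretimeout_invalid() */
		wdd->pretimeout = timeout;
		/* program the hardware pre-timeout interrupt here */
		return 0;
	}

	static irqreturn_t example_wdt_pretimeout_irq(int irq, void *data)
	{
		struct watchdog_device *wdd = data;

		/* hand the event to the governor (or the pr_alert() fallback above) */
		watchdog_notify_pretimeout(wdd);
		return IRQ_HANDLED;
	}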
diff --git a/include/linux/win_minmax.h b/include/linux/win_minmax.h
new file mode 100644
index 000000000000..56569604278f
--- /dev/null
+++ b/include/linux/win_minmax.h
@@ -0,0 +1,37 @@
+/**
+ * lib/minmax.c: windowed min/max tracker by Kathleen Nichols.
+ *
+ */
+#ifndef MINMAX_H
+#define MINMAX_H
+
+#include <linux/types.h>
+
+/* A single data point for our parameterized min-max tracker */
+struct minmax_sample {
+ u32 t; /* time measurement was taken */
+ u32 v; /* value measured */
+};
+
+/* State for the parameterized min-max tracker */
+struct minmax {
+ struct minmax_sample s[3];
+};
+
+static inline u32 minmax_get(const struct minmax *m)
+{
+ return m->s[0].v;
+}
+
+static inline u32 minmax_reset(struct minmax *m, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ m->s[2] = m->s[1] = m->s[0] = val;
+ return m->s[0].v;
+}
+
+u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
+u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
+
+#endif
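A small usage sketch of the tracker added above; the RTT sample, the u32 time
base and the 10-second window are illustrative choices, not part of this patch:

	#include <linux/jiffies.h>
	#include <linux/win_minmax.h>

	static u32 example_track_min_rtt(struct minmax *m, u32 sample_rtt_us)
	{
		u32 now = jiffies;	/* any monotonically increasing u32 works */

		/* track the lowest value seen over a sliding 10 second window */
		minmax_running_min(m, 10 * HZ, now, sample_rtt_us);

		return minmax_get(m);	/* current windowed minimum */
	}

On the first measurement the caller would seed all three samples with
minmax_reset(m, now, first_sample) before feeding the running tracker.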
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 26cc1df280d6..fc6e22186405 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -442,6 +442,7 @@ extern int schedule_on_each_cpu(work_func_t func);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern bool flush_work(struct work_struct *work);
+extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fc1e16c25a29..797100e10010 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -319,7 +319,6 @@ void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
-void throttle_vm_writeout(gfp_t gfp_mask);
bool node_dirty_ok(struct pglist_data *pgdat);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 94079bab9243..e77605a0c8da 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -46,17 +46,16 @@ struct xattr {
};
ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
+ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t);
ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
+int __vfs_removexattr(struct dentry *, const char *);
int vfs_removexattr(struct dentry *, const char *);
-ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
-int generic_setxattr(struct dentry *dentry, struct inode *inode,
- const char *name, const void *value, size_t size, int flags);
-int generic_removexattr(struct dentry *dentry, const char *name);
ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
char **xattr_value, size_t size, gfp_t flags);
diff --git a/include/media/cec.h b/include/media/cec.h
index dc7854b855f3..fdb5d600e4bb 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -57,8 +57,8 @@ struct cec_devnode {
int minor;
bool registered;
bool unregistered;
- struct mutex fhs_lock;
struct list_head fhs;
+ struct mutex lock;
};
struct cec_adapter;
diff --git a/include/media/drv-intf/sh_mobile_ceu.h b/include/media/drv-intf/sh_mobile_ceu.h
index 7f57056c22ba..2f43f7d9e28d 100644
--- a/include/media/drv-intf/sh_mobile_ceu.h
+++ b/include/media/drv-intf/sh_mobile_ceu.h
@@ -21,7 +21,6 @@ struct sh_mobile_ceu_info {
unsigned long flags;
int max_width;
int max_height;
- struct sh_mobile_ceu_companion *csi2;
struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
	unsigned int *asd_sizes;	/* 0-terminated array of asd group sizes */
};
diff --git a/include/media/drv-intf/sh_mobile_csi2.h b/include/media/drv-intf/sh_mobile_csi2.h
deleted file mode 100644
index 14030db51f13..000000000000
--- a/include/media/drv-intf/sh_mobile_csi2.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Driver header for the SH-Mobile MIPI CSI-2 unit
- *
- * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SH_MIPI_CSI
-#define SH_MIPI_CSI
-
-#include <linux/list.h>
-
-enum sh_csi2_phy {
- SH_CSI2_PHY_MAIN,
- SH_CSI2_PHY_SUB,
-};
-
-enum sh_csi2_type {
- SH_CSI2C,
- SH_CSI2I,
-};
-
-#define SH_CSI2_CRC (1 << 0)
-#define SH_CSI2_ECC (1 << 1)
-
-struct platform_device;
-
-struct sh_csi2_client_config {
- enum sh_csi2_phy phy;
- unsigned char lanes; /* bitmask[3:0] */
- unsigned char channel; /* 0..3 */
- struct platform_device *pdev; /* client platform device */
- const char *name; /* async matching: client name */
-};
-
-struct v4l2_device;
-
-struct sh_csi2_pdata {
- enum sh_csi2_type type;
- unsigned int flags;
- struct sh_csi2_client_config *clients;
- int num_clients;
-};
-
-#endif
diff --git a/include/media/i2c/smiapp.h b/include/media/i2c/smiapp.h
index 029142ddb95c..635007e7441a 100644
--- a/include/media/i2c/smiapp.h
+++ b/include/media/i2c/smiapp.h
@@ -36,8 +36,6 @@
#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_STROBE 1
#define SMIAPP_CSI_SIGNALLING_MODE_CSI2 2
-#define SMIAPP_NO_XSHUTDOWN -1
-
/*
* Sometimes due to board layout considerations the camera module can be
* mounted rotated. The typical rotation used is 180 degrees which can be
@@ -57,7 +55,7 @@ struct smiapp_flash_strobe_parms {
u8 trigger;
};
-struct smiapp_platform_data {
+struct smiapp_hwconfig {
/*
* Change the cci address if i2c_addr_alt is set.
* Both default and alternate cci addr need to be present
@@ -75,9 +73,6 @@ struct smiapp_platform_data {
enum smiapp_module_board_orient module_board_orient;
struct smiapp_flash_strobe_parms *strobe_setup;
-
- int (*set_xclk)(struct v4l2_subdev *sd, int hz);
- int32_t xshutdown; /* gpio or SMIAPP_NO_XSHUTDOWN */
};
#endif /* __SMIAPP_H_ */
diff --git a/include/media/media-device.h b/include/media/media-device.h
index 28195242386c..ef93e21335df 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -49,11 +49,21 @@ struct media_entity_notify {
};
/**
+ * struct media_device_ops - Media device operations
+ * @link_notify: Link state change notification callback. This callback is
+ * called with the graph_mutex held.
+ */
+struct media_device_ops {
+ int (*link_notify)(struct media_link *link, u32 flags,
+ unsigned int notification);
+};
+
+/**
* struct media_device - Media device
* @dev: Parent device
* @devnode: Media device node
* @driver_name: Optional device driver name. If not set, calls to
- * %MEDIA_IOC_DEVICE_INFO will return dev->driver->name.
+ * %MEDIA_IOC_DEVICE_INFO will return ``dev->driver->name``.
* This is needed for USB drivers for example, as otherwise
* they'll all appear as if the driver name was "usb".
* @model: Device model name
@@ -80,8 +90,7 @@ struct media_entity_notify {
* @enable_source: Enable Source Handler function pointer
* @disable_source: Disable Source Handler function pointer
*
- * @link_notify: Link state change notification callback. This callback is
- * called with the graph_mutex held.
+ * @ops: Operation handler callbacks
*
* This structure represents an abstract high-level media device. It allows easy
* access to entities and provides basic media device-level support. The
@@ -102,16 +111,18 @@ struct media_entity_notify {
* sink entity and deactivate the link between them. Drivers
* should call this handler to release the source.
*
- * Note: Bridge driver is expected to implement and set the
- * handler when media_device is registered or when
- * bridge driver finds the media_device during probe.
- * Bridge driver sets source_priv with information
- * necessary to run enable/disable source handlers.
- *
* Use-case: find tuner entity connected to the decoder
* entity and check if it is available, and activate the
- * the link between them from enable_source and deactivate
- * from disable_source.
+ * link between them from @enable_source and deactivate
+ * from @disable_source.
+ *
+ * .. note::
+ *
+ * Bridge driver is expected to implement and set the
+ * handler when &media_device is registered or when
+ * bridge driver finds the media_device during probe.
+ * Bridge driver sets source_priv with information
+ * necessary to run @enable_source and @disable_source handlers.
*/
struct media_device {
/* dev->driver_data points to this struct. */
@@ -148,8 +159,7 @@ struct media_device {
struct media_pipeline *pipe);
void (*disable_source)(struct media_entity *entity);
- int (*link_notify)(struct media_link *link, u32 flags,
- unsigned int notification);
+ const struct media_device_ops *ops;
};
/* We don't need to include pci.h or usb.h here */
@@ -168,7 +178,7 @@ struct usb_device;
* @ent_enum: Entity enumeration to be initialised
* @mdev: The related media device
*
- * Returns zero on success or a negative error code.
+ * Return: zero on success or a negative error code.
*/
static inline __must_check int media_entity_enum_init(
struct media_entity_enum *ent_enum, struct media_device *mdev)
@@ -211,36 +221,38 @@ void media_device_cleanup(struct media_device *mdev);
*
* Users, should, instead, call the media_device_register() macro.
*
- * The caller is responsible for initializing the media_device structure before
- * registration. The following fields must be set:
+ * The caller is responsible for initializing the &media_device structure
+ * before registration. The following fields of &media_device must be set:
*
- * - dev must point to the parent device (usually a &pci_dev, &usb_interface or
- * &platform_device instance).
+ * - &media_device.dev must point to the parent device (usually a &pci_dev,
+ * &usb_interface or &platform_device instance).
*
- * - model must be filled with the device model name as a NUL-terminated UTF-8
- * string. The device/model revision must not be stored in this field.
+ * - &media_device.model must be filled with the device model name as a
+ * NUL-terminated UTF-8 string. The device/model revision must not be
+ * stored in this field.
*
* The following fields are optional:
*
- * - serial is a unique serial number stored as a NUL-terminated ASCII string.
- * The field is big enough to store a GUID in text form. If the hardware
- * doesn't provide a unique serial number this field must be left empty.
+ * - &media_device.serial is a unique serial number stored as a
+ * NUL-terminated ASCII string. The field is big enough to store a GUID
+ * in text form. If the hardware doesn't provide a unique serial number
+ * this field must be left empty.
*
- * - bus_info represents the location of the device in the system as a
- * NUL-terminated ASCII string. For PCI/PCIe devices bus_info must be set to
- * "PCI:" (or "PCIe:") followed by the value of pci_name(). For USB devices,
- * the usb_make_path() function must be used. This field is used by
- * applications to distinguish between otherwise identical devices that don't
- * provide a serial number.
+ * - &media_device.bus_info represents the location of the device in the
+ * system as a NUL-terminated ASCII string. For PCI/PCIe devices
+ *   &media_device.bus_info must be set to "PCI:" (or "PCIe:") followed by
+ *   the value of pci_name(). For USB devices, the usb_make_path() function
+ * must be used. This field is used by applications to distinguish between
+ * otherwise identical devices that don't provide a serial number.
*
- * - hw_revision is the hardware device revision in a driver-specific format.
- * When possible the revision should be formatted with the KERNEL_VERSION
- * macro.
+ * - &media_device.hw_revision is the hardware device revision in a
+ * driver-specific format. When possible the revision should be formatted
+ * with the KERNEL_VERSION() macro.
*
- * - driver_version is formatted with the KERNEL_VERSION macro. The version
- * minor must be incremented when new features are added to the userspace API
- * without breaking binary compatibility. The version major must be
- * incremented when binary compatibility is broken.
+ * - &media_device.driver_version is formatted with the KERNEL_VERSION()
+ * macro. The version minor must be incremented when new features are added
+ * to the userspace API without breaking binary compatibility. The version
+ * major must be incremented when binary compatibility is broken.
*
* .. note::
*
@@ -252,6 +264,16 @@ void media_device_cleanup(struct media_device *mdev);
*/
int __must_check __media_device_register(struct media_device *mdev,
struct module *owner);
+
+
+/**
+ * media_device_register() - Registers a media device element
+ *
+ * @mdev: pointer to struct &media_device
+ *
+ * This macro calls __media_device_register() passing %THIS_MODULE as
+ * the __media_device_register() second argument (**owner**).
+ */
#define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE)
/**
@@ -259,7 +281,6 @@ int __must_check __media_device_register(struct media_device *mdev,
*
* @mdev: pointer to struct &media_device
*
- *
* It is safe to call this function on an unregistered (but initialised)
* media device.
*/
@@ -285,14 +306,15 @@ void media_device_unregister(struct media_device *mdev);
* framework.
*
* If the device has pads, media_entity_pads_init() should be called before
- * this function. Otherwise, the &media_entity.@pad and &media_entity.@num_pads
+ * this function. Otherwise, the &media_entity.pads and &media_entity.num_pads
* should be zeroed before calling this function.
*
* Entities have flags that describe the entity capabilities and state:
*
- * %MEDIA_ENT_FL_DEFAULT indicates the default entity for a given type.
- * This can be used to report the default audio and video devices or the
- * default camera sensor.
+ * %MEDIA_ENT_FL_DEFAULT
+ * indicates the default entity for a given type.
+ * This can be used to report the default audio and video devices or the
+ * default camera sensor.
*
* .. note::
*
@@ -331,8 +353,10 @@ void media_device_unregister_entity(struct media_entity *entity);
* @mdev: The media device
* @nptr: The media_entity_notify
*
- * Note: When a new entity is registered, all the registered
- * media_entity_notify callbacks are invoked.
+ * .. note::
+ *
+ * When a new entity is registered, all the registered
+ * media_entity_notify callbacks are invoked.
*/
int __must_check media_device_register_entity_notify(struct media_device *mdev,
@@ -410,11 +434,13 @@ void media_device_pci_init(struct media_device *mdev,
* @board_name: media device name. If %NULL, the routine will use the usb
* product name, if available.
* @driver_name: name of the driver. if %NULL, the routine will use the name
- * given by udev->dev->driver->name, with is usually the wrong
+ * given by ``udev->dev->driver->name``, which is usually the wrong
* thing to do.
*
- * NOTE: It is better to call media_device_usb_init() instead, as
- * such macro fills driver_name with %KBUILD_MODNAME.
+ * .. note::
+ *
+ * It is better to call media_device_usb_init() instead, as
+ * such macro fills driver_name with %KBUILD_MODNAME.
*/
void __media_device_usb_init(struct media_device *mdev,
struct usb_device *udev,
@@ -472,6 +498,19 @@ static inline void __media_device_usb_init(struct media_device *mdev,
#endif /* CONFIG_MEDIA_CONTROLLER */
+/**
+ * media_device_usb_init() - create and initialize a
+ * struct &media_device from a PCI device.
+ * struct &media_device from a USB device.
+ * @mdev: pointer to struct &media_device
+ * @udev: pointer to struct usb_device
+ * @name: media device name. If %NULL, the routine will use the usb
+ * product name, if available.
+ *
+ * This macro calls __media_device_usb_init() passing the
+ * __media_device_usb_init() **driver_name** parameter filled with
+ * %KBUILD_MODNAME.
+ */
#define media_device_usb_init(mdev, udev, name) \
__media_device_usb_init(mdev, udev, name, KBUILD_MODNAME)
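A hedged sketch of a bridge driver adopting the new ops field and filling the
mandatory fields listed in the comments above; example_link_notify(),
example_mdev_ops and the surrounding probe context (pdev, mdev) are hypothetical:

	#include <linux/platform_device.h>
	#include <media/media-device.h>

	static int example_link_notify(struct media_link *link, u32 flags,
				       unsigned int notification)
	{
		/* called with mdev->graph_mutex held */
		return 0;
	}

	static const struct media_device_ops example_mdev_ops = {
		.link_notify = example_link_notify,
	};

	static int example_probe(struct platform_device *pdev,
				 struct media_device *mdev)
	{
		int ret;

		mdev->dev = &pdev->dev;		/* parent device */
		strlcpy(mdev->model, "Example Bridge", sizeof(mdev->model));
		mdev->ops = &example_mdev_ops;	/* replaces the old mdev->link_notify */

		media_device_init(mdev);
		ret = media_device_register(mdev);
		if (ret)
			media_device_cleanup(mdev);
		return ret;
	}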
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 37d494805944..cd23e915764c 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -75,8 +75,9 @@ struct media_file_operations {
* @cdev: struct cdev pointer character device
* @parent: parent device
* @minor: device node minor number
- * @flags: flags, combination of the MEDIA_FLAG_* constants
- * @release: release callback called at the end of media_devnode_release()
+ * @flags: flags, combination of the ``MEDIA_FLAG_*`` constants
+ * @release: release callback called at the end of ``media_devnode_release()``
+ *	routine in media-devnode.c.
*
* This structure represents a media-related device node.
*
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 09b03c17784d..b2203ee7a4c1 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -56,7 +56,7 @@ enum media_gobj_type {
/**
* struct media_gobj - Define a graph object.
*
- * @mdev: Pointer to the struct media_device that owns the object
+ * @mdev: Pointer to the struct &media_device that owns the object
* @id: Non-zero object ID identifier. The ID should be unique
* inside a media_device, as it is composed by
* %MEDIA_BITS_PER_TYPE to store the type plus
@@ -129,7 +129,7 @@ struct media_pipeline {
* an interface.
* @gobj1: Part of a union. Used to get the pointer for the second
* graph_object of the link.
- * @source: Part of a union. Used only if the second object (gobj1) is
+ * @sink: Part of a union. Used only if the second object (gobj1) is
* a pad. In that case, it represents the sink pad.
* @entity: Part of a union. Used only if the second object (gobj1) is
* an entity.
@@ -162,7 +162,9 @@ struct media_link {
* @graph_obj: Embedded structure containing the media object common data
* @entity: Entity this pad belongs to
* @index: Pad index in the entity pads array, numbered from 0 to n
- * @flags: Pad flags, as defined in uapi/media.h (MEDIA_PAD_FL_*)
+ * @flags: Pad flags, as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+ * (seek for ``MEDIA_PAD_FL_*``)
*/
struct media_pad {
struct media_gobj graph_obj; /* must be first field in struct */
@@ -182,7 +184,7 @@ struct media_pad {
*
* .. note::
*
- * Those these callbacks are called with struct media_device.@graph_mutex
+ * These callbacks are called with the struct &media_device.graph_mutex
* mutex held.
*/
struct media_entity_operations {
@@ -210,7 +212,7 @@ struct media_entity_operations {
* This allows runtime type identification of media entities and safe casting to
* the correct object type. For instance, a media entity structure instance
* embedded in a v4l2_subdev structure instance will have the type
- * MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a v4l2_subdev
+ * %MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a &v4l2_subdev
* structure using the container_of() macro.
*/
enum media_entity_type {
@@ -225,9 +227,12 @@ enum media_entity_type {
* @graph_obj: Embedded structure containing the media object common data.
* @name: Entity name.
* @obj_type: Type of the object that implements the media_entity.
- * @function: Entity main function, as defined in uapi/media.h
- * (MEDIA_ENT_F_*)
- * @flags: Entity flags, as defined in uapi/media.h (MEDIA_ENT_FL_*)
+ * @function: Entity main function, as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+ * (seek for ``MEDIA_ENT_F_*``)
+ * @flags: Entity flags, as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+ * (seek for ``MEDIA_ENT_FL_*``)
* @num_pads: Number of sink and source pads.
* @num_links: Total number of links, forward and back, enabled and disabled.
* @num_backlinks: Number of backlinks
@@ -246,9 +251,12 @@ enum media_entity_type {
* @minor: Devnode minor number (zero if not applicable). Kept just
* for backward compatibility.
*
- * NOTE: @stream_count and @use_count reference counts must never be
- * negative, but are signed integers on purpose: a simple WARN_ON(<0) check
- * can be used to detect reference count bugs that would make them negative.
+ * .. note::
+ *
+ * @stream_count and @use_count reference counts must never be
+ * negative, but are signed integers on purpose: a simple ``WARN_ON(<0)``
+ * check can be used to detect reference count bugs that would make them
+ * negative.
*/
struct media_entity {
struct media_gobj graph_obj; /* must be first field in struct */
@@ -267,10 +275,6 @@ struct media_entity {
const struct media_entity_operations *ops;
- /* Reference counts must never be negative, but are signed integers on
- * purpose: a simple WARN_ON(<0) check can be used to detect reference
- * count bugs that would make them negative.
- */
int stream_count;
int use_count;
@@ -289,10 +293,16 @@ struct media_entity {
*
* @graph_obj: embedded graph object
* @links: List of links pointing to graph entities
- * @type: Type of the interface as defined in the
- * uapi/media/media.h header, e. g.
- * MEDIA_INTF_T_*
- * @flags: Interface flags as defined in uapi/media/media.h
+ * @type: Type of the interface as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+ * (seek for ``MEDIA_INTF_T_*``)
+ * @flags: Interface flags as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+ * (seek for ``MEDIA_INTF_FL_*``)
+ *
+ * .. note::
+ *
+ *    Currently, no flags for &media_interface are defined.
*/
struct media_interface {
struct media_gobj graph_obj;
@@ -319,7 +329,7 @@ struct media_intf_devnode {
/**
* media_entity_id() - return the media entity graph object id
*
- * @entity: pointer to entity
+ * @entity: pointer to &media_entity
*/
static inline u32 media_entity_id(struct media_entity *entity)
{
@@ -329,7 +339,7 @@ static inline u32 media_entity_id(struct media_entity *entity)
/**
* media_type() - return the media object type
*
- * @gobj: pointer to the media graph object
+ * @gobj: Pointer to the struct &media_gobj graph object
*/
static inline enum media_gobj_type media_type(struct media_gobj *gobj)
{
@@ -339,7 +349,7 @@ static inline enum media_gobj_type media_type(struct media_gobj *gobj)
/**
* media_id() - return the media object ID
*
- * @gobj: pointer to the media graph object
+ * @gobj: Pointer to the struct &media_gobj graph object
*/
static inline u32 media_id(struct media_gobj *gobj)
{
@@ -350,7 +360,7 @@ static inline u32 media_id(struct media_gobj *gobj)
* media_gobj_gen_id() - encapsulates type and ID on at the object ID
*
* @type: object type as define at enum &media_gobj_type.
- * @local_id: next ID, from struct &media_device.@id.
+ * @local_id: next ID, from struct &media_device.id.
*/
static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id)
{
@@ -366,9 +376,9 @@ static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id)
* is_media_entity_v4l2_video_device() - Check if the entity is a video_device
* @entity: pointer to entity
*
- * Return: true if the entity is an instance of a video_device object and can
+ * Return: %true if the entity is an instance of a video_device object and can
* safely be cast to a struct video_device using the container_of() macro, or
- * false otherwise.
+ * %false otherwise.
*/
static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity)
{
@@ -379,9 +389,9 @@ static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity
* is_media_entity_v4l2_subdev() - Check if the entity is a v4l2_subdev
* @entity: pointer to entity
*
- * Return: true if the entity is an instance of a v4l2_subdev object and can
- * safely be cast to a struct v4l2_subdev using the container_of() macro, or
- * false otherwise.
+ * Return: %true if the entity is an instance of a &v4l2_subdev object and can
+ * safely be cast to a struct &v4l2_subdev using the container_of() macro, or
+ * %false otherwise.
*/
static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity)
{
@@ -452,7 +462,7 @@ static inline void media_entity_enum_clear(struct media_entity_enum *ent_enum,
* @ent_enum: Entity enumeration
* @entity: Entity to be tested
*
- * Returns true if the entity was marked.
+ * Returns %true if the entity was marked.
*/
static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum,
struct media_entity *entity)
@@ -464,12 +474,13 @@ static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum,
}
/**
- * media_entity_enum_test - Test whether the entity is marked, and mark it
+ * media_entity_enum_test_and_set - Test whether the entity is marked,
+ * and mark it
*
* @ent_enum: Entity enumeration
* @entity: Entity to be tested
*
- * Returns true if the entity was marked, and mark it before doing so.
+ * Returns %true if the entity was marked, and marks it before doing so.
*/
static inline bool
media_entity_enum_test_and_set(struct media_entity_enum *ent_enum,
@@ -486,7 +497,7 @@ media_entity_enum_test_and_set(struct media_entity_enum *ent_enum,
*
* @ent_enum: Entity enumeration
*
- * Returns true if the entity was marked.
+ * Return: %true if the entity enumeration is empty.
*/
static inline bool media_entity_enum_empty(struct media_entity_enum *ent_enum)
{
@@ -499,7 +510,8 @@ static inline bool media_entity_enum_empty(struct media_entity_enum *ent_enum)
* @ent_enum1: First entity enumeration
* @ent_enum2: Second entity enumeration
*
- * Returns true if entity enumerations e and f intersect, otherwise false.
+ * Return: %true if entity enumerations @ent_enum1 and @ent_enum2 intersect,
+ * otherwise %false.
*/
static inline bool media_entity_enum_intersects(
struct media_entity_enum *ent_enum1,
@@ -511,39 +523,63 @@ static inline bool media_entity_enum_intersects(
min(ent_enum1->idx_max, ent_enum2->idx_max));
}
+/**
+ * gobj_to_entity - returns the struct &media_entity pointer from the
+ *	@gobj contained in it.
+ *
+ * @gobj: Pointer to the struct &media_gobj graph object
+ */
#define gobj_to_entity(gobj) \
container_of(gobj, struct media_entity, graph_obj)
+/**
+ * gobj_to_pad - returns the struct &media_pad pointer from the
+ *	@gobj contained in it.
+ *
+ * @gobj: Pointer to the struct &media_gobj graph object
+ */
#define gobj_to_pad(gobj) \
container_of(gobj, struct media_pad, graph_obj)
+/**
+ * gobj_to_link - returns the struct &media_link pointer from the
+ *	@gobj contained in it.
+ *
+ * @gobj: Pointer to the struct &media_gobj graph object
+ */
#define gobj_to_link(gobj) \
container_of(gobj, struct media_link, graph_obj)
-#define gobj_to_link(gobj) \
- container_of(gobj, struct media_link, graph_obj)
-
-#define gobj_to_pad(gobj) \
- container_of(gobj, struct media_pad, graph_obj)
-
+/**
+ * gobj_to_intf - returns the struct &media_interface pointer from the
+ *	@gobj contained in it.
+ *
+ * @gobj: Pointer to the struct &media_gobj graph object
+ */
#define gobj_to_intf(gobj) \
container_of(gobj, struct media_interface, graph_obj)
+/**
+ * intf_to_devnode - returns the struct media_intf_devnode pointer from the
+ *	@intf contained in it.
+ *
+ * @intf: Pointer to struct &media_intf_devnode
+ */
#define intf_to_devnode(intf) \
container_of(intf, struct media_intf_devnode, intf)
/**
* media_gobj_create - Initialize a graph object
*
- * @mdev: Pointer to the media_device that contains the object
+ * @mdev: Pointer to the &media_device that contains the object
* @type: Type of the object
- * @gobj: Pointer to the graph object
+ * @gobj: Pointer to the struct &media_gobj graph object
*
- * This routine initializes the embedded struct media_gobj inside a
- * media graph object. It is called automatically if media_*_create\(\)
- * calls are used. However, if the object (entity, link, pad, interface)
- * is embedded on some other object, this function should be called before
- * registering the object at the media controller.
+ * This routine initializes the embedded struct &media_gobj inside a
+ * media graph object. It is called automatically if ``media_*_create``
+ * function calls are used. However, if the object (entity, link, pad,
+ * interface) is embedded on some other object, this function should be
+ * called before registering the object at the media controller.
*/
void media_gobj_create(struct media_device *mdev,
enum media_gobj_type type,
@@ -552,7 +588,7 @@ void media_gobj_create(struct media_device *mdev,
/**
* media_gobj_destroy - Stop using a graph object on a media device
*
- * @gobj: Pointer to the graph object
+ * @gobj: Pointer to the struct &media_gobj graph object
*
* This should be called by all routines like media_device_unregister()
* that remove/destroy media graph objects.
@@ -567,11 +603,11 @@ void media_gobj_destroy(struct media_gobj *gobj);
* @pads: Array of @num_pads pads.
*
* The pads array is managed by the entity driver and passed to
- * media_entity_pads_init() where its pointer will be stored in the entity
- * structure.
+ * media_entity_pads_init() where its pointer will be stored in the
+ * &media_entity structure.
*
* If no pads are needed, drivers could either directly fill
- * &media_entity->@num_pads with 0 and &media_entity->@pads with NULL or call
+ * &media_entity->num_pads with 0 and &media_entity->pads with %NULL or call
* this function that will do the same.
*
* As the number of pads is known in advance, the pads array is not allocated
@@ -601,18 +637,21 @@ static inline void media_entity_cleanup(struct media_entity *entity) {};
* @source_pad: number of the source pad in the pads array
* @sink: pointer to &media_entity of the sink pad.
* @sink_pad: number of the sink pad in the pads array.
- * @flags: Link flags, as defined in include/uapi/linux/media.h.
+ * @flags: Link flags, as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+ *	(seek for ``MEDIA_LNK_FL_*``)
*
* Valid values for flags:
*
- * - A %MEDIA_LNK_FL_ENABLED flag indicates that the link is enabled and can
- * be used to transfer media data. When two or more links target a sink pad,
- * only one of them can be enabled at a time.
+ * %MEDIA_LNK_FL_ENABLED
+ * Indicates that the link is enabled and can be used to transfer media data.
+ * When two or more links target a sink pad, only one of them can be
+ * enabled at a time.
*
- * - A %MEDIA_LNK_FL_IMMUTABLE flag indicates that the link enabled state can't
- * be modified at runtime. If %MEDIA_LNK_FL_IMMUTABLE is set, then
- * %MEDIA_LNK_FL_ENABLED must also be set since an immutable link is
- * always enabled.
+ * %MEDIA_LNK_FL_IMMUTABLE
+ * Indicates that the link enabled state can't be modified at runtime. If
+ * %MEDIA_LNK_FL_IMMUTABLE is set, then %MEDIA_LNK_FL_ENABLED must also be
+ * set, since an immutable link is always enabled.
*
* .. note::
*
@@ -630,17 +669,17 @@ __must_check int media_create_pad_link(struct media_entity *source,
* @source_function: Function of the source entities. Used only if @source is
* NULL.
* @source: pointer to &media_entity of the source pad. If NULL, it will use
- * all entities that matches the @sink_function.
+ *	all entities that match the @source_function.
* @source_pad: number of the source pad in the pads array
* @sink_function: Function of the sink entities. Used only if @sink is NULL.
* @sink: pointer to &media_entity of the sink pad. If NULL, it will use
- * all entities that matches the @sink_function.
+ *	all entities that match the @sink_function.
* @sink_pad: number of the sink pad in the pads array.
* @flags: Link flags, as defined in include/uapi/linux/media.h.
- * @allow_both_undefined: if true, then both @source and @sink can be NULL.
+ * @allow_both_undefined: if %true, then both @source and @sink can be NULL.
* In such case, it will create a crossbar between all entities that
* matches @source_function to all entities that matches @sink_function.
- * If false, it will return 0 and won't create any link if both @source
+ * If %false, it will return 0 and won't create any link if both @source
* and @sink are NULL.
*
* Valid values for flags:
@@ -660,9 +699,11 @@ __must_check int media_create_pad_link(struct media_entity *source,
* creates link by link, this function is meant to allow 1:n, n:1 and even
* cross-bar (n:n) links.
*
- * NOTE: Before calling this function, media_entity_pads_init() and
- * media_device_register_entity() should be called previously for the entities
- * to be linked.
+ * .. note::
+ *
+ * Before calling this function, media_entity_pads_init() and
+ * media_device_register_entity() should be called previously for the
+ * entities to be linked.
*/
int media_create_pad_links(const struct media_device *mdev,
const u32 source_function,
@@ -721,7 +762,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags);
* flags.
*
* Media device drivers can be notified of link setup operations by setting the
- * media_device::link_notify pointer to a callback function. If provided, the
+ * &media_device.link_notify pointer to a callback function. If provided, the
* notification callback will be called before enabling and after disabling
* links.
*
@@ -731,7 +772,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags);
*
* Link configuration must not have any side effect on other links. If an
* enabled link at a sink pad prevents another link at the same pad from
- * being enabled, the link_setup operation must return -EBUSY and can't
+ * being enabled, the link_setup operation must return %-EBUSY and can't
* implicitly disable the first enabled link.
*
* .. note::
@@ -747,8 +788,8 @@ int media_entity_setup_link(struct media_link *link, u32 flags);
* @source: Source pad
* @sink: Sink pad
*
- * Return a pointer to the link between the two entities. If no such link
- * exists, return NULL.
+ * Return: returns a pointer to the link between the two entities. If no
+ * such link exists, return %NULL.
*/
struct media_link *media_entity_find_link(struct media_pad *source,
struct media_pad *sink);
@@ -760,8 +801,8 @@ struct media_link *media_entity_find_link(struct media_pad *source,
* Search for a remote pad connected to the given pad by iterating over all
* links originating or terminating at that pad until an enabled link is found.
*
- * Return a pointer to the pad at the remote end of the first found enabled
- * link, or NULL if no enabled link has been found.
+ * Return: returns a pointer to the pad at the remote end of the first found
+ * enabled link, or %NULL if no enabled link has been found.
*/
struct media_pad *media_entity_remote_pad(struct media_pad *pad);
@@ -772,12 +813,18 @@ struct media_pad *media_entity_remote_pad(struct media_pad *pad);
*
* Get a reference to the parent media device module.
*
- * The function will return immediately if @entity is NULL.
+ * The function will return immediately if @entity is %NULL.
*
- * Return a pointer to the entity on success or NULL on failure.
+ * Return: returns a pointer to the entity on success or %NULL on failure.
*/
struct media_entity *media_entity_get(struct media_entity *entity);
+/**
+ * media_entity_graph_walk_init - Allocate resources used by graph walk.
+ *
+ * @graph: Media graph structure that will be used to walk the graph
+ * @mdev: Pointer to the &media_device that contains the object
+ */
__must_check int media_entity_graph_walk_init(
struct media_entity_graph *graph, struct media_device *mdev);
@@ -795,12 +842,14 @@ void media_entity_graph_walk_cleanup(struct media_entity_graph *graph);
*
* Release the reference count acquired by media_entity_get().
*
- * The function will return immediately if @entity is NULL.
+ * The function will return immediately if @entity is %NULL.
*/
void media_entity_put(struct media_entity *entity);
/**
- * media_entity_graph_walk_start - Start walking the media graph at a given entity
+ * media_entity_graph_walk_start - Start walking the media graph at a
+ * given entity
+ *
* @graph: Media graph structure that will be used to walk the graph
* @entity: Starting entity
*
@@ -824,8 +873,8 @@ void media_entity_graph_walk_start(struct media_entity_graph *graph,
* The graph structure must have been previously initialized with a call to
* media_entity_graph_walk_start().
*
- * Return the next entity in the graph or NULL if the whole graph have been
- * traversed.
+ * Return: returns the next entity in the graph or %NULL if the whole graph
+ * has been traversed.
*/
struct media_entity *
media_entity_graph_walk_next(struct media_entity_graph *graph);
@@ -836,8 +885,8 @@ media_entity_graph_walk_next(struct media_entity_graph *graph);
* @pipe: Media pipeline to be assigned to all entities in the pipeline.
*
* Mark all entities connected to a given entity through enabled links, either
- * directly or indirectly, as streaming. The given pipeline object is assigned to
- * every entity in the pipeline and stored in the media_entity pipe field.
+ * directly or indirectly, as streaming. The given pipeline object is assigned
+ * to every entity in the pipeline and stored in the media_entity pipe field.
*
* Calls to this function can be nested, in which case the same number of
* media_entity_pipeline_stop() calls will be required to stop streaming. The
@@ -863,7 +912,7 @@ __must_check int __media_entity_pipeline_start(struct media_entity *entity,
*
* Mark all entities connected to a given entity through enabled links, either
* directly or indirectly, as not streaming. The media_entity pipe field is
- * reset to NULL.
+ * reset to %NULL.
*
* If multiple calls to media_entity_pipeline_start() have been made, the same
* number of calls to this function are required to mark the pipeline as not
@@ -884,14 +933,21 @@ void __media_entity_pipeline_stop(struct media_entity *entity);
* media_devnode_create() - creates and initializes a device node interface
*
* @mdev: pointer to struct &media_device
- * @type: type of the interface, as given by MEDIA_INTF_T_* macros
- * as defined in the uapi/media/media.h header.
- * @flags: Interface flags as defined in uapi/media/media.h.
+ * @type: type of the interface, as given by
+ * :ref:`include/uapi/linux/media.h <media_header>`
+ *	(seek for ``MEDIA_INTF_T_*``) macros.
+ * @flags: Interface flags, as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+ *	(seek for ``MEDIA_INTF_FL_*``)
* @major: Device node major number.
* @minor: Device node minor number.
*
* Return: if succeeded, returns a pointer to the newly allocated
* &media_intf_devnode pointer.
+ *
+ * .. note::
+ *
+ * Currently, no flags for &media_interface is defined.
*/
struct media_intf_devnode *
__must_check media_devnode_create(struct media_device *mdev,
@@ -913,15 +969,19 @@ struct media_link *
*
* @entity: pointer to %media_entity
* @intf: pointer to %media_interface
- * @flags: Link flags, as defined in include/uapi/linux/media.h.
+ * @flags: Link flags, as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+ *	(seek for ``MEDIA_LNK_FL_*``)
*
*
* Valid values for flags:
*
- * - The %MEDIA_LNK_FL_ENABLED flag indicates that the interface is connected to
- * the entity hardware. That's the default value for interfaces. An
- * interface may be disabled if the hardware is busy due to the usage
- * of some other interface that it is currently controlling the hardware.
+ * %MEDIA_LNK_FL_ENABLED
+ * Indicates that the interface is connected to the entity hardware.
+ * That's the default value for interfaces. An interface may be disabled if
+ * the hardware is busy due to the usage of some other interface that it is
+ * the hardware is busy due to the usage of some other interface that is
+ *
 * A typical example is a hybrid TV device that handles only one type of
 * stream at a given time. So, when the digital TV is streaming,
 * the V4L2 interfaces won't be enabled, as such a device is not able to
@@ -977,6 +1037,18 @@ void __media_remove_intf_links(struct media_interface *intf);
*/
void media_remove_intf_links(struct media_interface *intf);
+/**
+ * media_entity_call - Calls a struct media_entity_operations operation on
+ * an entity
+ *
+ * @entity: entity where the @operation will be called
+ * @operation: type of the operation. Should be the name of a member of
+ * struct &media_entity_operations.
+ *
+ * This helper function will check if @operation is not %NULL. In that case,
+ * it will issue a call to @operation\(@entity, @args\).
+ */
+
#define media_entity_call(entity, operation, args...) \
(((entity)->ops && (entity)->ops->operation) ? \
(entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD)
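For illustration, a sketch tying together the pad and link helpers documented
above; struct example_subdev, the sensor entity pointer and the pad layout are
assumptions made for this example only:

	#include <media/media-entity.h>

	struct example_subdev {
		struct media_entity entity;	/* hypothetical wrapper */
	};

	static struct media_pad example_pads[2] = {
		{ .flags = MEDIA_PAD_FL_SINK },
		{ .flags = MEDIA_PAD_FL_SOURCE },
	};

	static int example_init_entity(struct example_subdev *xsd,
				       struct media_entity *sensor)
	{
		int ret;

		/* pads must be initialised before the entity is registered */
		ret = media_entity_pads_init(&xsd->entity, 2, example_pads);
		if (ret)
			return ret;

		/* an immutable link must also be enabled */
		return media_create_pad_link(sensor, 0, &xsd->entity, 0,
					     MEDIA_LNK_FL_ENABLED |
					     MEDIA_LNK_FL_IMMUTABLE);
	}

The media_entity_call() helper would then be used along the lines of
media_entity_call(&xsd->entity, link_setup, local, remote, flags) with
hypothetical pad pointers, returning -ENOIOCTLCMD when the entity does not
implement the operation.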
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 10908e356b23..40188d362486 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -231,7 +231,7 @@ void rc_unregister_device(struct rc_dev *dev);
int rc_open(struct rc_dev *rdev);
/**
- * rc_open - Closes a RC device
+ * rc_close - Closes a RC device
*
* @rdev: pointer to struct rc_dev.
*/
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index daa75fcc1ff1..e1cc14cba391 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -11,27 +11,55 @@
#include <linux/input.h>
+/**
+ * enum rc_type - type of the Remote Controller protocol
+ *
+ * @RC_TYPE_UNKNOWN: Protocol not known
+ * @RC_TYPE_OTHER: Protocol known but proprietary
+ * @RC_TYPE_RC5: Philips RC5 protocol
+ * @RC_TYPE_RC5X: Philips RC5x protocol
+ * @RC_TYPE_RC5_SZ: StreamZap variant of RC5
+ * @RC_TYPE_JVC: JVC protocol
+ * @RC_TYPE_SONY12: Sony 12 bit protocol
+ * @RC_TYPE_SONY15: Sony 15 bit protocol
+ * @RC_TYPE_SONY20: Sony 20 bit protocol
+ * @RC_TYPE_NEC: NEC protocol
+ * @RC_TYPE_NECX: Extended NEC protocol
+ * @RC_TYPE_NEC32: NEC 32 bit protocol
+ * @RC_TYPE_SANYO: Sanyo protocol
+ * @RC_TYPE_MCE_KBD: RC6-ish MCE keyboard/mouse
+ * @RC_TYPE_RC6_0: Philips RC6-0-16 protocol
+ * @RC_TYPE_RC6_6A_20: Philips RC6-6A-20 protocol
+ * @RC_TYPE_RC6_6A_24: Philips RC6-6A-24 protocol
+ * @RC_TYPE_RC6_6A_32: Philips RC6-6A-32 protocol
+ * @RC_TYPE_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol
+ * @RC_TYPE_SHARP: Sharp protocol
+ * @RC_TYPE_XMP: XMP protocol
+ * @RC_TYPE_CEC: CEC protocol
+ */
enum rc_type {
- RC_TYPE_UNKNOWN = 0, /* Protocol not known */
- RC_TYPE_OTHER = 1, /* Protocol known but proprietary */
- RC_TYPE_RC5 = 2, /* Philips RC5 protocol */
- RC_TYPE_RC5X = 3, /* Philips RC5x protocol */
- RC_TYPE_RC5_SZ = 4, /* StreamZap variant of RC5 */
- RC_TYPE_JVC = 5, /* JVC protocol */
- RC_TYPE_SONY12 = 6, /* Sony 12 bit protocol */
- RC_TYPE_SONY15 = 7, /* Sony 15 bit protocol */
- RC_TYPE_SONY20 = 8, /* Sony 20 bit protocol */
- RC_TYPE_NEC = 9, /* NEC protocol */
- RC_TYPE_SANYO = 10, /* Sanyo protocol */
- RC_TYPE_MCE_KBD = 11, /* RC6-ish MCE keyboard/mouse */
- RC_TYPE_RC6_0 = 12, /* Philips RC6-0-16 protocol */
- RC_TYPE_RC6_6A_20 = 13, /* Philips RC6-6A-20 protocol */
- RC_TYPE_RC6_6A_24 = 14, /* Philips RC6-6A-24 protocol */
- RC_TYPE_RC6_6A_32 = 15, /* Philips RC6-6A-32 protocol */
- RC_TYPE_RC6_MCE = 16, /* MCE (Philips RC6-6A-32 subtype) protocol */
- RC_TYPE_SHARP = 17, /* Sharp protocol */
- RC_TYPE_XMP = 18, /* XMP protocol */
- RC_TYPE_CEC = 19, /* CEC protocol */
+ RC_TYPE_UNKNOWN = 0,
+ RC_TYPE_OTHER = 1,
+ RC_TYPE_RC5 = 2,
+ RC_TYPE_RC5X = 3,
+ RC_TYPE_RC5_SZ = 4,
+ RC_TYPE_JVC = 5,
+ RC_TYPE_SONY12 = 6,
+ RC_TYPE_SONY15 = 7,
+ RC_TYPE_SONY20 = 8,
+ RC_TYPE_NEC = 9,
+ RC_TYPE_NECX = 10,
+ RC_TYPE_NEC32 = 11,
+ RC_TYPE_SANYO = 12,
+ RC_TYPE_MCE_KBD = 13,
+ RC_TYPE_RC6_0 = 14,
+ RC_TYPE_RC6_6A_20 = 15,
+ RC_TYPE_RC6_6A_24 = 16,
+ RC_TYPE_RC6_6A_32 = 17,
+ RC_TYPE_RC6_MCE = 18,
+ RC_TYPE_SHARP = 19,
+ RC_TYPE_XMP = 20,
+ RC_TYPE_CEC = 21,
};
#define RC_BIT_NONE 0ULL
@@ -45,6 +73,8 @@ enum rc_type {
#define RC_BIT_SONY15 (1ULL << RC_TYPE_SONY15)
#define RC_BIT_SONY20 (1ULL << RC_TYPE_SONY20)
#define RC_BIT_NEC (1ULL << RC_TYPE_NEC)
+#define RC_BIT_NECX (1ULL << RC_TYPE_NECX)
+#define RC_BIT_NEC32 (1ULL << RC_TYPE_NEC32)
#define RC_BIT_SANYO (1ULL << RC_TYPE_SANYO)
#define RC_BIT_MCE_KBD (1ULL << RC_TYPE_MCE_KBD)
#define RC_BIT_RC6_0 (1ULL << RC_TYPE_RC6_0)
@@ -60,8 +90,9 @@ enum rc_type {
RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ | \
RC_BIT_JVC | \
RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
- RC_BIT_NEC | RC_BIT_SANYO | RC_BIT_MCE_KBD | \
- RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
+ RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 | \
+ RC_BIT_SANYO | RC_BIT_MCE_KBD | RC_BIT_RC6_0 | \
+ RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | RC_BIT_SHARP | \
RC_BIT_XMP | RC_BIT_CEC)
@@ -76,21 +107,45 @@ enum rc_type {
#define RC_SCANCODE_RC6_0(sys, cmd) (((sys) << 8) | (cmd))
#define RC_SCANCODE_RC6_6A(vendor, sys, cmd) (((vendor) << 16) | ((sys) << 8) | (cmd))
+/**
+ * struct rc_map_table - represents a scancode/keycode pair
+ *
+ * @scancode: scan code (u32)
+ * @keycode: Linux input keycode
+ */
struct rc_map_table {
u32 scancode;
u32 keycode;
};
+/**
+ * struct rc_map - represents a keycode map table
+ *
+ * @scan: pointer to struct &rc_map_table
+ * @size: Max number of entries
+ * @len: Number of entries that are in use
+ * @alloc: size of \*scan, in bytes
+ * @rc_type: type of the remote controller protocol, as defined at
+ * enum &rc_type
+ * @name: name of the key map table
+ * @lock: lock to protect access to this structure
+ */
struct rc_map {
struct rc_map_table *scan;
- unsigned int size; /* Max number of entries */
- unsigned int len; /* Used number of entries */
- unsigned int alloc; /* Size of *scan in bytes */
+ unsigned int size;
+ unsigned int len;
+ unsigned int alloc;
enum rc_type rc_type;
const char *name;
spinlock_t lock;
};
+/**
+ * struct rc_map_list - list of the registered &rc_map maps
+ *
+ * @list: pointer to struct &list_head
+ * @map: pointer to struct &rc_map
+ */
struct rc_map_list {
struct list_head list;
struct rc_map map;
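A sketch of a keymap built on the structures documented above; the scancodes,
the "rc-example" map name and the use of rc_map_register() (assumed here from
the rc-core keymap API) are illustrative, not part of this patch:

	#include <media/rc-map.h>

	static struct rc_map_table example_scan[] = {
		{ 0x0802, KEY_POWER },	/* hypothetical extended-NEC scancode */
		{ 0x0810, KEY_MUTE },
	};

	static struct rc_map_list example_map = {
		.map = {
			.scan    = example_scan,
			.size    = ARRAY_SIZE(example_scan),
			.rc_type = RC_TYPE_NECX,	/* one of the new NEC variants */
			.name    = "rc-example",
		}
	};

	/* registered from the keymap module's init function */
	ret = rc_map_register(&example_map);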
diff --git a/include/media/rcar-fcp.h b/include/media/rcar-fcp.h
index 4c7fc77eaf29..8723f05c6321 100644
--- a/include/media/rcar-fcp.h
+++ b/include/media/rcar-fcp.h
@@ -29,7 +29,7 @@ static inline struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np)
static inline void rcar_fcp_put(struct rcar_fcp_device *fcp) { }
static inline int rcar_fcp_enable(struct rcar_fcp_device *fcp)
{
- return -ENOSYS;
+ return 0;
}
static inline void rcar_fcp_disable(struct rcar_fcp_device *fcp) { }
#endif
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 97aa13314bfd..1a15c3e4efd3 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -105,16 +105,13 @@ struct soc_camera_host_ops {
int (*get_formats)(struct soc_camera_device *, unsigned int,
struct soc_camera_format_xlate *);
void (*put_formats)(struct soc_camera_device *);
- int (*cropcap)(struct soc_camera_device *, struct v4l2_cropcap *);
- int (*get_crop)(struct soc_camera_device *, struct v4l2_crop *);
- int (*set_crop)(struct soc_camera_device *, const struct v4l2_crop *);
int (*get_selection)(struct soc_camera_device *, struct v4l2_selection *);
int (*set_selection)(struct soc_camera_device *, struct v4l2_selection *);
/*
- * The difference to .set_crop() is, that .set_livecrop is not allowed
+	 * The difference to .set_selection() is that .set_liveselection is not allowed
* to change the output sizes
*/
- int (*set_livecrop)(struct soc_camera_device *, const struct v4l2_crop *);
+ int (*set_liveselection)(struct soc_camera_device *, struct v4l2_selection *);
int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *);
int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *);
void (*init_videobuf)(struct videobuf_queue *,
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 178a88d45aea..e1006b391cdc 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -93,6 +93,16 @@ struct v4l2_ctrl_type_ops {
union v4l2_ctrl_ptr ptr);
};
+/**
+ * typedef v4l2_ctrl_notify_fnc - typedef for the notify callback that is
+ * called when a control value has changed.
+ *
+ * @ctrl: pointer to struct &v4l2_ctrl
+ * @priv: control private data
+ *
+ * This typedef definition is used as an argument to v4l2_ctrl_notify()
+ * and as an argument at struct &v4l2_ctrl_handler.
+ */
typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
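
For illustration only (the state structure and handler below are hypothetical), a driver
could install such a callback through v4l2_ctrl_notify(), which takes exactly this
function type plus a private pointer:

	#include <media/v4l2-ctrls.h>

	/* Hypothetical driver state passed back through @priv. */
	struct example_state {
		bool needs_reconfig;
	};

	static void example_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
	{
		struct example_state *state = priv;

		/* Record that the hardware must be reprogrammed (driver-specific). */
		state->needs_reconfig = true;
	}

	static void example_install_notify(struct v4l2_ctrl *ctrl,
					   struct example_state *state)
	{
		v4l2_ctrl_notify(ctrl, example_ctrl_notify, state);
	}
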
/**
@@ -229,7 +239,7 @@ struct v4l2_ctrl {
* @next: Single-link list node for the hash.
* @ctrl: The actual control information.
* @helper: Pointer to helper struct. Used internally in
- * prepare_ext_ctrls().
+ * ``prepare_ext_ctrls`` function in ``v4l2-ctrls.c``.
*
* Each control handler has a list of these refs. The list_head is used to
* keep a sorted-by-control-ID list of all controls, while the next pointer
@@ -369,17 +379,39 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
* @key: Used by the lock validator if CONFIG_LOCKDEP is set.
* @name: Used by the lock validator if CONFIG_LOCKDEP is set.
*
- * Returns an error if the buckets could not be allocated. This error will
- * also be stored in @hdl->error.
+ * .. attention::
+ *
+ * Never use this call directly, always use the v4l2_ctrl_handler_init()
+ * macro that hides the @key and @name arguments.
*
- * Never use this call directly, always use the v4l2_ctrl_handler_init
- * macro that hides the @key and @name arguments.
+ * Return: returns an error if the buckets could not be allocated. This
+ * error will also be stored in @hdl->error.
*/
int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
unsigned int nr_of_controls_hint,
struct lock_class_key *key, const char *name);
#ifdef CONFIG_LOCKDEP
+
+/**
+ * v4l2_ctrl_handler_init - helper function to create a static struct
+ * &lock_class_key and calls v4l2_ctrl_handler_init_class()
+ *
+ * @hdl: The control handler.
+ * @nr_of_controls_hint: A hint of how many controls this handler is
+ * expected to refer to. This is the total number, so including
+ * any inherited controls. It doesn't have to be precise, but if
+ * it is way off, then you either waste memory (too many buckets
+ * are allocated) or the control lookup becomes slower (not enough
+ * buckets are allocated, so there are more slow list lookups).
+ * It will always work, though.
+ *
+ * This helper function creates a static struct &lock_class_key and
+ * calls v4l2_ctrl_handler_init_class(), providing a proper name for the lock
+ * validator.
+ *
+ * Use this helper function to initialize a control handler.
+ */
#define v4l2_ctrl_handler_init(hdl, nr_of_controls_hint) \
( \
({ \
@@ -564,6 +596,13 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
u32 id, u8 max, u8 def,
const s64 *qmenu_int);
+/**
+ * typedef v4l2_ctrl_filter - Typedef to define the filter function to be
+ * used when adding a control handler.
+ *
+ * @ctrl: pointer to struct &v4l2_ctrl.
+ */
typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *ctrl);
/**
@@ -635,8 +674,8 @@ void v4l2_ctrl_cluster(unsigned int ncontrols, struct v4l2_ctrl **controls);
* be marked active, and any reads will just return the current value without
* going through g_volatile_ctrl.
*
- * In addition, this function will set the V4L2_CTRL_FLAG_UPDATE flag
- * on the autofoo control and V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
+ * In addition, this function will set the %V4L2_CTRL_FLAG_UPDATE flag
+ * on the autofoo control and %V4L2_CTRL_FLAG_INACTIVE on the foo control(s)
* if autofoo is in auto mode.
*/
void v4l2_ctrl_auto_cluster(unsigned int ncontrols,
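
A hedged sketch of the typical autogain/gain use of this helper (the array layout is the
driver's own, and the manual value 0 is an assumption about the auto control's "manual"
state):

	#include <media/v4l2-ctrls.h>

	/*
	 * Hypothetical autogain/gain pair: ctrls[0] is the "auto" control and
	 * ctrls[1] the manual control it governs.
	 */
	static void example_setup_autocluster(struct v4l2_ctrl *ctrls[2])
	{
		/*
		 * 0 is assumed to be the auto control's "manual mode" value;
		 * the final argument makes the manual control volatile while
		 * auto mode is active.
		 */
		v4l2_ctrl_auto_cluster(2, ctrls, 0, true);
	}
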
@@ -686,7 +725,6 @@ void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active);
*/
void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
-
/**
*__v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range()
*
@@ -936,9 +974,9 @@ extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops;
* v4l2_ctrl_replace - Function to be used as a callback to
* &struct v4l2_subscribed_event_ops replace\(\)
*
- * @old: pointer to :ref:`struct v4l2_event <v4l2-event>` with the reported
+ * @old: pointer to struct &v4l2_event with the reported
* event;
- * @new: pointer to :ref:`struct v4l2_event <v4l2-event>` with the modified
+ * @new: pointer to struct &v4l2_event with the modified
* event;
*/
void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new);
@@ -947,9 +985,9 @@ void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new);
* v4l2_ctrl_merge - Function to be used as a callback to
* &struct v4l2_subscribed_event_ops merge(\)
*
- * @old: pointer to :ref:`struct v4l2_event <v4l2-event>` with the reported
+ * @old: pointer to struct &v4l2_event with the reported
* event;
- * @new: pointer to :ref:`struct v4l2_event <v4l2-event>` with the merged
+ * @new: pointer to struct &v4l2_event with the merged
* event;
*/
void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new);
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index a122b1bd40f9..e657614521e3 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -25,7 +25,8 @@
#define VFL_TYPE_RADIO 2
#define VFL_TYPE_SUBDEV 3
#define VFL_TYPE_SDR 4
-#define VFL_TYPE_MAX 5
+#define VFL_TYPE_TOUCH 5
+#define VFL_TYPE_MAX 6
/* Is this a receiver, transmitter or mem-to-mem? */
/* Ignored for VFL_TYPE_SUBDEV. */
@@ -55,7 +56,7 @@ struct v4l2_ctrl_handler;
*
* .. note::
* The size of @prios array matches the number of priority types defined
- * by :ref:`enum v4l2_priority <v4l2-priority>`.
+ * by enum &v4l2_priority.
*/
struct v4l2_prio_state {
atomic_t prios[4];
@@ -72,8 +73,8 @@ void v4l2_prio_init(struct v4l2_prio_state *global);
* v4l2_prio_change - changes the v4l2 file handler priority
*
* @global: pointer to the &struct v4l2_prio_state of the device node.
- * @local: pointer to the desired priority, as defined by :ref:`enum v4l2_priority <v4l2-priority>`
- * @new: Priority type requested, as defined by :ref:`enum v4l2_priority <v4l2-priority>`.
+ * @local: pointer to the desired priority, as defined by enum &v4l2_priority
+ * @new: Priority type requested, as defined by enum &v4l2_priority.
*
* .. note::
* This function should be used only by the V4L2 core.
@@ -85,7 +86,7 @@ int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
* v4l2_prio_open - Implements the priority logic for a file handler open
*
* @global: pointer to the &struct v4l2_prio_state of the device node.
- * @local: pointer to the desired priority, as defined by :ref:`enum v4l2_priority <v4l2-priority>`
+ * @local: pointer to the desired priority, as defined by enum &v4l2_priority
*
* .. note::
* This function should be used only by the V4L2 core.
@@ -96,7 +97,7 @@ void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local);
* v4l2_prio_close - Implements the priority logic for a file handler close
*
* @global: pointer to the &struct v4l2_prio_state of the device node.
- * @local: priority to be released, as defined by :ref:`enum v4l2_priority <v4l2-priority>`
+ * @local: priority to be released, as defined by enum &v4l2_priority
*
* .. note::
* This function should be used only by the V4L2 core.
@@ -114,10 +115,10 @@ void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local);
enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global);
/**
- * v4l2_prio_close - Implements the priority logic for a file handler close
+ * v4l2_prio_check - Checks whether the desired priority is currently allowed
*
* @global: pointer to the &struct v4l2_prio_state of the device node.
- * @local: desired priority, as defined by :ref:`enum v4l2_priority <v4l2-priority>` local
+ * @local: desired priority, as defined by enum &v4l2_priority
*
* .. note::
* This function should be used only by the V4L2 core.
@@ -294,6 +295,7 @@ struct video_device
* - %VFL_TYPE_RADIO - A radio card
* - %VFL_TYPE_SUBDEV - A subdevice
* - %VFL_TYPE_SDR - Software Defined Radio
+ * - %VFL_TYPE_TOUCH - A touch sensor
*
* .. note::
*
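
Purely as a sketch of registering a node with the new type (the device, fops and name
are hypothetical, and the usual v4l2_dev/ioctl_ops wiring is omitted):

	#include <linux/module.h>
	#include <media/v4l2-dev.h>

	static const struct v4l2_file_operations example_touch_fops = {
		.owner = THIS_MODULE,
	};

	static struct video_device example_touch_vdev = {
		.name    = "example-touch",		/* hypothetical */
		.fops    = &example_touch_fops,
		.release = video_device_release_empty,
	};

	static int example_register_touch(void)
	{
		/* -1 lets the core pick the first free node number. */
		return video_register_device(&example_touch_vdev, VFL_TYPE_TOUCH, -1);
	}
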
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index a9d6aa41790e..8ffa94009d1a 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -39,7 +39,7 @@ struct v4l2_ctrl_handler;
* if this struct is embedded into a larger struct.
* @name: unique device name, by default the driver name + bus ID
* @notify: notify callback called by some sub-devices.
- * @ctrl_handler: The control handler. May be NULL.
+ * @ctrl_handler: The control handler. May be %NULL.
* @prio: Device's priority state
* @ref: Keep track of the references to this struct.
* @release: Release function that is called when the ref count
@@ -53,8 +53,8 @@ struct v4l2_ctrl_handler;
*
* .. note::
*
- * #) dev->driver_data points to this struct.
- * #) dev might be NULL if there is no parent device
+ * #) @dev->driver_data points to this struct.
+ * #) @dev might be %NULL if there is no parent device
*/
struct v4l2_device {
@@ -76,10 +76,10 @@ struct v4l2_device {
/**
* v4l2_device_get - gets a V4L2 device reference
*
- * @v4l2_dev: pointer to struct v4l2_device
+ * @v4l2_dev: pointer to struct &v4l2_device
*
* This is an ancillary routine meant to increment the usage for the
- * struct v4l2_device pointed by @v4l2_dev.
+ * struct &v4l2_device pointed by @v4l2_dev.
*/
static inline void v4l2_device_get(struct v4l2_device *v4l2_dev)
{
@@ -89,23 +89,23 @@ static inline void v4l2_device_get(struct v4l2_device *v4l2_dev)
/**
 * v4l2_device_put - puts a V4L2 device reference
*
- * @v4l2_dev: pointer to struct v4l2_device
+ * @v4l2_dev: pointer to struct &v4l2_device
*
* This is an ancillary routine meant to decrement the usage for the
- * struct v4l2_device pointed by @v4l2_dev.
+ * struct &v4l2_device pointed by @v4l2_dev.
*/
int v4l2_device_put(struct v4l2_device *v4l2_dev);
/**
- * v4l2_device_register -Initialize v4l2_dev and make dev->driver_data
- * point to v4l2_dev.
+ * v4l2_device_register - Initialize v4l2_dev and make @dev->driver_data
+ * point to @v4l2_dev.
*
- * @dev: pointer to struct device
- * @v4l2_dev: pointer to struct v4l2_device
+ * @dev: pointer to struct &device
+ * @v4l2_dev: pointer to struct &v4l2_device
*
* .. note::
- * dev may be NULL in rare cases (ISA devices).
- * In such case the caller must fill in the v4l2_dev->name field
+ * @dev may be %NULL in rare cases (ISA devices).
+ * In such case the caller must fill in the @v4l2_dev->name field
* before calling this function.
*/
int __must_check v4l2_device_register(struct device *dev,
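
A minimal sketch of this registration step in a hypothetical platform driver's probe(),
assuming the v4l2_device is embedded in a driver structure:

	#include <linux/platform_device.h>
	#include <media/v4l2-device.h>

	struct example_dev {
		struct v4l2_device v4l2_dev;	/* embedded, as recommended */
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_dev *dev;
		int ret;

		dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;

		/* Sets pdev->dev.driver_data and fills in a default name. */
		ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
		if (ret)
			return ret;

		return 0;
	}
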
@@ -113,14 +113,14 @@ int __must_check v4l2_device_register(struct device *dev,
/**
* v4l2_device_set_name - Optional function to initialize the
- * name field of struct v4l2_device
+ * name field of struct &v4l2_device
*
- * @v4l2_dev: pointer to struct v4l2_device
+ * @v4l2_dev: pointer to struct &v4l2_device
* @basename: base name for the device name
* @instance: pointer to a static atomic_t var with the instance usage for
- * the device driver.
+ * the device driver.
*
- * v4l2_device_set_name() initializes the name field of struct v4l2_device
+ * v4l2_device_set_name() initializes the name field of struct &v4l2_device
* using the driver name and a driver-global atomic_t instance.
*
* This function will increment the instance counter and returns the
@@ -132,7 +132,7 @@ int __must_check v4l2_device_register(struct device *dev,
*
* ...
*
- * instance = v4l2_device_set_name(&v4l2_dev, "foo", &drv_instance);
+ * instance = v4l2_device_set_name(&\ v4l2_dev, "foo", &\ drv_instance);
*
* The first time this is called the name field will be set to foo0 and
* this function returns 0. If the name ends with a digit (e.g. cx18),
@@ -147,16 +147,16 @@ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
* @v4l2_dev: pointer to struct v4l2_device
*
* Should be called when the USB parent disconnects.
- * Since the parent disappears, this ensures that v4l2_dev doesn't have
+ * Since the parent disappears, this ensures that @v4l2_dev doesn't have
* an invalid parent pointer.
*
- * .. note:: This function sets v4l2_dev->dev to NULL.
+ * .. note:: This function sets @v4l2_dev->dev to NULL.
*/
void v4l2_device_disconnect(struct v4l2_device *v4l2_dev);
/**
* v4l2_device_unregister - Unregister all sub-devices and any other
- * resources related to v4l2_dev.
+ * resources related to @v4l2_dev.
*
* @v4l2_dev: pointer to struct v4l2_device
*/
@@ -165,8 +165,8 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev);
/**
* v4l2_device_register_subdev - Registers a subdev with a v4l2 device.
*
- * @v4l2_dev: pointer to struct v4l2_device
- * @sd: pointer to struct v4l2_subdev
+ * @v4l2_dev: pointer to struct &v4l2_device
+ * @sd: pointer to struct &v4l2_subdev
*
* While registered, the subdev module is marked as in-use.
*
@@ -179,7 +179,7 @@ int __must_check v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
/**
* v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device.
*
- * @sd: pointer to struct v4l2_subdev
+ * @sd: pointer to struct &v4l2_subdev
*
* .. note ::
*
@@ -191,7 +191,7 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd);
/**
* v4l2_device_register_subdev_nodes - Registers device nodes for all subdevs
* of the v4l2 device that are marked with
- * the V4L2_SUBDEV_FL_HAS_DEVNODE flag.
+ * the %V4L2_SUBDEV_FL_HAS_DEVNODE flag.
*
* @v4l2_dev: pointer to struct v4l2_device
*/
@@ -201,9 +201,9 @@ v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev);
/**
* v4l2_subdev_notify - Sends a notification to v4l2_device.
*
- * @sd: pointer to struct v4l2_subdev
+ * @sd: pointer to struct &v4l2_subdev
* @notification: type of notification. Please notice that the notification
- * type is driver-specific.
+ * type is driver-specific.
* @arg: arguments for the notification. Those are specific to each
* notification type.
*/
@@ -222,7 +222,7 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
Ignore any errors. Note that you cannot add or delete a subdev
while walking the subdevs list. */
#define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...) \
- do { \
+ do { \
list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) \
if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \
(sd)->ops->o->f((sd) , ##args); \
@@ -241,15 +241,15 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
return with that error code. Note that you cannot add or delete a
subdev while walking the subdevs list. */
#define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \
-({ \
+({ \
long __err = 0; \
\
list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) { \
if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \
__err = (sd)->ops->o->f((sd) , ##args); \
if (__err && __err != -ENOIOCTLCMD) \
- break; \
- } \
+ break; \
+ } \
(__err == -ENOIOCTLCMD) ? 0 : __err; \
})
@@ -276,7 +276,7 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
match them all). If the callback returns an error other than 0 or
-ENOIOCTLCMD, then return with that error code. Note that you cannot
add or delete a subdev while walking the subdevs list. */
-#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \
+#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \
({ \
struct v4l2_subdev *__sd; \
__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
@@ -300,8 +300,8 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
/*
* Call the specified callback for all subdevs where grp_id & grpmsk != 0
- * (if grpmsk == `0, then match them all). If the callback returns an error
- * other than 0 or -ENOIOCTLCMD, then return with that error code. Note that
+ * (if grpmsk == 0, then match them all). If the callback returns an error
+ * other than 0 or %-ENOIOCTLCMD, then return with that error code. Note that
* you cannot add or delete a subdev while walking the subdevs list.
*/
#define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 65caadf13eec..0a7d9e1fc8c8 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -28,8 +28,8 @@
*/
extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
-/*
- * v4l2_check_dv_timings_fnc - timings check callback
+/**
+ * typedef v4l2_check_dv_timings_fnc - timings check callback
*
* @t: the v4l2_dv_timings struct.
* @handle: a handle from the driver.
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index ca854203b8b9..a700285c64a9 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -222,7 +222,8 @@ int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd,
struct v4l2_fh *fh,
struct v4l2_event_subscription *sub);
/**
- * v4l2_src_change_event_subscribe -
+ * v4l2_src_change_event_subscribe - helper function that calls
+ * v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE.
*
* @fh: pointer to struct v4l2_fh
* @sub: pointer to &struct v4l2_event_subscription
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
index 3d184ab52274..b0fe4d6f4a5f 100644
--- a/include/media/v4l2-flash-led-class.h
+++ b/include/media/v4l2-flash-led-class.h
@@ -20,7 +20,7 @@ struct led_classdev;
struct v4l2_flash;
enum led_brightness;
-/*
+/**
* struct v4l2_flash_ctrl_data - flash control initialization data, filled
* basing on the features declared by the LED flash
* class driver in the v4l2_flash_config
@@ -33,14 +33,21 @@ struct v4l2_flash_ctrl_data {
u32 cid;
};
+/**
+ * struct v4l2_flash_ops - V4L2 flash operations
+ *
+ * @external_strobe_set: Setup strobing the flash by hardware pin state
+ * assertion.
+ * @intensity_to_led_brightness: Convert intensity to brightness in a device
+ * specific manner
+ * @led_brightness_to_intensity: convert brightness to intensity in a device
+ * specific manner.
+ */
struct v4l2_flash_ops {
- /* setup strobing the flash by hardware pin state assertion */
int (*external_strobe_set)(struct v4l2_flash *v4l2_flash,
bool enable);
- /* convert intensity to brightness in a device specific manner */
enum led_brightness (*intensity_to_led_brightness)
(struct v4l2_flash *v4l2_flash, s32 intensity);
- /* convert brightness to intensity in a device specific manner */
s32 (*led_brightness_to_intensity)
(struct v4l2_flash *v4l2_flash, enum led_brightness);
};
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 8b1d19bc9b0e..574ff2ae94be 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -287,273 +287,286 @@ struct v4l2_ioctl_ops {
/* ioctl callbacks */
/* VIDIOC_QUERYCAP handler */
- int (*vidioc_querycap)(struct file *file, void *fh, struct v4l2_capability *cap);
+ int (*vidioc_querycap)(struct file *file, void *fh,
+ struct v4l2_capability *cap);
/* VIDIOC_ENUM_FMT handlers */
- int (*vidioc_enum_fmt_vid_cap) (struct file *file, void *fh,
- struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_vid_overlay) (struct file *file, void *fh,
- struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_vid_out) (struct file *file, void *fh,
- struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_vid_cap)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_vid_overlay)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_vid_out)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
int (*vidioc_enum_fmt_vid_cap_mplane)(struct file *file, void *fh,
struct v4l2_fmtdesc *f);
int (*vidioc_enum_fmt_vid_out_mplane)(struct file *file, void *fh,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_sdr_cap) (struct file *file, void *fh,
- struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_sdr_out) (struct file *file, void *fh,
- struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_sdr_cap)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
/* VIDIOC_G_FMT handlers */
- int (*vidioc_g_fmt_vid_cap) (struct file *file, void *fh,
- struct v4l2_format *f);
+ int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_g_fmt_vid_overlay)(struct file *file, void *fh,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_out) (struct file *file, void *fh,
- struct v4l2_format *f);
+ int (*vidioc_g_fmt_vid_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_g_fmt_vid_out_overlay)(struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_g_fmt_vbi_cap) (struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_g_fmt_vbi_out) (struct file *file, void *fh,
- struct v4l2_format *f);
+ struct v4l2_format *f);
+ int (*vidioc_g_fmt_vbi_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
+ int (*vidioc_g_fmt_vbi_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *file, void *fh,
- struct v4l2_format *f);
+ struct v4l2_format *f);
int (*vidioc_g_fmt_sliced_vbi_out)(struct file *file, void *fh,
- struct v4l2_format *f);
+ struct v4l2_format *f);
int (*vidioc_g_fmt_vid_cap_mplane)(struct file *file, void *fh,
struct v4l2_format *f);
int (*vidioc_g_fmt_vid_out_mplane)(struct file *file, void *fh,
struct v4l2_format *f);
- int (*vidioc_g_fmt_sdr_cap) (struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_g_fmt_sdr_out) (struct file *file, void *fh,
- struct v4l2_format *f);
+ int (*vidioc_g_fmt_sdr_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
+ int (*vidioc_g_fmt_sdr_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_S_FMT handlers */
- int (*vidioc_s_fmt_vid_cap) (struct file *file, void *fh,
- struct v4l2_format *f);
+ int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_s_fmt_vid_overlay)(struct file *file, void *fh,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_out) (struct file *file, void *fh,
- struct v4l2_format *f);
+ int (*vidioc_s_fmt_vid_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_s_fmt_vid_out_overlay)(struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_s_fmt_vbi_cap) (struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_s_fmt_vbi_out) (struct file *file, void *fh,
- struct v4l2_format *f);
+ struct v4l2_format *f);
+ int (*vidioc_s_fmt_vbi_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
+ int (*vidioc_s_fmt_vbi_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *file, void *fh,
- struct v4l2_format *f);
+ struct v4l2_format *f);
int (*vidioc_s_fmt_sliced_vbi_out)(struct file *file, void *fh,
- struct v4l2_format *f);
+ struct v4l2_format *f);
int (*vidioc_s_fmt_vid_cap_mplane)(struct file *file, void *fh,
struct v4l2_format *f);
int (*vidioc_s_fmt_vid_out_mplane)(struct file *file, void *fh,
struct v4l2_format *f);
- int (*vidioc_s_fmt_sdr_cap) (struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_s_fmt_sdr_out) (struct file *file, void *fh,
- struct v4l2_format *f);
+ int (*vidioc_s_fmt_sdr_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
+ int (*vidioc_s_fmt_sdr_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_TRY_FMT handlers */
- int (*vidioc_try_fmt_vid_cap) (struct file *file, void *fh,
- struct v4l2_format *f);
+ int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_try_fmt_vid_overlay)(struct file *file, void *fh,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_out) (struct file *file, void *fh,
- struct v4l2_format *f);
+ int (*vidioc_try_fmt_vid_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_try_fmt_vid_out_overlay)(struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_try_fmt_vbi_cap) (struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_try_fmt_vbi_out) (struct file *file, void *fh,
- struct v4l2_format *f);
+ struct v4l2_format *f);
+ int (*vidioc_try_fmt_vbi_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
+ int (*vidioc_try_fmt_vbi_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *file, void *fh,
- struct v4l2_format *f);
+ struct v4l2_format *f);
int (*vidioc_try_fmt_sliced_vbi_out)(struct file *file, void *fh,
- struct v4l2_format *f);
+ struct v4l2_format *f);
int (*vidioc_try_fmt_vid_cap_mplane)(struct file *file, void *fh,
struct v4l2_format *f);
int (*vidioc_try_fmt_vid_out_mplane)(struct file *file, void *fh,
struct v4l2_format *f);
- int (*vidioc_try_fmt_sdr_cap) (struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_try_fmt_sdr_out) (struct file *file, void *fh,
- struct v4l2_format *f);
+ int (*vidioc_try_fmt_sdr_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
+ int (*vidioc_try_fmt_sdr_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* Buffer handlers */
- int (*vidioc_reqbufs) (struct file *file, void *fh, struct v4l2_requestbuffers *b);
- int (*vidioc_querybuf)(struct file *file, void *fh, struct v4l2_buffer *b);
- int (*vidioc_qbuf) (struct file *file, void *fh, struct v4l2_buffer *b);
- int (*vidioc_expbuf) (struct file *file, void *fh,
- struct v4l2_exportbuffer *e);
- int (*vidioc_dqbuf) (struct file *file, void *fh, struct v4l2_buffer *b);
-
- int (*vidioc_create_bufs)(struct file *file, void *fh, struct v4l2_create_buffers *b);
- int (*vidioc_prepare_buf)(struct file *file, void *fh, struct v4l2_buffer *b);
-
- int (*vidioc_overlay) (struct file *file, void *fh, unsigned int i);
- int (*vidioc_g_fbuf) (struct file *file, void *fh,
- struct v4l2_framebuffer *a);
- int (*vidioc_s_fbuf) (struct file *file, void *fh,
- const struct v4l2_framebuffer *a);
+ int (*vidioc_reqbufs)(struct file *file, void *fh,
+ struct v4l2_requestbuffers *b);
+ int (*vidioc_querybuf)(struct file *file, void *fh,
+ struct v4l2_buffer *b);
+ int (*vidioc_qbuf)(struct file *file, void *fh,
+ struct v4l2_buffer *b);
+ int (*vidioc_expbuf)(struct file *file, void *fh,
+ struct v4l2_exportbuffer *e);
+ int (*vidioc_dqbuf)(struct file *file, void *fh,
+ struct v4l2_buffer *b);
+
+ int (*vidioc_create_bufs)(struct file *file, void *fh,
+ struct v4l2_create_buffers *b);
+ int (*vidioc_prepare_buf)(struct file *file, void *fh,
+ struct v4l2_buffer *b);
+
+ int (*vidioc_overlay)(struct file *file, void *fh, unsigned int i);
+ int (*vidioc_g_fbuf)(struct file *file, void *fh,
+ struct v4l2_framebuffer *a);
+ int (*vidioc_s_fbuf)(struct file *file, void *fh,
+ const struct v4l2_framebuffer *a);
/* Stream on/off */
- int (*vidioc_streamon) (struct file *file, void *fh, enum v4l2_buf_type i);
- int (*vidioc_streamoff)(struct file *file, void *fh, enum v4l2_buf_type i);
-
- /* Standard handling
- ENUMSTD is handled by videodev.c
+ int (*vidioc_streamon)(struct file *file, void *fh,
+ enum v4l2_buf_type i);
+ int (*vidioc_streamoff)(struct file *file, void *fh,
+ enum v4l2_buf_type i);
+
+ /*
+ * Standard handling
+ *
+ * Note: ENUMSTD is handled by videodev.c
*/
- int (*vidioc_g_std) (struct file *file, void *fh, v4l2_std_id *norm);
- int (*vidioc_s_std) (struct file *file, void *fh, v4l2_std_id norm);
- int (*vidioc_querystd) (struct file *file, void *fh, v4l2_std_id *a);
+ int (*vidioc_g_std)(struct file *file, void *fh, v4l2_std_id *norm);
+ int (*vidioc_s_std)(struct file *file, void *fh, v4l2_std_id norm);
+ int (*vidioc_querystd)(struct file *file, void *fh, v4l2_std_id *a);
/* Input handling */
int (*vidioc_enum_input)(struct file *file, void *fh,
struct v4l2_input *inp);
- int (*vidioc_g_input) (struct file *file, void *fh, unsigned int *i);
- int (*vidioc_s_input) (struct file *file, void *fh, unsigned int i);
+ int (*vidioc_g_input)(struct file *file, void *fh, unsigned int *i);
+ int (*vidioc_s_input)(struct file *file, void *fh, unsigned int i);
/* Output handling */
- int (*vidioc_enum_output) (struct file *file, void *fh,
+ int (*vidioc_enum_output)(struct file *file, void *fh,
struct v4l2_output *a);
- int (*vidioc_g_output) (struct file *file, void *fh, unsigned int *i);
- int (*vidioc_s_output) (struct file *file, void *fh, unsigned int i);
+ int (*vidioc_g_output)(struct file *file, void *fh, unsigned int *i);
+ int (*vidioc_s_output)(struct file *file, void *fh, unsigned int i);
/* Control handling */
- int (*vidioc_queryctrl) (struct file *file, void *fh,
- struct v4l2_queryctrl *a);
- int (*vidioc_query_ext_ctrl) (struct file *file, void *fh,
- struct v4l2_query_ext_ctrl *a);
- int (*vidioc_g_ctrl) (struct file *file, void *fh,
- struct v4l2_control *a);
- int (*vidioc_s_ctrl) (struct file *file, void *fh,
- struct v4l2_control *a);
- int (*vidioc_g_ext_ctrls) (struct file *file, void *fh,
- struct v4l2_ext_controls *a);
- int (*vidioc_s_ext_ctrls) (struct file *file, void *fh,
- struct v4l2_ext_controls *a);
- int (*vidioc_try_ext_ctrls) (struct file *file, void *fh,
- struct v4l2_ext_controls *a);
- int (*vidioc_querymenu) (struct file *file, void *fh,
- struct v4l2_querymenu *a);
+ int (*vidioc_queryctrl)(struct file *file, void *fh,
+ struct v4l2_queryctrl *a);
+ int (*vidioc_query_ext_ctrl)(struct file *file, void *fh,
+ struct v4l2_query_ext_ctrl *a);
+ int (*vidioc_g_ctrl)(struct file *file, void *fh,
+ struct v4l2_control *a);
+ int (*vidioc_s_ctrl)(struct file *file, void *fh,
+ struct v4l2_control *a);
+ int (*vidioc_g_ext_ctrls)(struct file *file, void *fh,
+ struct v4l2_ext_controls *a);
+ int (*vidioc_s_ext_ctrls)(struct file *file, void *fh,
+ struct v4l2_ext_controls *a);
+ int (*vidioc_try_ext_ctrls)(struct file *file, void *fh,
+ struct v4l2_ext_controls *a);
+ int (*vidioc_querymenu)(struct file *file, void *fh,
+ struct v4l2_querymenu *a);
/* Audio ioctls */
- int (*vidioc_enumaudio) (struct file *file, void *fh,
- struct v4l2_audio *a);
- int (*vidioc_g_audio) (struct file *file, void *fh,
- struct v4l2_audio *a);
- int (*vidioc_s_audio) (struct file *file, void *fh,
- const struct v4l2_audio *a);
+ int (*vidioc_enumaudio)(struct file *file, void *fh,
+ struct v4l2_audio *a);
+ int (*vidioc_g_audio)(struct file *file, void *fh,
+ struct v4l2_audio *a);
+ int (*vidioc_s_audio)(struct file *file, void *fh,
+ const struct v4l2_audio *a);
/* Audio out ioctls */
- int (*vidioc_enumaudout) (struct file *file, void *fh,
- struct v4l2_audioout *a);
- int (*vidioc_g_audout) (struct file *file, void *fh,
- struct v4l2_audioout *a);
- int (*vidioc_s_audout) (struct file *file, void *fh,
- const struct v4l2_audioout *a);
- int (*vidioc_g_modulator) (struct file *file, void *fh,
- struct v4l2_modulator *a);
- int (*vidioc_s_modulator) (struct file *file, void *fh,
- const struct v4l2_modulator *a);
+ int (*vidioc_enumaudout)(struct file *file, void *fh,
+ struct v4l2_audioout *a);
+ int (*vidioc_g_audout)(struct file *file, void *fh,
+ struct v4l2_audioout *a);
+ int (*vidioc_s_audout)(struct file *file, void *fh,
+ const struct v4l2_audioout *a);
+ int (*vidioc_g_modulator)(struct file *file, void *fh,
+ struct v4l2_modulator *a);
+ int (*vidioc_s_modulator)(struct file *file, void *fh,
+ const struct v4l2_modulator *a);
/* Crop ioctls */
- int (*vidioc_cropcap) (struct file *file, void *fh,
- struct v4l2_cropcap *a);
- int (*vidioc_g_crop) (struct file *file, void *fh,
- struct v4l2_crop *a);
- int (*vidioc_s_crop) (struct file *file, void *fh,
- const struct v4l2_crop *a);
- int (*vidioc_g_selection) (struct file *file, void *fh,
- struct v4l2_selection *s);
- int (*vidioc_s_selection) (struct file *file, void *fh,
- struct v4l2_selection *s);
+ int (*vidioc_cropcap)(struct file *file, void *fh,
+ struct v4l2_cropcap *a);
+ int (*vidioc_g_crop)(struct file *file, void *fh,
+ struct v4l2_crop *a);
+ int (*vidioc_s_crop)(struct file *file, void *fh,
+ const struct v4l2_crop *a);
+ int (*vidioc_g_selection)(struct file *file, void *fh,
+ struct v4l2_selection *s);
+ int (*vidioc_s_selection)(struct file *file, void *fh,
+ struct v4l2_selection *s);
/* Compression ioctls */
- int (*vidioc_g_jpegcomp) (struct file *file, void *fh,
- struct v4l2_jpegcompression *a);
- int (*vidioc_s_jpegcomp) (struct file *file, void *fh,
- const struct v4l2_jpegcompression *a);
- int (*vidioc_g_enc_index) (struct file *file, void *fh,
- struct v4l2_enc_idx *a);
- int (*vidioc_encoder_cmd) (struct file *file, void *fh,
- struct v4l2_encoder_cmd *a);
- int (*vidioc_try_encoder_cmd) (struct file *file, void *fh,
- struct v4l2_encoder_cmd *a);
- int (*vidioc_decoder_cmd) (struct file *file, void *fh,
- struct v4l2_decoder_cmd *a);
- int (*vidioc_try_decoder_cmd) (struct file *file, void *fh,
- struct v4l2_decoder_cmd *a);
+ int (*vidioc_g_jpegcomp)(struct file *file, void *fh,
+ struct v4l2_jpegcompression *a);
+ int (*vidioc_s_jpegcomp)(struct file *file, void *fh,
+ const struct v4l2_jpegcompression *a);
+ int (*vidioc_g_enc_index)(struct file *file, void *fh,
+ struct v4l2_enc_idx *a);
+ int (*vidioc_encoder_cmd)(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *a);
+ int (*vidioc_try_encoder_cmd)(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *a);
+ int (*vidioc_decoder_cmd)(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *a);
+ int (*vidioc_try_decoder_cmd)(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *a);
/* Stream type-dependent parameter ioctls */
- int (*vidioc_g_parm) (struct file *file, void *fh,
- struct v4l2_streamparm *a);
- int (*vidioc_s_parm) (struct file *file, void *fh,
- struct v4l2_streamparm *a);
+ int (*vidioc_g_parm)(struct file *file, void *fh,
+ struct v4l2_streamparm *a);
+ int (*vidioc_s_parm)(struct file *file, void *fh,
+ struct v4l2_streamparm *a);
/* Tuner ioctls */
- int (*vidioc_g_tuner) (struct file *file, void *fh,
- struct v4l2_tuner *a);
- int (*vidioc_s_tuner) (struct file *file, void *fh,
- const struct v4l2_tuner *a);
- int (*vidioc_g_frequency) (struct file *file, void *fh,
- struct v4l2_frequency *a);
- int (*vidioc_s_frequency) (struct file *file, void *fh,
- const struct v4l2_frequency *a);
- int (*vidioc_enum_freq_bands) (struct file *file, void *fh,
- struct v4l2_frequency_band *band);
+ int (*vidioc_g_tuner)(struct file *file, void *fh,
+ struct v4l2_tuner *a);
+ int (*vidioc_s_tuner)(struct file *file, void *fh,
+ const struct v4l2_tuner *a);
+ int (*vidioc_g_frequency)(struct file *file, void *fh,
+ struct v4l2_frequency *a);
+ int (*vidioc_s_frequency)(struct file *file, void *fh,
+ const struct v4l2_frequency *a);
+ int (*vidioc_enum_freq_bands)(struct file *file, void *fh,
+ struct v4l2_frequency_band *band);
/* Sliced VBI cap */
- int (*vidioc_g_sliced_vbi_cap) (struct file *file, void *fh,
- struct v4l2_sliced_vbi_cap *a);
+ int (*vidioc_g_sliced_vbi_cap)(struct file *file, void *fh,
+ struct v4l2_sliced_vbi_cap *a);
/* Log status ioctl */
- int (*vidioc_log_status) (struct file *file, void *fh);
+ int (*vidioc_log_status)(struct file *file, void *fh);
- int (*vidioc_s_hw_freq_seek) (struct file *file, void *fh,
- const struct v4l2_hw_freq_seek *a);
+ int (*vidioc_s_hw_freq_seek)(struct file *file, void *fh,
+ const struct v4l2_hw_freq_seek *a);
/* Debugging ioctls */
#ifdef CONFIG_VIDEO_ADV_DEBUG
- int (*vidioc_g_register) (struct file *file, void *fh,
- struct v4l2_dbg_register *reg);
- int (*vidioc_s_register) (struct file *file, void *fh,
- const struct v4l2_dbg_register *reg);
+ int (*vidioc_g_register)(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg);
+ int (*vidioc_s_register)(struct file *file, void *fh,
+ const struct v4l2_dbg_register *reg);
- int (*vidioc_g_chip_info) (struct file *file, void *fh,
- struct v4l2_dbg_chip_info *chip);
+ int (*vidioc_g_chip_info)(struct file *file, void *fh,
+ struct v4l2_dbg_chip_info *chip);
#endif
- int (*vidioc_enum_framesizes) (struct file *file, void *fh,
- struct v4l2_frmsizeenum *fsize);
+ int (*vidioc_enum_framesizes)(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize);
- int (*vidioc_enum_frameintervals) (struct file *file, void *fh,
- struct v4l2_frmivalenum *fival);
+ int (*vidioc_enum_frameintervals)(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival);
/* DV Timings IOCTLs */
- int (*vidioc_s_dv_timings) (struct file *file, void *fh,
- struct v4l2_dv_timings *timings);
- int (*vidioc_g_dv_timings) (struct file *file, void *fh,
- struct v4l2_dv_timings *timings);
- int (*vidioc_query_dv_timings) (struct file *file, void *fh,
- struct v4l2_dv_timings *timings);
- int (*vidioc_enum_dv_timings) (struct file *file, void *fh,
- struct v4l2_enum_dv_timings *timings);
- int (*vidioc_dv_timings_cap) (struct file *file, void *fh,
- struct v4l2_dv_timings_cap *cap);
- int (*vidioc_g_edid) (struct file *file, void *fh, struct v4l2_edid *edid);
- int (*vidioc_s_edid) (struct file *file, void *fh, struct v4l2_edid *edid);
-
- int (*vidioc_subscribe_event) (struct v4l2_fh *fh,
- const struct v4l2_event_subscription *sub);
+ int (*vidioc_s_dv_timings)(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings);
+ int (*vidioc_g_dv_timings)(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings);
+ int (*vidioc_query_dv_timings)(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings);
+ int (*vidioc_enum_dv_timings)(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings);
+ int (*vidioc_dv_timings_cap)(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap);
+ int (*vidioc_g_edid)(struct file *file, void *fh,
+ struct v4l2_edid *edid);
+ int (*vidioc_s_edid)(struct file *file, void *fh,
+ struct v4l2_edid *edid);
+
+ int (*vidioc_subscribe_event)(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub);
int (*vidioc_unsubscribe_event)(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub);
/* For other private ioctls */
- long (*vidioc_default) (struct file *file, void *fh,
- bool valid_prio, unsigned int cmd, void *arg);
+ long (*vidioc_default)(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg);
};
@@ -573,38 +586,123 @@ struct v4l2_ioctl_ops {
#define V4L2_DEV_DEBUG_POLL 0x10
/* Video standard functions */
-extern const char *v4l2_norm_to_name(v4l2_std_id id);
-extern void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod);
-extern int v4l2_video_std_construct(struct v4l2_standard *vs,
+
+/**
+ * v4l2_norm_to_name - Ancillary routine to get the analog TV standard name from its ID.
+ *
+ * @id: analog TV standard ID.
+ *
+ * Return: returns a string with the name of the analog TV standard.
+ * If the standard is not found or if @id points to multiple standard,
+ * it returns "Unknown".
+ */
+const char *v4l2_norm_to_name(v4l2_std_id id);
+
+/**
+ * v4l2_video_std_frame_period - Ancillary routine that fills a
+ * struct &v4l2_fract pointer with the default framerate fraction.
+ *
+ * @id: analog TV standard ID.
+ * @frameperiod: struct &v4l2_fract pointer to be filled
+ *
+ */
+void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod);
+
+/**
+ * v4l2_video_std_construct - Ancillary routine that fills in the fields of
+ * a &v4l2_standard structure according to the @id parameter.
+ *
+ * @vs: struct &v4l2_standard pointer to be filled
+ * @id: analog TV standard ID.
+ * @name: name of the standard to be used
+ *
+ * .. note::
+ *
+ * This ancillary routine is obsolete and shouldn't be used in new drivers.
+ */
+int v4l2_video_std_construct(struct v4l2_standard *vs,
int id, const char *name);
-/* Prints the ioctl in a human-readable format. If prefix != NULL,
- then do printk(KERN_DEBUG "%s: ", prefix) first. */
-extern void v4l_printk_ioctl(const char *prefix, unsigned int cmd);
-/* Internal use only: get the mutex (if any) that we need to lock for the
- given command. */
+/**
+ * v4l_printk_ioctl - Ancillary routine that prints the ioctl in a
+ * human-readable format.
+ *
+ * @prefix: prefix to be added at the ioctl prints.
+ * @cmd: ioctl name
+ *
+ * .. note::
+ *
+ * If prefix != %NULL, then it will issue a
+ * ``printk(KERN_DEBUG "%s: ", prefix)`` first.
+ */
+void v4l_printk_ioctl(const char *prefix, unsigned int cmd);
+
struct video_device;
-extern struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, unsigned cmd);
+
+
+/**
+ * v4l2_ioctl_get_lock - get the mutex (if any) that needs to be held for
+ * a given command.
+ *
+ * @vdev: Pointer to struct &video_device.
+ * @cmd: Ioctl name.
+ *
+ * .. note:: Internal use only. Should not be used outside V4L2 core.
+ */
+struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, unsigned int cmd);
/* names for fancy debug output */
extern const char *v4l2_field_names[];
extern const char *v4l2_type_names[];
#ifdef CONFIG_COMPAT
-/* 32 Bits compatibility layer for 64 bits processors */
-extern long v4l2_compat_ioctl32(struct file *file, unsigned int cmd,
- unsigned long arg);
+/**
+ * v4l2_compat_ioctl32 - 32-bit compatibility layer for 64-bit processors
+ *
+ * @file: Pointer to struct &file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ */
+long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg);
#endif
-typedef long (*v4l2_kioctl)(struct file *file,
- unsigned int cmd, void *arg);
+/**
+ * typedef v4l2_kioctl - Typedef used to pass an ioctl handler.
+ *
+ * @file: Pointer to struct &file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ */
+typedef long (*v4l2_kioctl)(struct file *file, unsigned int cmd, void *arg);
-/* Include support for obsoleted stuff */
-extern long video_usercopy(struct file *file, unsigned int cmd,
- unsigned long arg, v4l2_kioctl func);
+/**
+ * video_usercopy - copies data from/to userspace memory when an ioctl is
+ * issued.
+ *
+ * @file: Pointer to struct &file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ * @func: function that will handle the ioctl
+ *
+ * .. note::
+ *
+ * This routine should be used only inside the V4L2 core.
+ */
+long int video_usercopy(struct file *file, unsigned int cmd,
+ unsigned long int arg, v4l2_kioctl func);
-/* Standard handlers for V4L ioctl's */
-extern long video_ioctl2(struct file *file,
- unsigned int cmd, unsigned long arg);
+/**
+ * video_ioctl2 - Handles a V4L2 ioctl.
+ *
+ * @file: Pointer to struct &file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ *
+ * Method used to handle an ioctl. Should be used to fill the
+ * &v4l2_file_operations.unlocked_ioctl on all V4L2 drivers.
+ */
+long int video_ioctl2(struct file *file,
+ unsigned int cmd, unsigned long int arg);
#endif /* _V4L2_IOCTL_H */
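
A hedged sketch of how these declarations are normally wired together (the driver name
and ops below are hypothetical; only VIDIOC_QUERYCAP is shown, and capability flags are
omitted):

	#include <linux/module.h>
	#include <linux/string.h>
	#include <media/v4l2-dev.h>
	#include <media/v4l2-ioctl.h>

	static int example_querycap(struct file *file, void *fh,
				    struct v4l2_capability *cap)
	{
		strlcpy(cap->driver, "example", sizeof(cap->driver));
		strlcpy(cap->card, "Example card", sizeof(cap->card));
		return 0;
	}

	static const struct v4l2_ioctl_ops example_ioctl_ops = {
		.vidioc_querycap = example_querycap,
	};

	static const struct v4l2_file_operations example_fops = {
		.owner          = THIS_MODULE,
		.unlocked_ioctl = video_ioctl2,	/* dispatches to example_ioctl_ops */
	};

The video_device being registered would then point at both tables through its fops and
ioctl_ops fields.
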
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index 28c3f9d9c209..2634d9dc9916 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -53,7 +53,7 @@ enum tuner_pad_index {
};
/**
- * enum if_vid_dec_index - video IF-PLL pad index for
+ * enum if_vid_dec_pad_index - video IF-PLL pad index for
* MEDIA_ENT_F_IF_VID_DECODER
*
* @IF_VID_DEC_PAD_IF_INPUT: video Intermediate Frequency (IF) sink pad
@@ -68,7 +68,7 @@ enum if_vid_dec_pad_index {
};
/**
- * enum if_aud_dec_index - audio/sound IF-PLL pad index for
+ * enum if_aud_dec_pad_index - audio/sound IF-PLL pad index for
* MEDIA_ENT_F_IF_AUD_DECODER
*
* @IF_AUD_DEC_PAD_IF_INPUT: audio Intermediate Frequency (IF) sink pad
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 5a9597dd1ee0..1b355344c804 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -41,9 +41,9 @@
* This function does not have to (and will usually not) wait
* until the device enters a state when it can be stopped.
* @lock: optional. Define a driver's own lock callback, instead of using
- * m2m_ctx->q_lock.
+ * &v4l2_m2m_ctx->q_lock.
* @unlock: optional. Define a driver's own unlock callback, instead of
- * using m2m_ctx->q_lock.
+ * using &v4l2_m2m_ctx->q_lock.
*/
struct v4l2_m2m_ops {
void (*device_run)(void *priv);
@@ -55,29 +55,51 @@ struct v4l2_m2m_ops {
struct v4l2_m2m_dev;
+/**
+ * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
+ * processed
+ *
+ * @q: pointer to struct &vb2_queue
+ * @rdy_queue: List of V4L2 mem-to-mem queues
+ * @rdy_spinlock: spin lock to protect the struct usage
+ * @num_rdy: number of buffers ready to be processed
+ * @buffered: is the queue buffered?
+ *
+ * Queue for buffers ready to be processed as soon as this
+ * instance receives access to the device.
+ */
struct v4l2_m2m_queue_ctx {
-/* private: internal use only */
struct vb2_queue q;
- /* Queue for buffers ready to be processed as soon as this
- * instance receives access to the device */
struct list_head rdy_queue;
spinlock_t rdy_spinlock;
u8 num_rdy;
bool buffered;
};
+/**
+ * struct v4l2_m2m_ctx - Memory to memory context structure
+ *
+ * @q_lock: struct &mutex lock
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @cap_q_ctx: Capture (output to memory) queue context
+ * @out_q_ctx: Output (input from memory) queue context
+ * @queue: List of memory to memory contexts
+ * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
+ * %TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
+ * @finished: Wait queue used to signal when a queued job finishes.
+ * @priv: Instance private data
+ */
struct v4l2_m2m_ctx {
/* optional cap/out vb2 queues lock */
struct mutex *q_lock;
-/* private: internal use only */
+ /* internal use only */
struct v4l2_m2m_dev *m2m_dev;
- /* Capture (output to memory) queue context */
struct v4l2_m2m_queue_ctx cap_q_ctx;
- /* Output (input from memory) queue context */
struct v4l2_m2m_queue_ctx out_q_ctx;
/* For device job queue */
@@ -85,22 +107,75 @@ struct v4l2_m2m_ctx {
unsigned long job_flags;
wait_queue_head_t finished;
- /* Instance private data */
void *priv;
};
+/**
+ * struct v4l2_m2m_buffer - Memory to memory buffer
+ *
+ * @vb: pointer to struct &vb2_v4l2_buffer
+ * @list: list of m2m buffers
+ */
struct v4l2_m2m_buffer {
struct vb2_v4l2_buffer vb;
struct list_head list;
};
+/**
+ * v4l2_m2m_get_curr_priv() - return driver private data for the currently
+ * running instance or NULL if no instance is running
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);
+/**
+ * v4l2_m2m_get_vq() - return vb2_queue for the given type
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
+ */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type);
+/**
+ * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
+ * the pending job queue and add it if so.
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ *
+ * There are three basic requirements an instance has to meet to be able to run:
+ * 1) at least one source buffer has to be queued,
+ * 2) at least one destination buffer has to be queued,
+ * 3) streaming has to be on.
+ *
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
+ * There may also be additional, custom requirements. In such case the driver
+ * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
+ * return 1 if the instance is ready.
+ * An example of the above could be an instance that requires more than one
+ * src/dst buffer per transaction.
+ */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
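
The custom-requirement case mentioned above could look roughly like the following, where
needing two buffers per side is a purely hypothetical constraint:

	#include <media/v4l2-mem2mem.h>

	struct example_ctx {
		struct v4l2_m2m_ctx *m2m_ctx;
	};

	static void example_device_run(void *priv)
	{
		/* Kick off the hardware for the next transaction (not shown). */
	}

	static int example_job_ready(void *priv)
	{
		struct example_ctx *ctx = priv;

		/* Hypothetical: the device consumes two buffers per side at once. */
		if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < 2 ||
		    v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < 2)
			return 0;
		return 1;
	}

	static const struct v4l2_m2m_ops example_m2m_ops = {
		.device_run = example_device_run,
		.job_ready  = example_job_ready,
	};
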
+/**
+ * v4l2_m2m_job_finish() - inform the framework that a job has been finished
+ * and have it clean up
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ *
+ * Called by a driver to yield back the device after it has finished with it.
+ * Should be called as soon as possible after reaching a state which allows
+ * other instances to take control of the device.
+ *
+ * This function has to be called only after &v4l2_m2m_ops->device_run
+ * callback has been called on the driver. To prevent recursion, it should
+ * not be called directly from the &v4l2_m2m_ops->device_run callback though.
+ */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx);
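
A hedged sketch of a typical completion path, with a hypothetical interrupt handler and
device layout, that marks the finished buffers done and only then yields the device:

	#include <linux/interrupt.h>
	#include <media/v4l2-mem2mem.h>

	struct example_dev {
		struct v4l2_m2m_dev *m2m_dev;
		struct v4l2_m2m_ctx *curr_ctx;	/* remembered by device_run (hypothetical) */
	};

	static irqreturn_t example_irq(int irq, void *data)
	{
		struct example_dev *dev = data;
		struct vb2_v4l2_buffer *src, *dst;

		/* NULL checks and hardware status handling omitted in this sketch. */
		src = v4l2_m2m_src_buf_remove(dev->curr_ctx);
		dst = v4l2_m2m_dst_buf_remove(dev->curr_ctx);

		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);

		/* Yield the device so the framework can schedule the next job. */
		v4l2_m2m_job_finish(dev->m2m_dev, dev->curr_ctx);

		return IRQ_HANDLED;
	}
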
@@ -110,38 +185,165 @@ v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
vb2_buffer_done(&buf->vb2_buf, state);
}
+/**
+ * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @reqbufs: pointer to struct &v4l2_requestbuffers
+ */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs);
+/**
+ * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @buf: pointer to struct &v4l2_buffer
+ *
+ * See v4l2_m2m_mmap() documentation for details.
+ */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf);
+/**
+ * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
+ * the type
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @buf: pointer to struct &v4l2_buffer
+ */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf);
+
+/**
+ * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
+ * the type
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @buf: pointer to struct &v4l2_buffer
+ */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf);
+
+/**
+ * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
+ * the type
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @buf: pointer to struct &v4l2_buffer
+ */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf);
+
+/**
+ * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
+ * on the type
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @create: pointer to struct &v4l2_create_buffers
+ */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_create_buffers *create);
+/**
+ * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
+ * the type
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @eb: pointer to struct &v4l2_exportbuffer
+ */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_exportbuffer *eb);
+/**
+ * v4l2_m2m_streamon() - turn on streaming for a video queue
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
+ */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type);
+
+/**
+ * v4l2_m2m_streamoff() - turn off streaming for a video queue
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
+ */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type);
+/**
+ * v4l2_m2m_poll() - poll replacement, for destination buffers only
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @wait: pointer to struct &poll_table_struct
+ *
+ * Call from the driver's poll() function. Will poll both queues. If a buffer
+ * is available to dequeue (with dqbuf) from the source queue, this will
+ * indicate that a non-blocking write can be performed, while a buffer ready
+ * to be dequeued from the destination queue indicates that a non-blocking
+ * read can be performed.
+ */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait);
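
In a driver this usually reduces to a thin wrapper; the file-handle/context layout below
is a hypothetical but common arrangement:

	#include <linux/fs.h>
	#include <linux/kernel.h>
	#include <linux/poll.h>
	#include <media/v4l2-fh.h>
	#include <media/v4l2-mem2mem.h>

	struct example_ctx {
		struct v4l2_fh fh;		/* embedded file handle */
		struct v4l2_m2m_ctx *m2m_ctx;
	};

	static unsigned int example_poll(struct file *file,
					 struct poll_table_struct *wait)
	{
		/* Assumes open() stored &ctx->fh in file->private_data. */
		struct example_ctx *ctx = container_of(file->private_data,
						       struct example_ctx, fh);

		return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
	}
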
+/**
+ * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @vma: pointer to struct &vm_area_struct
+ *
+ * Call from driver's mmap() function. Will handle mmap() for both queues
+ * seamlessly for videobuffer, which will receive normal per-queue offsets and
+ * proper videobuf queue pointers. The differentiation is made outside videobuf
+ * by adding a predefined offset to buffers from one of the queues and
+ * subtracting it before passing it back to videobuf. Only drivers (and
+ * thus applications) receive modified offsets.
+ */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct vm_area_struct *vma);
+/**
+ * v4l2_m2m_init() - initialize per-driver m2m data
+ *
+ * @m2m_ops: pointer to struct v4l2_m2m_ops
+ *
+ * Usually called from driver's ``probe()`` function.
+ *
+ * Return: returns an opaque pointer to the internal data to handle M2M context
+ */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
+
+/**
+ * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ *
+ * Usually called from driver's ``remove()`` function.
+ */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
+/**
+ * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @drv_priv: driver's instance private data
+ * @queue_init: a callback for queue type-specific initialization function
+ * to be used for initializing videobuf_queues
+ *
+ * Usually called from driver's ``open()`` function.
+ */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
void *drv_priv,
int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
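
A hedged open()-time sketch (queue setup body omitted, all names hypothetical):

	#include <linux/err.h>
	#include <media/v4l2-mem2mem.h>
	#include <media/videobuf2-v4l2.h>

	static int example_queue_init(void *priv, struct vb2_queue *src_vq,
				      struct vb2_queue *dst_vq)
	{
		/* Driver-specific vb2_queue setup for both queues goes here. */
		return 0;
	}

	static int example_open_m2m(struct v4l2_m2m_dev *m2m_dev, void *drv_priv,
				    struct v4l2_m2m_ctx **out)
	{
		struct v4l2_m2m_ctx *m2m_ctx;

		m2m_ctx = v4l2_m2m_ctx_init(m2m_dev, drv_priv, example_queue_init);
		if (IS_ERR(m2m_ctx))
			return PTR_ERR(m2m_ctx);

		*out = m2m_ctx;
		return 0;
	}
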
@@ -158,8 +360,23 @@ static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
m2m_ctx->cap_q_ctx.buffered = buffered;
}
+/**
+ * v4l2_m2m_ctx_release() - release m2m context
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ *
+ * Usually called from driver's release() function.
+ */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
+/**
+ * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @vbuf: pointer to struct &vb2_v4l2_buffer
+ *
+ * Call from the driver's &vb2_ops->buf_queue callback.
+ */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_v4l2_buffer *vbuf);
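
A hedged sketch of that callback in a vb2-based driver; fetching the context through
vb2_get_drv_priv() assumes the driver stored it in the queue's drv_priv, which is a
common but not mandatory arrangement:

	#include <media/v4l2-mem2mem.h>
	#include <media/videobuf2-v4l2.h>

	struct example_ctx {
		struct v4l2_m2m_ctx *m2m_ctx;
	};

	static void example_buf_queue(struct vb2_buffer *vb)
	{
		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
		struct example_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

		v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
	}
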
@@ -167,7 +384,7 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
* v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
* use
*
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -176,10 +393,10 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
}
/**
- * v4l2_m2m_num_src_bufs_ready() - return the number of destination buffers
+ * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
* ready for use
*
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
@@ -187,13 +404,18 @@ unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
return m2m_ctx->cap_q_ctx.num_rdy;
}
+/**
+ * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
+ *
+ * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
+ */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
/**
* v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
* buffers
*
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
@@ -204,7 +426,7 @@ static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
* v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
* ready buffers
*
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
@@ -214,7 +436,7 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
*
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -225,7 +447,7 @@ struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
/**
* v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
*
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
@@ -233,13 +455,19 @@ struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
return &m2m_ctx->cap_q_ctx.q;
}
+/**
+ * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
+ * return it
+ *
+ * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
+ */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
/**
* v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
* buffers and return it
*
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
@@ -250,7 +478,7 @@ static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
* v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
* ready buffers and return it
*
- * @m2m_ctx: pointer to struct v4l2_m2m_ctx
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
*/
static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 2a2240c99b30..cf778c5dca18 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -184,8 +184,6 @@ struct v4l2_subdev_io_pin_config {
* for it to be warned when the value of a control changes.
*
* @unsubscribe_event: remove event subscription from the control framework.
- *
- * @registered_async: the subdevice has been registered async.
*/
struct v4l2_subdev_core_ops {
int (*log_status)(struct v4l2_subdev *sd);
@@ -211,11 +209,11 @@ struct v4l2_subdev_core_ops {
struct v4l2_event_subscription *sub);
int (*unsubscribe_event)(struct v4l2_subdev *sd, struct v4l2_fh *fh,
struct v4l2_event_subscription *sub);
- int (*registered_async)(struct v4l2_subdev *sd);
};
/**
- * struct s_radio - Callbacks used when v4l device was opened in radio mode.
+ * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened
+ * in radio mode.
*
* @s_radio: callback for %VIDIOC_S_RADIO ioctl handler code.
*
@@ -229,7 +227,7 @@ struct v4l2_subdev_core_ops {
*
* @g_tuner: callback for %VIDIOC_G_TUNER ioctl handler code.
*
- * @s_tuner: callback for %VIDIOC_S_TUNER ioctl handler code. &vt->type must be
+ * @s_tuner: callback for %VIDIOC_S_TUNER ioctl handler code. @vt->type must be
* filled in. Normally done by video_ioctl2 or the
* bridge driver.
*
@@ -358,11 +356,7 @@ struct v4l2_mbus_frame_desc {
* @s_stream: used to notify the driver that a video stream will start or has
* stopped.
*
- * @cropcap: callback for %VIDIOC_CROPCAP ioctl handler code.
- *
- * @g_crop: callback for %VIDIOC_G_CROP ioctl handler code.
- *
- * @s_crop: callback for %VIDIOC_S_CROP ioctl handler code.
+ * @g_pixelaspect: callback to return the pixelaspect ratio.
*
* @g_parm: callback for %VIDIOC_G_PARM ioctl handler code.
*
@@ -402,9 +396,7 @@ struct v4l2_subdev_video_ops {
int (*g_tvnorms_output)(struct v4l2_subdev *sd, v4l2_std_id *std);
int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
int (*s_stream)(struct v4l2_subdev *sd, int enable);
- int (*cropcap)(struct v4l2_subdev *sd, struct v4l2_cropcap *cc);
- int (*g_crop)(struct v4l2_subdev *sd, struct v4l2_crop *crop);
- int (*s_crop)(struct v4l2_subdev *sd, const struct v4l2_crop *crop);
+ int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect);
int (*g_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
int (*s_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
int (*g_frame_interval)(struct v4l2_subdev *sd,
@@ -430,7 +422,7 @@ struct v4l2_subdev_video_ops {
* in video mode via the vbi device node.
*
* @decode_vbi_line: video decoders that support sliced VBI need to implement
- * this ioctl. Field p of the &struct v4l2_sliced_vbi_line is set to the
+ * this ioctl. Field p of the &struct v4l2_decode_vbi_line is set to the
* start of the VBI data that was generated by the decoder. The driver
* then parses the sliced VBI data and sets the other fields in the
* struct accordingly. The pointer p is updated to point to the start of
@@ -773,7 +765,7 @@ struct v4l2_subdev_platform_data {
* @entity: pointer to &struct media_entity
* @list: List of sub-devices
* @owner: The owner is the same as the driver's &struct device owner.
- * @owner_v4l2_dev: true if the &sd->owner matches the owner of &v4l2_dev->dev
+ * @owner_v4l2_dev: true if the &sd->owner matches the owner of @v4l2_dev->dev
* ownner. Initialized by v4l2_device_register_subdev().
* @flags: subdev flags. Can be:
* %V4L2_SUBDEV_FL_IS_I2C - Set this flag if this subdev is a i2c device;
@@ -783,9 +775,9 @@ struct v4l2_subdev_platform_data {
* %V4L2_SUBDEV_FL_HAS_EVENTS - Set this flag if this subdev generates
* events.
*
- * @v4l2_dev: pointer to &struct v4l2_device
- * @ops: pointer to &struct v4l2_subdev_ops
- * @internal_ops: pointer to &struct v4l2_subdev_internal_ops.
+ * @v4l2_dev: pointer to struct &v4l2_device
+ * @ops: pointer to struct &v4l2_subdev_ops
+ * @internal_ops: pointer to struct &v4l2_subdev_internal_ops.
* Never call these internal ops from within a driver!
* @ctrl_handler: The control handler of this subdev. May be NULL.
* @name: Name of the sub-device. Please notice that the name must be unique.
@@ -896,7 +888,7 @@ static inline void *v4l2_get_subdevdata(const struct v4l2_subdev *sd)
}
/**
- * v4l2_set_subdevdata - Sets V4L2 dev private host data
+ * v4l2_set_subdev_hostdata - Sets V4L2 dev private host data
*
* @sd: pointer to &struct v4l2_subdev
* @p: pointer to the private data to be stored.
@@ -907,7 +899,7 @@ static inline void v4l2_set_subdev_hostdata(struct v4l2_subdev *sd, void *p)
}
/**
- * v4l2_get_subdevdata - Gets V4L2 dev private data
+ * v4l2_get_subdev_hostdata - Gets V4L2 dev private data
*
* @sd: pointer to &struct v4l2_subdev
*
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index a4a9a55a0c42..ac5898a55fd9 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -20,6 +20,20 @@
#define VB2_MAX_FRAME (32)
#define VB2_MAX_PLANES (8)
+/**
+ * enum vb2_memory - type of memory model used to make the buffers visible
+ * on userspace.
+ *
+ * @VB2_MEMORY_UNKNOWN: Buffer status is unknown or it is not used yet on
+ * userspace.
+ * @VB2_MEMORY_MMAP: The buffers are allocated by the kernel and
+ * memory mapped via the mmap() ioctl. This model is
+ * also used when the user accesses the buffers via the
+ * read() or write() system calls.
+ * @VB2_MEMORY_USERPTR: The buffers are allocated in userspace and a
+ * pointer to them is passed to the kernel.
+ * @VB2_MEMORY_DMABUF: The buffers are passed to userspace via DMA buffer.
+ */
enum vb2_memory {
VB2_MEMORY_UNKNOWN = 0,
VB2_MEMORY_MMAP = 1,
@@ -33,15 +47,15 @@ struct vb2_threadio_data;
/**
* struct vb2_mem_ops - memory handling/memory allocator operations
* @alloc: allocate video memory and, optionally, allocator private data,
- * return NULL on failure or a pointer to allocator private,
+ * return ERR_PTR() on failure or a pointer to allocator private,
* per-buffer data on success; the returned private structure
- * will then be passed as buf_priv argument to other ops in this
+ * will then be passed as @buf_priv argument to other ops in this
* structure. Additional gfp_flags to use when allocating the
* are also passed to this operation. These flags are from the
* gfp_flags field of vb2_queue.
* @put: inform the allocator that the buffer will no longer be used;
* usually will result in the allocator freeing the buffer (if
- * no other users of this buffer are present); the buf_priv
+ * no other users of this buffer are present); the @buf_priv
* argument is the allocator private per-buffer structure
* previously returned from the alloc callback.
* @get_dmabuf: acquire userspace memory for a hardware operation; used for
@@ -50,18 +64,18 @@ struct vb2_threadio_data;
* USERPTR memory types; vaddr is the address passed to the
* videobuf layer when queuing a video buffer of USERPTR type;
* should return an allocator private per-buffer structure
- * associated with the buffer on success, NULL on failure;
- * the returned private structure will then be passed as buf_priv
+ * associated with the buffer on success, ERR_PTR() on failure;
+ * the returned private structure will then be passed as @buf_priv
* argument to other ops in this structure.
* @put_userptr: inform the allocator that a USERPTR buffer will no longer
* be used.
* @attach_dmabuf: attach a shared struct dma_buf for a hardware operation;
* used for DMABUF memory types; dev is the alloc device
- * dbuf is the shared dma_buf; returns NULL on failure;
+ * dbuf is the shared dma_buf; returns ERR_PTR() on failure;
* allocator private per-buffer structure on success;
* this needs to be used for further accesses to the buffer.
* @detach_dmabuf: inform the exporter of the buffer that the current DMABUF
- * buffer is no longer used; the buf_priv argument is the
+ * buffer is no longer used; the @buf_priv argument is the
* allocator private per-buffer structure previously returned
* from the attach_dmabuf callback.
* @map_dmabuf: request for access to the dmabuf from allocator; the allocator
@@ -95,11 +109,13 @@ struct vb2_threadio_data;
*
* #) Required ops for read/write access types: alloc, put, num_users, vaddr.
*
- * #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf, unmap_dmabuf.
+ * #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf,
+ * map_dmabuf, unmap_dmabuf.
*/
struct vb2_mem_ops {
void *(*alloc)(struct device *dev, unsigned long attrs,
- unsigned long size, enum dma_data_direction dma_dir,
+ unsigned long size,
+ enum dma_data_direction dma_dir,
gfp_t gfp_flags);
void (*put)(void *buf_priv);
struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
@@ -112,7 +128,8 @@ struct vb2_mem_ops {
void (*prepare)(void *buf_priv);
void (*finish)(void *buf_priv);
- void *(*attach_dmabuf)(struct device *dev, struct dma_buf *dbuf,
+ void *(*attach_dmabuf)(struct device *dev,
+ struct dma_buf *dbuf,
unsigned long size,
enum dma_data_direction dma_dir);
void (*detach_dmabuf)(void *buf_priv);
@@ -277,7 +294,7 @@ struct vb2_buffer {
/**
* struct vb2_ops - driver-specific callbacks
*
- * @queue_setup: called from %VIDIOC_REQBUFS and %VIDIOC_CREATE_BUFS
+ * @queue_setup: called from VIDIOC_REQBUFS() and VIDIOC_CREATE_BUFS()
* handlers before memory allocation. It can be called
* twice: if the original number of requested buffers
* could not be allocated, then it will be called a
@@ -288,11 +305,11 @@ struct vb2_buffer {
* buffer in \*num_planes, the size of each plane should be
* set in the sizes\[\] array and optional per-plane
* allocator specific device in the alloc_devs\[\] array.
- * When called from %VIDIOC_REQBUFS, \*num_planes == 0, the
- * driver has to use the currently configured format to
+ * When called from VIDIOC_REQBUFS(), \*num_planes == 0,
+ * the driver has to use the currently configured format to
* determine the plane sizes and \*num_buffers is the total
* number of buffers that are being allocated. When called
- * from %VIDIOC_CREATE_BUFS, \*num_planes != 0 and it
+ * from VIDIOC_CREATE_BUFS(), \*num_planes != 0 and it
* describes the requested number of planes and sizes\[\]
* contains the requested plane sizes. If either
* \*num_planes or the requested sizes are invalid callback
@@ -311,11 +328,11 @@ struct vb2_buffer {
* initialization failure (return != 0) will prevent
* queue setup from completing successfully; optional.
* @buf_prepare: called every time the buffer is queued from userspace
- * and from the %VIDIOC_PREPARE_BUF ioctl; drivers may
+ * and from the VIDIOC_PREPARE_BUF() ioctl; drivers may
* perform any initialization required before each
* hardware operation in this callback; drivers can
* access/modify the buffer here as it is still synced for
- * the CPU; drivers that support %VIDIOC_CREATE_BUFS must
+ * the CPU; drivers that support VIDIOC_CREATE_BUFS() must
* also validate the buffer size; if an error is returned,
* the buffer will not be queued in driver; optional.
* @buf_finish: called before every dequeue of the buffer back to
@@ -339,24 +356,25 @@ struct vb2_buffer {
* driver can return an error if hardware fails, in that
* case all buffers that have been already given by
* the @buf_queue callback are to be returned by the driver
- * by calling @vb2_buffer_done\(%VB2_BUF_STATE_QUEUED\).
+ * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED.
* If you need a minimum number of buffers before you can
* start streaming, then set @min_buffers_needed in the
* vb2_queue structure. If that is non-zero then
- * start_streaming won't be called until at least that
+ * @start_streaming won't be called until at least that
* many buffers have been queued up by userspace.
* @stop_streaming: called when 'streaming' state must be disabled; driver
* should stop any DMA transactions or wait until they
* finish and give back all buffers it got from &buf_queue
- * callback by calling @vb2_buffer_done\(\) with either
+ * callback by calling vb2_buffer_done() with either
* %VB2_BUF_STATE_DONE or %VB2_BUF_STATE_ERROR; may use
* vb2_wait_for_all_buffers() function
* @buf_queue: passes buffer vb to the driver; driver may start
* hardware operation on this buffer; driver should give
* the buffer back by calling vb2_buffer_done() function;
- * it is allways called after calling %VIDIOC_STREAMON ioctl;
- * might be called before start_streaming callback if user
- * pre-queued buffers before calling %VIDIOC_STREAMON.
+ * it is always called after calling VIDIOC_STREAMON()
+ * ioctl; might be called before @start_streaming callback
+ * if user pre-queued buffers before calling
+ * VIDIOC_STREAMON().
*/
struct vb2_ops {
int (*queue_setup)(struct vb2_queue *q,
@@ -378,7 +396,7 @@ struct vb2_ops {
};
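
To tie the callbacks above together, a minimal sketch of a single-planar driver's vb2_ops; MY_IMAGE_SIZE and the my_* handlers are placeholders, and the queue_setup logic just mirrors the VIDIOC_REQBUFS()/VIDIOC_CREATE_BUFS() rules described above:

    static int my_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
                              unsigned int *nplanes, unsigned int sizes[],
                              struct device *alloc_devs[])
    {
            if (*nplanes)                   /* VIDIOC_CREATE_BUFS path */
                    return sizes[0] < MY_IMAGE_SIZE ? -EINVAL : 0;

            *nplanes = 1;                   /* VIDIOC_REQBUFS path */
            sizes[0] = MY_IMAGE_SIZE;
            return 0;
    }

    static const struct vb2_ops my_vb2_ops = {
            .queue_setup            = my_queue_setup,
            .buf_prepare            = my_buf_prepare,
            .buf_queue              = my_buf_queue,
            .start_streaming        = my_start_streaming,
            .stop_streaming         = my_stop_streaming,
            .wait_prepare           = vb2_ops_wait_prepare,
            .wait_finish            = vb2_ops_wait_finish,
    };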
/**
- * struct vb2_ops - driver-specific callbacks
+ * struct vb2_buf_ops - driver-specific callbacks
*
* @verify_planes_array: Verify that a given user space structure contains
* enough planes for the buffer. This is called
@@ -404,7 +422,7 @@ struct vb2_buf_ops {
*
* @type: private buffer type whose content is defined by the vb2-core
* caller. For example, for V4L2, it should match
- * the V4L2_BUF_TYPE_* in include/uapi/linux/videodev2.h
+ * the types defined on enum &v4l2_buf_type
* @io_modes: supported io methods (see vb2_io_modes enum)
* @dev: device to use for the default allocation context if the driver
* doesn't fill in the @alloc_devs array.
@@ -439,12 +457,12 @@ struct vb2_buf_ops {
* Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32
* to force the buffer allocation to a specific memory zone.
* @min_buffers_needed: the minimum number of buffers needed before
- * start_streaming() can be called. Used when a DMA engine
+ * @start_streaming can be called. Used when a DMA engine
* cannot be started unless at least this number of buffers
* have been queued into the driver.
*/
/*
- * Private elements (won't appear at the DocBook):
+ * Private elements (won't appear in the uAPI book):
* @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
* @memory: current memory type used
* @bufs: videobuf buffer structures
@@ -457,7 +475,7 @@ struct vb2_buf_ops {
* @done_wq: waitqueue for processes waiting for buffers ready to be dequeued
* @alloc_devs: memory type/allocator-specific per-plane device
* @streaming: current streaming state
- * @start_streaming_called: start_streaming() was called successfully and we
+ * @start_streaming_called: @start_streaming was called successfully and we
* started streaming.
* @error: a fatal error occurred on the queue
* @waiting_for_buffers: used in poll() to check if vb2 is still waiting for
@@ -536,36 +554,286 @@ struct vb2_queue {
#endif
};
+/**
+ * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
+ * @vb: vb2_buffer to which the plane in question belongs to
+ * @plane_no: plane number for which the address is to be returned
+ *
+ * This function returns a kernel virtual address of a given plane if
+ * such a mapping exists, NULL otherwise.
+ */
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
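
As an example (hypothetical names), a buf_prepare callback could use this to touch the buffer contents with the CPU while the buffer is still synced for it:

    static int my_buf_prepare(struct vb2_buffer *vb)
    {
            void *vaddr = vb2_plane_vaddr(vb, 0);

            if (!vaddr)                     /* allocator has no kernel mapping */
                    return -EINVAL;

            my_patch_header(vaddr);         /* e.g. fix up a header in place */
            vb2_set_plane_payload(vb, 0, MY_IMAGE_SIZE);
            return 0;
    }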
+
+/**
+ * vb2_plane_cookie() - Return allocator specific cookie for the given plane
+ * @vb: vb2_buffer to which the plane in question belongs to
+ * @plane_no: plane number for which the cookie is to be returned
+ *
+ * This function returns an allocator specific cookie for a given plane if
+ * available, NULL otherwise. The allocator should provide some simple static
+ * inline function, which would convert this cookie to the allocator specific
+ * type that can be used directly by the driver to access the buffer. This can
+ * be for example physical address, pointer to scatter list or IOMMU mapping.
+ */
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
+/**
+ * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
+ * @vb: vb2_buffer returned from the driver
+ * @state: either %VB2_BUF_STATE_DONE if the operation finished
+ * successfully, %VB2_BUF_STATE_ERROR if the operation finished
+ * with an error or %VB2_BUF_STATE_QUEUED if the driver wants to
+ * requeue buffers. If start_streaming fails then it should return
+ * buffers with state %VB2_BUF_STATE_QUEUED to put them back into
+ * the queue.
+ *
+ * This function should be called by the driver after a hardware operation on
+ * a buffer is finished and the buffer may be returned to userspace. The driver
+ * cannot use this buffer anymore until videobuf queues it back to the driver
+ * by the means of &vb2_ops->buf_queue callback. Only buffers previously queued
+ * to the driver by &vb2_ops->buf_queue can be passed to this function.
+ *
+ * While streaming a buffer can only be returned in state DONE or ERROR.
+ * The start_streaming op can also return them in case the DMA engine cannot
+ * be started for some reason. In that case the buffers should be returned with
+ * state QUEUED.
+ */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
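
A sketch of the typical completion path, assuming the driver tracks its currently active buffer in a hypothetical dev->active field set from buf_queue:

    static irqreturn_t my_irq(int irq, void *priv)
    {
            struct my_dev *dev = priv;
            struct vb2_buffer *vb = dev->active;

            if (!vb)
                    return IRQ_NONE;

            dev->active = NULL;
            vb->timestamp = ktime_get_ns();
            vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
            return IRQ_HANDLED;
    }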
+
+/**
+ * vb2_discard_done() - discard all buffers marked as DONE
+ * @q: videobuf2 queue
+ *
+ * This function is intended to be used with suspend/resume operations. It
+ * discards all 'done' buffers as they would be too old to be requested after
+ * resume.
+ *
+ * Drivers must stop the hardware and synchronize with interrupt handlers and/or
+ * delayed work items before calling this function to make sure no buffer will be
+ * touched by the driver and/or hardware.
+ */
void vb2_discard_done(struct vb2_queue *q);
+
+/**
+ * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
+ * @q: videobuf2 queue
+ *
+ * This function will wait until all buffers that have been given to the driver
+ * by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done(). It
+ * doesn't call wait_prepare()/wait_finish() pair. It is intended to be called
+ * with all locks taken, for example from &vb2_ops->stop_streaming callback.
+ */
int vb2_wait_for_all_buffers(struct vb2_queue *q);
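
For example, a stop_streaming callback (with a hypothetical my_hw_stop() and a driver buffer list, where struct my_buffer embeds a struct vb2_v4l2_buffer) typically stops the hardware, hands every driver-owned buffer back with %VB2_BUF_STATE_ERROR and only then waits:

    static void my_stop_streaming(struct vb2_queue *q)
    {
            struct my_dev *dev = vb2_get_drv_priv(q);
            struct my_buffer *buf, *tmp;

            my_hw_stop(dev);                /* halt DMA first */

            list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
                    list_del(&buf->list);
                    vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
            }

            vb2_wait_for_all_buffers(q);    /* all locks are held here */
    }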
+/**
+ * vb2_core_querybuf() - query video buffer information
+ * @q: videobuf queue
+ * @index: id number of the buffer
+ * @pb: buffer struct passed from userspace
+ *
+ * Should be called from vidioc_querybuf ioctl handler in driver.
+ * The passed buffer should have been verified.
+ * This function fills the relevant information for the userspace.
+ */
void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
+
+/**
+ * vb2_core_reqbufs() - Initiate streaming
+ * @q: videobuf2 queue
+ * @memory: memory type
+ * @count: requested buffer count
+ *
+ * Should be called from vidioc_reqbufs ioctl handler of a driver.
+ *
+ * This function:
+ *
+ * #) verifies streaming parameters passed from the userspace,
+ * #) sets up the queue,
+ * #) negotiates number of buffers and planes per buffer with the driver
+ * to be used during streaming,
+ * #) allocates internal buffer structures (struct vb2_buffer), according to
+ * the agreed parameters,
+ * #) for MMAP memory type, allocates actual video memory, using the
+ * memory handling/allocation routines provided during queue initialization
+ *
+ * If the requested buffer count is 0, all the memory will be freed instead.
+ * If the queue has been allocated previously (by a previous vb2_core_reqbufs()
+ * call) and the queue is not busy, memory will be reallocated.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_reqbufs handler in driver.
+ */
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
unsigned int *count);
+
+/**
+ * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
+ * @q: videobuf2 queue
+ * @memory: memory type
+ * @count: requested buffer count
+ * @requested_planes: number of planes requested
+ * @requested_sizes: array with the size of the planes
+ *
+ * Should be called from VIDIOC_CREATE_BUFS() ioctl handler of a driver.
+ * This function:
+ *
+ * #) verifies parameter sanity
+ * #) calls the .queue_setup() queue operation
+ * #) performs any necessary memory allocations
+ *
+ * Return: the return values from this function are intended to be directly
+ * returned from VIDIOC_CREATE_BUFS() handler in driver.
+ */
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count, unsigned requested_planes,
- const unsigned int requested_sizes[]);
+ unsigned int *count, unsigned int requested_planes,
+ const unsigned int requested_sizes[]);
+
+/**
+ * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
+ * to the kernel
+ * @q: videobuf2 queue
+ * @index: id number of the buffer
+ * @pb: buffer structure passed from userspace to vidioc_prepare_buf
+ * handler in driver
+ *
+ * Should be called from vidioc_prepare_buf ioctl handler of a driver.
+ * The passed buffer should have been verified.
+ * This function calls buf_prepare callback in the driver (if provided),
+ * in which driver-specific buffer initialization can be performed.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_prepare_buf handler in driver.
+ */
int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
+
+/**
+ * vb2_core_qbuf() - Queue a buffer from userspace
+ *
+ * @q: videobuf2 queue
+ * @index: id number of the buffer
+ * @pb: buffer structure passed from userspace to vidioc_qbuf handler
+ * in driver
+ *
+ * Should be called from vidioc_qbuf ioctl handler of a driver.
+ * The passed buffer should have been verified.
+ *
+ * This function:
+ *
+ * #) if necessary, calls buf_prepare callback in the driver (if provided), in
+ * which driver-specific buffer initialization can be performed,
+ * #) if streaming is on, queues the buffer in driver by the means of
+ * &vb2_ops->buf_queue callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_qbuf handler in driver.
+ */
int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
+
+/**
+ * vb2_core_dqbuf() - Dequeue a buffer to the userspace
+ * @q: videobuf2 queue
+ * @pindex: pointer to the buffer index. May be NULL
+ * @pb: buffer structure passed from userspace to vidioc_dqbuf handler
+ * in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ * buffers ready for dequeuing are present. Normally the driver
+ * would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from vidioc_dqbuf ioctl handler of a driver.
+ * The passed buffer should have been verified.
+ *
+ * This function:
+ *
+ * #) calls buf_finish callback in the driver (if provided), in which
+ * driver can perform any additional operations that may be required before
+ * returning the buffer to userspace, such as cache sync,
+ * #) the buffer struct members are filled with relevant information for
+ * the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_dqbuf handler in driver.
+ */
int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
bool nonblocking);
int vb2_core_streamon(struct vb2_queue *q, unsigned int type);
int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
+/**
+ * vb2_core_expbuf() - Export a buffer as a file descriptor
+ * @q: videobuf2 queue
+ * @fd: file descriptor associated with DMABUF (set by driver)
+ * @type: buffer type
+ * @index: id number of the buffer
+ * @plane: index of the plane to be exported, 0 for single plane queues
+ * @flags: flags for newly created file, currently only O_CLOEXEC is
+ * supported, refer to manual of open syscall for more details
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_expbuf handler in driver.
+ */
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
unsigned int index, unsigned int plane, unsigned int flags);
+/**
+ * vb2_core_queue_init() - initialize a videobuf2 queue
+ * @q: videobuf2 queue; this structure should be allocated in driver
+ *
+ * The vb2_queue structure should be allocated by the driver. The driver is
+ * responsible of clearing it's content and setting initial values for some
+ * required entries before calling this function.
+ * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
+ * to the struct vb2_queue description in include/media/videobuf2-core.h
+ * for more information.
+ */
int vb2_core_queue_init(struct vb2_queue *q);
+
+/**
+ * vb2_core_queue_release() - stop streaming, release the queue and free memory
+ * @q: videobuf2 queue
+ *
+ * This function stops streaming and performs necessary clean ups, including
+ * freeing video buffer memory. The driver is responsible for freeing
+ * the vb2_queue structure itself.
+ */
void vb2_core_queue_release(struct vb2_queue *q);
+/**
+ * vb2_queue_error() - signal a fatal error on the queue
+ * @q: videobuf2 queue
+ *
+ * Flag that a fatal unrecoverable error has occurred and wake up all processes
+ * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing
+ * buffers will return -EIO.
+ *
+ * The error flag will be cleared when cancelling the queue, either from
+ * vb2_streamoff() or vb2_queue_release(). Drivers should thus not call this
+ * function before starting the stream, otherwise the error flag will remain set
+ * until the queue is released when closing the device node.
+ */
void vb2_queue_error(struct vb2_queue *q);
+/**
+ * vb2_mmap() - map video buffers into application address space
+ * @q: videobuf2 queue
+ * @vma: vma passed to the mmap file operation handler in the driver
+ *
+ * Should be called from mmap file operation handler of a driver.
+ * This function maps one plane of one of the available video buffers to
+ * userspace. To map whole video memory allocated on reqbufs, this function
+ * has to be called once per each plane per each buffer previously allocated.
+ *
+ * When the userspace application calls mmap(), it passes an offset returned
+ * earlier by the vidioc_querybuf handler. That offset acts as
+ * a "cookie", which is then used to identify the plane to be mapped.
+ * This function finds a plane with a matching offset and a mapping is performed
+ * by the means of a provided memory operation.
+ *
+ * The return values from this function are intended to be directly returned
+ * from the mmap handler in driver.
+ */
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma);
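
A sketch of the corresponding mmap file operation (my_dev and its embedded queue are hypothetical):

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct my_dev *dev = video_drvdata(file);

            return vb2_mmap(&dev->queue, vma);
    }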
+
#ifndef CONFIG_MMU
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
unsigned long addr,
@@ -573,15 +841,36 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
unsigned long pgoff,
unsigned long flags);
#endif
+
+/**
+ * vb2_core_poll() - implements poll userspace operation
+ * @q: videobuf2 queue
+ * @file: file argument passed to the poll file operation handler
+ * @wait: wait argument passed to the poll file operation handler
+ *
+ * This function implements poll file operation handler for a driver.
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
+ * be informed that the file descriptor of a video device is available for
+ * reading.
+ * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
+ * will be reported as available for writing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from poll handler in driver.
+ */
unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
- poll_table *wait);
+ poll_table *wait);
+
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
loff_t *ppos, int nonblock);
size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
loff_t *ppos, int nonblock);
-/*
- * vb2_thread_fnc - callback function for use with vb2_thread
+/**
+ * typedef vb2_thread_fnc - callback function for use with vb2_thread
+ *
+ * @vb: pointer to struct &vb2_buffer
+ * @priv: pointer to a private pointer
*
* This is called whenever a buffer is dequeued in the thread.
*/
@@ -597,9 +886,11 @@ typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv);
* This starts a thread that will queue and dequeue until an error occurs
* or @vb2_thread_stop is called.
*
- * This function should not be used for anything else but the videobuf2-dvb
- * support. If you think you have another good use-case for this, then please
- * contact the linux-media mailinglist first.
+ * .. attention::
+ *
+ * This function should not be used for anything else but the videobuf2-dvb
+ * support. If you think you have another good use-case for this, then please
+ * contact the linux-media mailing list first.
*/
int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
const char *thread_name);
@@ -717,7 +1008,26 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
* The following functions are not part of the vb2 core API, but are useful
* functions for videobuf2-*.
*/
+
+/**
+ * vb2_buffer_in_use() - return true if the buffer is in use and
+ * the queue cannot be freed (by the means of a REQBUFS(0) call)
+ *
+ * @vb: the buffer to check
+ * @q: videobuf queue
+ */
bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb);
+
+/**
+ * vb2_verify_memory_type() - Check whether the memory type and buffer type
+ * passed to a buffer operation are compatible with the queue.
+ *
+ * @q: videobuf queue
+ * @memory: memory model, as defined by enum &vb2_memory.
+ * @type: private buffer type whose content is defined by the vb2-core
+ * caller. For example, for V4L2, it should match
+ * the types defined on enum &v4l2_buf_type
+ */
int vb2_verify_memory_type(struct vb2_queue *q,
enum vb2_memory memory, unsigned int type);
#endif /* _MEDIA_VIDEOBUF2_CORE_H */
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 3cc836f76675..036127c54bbf 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -25,11 +25,13 @@
/**
* struct vb2_v4l2_buffer - video buffer information for v4l2
+ *
* @vb2_buf: video buffer 2
* @flags: buffer informational flags
* @field: enum v4l2_field; field order of the image in the buffer
* @timecode: frame timecode
* @sequence: sequence count of this frame
+ *
* Should contain enough information to be able to cover all the fields
* of struct v4l2_buffer at videodev2.h
*/
@@ -49,22 +51,183 @@ struct vb2_v4l2_buffer {
container_of(vb, struct vb2_v4l2_buffer, vb2_buf)
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
+
+/**
+ * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies
+ * the memory and type values.
+ *
+ * @q: videobuf2 queue
+ * @req: struct passed from userspace to vidioc_reqbufs handler
+ * in driver
+ */
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
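
A sketch of the matching ioctl handler, again with my_dev and its embedded queue as placeholders:

    static int my_reqbufs(struct file *file, void *priv,
                          struct v4l2_requestbuffers *req)
    {
            struct my_dev *dev = video_drvdata(file);

            return vb2_reqbufs(&dev->queue, req);
    }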
+/**
+ * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies
+ * the memory and type values.
+ *
+ * @q: videobuf2 queue
+ * @create: creation parameters, passed from userspace to vidioc_create_bufs
+ * handler in driver
+ */
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
+
+/**
+ * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
+ *
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_prepare_buf
+ * handler in driver
+ *
+ * Should be called from vidioc_prepare_buf ioctl handler of a driver.
+ * This function:
+ *
+ * #) verifies the passed buffer,
+ * #) calls buf_prepare callback in the driver (if provided), in which
+ * driver-specific buffer initialization can be performed.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_prepare_buf handler in driver.
+ */
int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
+/**
+ * vb2_qbuf() - Queue a buffer from userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to VIDIOC_QBUF() handler
+ * in driver
+ *
+ * Should be called from VIDIOC_QBUF() ioctl handler of a driver.
+ *
+ * This function:
+ *
+ * #) verifies the passed buffer,
+ * #) if necessary, calls buf_prepare callback in the driver (if provided), in
+ * which driver-specific buffer initialization can be performed,
+ * #) if streaming is on, queues the buffer in driver by the means of buf_queue
+ * callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from VIDIOC_QBUF() handler in driver.
+ */
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
+
+/**
+ * vb2_expbuf() - Export a buffer as a file descriptor
+ * @q: videobuf2 queue
+ * @eb: export buffer structure passed from userspace to VIDIOC_EXPBUF()
+ * handler in driver
+ *
+ * The return values from this function are intended to be directly returned
+ * from VIDIOC_EXPBUF() handler in driver.
+ */
int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
+
+/**
+ * vb2_dqbuf() - Dequeue a buffer to the userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to VIDIOC_DQBUF() handler
+ * in driver
+ * @nonblocking: if true, this call will not sleep waiting for a buffer if no
+ * buffers ready for dequeuing are present. Normally the driver
+ * would be passing (file->f_flags & O_NONBLOCK) here
+ *
+ * Should be called from VIDIOC_DQBUF() ioctl handler of a driver.
+ *
+ * This function:
+ *
+ * #) verifies the passed buffer,
+ * #) calls buf_finish callback in the driver (if provided), in which
+ * driver can perform any additional operations that may be required before
+ * returning the buffer to userspace, such as cache sync,
+ * #) the buffer struct members are filled with relevant information for
+ * the userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from VIDIOC_DQBUF() handler in driver.
+ */
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
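
Sketches of the two handlers (hypothetical names); note how O_NONBLOCK from the file flags becomes the @nonblocking argument:

    static int my_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
    {
            struct my_dev *dev = video_drvdata(file);

            return vb2_qbuf(&dev->queue, b);
    }

    static int my_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
    {
            struct my_dev *dev = video_drvdata(file);

            return vb2_dqbuf(&dev->queue, b, file->f_flags & O_NONBLOCK);
    }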
+/**
+ * vb2_streamon - start streaming
+ * @q: videobuf2 queue
+ * @type: type argument passed from userspace to vidioc_streamon handler
+ *
+ * Should be called from vidioc_streamon handler of a driver.
+ *
+ * This function:
+ *
+ * #) verifies current state,
+ * #) passes any previously queued buffers to the driver and starts streaming.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamon handler in the driver.
+ */
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
+
+/**
+ * vb2_streamoff - stop streaming
+ * @q: videobuf2 queue
+ * @type: type argument passed from userspace to vidioc_streamoff handler
+ *
+ * Should be called from vidioc_streamoff handler of a driver.
+ *
+ * This function:
+ *
+ * #) verifies current state,
+ * #) stops streaming and dequeues any queued buffers, including those previously
+ * passed to the driver (after waiting for the driver to finish).
+ *
+ * This call can be used for pausing playback.
+ * The return values from this function are intended to be directly returned
+ * from vidioc_streamoff handler in the driver.
+ */
int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
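
And the corresponding stream start/stop handlers, as a hedged sketch with the same hypothetical my_dev:

    static int my_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
    {
            struct my_dev *dev = video_drvdata(file);

            return vb2_streamon(&dev->queue, type);
    }

    static int my_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
    {
            struct my_dev *dev = video_drvdata(file);

            return vb2_streamoff(&dev->queue, type);
    }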
+/**
+ * vb2_queue_init() - initialize a videobuf2 queue
+ * @q: videobuf2 queue; this structure should be allocated in driver
+ *
+ * The vb2_queue structure should be allocated by the driver. The driver is
+ * responsible for clearing its contents and setting initial values for some
+ * required entries before calling this function.
+ * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
+ * to the struct vb2_queue description in include/media/videobuf2-core.h
+ * for more information.
+ */
int __must_check vb2_queue_init(struct vb2_queue *q);
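
A sketch of the mandatory setup, assuming the hypothetical my_dev/my_buffer structures (my_buffer embeds struct vb2_v4l2_buffer as its first member), the my_vb2_ops from an earlier example and the vmalloc allocator:

    static int my_init_queue(struct my_dev *dev)
    {
            struct vb2_queue *q = &dev->queue;

            memset(q, 0, sizeof(*q));
            q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
            q->drv_priv = dev;
            q->buf_struct_size = sizeof(struct my_buffer);
            q->ops = &my_vb2_ops;
            q->mem_ops = &vb2_vmalloc_memops;
            q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
            q->min_buffers_needed = 2;
            q->lock = &dev->mutex;          /* serializes queue operations */

            return vb2_queue_init(q);
    }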
+
+/**
+ * vb2_queue_release() - stop streaming, release the queue and free memory
+ * @q: videobuf2 queue
+ *
+ * This function stops streaming and performs necessary clean ups, including
+ * freeing video buffer memory. The driver is responsible for freeing
+ * the vb2_queue structure itself.
+ */
void vb2_queue_release(struct vb2_queue *q);
+
+/**
+ * vb2_poll() - implements poll userspace operation
+ * @q: videobuf2 queue
+ * @file: file argument passed to the poll file operation handler
+ * @wait: wait argument passed to the poll file operation handler
+ *
+ * This function implements poll file operation handler for a driver.
+ * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
+ * be informed that the file descriptor of a video device is available for
+ * reading.
+ * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
+ * will be reported as available for writing.
+ *
+ * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
+ * pending events.
+ *
+ * The return values from this function are intended to be directly returned
+ * from poll handler in driver.
+ */
unsigned int vb2_poll(struct vb2_queue *q, struct file *file,
- poll_table *wait);
+ poll_table *wait);
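
A sketch of the driver's poll file operation forwarding to vb2_poll() (my_dev hypothetical):

    static unsigned int my_poll(struct file *file, poll_table *wait)
    {
            struct my_dev *dev = video_drvdata(file);

            return vb2_poll(&dev->queue, file, wait);
    }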
/*
* The following functions are not part of the vb2 core API, but are simple
@@ -105,9 +268,22 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
#endif
-/* struct vb2_ops helpers, only use if vq->lock is non-NULL. */
-
+/**
+ * vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue
+ *
+ * @vq: pointer to struct vb2_queue
+ *
+ * .. note:: only use if vq->lock is non-NULL.
+ */
void vb2_ops_wait_prepare(struct vb2_queue *vq);
+
+/**
+ * vb2_ops_wait_finish - helper function to unlock a struct &vb2_queue
+ *
+ * @vq: pointer to struct vb2_queue
+ *
+ * .. note:: only use if vq->lock is non-NULL.
+ */
void vb2_ops_wait_finish(struct vb2_queue *vq);
#endif /* _MEDIA_VIDEOBUF2_V4L2_H */
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 9322d9775fb7..458b400373d4 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -26,7 +26,7 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
struct vsp1_du_atomic_config {
u32 pixelformat;
unsigned int pitch;
- dma_addr_t mem[2];
+ dma_addr_t mem[3];
struct v4l2_rect src;
struct v4l2_rect dst;
unsigned int alpha;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 9826d3a9464c..f2d072787947 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -1,8 +1,9 @@
#ifndef _ADDRCONF_H
#define _ADDRCONF_H
-#define MAX_RTR_SOLICITATIONS 3
+#define MAX_RTR_SOLICITATIONS -1 /* unlimited */
#define RTR_SOLICITATION_INTERVAL (4*HZ)
+#define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 7b0f88699b25..1061a472a3e3 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -12,42 +12,39 @@
#ifndef _NET_RXRPC_H
#define _NET_RXRPC_H
-#include <linux/skbuff.h>
#include <linux/rxrpc.h>
+struct key;
+struct sock;
+struct socket;
struct rxrpc_call;
-/*
- * the mark applied to socket buffers that may be intercepted
- */
-enum rxrpc_skb_mark {
- RXRPC_SKB_MARK_DATA, /* data message */
- RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
- RXRPC_SKB_MARK_BUSY, /* server busy message */
- RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
- RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
- RXRPC_SKB_MARK_NET_ERROR, /* network error message */
- RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
- RXRPC_SKB_MARK_NEW_CALL, /* local error message */
-};
+typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
+typedef void (*rxrpc_notify_new_call_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
+typedef void (*rxrpc_discard_new_call_t)(struct rxrpc_call *, unsigned long);
+typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long);
-typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long,
- struct sk_buff *);
-void rxrpc_kernel_intercept_rx_messages(struct socket *, rxrpc_interceptor_t);
+void rxrpc_kernel_new_call_notification(struct socket *,
+ rxrpc_notify_new_call_t,
+ rxrpc_discard_new_call_t);
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
struct sockaddr_rxrpc *,
struct key *,
unsigned long,
- gfp_t);
-int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
-void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
-void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
-void rxrpc_kernel_end_call(struct rxrpc_call *);
-bool rxrpc_kernel_is_data_last(struct sk_buff *);
-u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
-int rxrpc_kernel_get_error_number(struct sk_buff *);
-void rxrpc_kernel_free_skb(struct sk_buff *);
-struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
-int rxrpc_kernel_reject_call(struct socket *);
+ gfp_t,
+ rxrpc_notify_rx_t);
+int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
+ struct msghdr *, size_t);
+int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
+ void *, size_t, size_t *, bool, u32 *);
+void rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
+ u32, int, const char *);
+void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
+void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
+ struct sockaddr_rxrpc *);
+int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
+ rxrpc_user_attach_call_t, unsigned long, gfp_t);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 9b4c418bebd8..fd60eccb59a6 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -52,7 +52,7 @@ struct unix_sock {
struct sock sk;
struct unix_address *addr;
struct path path;
- struct mutex readlock;
+ struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
atomic_long_t inflight;
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index bfd1590821d6..0a1e21d7bce1 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -29,7 +29,8 @@
#include <net/sock.h>
#include <linux/seq_file.h>
-#define BT_SUBSYS_VERSION "2.21"
+#define BT_SUBSYS_VERSION 2
+#define BT_SUBSYS_REVISION 22
#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
@@ -371,6 +372,7 @@ void hci_sock_set_flag(struct sock *sk, int nr);
void hci_sock_clear_flag(struct sock *sk, int nr);
int hci_sock_test_flag(struct sock *sk, int nr);
unsigned short hci_sock_get_channel(struct sock *sk);
+u32 hci_sock_get_cookie(struct sock *sk);
int hci_sock_init(void);
void hci_sock_cleanup(void);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 003b25283407..99aa5e5e3100 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -63,6 +63,7 @@
#define HCI_SDIO 6
#define HCI_SPI 7
#define HCI_I2C 8
+#define HCI_SMD 9
/* HCI controller types */
#define HCI_PRIMARY 0x00
@@ -207,7 +208,11 @@ enum {
HCI_MGMT_INDEX_EVENTS,
HCI_MGMT_UNCONF_INDEX_EVENTS,
HCI_MGMT_EXT_INDEX_EVENTS,
- HCI_MGMT_GENERIC_EVENTS,
+ HCI_MGMT_EXT_INFO_EVENTS,
+ HCI_MGMT_OPTION_EVENTS,
+ HCI_MGMT_SETTING_EVENTS,
+ HCI_MGMT_DEV_CLASS_EVENTS,
+ HCI_MGMT_LOCAL_NAME_EVENTS,
HCI_MGMT_OOB_DATA_EVENTS,
};
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ee7fc47680a1..f00bf667ec33 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -211,6 +211,7 @@ struct hci_dev {
__u8 dev_name[HCI_MAX_NAME_LENGTH];
__u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
__u8 eir[HCI_MAX_EIR_LENGTH];
+ __u16 appearance;
__u8 dev_class[3];
__u8 major_class;
__u8 minor_class;
@@ -399,7 +400,9 @@ struct hci_dev {
struct delayed_work rpa_expired;
bdaddr_t rpa;
+#if IS_ENABLED(CONFIG_BT_LEDS)
struct led_trigger *power_led;
+#endif
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
@@ -1026,8 +1029,8 @@ int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
-void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
-void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_do_close(struct hci_dev *hdev);
@@ -1404,6 +1407,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
+ void *data, u16 data_len, ktime_t tstamp,
+ int flag, struct sock *skip_sk);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
@@ -1449,6 +1455,7 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
+void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 587d0131b349..240786b04a46 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -45,6 +45,10 @@ struct hci_mon_hdr {
#define HCI_MON_VENDOR_DIAG 11
#define HCI_MON_SYSTEM_NOTE 12
#define HCI_MON_USER_LOGGING 13
+#define HCI_MON_CTRL_OPEN 14
+#define HCI_MON_CTRL_CLOSE 15
+#define HCI_MON_CTRL_COMMAND 16
+#define HCI_MON_CTRL_EVENT 17
struct hci_mon_new_index {
__u8 type;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 7647964b1efa..72a456bbbcd5 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -586,6 +586,24 @@ struct mgmt_rp_get_adv_size_info {
#define MGMT_OP_START_LIMITED_DISCOVERY 0x0041
+#define MGMT_OP_READ_EXT_INFO 0x0042
+#define MGMT_READ_EXT_INFO_SIZE 0
+struct mgmt_rp_read_ext_info {
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_OP_SET_APPEARANCE 0x0043
+struct mgmt_cp_set_appearance {
+ __u16 appearance;
+} __packed;
+#define MGMT_SET_APPEARANCE_SIZE 2
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -800,3 +818,9 @@ struct mgmt_ev_advertising_added {
struct mgmt_ev_advertising_removed {
__u8 instance;
} __packed;
+
+#define MGMT_EV_EXT_INFO_CHANGED 0x0025
+struct mgmt_ev_ext_info_changed {
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 6360c259da6d..f32f7ef8a23a 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -37,18 +37,6 @@
#ifndef __long_aligned
#define __long_aligned __attribute__((aligned((sizeof(long)))))
#endif
-/*
- * Less bad way to call ioctl from within the kernel; this needs to be
- * done some other way to get the call out of interrupt context.
- * Needs "ioctl" variable to be supplied by calling context.
- */
-#define IOCTL(dev, arg, cmd) ({ \
- int res = 0; \
- mm_segment_t fs = get_fs(); \
- set_fs(get_ds()); \
- res = ioctl(dev, arg, cmd); \
- set_fs(fs); \
- res; })
#define BOND_MODE(bond) ((bond)->params.mode)
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9c23f4d33e06..bd19faad0d96 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5,7 +5,7 @@
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2015 Intel Deutschland GmbH
+ * Copyright 2015-2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -593,6 +593,8 @@ struct survey_info {
s8 noise;
};
+#define CFG80211_MAX_WEP_KEYS 4
+
/**
* struct cfg80211_crypto_settings - Crypto settings
* @wpa_versions: indicates which, if any, WPA versions are enabled
@@ -610,6 +612,9 @@ struct survey_info {
* allowed through even on unauthorized ports
* @control_port_no_encrypt: TRUE to prevent encryption of control port
* protocol frames.
+ * @wep_keys: static WEP keys, if not NULL points to an array of
+ * CFG80211_MAX_WEP_KEYS WEP keys
+ * @wep_tx_key: key index (0..3) of the default TX static WEP key
*/
struct cfg80211_crypto_settings {
u32 wpa_versions;
@@ -621,6 +626,8 @@ struct cfg80211_crypto_settings {
bool control_port;
__be16 control_port_ethertype;
bool control_port_no_encrypt;
+ struct key_params *wep_keys;
+ int wep_tx_key;
};
/**
@@ -676,6 +683,18 @@ struct cfg80211_acl_data {
struct mac_address mac_addrs[];
};
+/*
+ * cfg80211_bitrate_mask - masks for bitrate control
+ */
+struct cfg80211_bitrate_mask {
+ struct {
+ u32 legacy;
+ u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
+ u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ enum nl80211_txrate_gi gi;
+ } control[NUM_NL80211_BANDS];
+};
+
/**
* struct cfg80211_ap_settings - AP configuration
*
@@ -700,6 +719,7 @@ struct cfg80211_acl_data {
* MAC address based access control
* @pbss: If set, start as a PCP instead of AP. Relevant for DMG
* networks.
+ * @beacon_rate: bitrate to be used for beacons
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -719,6 +739,7 @@ struct cfg80211_ap_settings {
bool p2p_opp_ps;
const struct cfg80211_acl_data *acl;
bool pbss;
+ struct cfg80211_bitrate_mask beacon_rate;
};
/**
@@ -775,9 +796,9 @@ enum station_parameters_apply_mask {
* (or NULL for no change)
* @supported_rates_len: number of supported rates
* @sta_flags_mask: station flags that changed
- * (bitmask of BIT(NL80211_STA_FLAG_...))
+ * (bitmask of BIT(%NL80211_STA_FLAG_...))
* @sta_flags_set: station flags values
- * (bitmask of BIT(NL80211_STA_FLAG_...))
+ * (bitmask of BIT(%NL80211_STA_FLAG_...))
* @listen_interval: listen interval or -1 for no change
* @aid: AID or zero for no change
* @peer_aid: mesh peer AID or zero for no change
@@ -1102,6 +1123,7 @@ struct station_info {
struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
};
+#if IS_ENABLED(CONFIG_CFG80211)
/**
* cfg80211_get_station - retrieve information about a given station
* @dev: the device where the station is supposed to be connected to
@@ -1114,6 +1136,14 @@ struct station_info {
*/
int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
struct station_info *sinfo);
+#else
+static inline int cfg80211_get_station(struct net_device *dev,
+ const u8 *mac_addr,
+ struct station_info *sinfo)
+{
+ return -ENOENT;
+}
+#endif
/**
* enum monitor_flags - monitor flags
@@ -1342,6 +1372,7 @@ struct mesh_config {
* @beacon_interval: beacon interval to use
* @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a]
* @basic_rates: basic rates to use when creating the mesh
+ * @beacon_rate: bitrate to be used for beacons
*
* These parameters are fixed when the mesh is created.
*/
@@ -1362,6 +1393,7 @@ struct mesh_setup {
u16 beacon_interval;
int mcast_rate[NUM_NL80211_BANDS];
u32 basic_rates;
+ struct cfg80211_bitrate_mask beacon_rate;
};
/**
@@ -2001,17 +2033,6 @@ enum wiphy_params_flags {
WIPHY_PARAM_DYN_ACK = 1 << 5,
};
-/*
- * cfg80211_bitrate_mask - masks for bitrate control
- */
-struct cfg80211_bitrate_mask {
- struct {
- u32 legacy;
- u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
- u16 vht_mcs[NL80211_VHT_NSS_MAX];
- enum nl80211_txrate_gi gi;
- } control[NUM_NL80211_BANDS];
-};
/**
* struct cfg80211_pmksa - PMK Security Association
*
@@ -2293,6 +2314,98 @@ struct cfg80211_qos_map {
};
/**
+ * struct cfg80211_nan_conf - NAN configuration
+ *
+ * This struct defines NAN configuration parameters
+ *
+ * @master_pref: master preference (1 - 255)
+ * @dual: dual band operation mode, see &enum nl80211_nan_dual_band_conf
+ */
+struct cfg80211_nan_conf {
+ u8 master_pref;
+ u8 dual;
+};
+
+/**
+ * enum cfg80211_nan_conf_changes - indicates changed fields in NAN
+ * configuration
+ *
+ * @CFG80211_NAN_CONF_CHANGED_PREF: master preference
+ * @CFG80211_NAN_CONF_CHANGED_DUAL: dual band operation
+ */
+enum cfg80211_nan_conf_changes {
+ CFG80211_NAN_CONF_CHANGED_PREF = BIT(0),
+ CFG80211_NAN_CONF_CHANGED_DUAL = BIT(1),
+};
+
+/**
+ * struct cfg80211_nan_func_filter - a NAN function Rx / Tx filter
+ *
+ * @filter: the content of the filter
+ * @len: the length of the filter
+ */
+struct cfg80211_nan_func_filter {
+ const u8 *filter;
+ u8 len;
+};
+
+/**
+ * struct cfg80211_nan_func - a NAN function
+ *
+ * @type: &enum nl80211_nan_function_type
+ * @service_id: the service ID of the function
+ * @publish_type: &nl80211_nan_publish_type
+ * @close_range: if true, the range should be limited. Threshold is
+ * implementation specific.
+ * @publish_bcast: if true, the solicited publish should be broadcasted
+ * @subscribe_active: if true, the subscribe is active
+ * @followup_id: the instance ID for follow up
+ * @followup_reqid: the requestor instance ID for follow up
+ * @followup_dest: MAC address of the recipient of the follow up
+ * @ttl: time to live counter in DW.
+ * @serv_spec_info: Service Specific Info
+ * @serv_spec_info_len: Service Specific Info length
+ * @srf_include: if true, SRF is inclusive
+ * @srf_bf: Bloom Filter
+ * @srf_bf_len: Bloom Filter length
+ * @srf_bf_idx: Bloom Filter index
+ * @srf_macs: SRF MAC addresses
+ * @srf_num_macs: number of MAC addresses in SRF
+ * @rx_filters: rx filters that are matched with corresponding peer's tx_filter
+ * @tx_filters: filters that should be transmitted in the SDF.
+ * @num_rx_filters: length of &rx_filters.
+ * @num_tx_filters: length of &tx_filters.
+ * @instance_id: driver allocated id of the function.
+ * @cookie: unique NAN function identifier.
+ */
+struct cfg80211_nan_func {
+ enum nl80211_nan_function_type type;
+ u8 service_id[NL80211_NAN_FUNC_SERVICE_ID_LEN];
+ u8 publish_type;
+ bool close_range;
+ bool publish_bcast;
+ bool subscribe_active;
+ u8 followup_id;
+ u8 followup_reqid;
+ struct mac_address followup_dest;
+ u32 ttl;
+ const u8 *serv_spec_info;
+ u8 serv_spec_info_len;
+ bool srf_include;
+ const u8 *srf_bf;
+ u8 srf_bf_len;
+ u8 srf_bf_idx;
+ struct mac_address *srf_macs;
+ int srf_num_macs;
+ struct cfg80211_nan_func_filter *rx_filters;
+ struct cfg80211_nan_func_filter *tx_filters;
+ u8 num_tx_filters;
+ u8 num_rx_filters;
+ u8 instance_id;
+ u64 cookie;
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -2423,7 +2536,8 @@ struct cfg80211_qos_map {
* cases, the result of roaming is indicated with a call to
* cfg80211_roamed() or cfg80211_roamed_bss().
* (invoked with the wireless_dev mutex held)
- * @disconnect: Disconnect from the BSS/ESS.
+ * @disconnect: Disconnect from the BSS/ESS. Once done, call
+ * cfg80211_disconnected().
* (invoked with the wireless_dev mutex held)
*
* @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
@@ -2579,6 +2693,19 @@ struct cfg80211_qos_map {
* and returning to the base channel for communication with the AP.
* @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both
* peers must be on the base channel when the call completes.
+ * @start_nan: Start the NAN interface.
+ * @stop_nan: Stop the NAN interface.
+ * @add_nan_func: Add a NAN function. Returns negative value on failure.
+ * On success @nan_func ownership is transferred to the driver and
+ * it may access it outside of the scope of this function. The driver
+ * should free the @nan_func when no longer needed by calling
+ * cfg80211_free_nan_func().
+ * On success the driver should assign an instance_id in the
+ * provided @nan_func.
+ * @del_nan_func: Delete a NAN function.
+ * @nan_change_conf: changes NAN configuration. The changed parameters must
+ * be specified in @changes (using &enum cfg80211_nan_conf_changes).
+ * All other parameters must be ignored.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2844,6 +2971,17 @@ struct cfg80211_ops {
void (*tdls_cancel_channel_switch)(struct wiphy *wiphy,
struct net_device *dev,
const u8 *addr);
+ int (*start_nan)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf);
+ void (*stop_nan)(struct wiphy *wiphy, struct wireless_dev *wdev);
+ int (*add_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_nan_func *nan_func);
+ void (*del_nan_func)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ u64 cookie);
+ int (*nan_change_conf)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_nan_conf *conf,
+ u32 changes);
};
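
A sketch of a driver honouring the ownership rules documented for @add_nan_func/@del_nan_func: on success the driver keeps the function, assigns @instance_id, and later releases it with cfg80211_free_nan_func(). struct example_priv and its idr are hypothetical.

#include <linux/idr.h>
#include <net/cfg80211.h>

struct example_priv {
	struct idr nan_funcs;	/* instance_id -> struct cfg80211_nan_func */
};

static int example_add_nan_func(struct wiphy *wiphy, struct wireless_dev *wdev,
				struct cfg80211_nan_func *func)
{
	struct example_priv *priv = wiphy_priv(wiphy);
	int id;

	id = idr_alloc(&priv->nan_funcs, func, 1, 256, GFP_KERNEL);
	if (id < 0)
		return id;

	func->instance_id = id;	/* the driver assigns the instance id */
	return 0;
}

static void example_del_nan_func(struct wiphy *wiphy, struct wireless_dev *wdev,
				 u64 cookie)
{
	struct example_priv *priv = wiphy_priv(wiphy);
	struct cfg80211_nan_func *func;
	int id;

	idr_for_each_entry(&priv->nan_funcs, func, id) {
		if (func->cookie != cookie)
			continue;
		idr_remove(&priv->nan_funcs, id);
		cfg80211_free_nan_func(func);	/* ownership was transferred to us */
		break;
	}
}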
/*
@@ -2890,6 +3028,8 @@ struct cfg80211_ops {
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
* @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
* beaconing mode (AP, IBSS, Mesh, ...).
+ * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation
+ * before connection.
*/
enum wiphy_flags {
/* use hole at 0 */
@@ -2915,6 +3055,7 @@ enum wiphy_flags {
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
+ WIPHY_FLAG_HAS_STATIC_WEP = BIT(24),
};
/**
@@ -2947,47 +3088,54 @@ struct ieee80211_iface_limit {
*
* 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total:
*
- * struct ieee80211_iface_limit limits1[] = {
- * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
- * { .max = 1, .types = BIT(NL80211_IFTYPE_AP}, },
- * };
- * struct ieee80211_iface_combination combination1 = {
- * .limits = limits1,
- * .n_limits = ARRAY_SIZE(limits1),
- * .max_interfaces = 2,
- * .beacon_int_infra_match = true,
- * };
+ * .. code-block:: c
+ *
+ * struct ieee80211_iface_limit limits1[] = {
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
+ * };
+ * struct ieee80211_iface_combination combination1 = {
+ * .limits = limits1,
+ * .n_limits = ARRAY_SIZE(limits1),
+ * .max_interfaces = 2,
+ * .beacon_int_infra_match = true,
+ * };
*
*
* 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total:
*
- * struct ieee80211_iface_limit limits2[] = {
- * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) |
- * BIT(NL80211_IFTYPE_P2P_GO), },
- * };
- * struct ieee80211_iface_combination combination2 = {
- * .limits = limits2,
- * .n_limits = ARRAY_SIZE(limits2),
- * .max_interfaces = 8,
- * .num_different_channels = 1,
- * };
+ * .. code-block:: c
+ *
+ * struct ieee80211_iface_limit limits2[] = {
+ * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) |
+ * BIT(NL80211_IFTYPE_P2P_GO), },
+ * };
+ * struct ieee80211_iface_combination combination2 = {
+ * .limits = limits2,
+ * .n_limits = ARRAY_SIZE(limits2),
+ * .max_interfaces = 8,
+ * .num_different_channels = 1,
+ * };
*
*
* 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total.
*
- * This allows for an infrastructure connection and three P2P connections.
+ * This allows for an infrastructure connection and three P2P connections.
+ *
+ * .. code-block:: c
+ *
+ * struct ieee80211_iface_limit limits3[] = {
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
+ * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) |
+ * BIT(NL80211_IFTYPE_P2P_CLIENT), },
+ * };
+ * struct ieee80211_iface_combination combination3 = {
+ * .limits = limits3,
+ * .n_limits = ARRAY_SIZE(limits3),
+ * .max_interfaces = 4,
+ * .num_different_channels = 2,
+ * };
*
- * struct ieee80211_iface_limit limits3[] = {
- * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
- * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) |
- * BIT(NL80211_IFTYPE_P2P_CLIENT), },
- * };
- * struct ieee80211_iface_combination combination3 = {
- * .limits = limits3,
- * .n_limits = ARRAY_SIZE(limits3),
- * .max_interfaces = 4,
- * .num_different_channels = 2,
- * };
*/
struct ieee80211_iface_combination {
const struct ieee80211_iface_limit *limits;
@@ -3292,6 +3440,8 @@ struct wiphy_iftype_ext_capab {
* @bss_select_support: bitmask indicating the BSS selection criteria supported
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
+ *
+ * @cookie_counter: unique generic cookie counter, used to identify objects.
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -3421,6 +3571,8 @@ struct wiphy {
u32 bss_select_support;
+ u64 cookie_counter;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -3601,6 +3753,7 @@ struct cfg80211_cached_keys;
* beacons, 0 when not valid
* @address: The address for this device, valid only if @netdev is %NULL
* @p2p_started: true if this is a P2P Device that has been started
+ * @nan_started: true if this is a NAN interface that has been started
* @cac_started: true if DFS channel availability check has been started
* @cac_start_time: timestamp (jiffies) when the dfs state was entered.
* @cac_time_ms: CAC time in ms
@@ -3632,7 +3785,7 @@ struct wireless_dev {
struct mutex mtx;
- bool use_4addr, p2p_started;
+ bool use_4addr, p2p_started, nan_started;
u8 address[ETH_ALEN] __aligned(sizeof(u16));
@@ -3946,6 +4099,34 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
struct cfg80211_qos_map *qos_map);
/**
+ * cfg80211_find_ie_match - match information element and byte array in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ * @match: byte array to match
+ * @match_len: number of bytes in the match array
+ * @match_offset: offset in the IE where the byte array should match.
+ * If match_len is zero, this must also be set to zero.
+ * Otherwise this must be set to 2 or more, because the first
+ * byte is the element id, which is already compared to eid, and
+ * the second byte is the IE length.
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match, or a pointer to the first
+ * byte of the requested element, that is the byte containing the
+ * element ID.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data and being large enough for the
+ * byte array to match.
+ */
+const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
+ const u8 *match, int match_len,
+ int match_offset);
+
+/**
* cfg80211_find_ie - find information element in data
*
* @eid: element ID
@@ -3960,7 +4141,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
* Note: There are no checks on the element length other than
* having to fit into the given data.
*/
-const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
+static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
+{
+ return cfg80211_find_ie_match(eid, ies, len, NULL, 0, 0);
+}
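
A small usage sketch of the new matcher (the element and payload are illustrative): find a Mesh ID element whose payload equals a given mesh ID. match_offset is 2 because the payload starts right after the two-byte element header.

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

static const u8 *example_find_mesh_id(const u8 *ies, int len,
				       const u8 *mesh_id, int mesh_id_len)
{
	return cfg80211_find_ie_match(WLAN_EID_MESH_ID, ies, len,
				      mesh_id, mesh_id_len, 2);
}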
/**
* cfg80211_find_vendor_ie - find vendor specific information element in data
@@ -5509,6 +5693,67 @@ wiphy_ext_feature_isset(struct wiphy *wiphy,
return (ft_byte & BIT(ftidx % 8)) != 0;
}
+/**
+ * cfg80211_free_nan_func - free NAN function
+ * @f: NAN function that should be freed
+ *
+ * Frees the NAN function and all its allocated members.
+ */
+void cfg80211_free_nan_func(struct cfg80211_nan_func *f);
+
+/**
+ * struct cfg80211_nan_match_params - NAN match parameters
+ * @type: the type of the function that triggered a match. If it is
+ * %NL80211_NAN_FUNC_SUBSCRIBE it means that we replied to a subscriber.
+ * If it is %NL80211_NAN_FUNC_PUBLISH, it means that we got a discovery
+ * result.
+ * If it is %NL80211_NAN_FUNC_FOLLOW_UP, we received a follow up.
+ * @inst_id: the local instance id
+ * @peer_inst_id: the instance id of the peer's function
+ * @addr: the MAC address of the peer
+ * @info_len: the length of the &info
+ * @info: the Service Specific Info from the peer (if any)
+ * @cookie: unique identifier of the corresponding function
+ */
+struct cfg80211_nan_match_params {
+ enum nl80211_nan_function_type type;
+ u8 inst_id;
+ u8 peer_inst_id;
+ const u8 *addr;
+ u8 info_len;
+ const u8 *info;
+ u64 cookie;
+};
+
+/**
+ * cfg80211_nan_match - report a match for a NAN function.
+ * @wdev: the wireless device reporting the match
+ * @match: match notification parameters
+ * @gfp: allocation flags
+ *
+ * This function reports that a NAN function had a match. This
+ * can be a subscribe that had a match or a solicited publish that
+ * was sent. It can also be a follow up that was received.
+ */
+void cfg80211_nan_match(struct wireless_dev *wdev,
+ struct cfg80211_nan_match_params *match, gfp_t gfp);
+
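A sketch of a driver forwarding a firmware discovery result through cfg80211_nan_match(); struct example_fw_nan_event and its fields are hypothetical.

#include <linux/if_ether.h>
#include <net/cfg80211.h>

struct example_fw_nan_event {
	u8 local_id, peer_id;
	u8 peer_addr[ETH_ALEN];
	const u8 *ssi;
	u8 ssi_len;
	u64 cookie;
};

static void example_report_nan_match(struct wireless_dev *wdev,
				     const struct example_fw_nan_event *ev)
{
	struct cfg80211_nan_match_params match = {
		.type = NL80211_NAN_FUNC_PUBLISH,	/* discovery result */
		.inst_id = ev->local_id,
		.peer_inst_id = ev->peer_id,
		.addr = ev->peer_addr,
		.info = ev->ssi,
		.info_len = ev->ssi_len,
		.cookie = ev->cookie,
	};

	cfg80211_nan_match(wdev, &match, GFP_KERNEL);
}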
+/**
+ * cfg80211_nan_func_terminated - notify about NAN function termination.
+ *
+ * @wdev: the wireless device reporting the match
+ * @inst_id: the local instance id
+ * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
+ * @cookie: unique NAN function identifier
+ * @gfp: allocation flags
+ *
+ * This function reports that a NAN function was terminated.
+ */
+void cfg80211_nan_func_terminated(struct wireless_dev *wdev,
+ u8 inst_id,
+ enum nl80211_nan_func_term_reason reason,
+ u64 cookie, gfp_t gfp);
+
/* ethtool helper */
void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
diff --git a/include/net/devlink.h b/include/net/devlink.h
index c99ffe8cef3c..211bd3c37028 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -50,7 +50,6 @@ struct devlink_sb_pool_info {
};
struct devlink_ops {
- size_t priv_size;
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
int (*port_split)(struct devlink *devlink, unsigned int port_index,
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 2217a3f817f8..b122196d5a1f 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -26,6 +26,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_TRAILER,
DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_BRCM,
+ DSA_TAG_PROTO_QCA,
DSA_TAG_LAST, /* MUST BE LAST */
};
@@ -142,6 +143,7 @@ struct dsa_port {
struct net_device *netdev;
struct device_node *dn;
unsigned int ageing_time;
+ u8 stp_state;
};
struct dsa_switch {
@@ -165,9 +167,9 @@ struct dsa_switch {
struct dsa_chip_data *cd;
/*
- * The used switch driver.
+ * The switch operations.
*/
- struct dsa_switch_driver *drv;
+ struct dsa_switch_ops *ops;
/*
* An array of which element [a] indicates which port on this
@@ -234,19 +236,21 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
struct switchdev_trans;
struct switchdev_obj;
struct switchdev_obj_port_fdb;
+struct switchdev_obj_port_mdb;
struct switchdev_obj_port_vlan;
-struct dsa_switch_driver {
+struct dsa_switch_ops {
struct list_head list;
- enum dsa_tag_protocol tag_protocol;
-
/*
* Probing and setup.
*/
const char *(*probe)(struct device *dsa_dev,
struct device *host_dev, int sw_addr,
void **priv);
+
+ enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds);
+
int (*setup)(struct dsa_switch *ds);
int (*set_addr)(struct dsa_switch *ds, u8 *addr);
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
@@ -336,6 +340,7 @@ struct dsa_switch_driver {
void (*port_bridge_leave)(struct dsa_switch *ds, int port);
void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state);
+ void (*port_fast_age)(struct dsa_switch *ds, int port);
/*
* VLAN support
@@ -368,17 +373,27 @@ struct dsa_switch_driver {
int (*port_fdb_dump)(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj));
+
+ /*
+ * Multicast database
+ */
+ int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans);
+ void (*port_mdb_add)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans);
+ int (*port_mdb_del)(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+ int (*port_mdb_dump)(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_mdb *mdb,
+ int (*cb)(struct switchdev_obj *obj));
};
-void register_switch_driver(struct dsa_switch_driver *type);
-void unregister_switch_driver(struct dsa_switch_driver *type);
+void register_switch_driver(struct dsa_switch_ops *type);
+void unregister_switch_driver(struct dsa_switch_ops *type);
struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
-static inline void *ds_to_priv(struct dsa_switch *ds)
-{
- return ds->priv;
-}
-
static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
{
return dst->rcv != NULL;
@@ -386,4 +401,18 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds, struct device_node *np);
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds);
+int dsa_switch_resume(struct dsa_switch *ds);
+#else
+static inline int dsa_switch_suspend(struct dsa_switch *ds)
+{
+ return 0;
+}
+static inline int dsa_switch_resume(struct dsa_switch *ds)
+{
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
#endif
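
A sketch of how a switch platform driver might wire the new suspend/resume helpers into its dev_pm_ops; the driver names are hypothetical, and with CONFIG_PM_SLEEP disabled the stubs above make the calls no-ops.

#include <linux/pm.h>
#include <net/dsa.h>

static int example_dsa_suspend(struct device *dev)
{
	struct dsa_switch *ds = dev_get_drvdata(dev);

	return dsa_switch_suspend(ds);
}

static int example_dsa_resume(struct device *dev)
{
	struct dsa_switch *ds = dev_get_drvdata(dev);

	return dsa_switch_resume(ds);
}

static SIMPLE_DEV_PM_OPS(example_dsa_pm_ops,
			 example_dsa_suspend, example_dsa_resume);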
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 5db9f5910428..6965c8f68ade 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -112,12 +112,13 @@ static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb
return &dst->u.tun_info;
}
-static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
- __be16 flags,
- __be64 tunnel_id,
- int md_size)
+static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
+ __be32 daddr,
+ __u8 tos, __u8 ttl,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
{
- const struct iphdr *iph = ip_hdr(skb);
struct metadata_dst *tun_dst;
tun_dst = tun_rx_dst(md_size);
@@ -125,17 +126,30 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
return NULL;
ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ saddr, daddr, tos, ttl,
0, 0, 0, tunnel_id, flags);
return tun_dst;
}
-static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
__be16 flags,
__be64 tunnel_id,
int md_size)
{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ const struct iphdr *iph = ip_hdr(skb);
+
+ return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ flags, tunnel_id, md_size);
+}
+
+static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u8 tos, __u8 ttl,
+ __be32 label,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;
@@ -150,14 +164,26 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
info->key.tp_src = 0;
info->key.tp_dst = 0;
- info->key.u.ipv6.src = ip6h->saddr;
- info->key.u.ipv6.dst = ip6h->daddr;
+ info->key.u.ipv6.src = *saddr;
+ info->key.u.ipv6.dst = *daddr;
- info->key.tos = ipv6_get_dsfield(ip6h);
- info->key.ttl = ip6h->hop_limit;
- info->key.label = ip6_flowlabel(ip6h);
+ info->key.tos = tos;
+ info->key.ttl = ttl;
+ info->key.label = label;
return tun_dst;
}
+static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
+ ipv6_get_dsfield(ip6h), ip6h->hop_limit,
+ ip6_flowlabel(ip6h), flags, tunnel_id,
+ md_size);
+}
#endif /* __NET_DST_METADATA_H */
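
A sketch of the case the refactoring enables: building collect_md metadata from outer addresses supplied by a driver-private receive descriptor rather than the skb's IP header. The parameters here are illustrative.

#include <net/dst_metadata.h>

static struct metadata_dst *example_rx_metadata(__be32 outer_src,
						__be32 outer_dst,
						u8 tos, u8 ttl, __be64 tun_id)
{
	return __ip_tun_set_dst(outer_src, outer_dst, tos, ttl,
				TUNNEL_KEY, tun_id, 0);
}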
diff --git a/include/net/flow.h b/include/net/flow.h
index d47ef4bb5423..035aa7716967 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -34,8 +34,7 @@ struct flowi_common {
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
-#define FLOWI_FLAG_L3MDEV_SRC 0x04
-#define FLOWI_FLAG_SKIP_NH_OIF 0x08
+#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
struct flowi_tunnel flowic_tun_key;
};
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index d3d60dccd19f..d9534927d93b 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -32,8 +32,13 @@ struct flow_dissector_key_basic {
};
struct flow_dissector_key_tags {
- u32 vlan_id:12,
- flow_label:20;
+ u32 flow_label;
+};
+
+struct flow_dissector_key_vlan {
+ u16 vlan_id:12,
+ vlan_priority:3;
+ u16 padding;
};
struct flow_dissector_key_keyid {
@@ -119,7 +124,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
- FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */
+ FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
@@ -148,6 +153,7 @@ struct flow_keys {
#define FLOW_KEYS_HASH_START_FIELD basic
struct flow_dissector_key_basic basic;
struct flow_dissector_key_tags tags;
+ struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_keyid keyid;
struct flow_dissector_key_ports ports;
struct flow_dissector_key_addrs addrs;
@@ -177,7 +183,7 @@ struct flow_keys_digest {
void make_flow_keys_digest(struct flow_keys_digest *digest,
const struct flow_keys *flow);
-static inline bool flow_keys_have_l4(struct flow_keys *keys)
+static inline bool flow_keys_have_l4(const struct flow_keys *keys)
{
return (keys->ports.ports || keys->tags.flow_label);
}
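
A sketch of consuming the new VLAN key via the generic dissector, assuming (as for the other keys) that the standard flow_keys dissector fills @vlan.

#include <linux/skbuff.h>
#include <net/flow_dissector.h>

static u16 example_skb_vlan_id(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return 0;

	return keys.vlan.vlan_id;
}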
diff --git a/include/net/fq.h b/include/net/fq.h
index 268b49049c37..6d8521a30c5c 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -72,9 +72,12 @@ struct fq {
u32 flows_cnt;
u32 perturbation;
u32 limit;
+ u32 memory_limit;
+ u32 memory_usage;
u32 quantum;
u32 backlog;
u32 overlimit;
+ u32 overmemory;
u32 collisions;
};
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 163f3ed0f05a..4e6131cd3f43 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -29,6 +29,7 @@ static struct sk_buff *fq_flow_dequeue(struct fq *fq,
tin->backlog_packets--;
flow->backlog -= skb->len;
fq->backlog--;
+ fq->memory_usage -= skb->truesize;
if (flow->backlog == 0) {
list_del_init(&flow->backlogchain);
@@ -154,6 +155,7 @@ static void fq_tin_enqueue(struct fq *fq,
flow->backlog += skb->len;
tin->backlog_bytes += skb->len;
tin->backlog_packets++;
+ fq->memory_usage += skb->truesize;
fq->backlog++;
fq_recalc_backlog(fq, tin, flow);
@@ -166,7 +168,7 @@ static void fq_tin_enqueue(struct fq *fq,
__skb_queue_tail(&flow->queue, skb);
- if (fq->backlog > fq->limit) {
+ if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
flow = list_first_entry_or_null(&fq->backlogs,
struct fq_flow,
backlogchain);
@@ -181,6 +183,8 @@ static void fq_tin_enqueue(struct fq *fq,
flow->tin->overlimit++;
fq->overlimit++;
+ if (fq->memory_usage > fq->memory_limit)
+ fq->overmemory++;
}
}
@@ -251,6 +255,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
fq->perturbation = prandom_u32();
fq->quantum = 300;
fq->limit = 8192;
+ fq->memory_limit = 16 << 20; /* 16 MBytes */
fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
if (!fq->flows)
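
A sketch of how an fq user (for example a wireless stack embedding struct fq) could tighten the new memory limit after init; the 4 MB figure is arbitrary.

static int example_fq_setup(struct fq *fq)
{
	int ret;

	ret = fq_init(fq, 4096);
	if (ret)
		return ret;

	/* drop from the 16 MB default to 4 MB of aggregate skb truesize */
	fq->memory_limit = 4 << 20;
	return 0;
}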
diff --git a/include/net/gre.h b/include/net/gre.h
index 73ea256eb7d7..d25d836c129b 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -7,7 +7,15 @@
struct gre_base_hdr {
__be16 flags;
__be16 protocol;
-};
+} __packed;
+
+struct gre_full_hdr {
+ struct gre_base_hdr fixed_header;
+ __be16 csum;
+ __be16 reserved1;
+ __be32 key;
+ __be32 seq;
+} __packed;
#define GRE_HEADER_SECTION 4
#define GREPROTO_CISCO 0
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index b0fd9476c538..ba07b9d8ed63 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -190,6 +190,10 @@ struct ieee80211_radiotap_header {
* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16
*
* Contains VHT information about this frame.
+ *
+ * IEEE80211_RADIOTAP_TIMESTAMP u64, u16, u8, u8 variable
+ *
+ * Contains timestamp information for this frame.
*/
enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_TSFT = 0,
@@ -214,6 +218,7 @@ enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_MCS = 19,
IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
IEEE80211_RADIOTAP_VHT = 21,
+ IEEE80211_RADIOTAP_TIMESTAMP = 22,
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -321,6 +326,22 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04
#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08
+/* For IEEE80211_RADIOTAP_TIMESTAMP */
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK 0x000F
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS 0x0000
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US 0x0001
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS 0x0003
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK 0x00F0
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU 0x0000
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU 0x0010
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU 0x0020
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ 0x0030
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN 0x00F0
+
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT 0x00
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT 0x01
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY 0x02
+
/* helpers */
static inline int ieee80211_get_radiotap_len(unsigned char *data)
{
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 1c8b6820b694..515352c6280a 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -201,6 +201,7 @@ struct inet6_dev {
struct ipv6_devstat stats;
struct timer_list rs_timer;
+ __s32 rs_interval; /* in jiffies */
__u8 rs_probes;
__u8 addr_gen_mode;
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 49dcad4fe99e..197a30d221e9 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -134,8 +134,8 @@ struct inet_connection_sock {
} icsk_mtup;
u32 icsk_user_timeout;
- u64 icsk_ca_priv[64 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64))
+ u64 icsk_ca_priv[88 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
diff --git a/include/net/ip.h b/include/net/ip.h
index 9742b92dc933..bc43c0fcae12 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -219,6 +219,29 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
}
#endif
+#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; stats_list[i].name; i++) \
+ buff64[i] += snmp_get_cpu_field64( \
+ mib_statistic, \
+ c, stats_list[i].entry, \
+ offset); \
+ } \
+}
+
+#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; stats_list[i].name; i++) \
+ buff[i] += snmp_get_cpu_field( \
+ mib_statistic, \
+ c, stats_list[i].entry); \
+ } \
+}
+
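A sketch of the intended use of the batch helpers; the MIB item list here is hypothetical, real callers pass their own sentinel-terminated struct snmp_mib tables.

#include <net/ip.h>
#include <net/snmp.h>

static const struct snmp_mib example_mib_list[] = {
	SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INPKTS),
	SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTPKTS),
	SNMP_MIB_SENTINEL
};

static void example_fill_ip_mib(struct net *net, u64 *buff)
{
	memset(buff, 0, IPSTATS_MIB_MAX * sizeof(u64));
	snmp_get_cpu_field64_batch(buff, example_mib_list,
				   net->mib.ip_statistics,
				   offsetof(struct ipstats_mib, syncp));
}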
void inet_get_local_port_range(struct net *net, int *low, int *high);
#ifdef CONFIG_SYSCTL
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index d97305d0e71f..e0cd318d5103 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -64,6 +64,9 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
}
void ip6_route_input(struct sk_buff *skb);
+struct dst_entry *ip6_route_input_lookup(struct net *net,
+ struct net_device *dev,
+ struct flowi6 *fl6, int flags);
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 43a5a0e4524c..20ed9699fcd4 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -23,6 +23,7 @@ struct __ip6_tnl_parm {
__u8 proto; /* tunnel protocol */
__u8 encap_limit; /* encapsulation limit for tunnel */
__u8 hop_limit; /* hop limit for tunnel */
+ bool collect_md;
__be32 flowinfo; /* traffic class and flowlabel for tunnel */
__u32 flags; /* tunnel flags */
struct in6_addr laddr; /* local tunnel end-point address */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 4079fc18ffe4..b9314b48e39f 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -22,6 +22,7 @@
#include <net/fib_rules.h>
#include <net/inetpeer.h>
#include <linux/percpu.h>
+#include <linux/notifier.h>
struct fib_config {
u8 fc_dst_len;
@@ -111,6 +112,7 @@ struct fib_info {
unsigned char fib_scope;
unsigned char fib_type;
__be32 fib_prefsrc;
+ u32 fib_tb_id;
u32 fib_priority;
u32 *fib_metrics;
#define fib_mtu fib_metrics[RTAX_MTU-1]
@@ -121,6 +123,7 @@ struct fib_info {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_weight;
#endif
+ unsigned int fib_offload_cnt;
struct rcu_head rcu;
struct fib_nh fib_nh[0];
#define fib_dev fib_nh[0].nh_dev
@@ -172,6 +175,18 @@ struct fib_result_nl {
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+static inline void fib_info_offload_inc(struct fib_info *fi)
+{
+ fi->fib_offload_cnt++;
+ fi->fib_flags |= RTNH_F_OFFLOAD;
+}
+
+static inline void fib_info_offload_dec(struct fib_info *fi)
+{
+ if (--fi->fib_offload_cnt == 0)
+ fi->fib_flags &= ~RTNH_F_OFFLOAD;
+}
+
#define FIB_RES_SADDR(net, res) \
((FIB_RES_NH(res).nh_saddr_genid == \
atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
@@ -184,6 +199,33 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
#define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \
FIB_RES_SADDR(net, res))
+struct fib_notifier_info {
+ struct net *net;
+};
+
+struct fib_entry_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ u32 dst;
+ int dst_len;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+ u32 tb_id;
+ u32 nlflags;
+};
+
+enum fib_event_type {
+ FIB_EVENT_ENTRY_ADD,
+ FIB_EVENT_ENTRY_DEL,
+ FIB_EVENT_RULE_ADD,
+ FIB_EVENT_RULE_DEL,
+};
+
+int register_fib_notifier(struct notifier_block *nb);
+int unregister_fib_notifier(struct notifier_block *nb);
+int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info);
+
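A sketch (hypothetical offload driver) of reacting to the new FIB notifications and combining them with the offload counters above. &struct fib_notifier_info is the first member of the entry info, so the cast below is valid for the ENTRY_* events; the example_hw_* helpers are hypothetical.

#include <linux/notifier.h>
#include <net/ip_fib.h>

static int example_fib_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct fib_entry_notifier_info *fen_info = ptr;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
		if (example_hw_insert_route(fen_info->dst, fen_info->dst_len,
					    fen_info->tb_id) == 0)
			fib_info_offload_inc(fen_info->fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		example_hw_remove_route(fen_info->dst, fen_info->dst_len,
					fen_info->tb_id);
		fib_info_offload_dec(fen_info->fi);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block example_fib_nb = {
	.notifier_call = example_fib_event,
};

/* registered once at probe time with register_fib_notifier(&example_fib_nb); */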
struct fib_table {
struct hlist_node tb_hlist;
u32 tb_id;
@@ -195,13 +237,12 @@ struct fib_table {
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags);
-int fib_table_insert(struct fib_table *, struct fib_config *);
-int fib_table_delete(struct fib_table *, struct fib_config *);
+int fib_table_insert(struct net *, struct fib_table *, struct fib_config *);
+int fib_table_delete(struct net *, struct fib_table *, struct fib_config *);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
-int fib_table_flush(struct fib_table *table);
+int fib_table_flush(struct net *net, struct fib_table *table);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
-void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
#ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -314,12 +355,11 @@ static inline int fib_num_tclassid_users(struct net *net)
}
#endif
int fib_unmerge(struct net *net);
-void fib_flush_external(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
-int fib_sync_down_addr(struct net *net, __be32 local);
+int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
extern u32 fib_multipath_secret __read_mostly;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index a5e7035fb93f..59557c07904b 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -222,6 +222,25 @@ static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}
+static inline __be64 key32_to_tunnel_id(__be32 key)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be64)key;
+#else
+ return (__force __be64)((__force u64)key << 32);
+#endif
+}
+
+/* Returns the least-significant 32 bits of a __be64. */
+static inline __be32 tunnel_id_to_key32(__be64 tun_id)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be32)tun_id;
+#else
+ return (__force __be32)((__force u64)tun_id >> 32);
+#endif
+}
+
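A small sketch showing that the two helpers are inverses for the GRE-style 32-bit keys they are meant for.

static bool example_key_roundtrip(__be32 gre_key)
{
	__be64 tun_id = key32_to_tunnel_id(gre_key);

	return tunnel_id_to_key32(tun_id) == gre_key;
}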
#ifdef CONFIG_INET
int ip_tunnel_init(struct net_device *dev);
@@ -236,6 +255,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ const u8 proto);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/include/net/kcm.h b/include/net/kcm.h
index 2840b5825dcc..2a8965819db0 100644
--- a/include/net/kcm.h
+++ b/include/net/kcm.h
@@ -13,6 +13,7 @@
#include <linux/skbuff.h>
#include <net/sock.h>
+#include <net/strparser.h>
#include <uapi/linux/kcm.h>
extern unsigned int kcm_net_id;
@@ -21,16 +22,8 @@ extern unsigned int kcm_net_id;
#define KCM_STATS_INCR(stat) ((stat)++)
struct kcm_psock_stats {
- unsigned long long rx_msgs;
- unsigned long long rx_bytes;
unsigned long long tx_msgs;
unsigned long long tx_bytes;
- unsigned int rx_aborts;
- unsigned int rx_mem_fail;
- unsigned int rx_need_more_hdr;
- unsigned int rx_msg_too_big;
- unsigned int rx_msg_timeouts;
- unsigned int rx_bad_hdr_len;
unsigned long long reserved;
unsigned long long unreserved;
unsigned int tx_aborts;
@@ -64,13 +57,6 @@ struct kcm_tx_msg {
struct sk_buff *last_skb;
};
-struct kcm_rx_msg {
- int full_len;
- int accum_len;
- int offset;
- int early_eaten;
-};
-
/* Socket structure for KCM client sockets */
struct kcm_sock {
struct sock sk;
@@ -87,6 +73,7 @@ struct kcm_sock {
struct work_struct tx_work;
struct list_head wait_psock_list;
struct sk_buff *seq_skb;
+ u32 tx_stopped : 1;
/* Don't use bit fields here, these are set under different locks */
bool tx_wait;
@@ -104,11 +91,11 @@ struct bpf_prog;
/* Structure for an attached lower socket */
struct kcm_psock {
struct sock *sk;
+ struct strparser strp;
struct kcm_mux *mux;
int index;
u32 tx_stopped : 1;
- u32 rx_stopped : 1;
u32 done : 1;
u32 unattaching : 1;
@@ -121,18 +108,12 @@ struct kcm_psock {
struct kcm_psock_stats stats;
/* Receive */
- struct sk_buff *rx_skb_head;
- struct sk_buff **rx_skb_nextp;
- struct sk_buff *ready_rx_msg;
struct list_head psock_ready_list;
- struct work_struct rx_work;
- struct delayed_work rx_delayed_work;
struct bpf_prog *bpf_prog;
struct kcm_sock *rx_kcm;
unsigned long long saved_rx_bytes;
unsigned long long saved_rx_msgs;
- struct timer_list rx_msg_timer;
- unsigned int rx_need_bytes;
+ struct sk_buff *ready_rx_msg;
/* Transmit */
struct kcm_sock *tx_kcm;
@@ -146,6 +127,7 @@ struct kcm_net {
struct mutex mutex;
struct kcm_psock_stats aggregate_psock_stats;
struct kcm_mux_stats aggregate_mux_stats;
+ struct strp_aggr_stats aggregate_strp_stats;
struct list_head mux_list;
int count;
};
@@ -163,6 +145,7 @@ struct kcm_mux {
struct kcm_mux_stats stats;
struct kcm_psock_stats aggregate_psock_stats;
+ struct strp_aggr_stats aggregate_strp_stats;
/* Receive */
spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -190,14 +173,6 @@ static inline void aggregate_psock_stats(struct kcm_psock_stats *stats,
/* Save psock statistics in the mux when psock is being unattached. */
#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
- SAVE_PSOCK_STATS(rx_msgs);
- SAVE_PSOCK_STATS(rx_bytes);
- SAVE_PSOCK_STATS(rx_aborts);
- SAVE_PSOCK_STATS(rx_mem_fail);
- SAVE_PSOCK_STATS(rx_need_more_hdr);
- SAVE_PSOCK_STATS(rx_msg_too_big);
- SAVE_PSOCK_STATS(rx_msg_timeouts);
- SAVE_PSOCK_STATS(rx_bad_hdr_len);
SAVE_PSOCK_STATS(tx_msgs);
SAVE_PSOCK_STATS(tx_bytes);
SAVE_PSOCK_STATS(reserved);
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index e90095091aa0..3832099289c5 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -11,6 +11,7 @@
#ifndef _NET_L3MDEV_H_
#define _NET_L3MDEV_H_
+#include <net/dst.h>
#include <net/fib_rules.h>
/**
@@ -18,30 +19,24 @@
*
* @l3mdev_fib_table: Get FIB table id to use for lookups
*
- * @l3mdev_get_rtable: Get cached IPv4 rtable (dst_entry) for device
+ * @l3mdev_l3_rcv: Hook in L3 receive path
*
- * @l3mdev_get_saddr: Get source address for a flow
+ * @l3mdev_l3_out: Hook in L3 output path
*
- * @l3mdev_get_rt6_dst: Get cached IPv6 rt6_info (dst_entry) for device
+ * @l3mdev_link_scope_lookup: IPv6 lookup for linklocal and mcast destinations
*/
struct l3mdev_ops {
u32 (*l3mdev_fib_table)(const struct net_device *dev);
struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
struct sk_buff *skb, u16 proto);
-
- /* IPv4 ops */
- struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev,
- const struct flowi4 *fl4);
- int (*l3mdev_get_saddr)(struct net_device *dev,
- struct flowi4 *fl4);
+ struct sk_buff * (*l3mdev_l3_out)(struct net_device *dev,
+ struct sock *sk, struct sk_buff *skb,
+ u16 proto);
/* IPv6 ops */
- struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev,
+ struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev,
struct flowi6 *fl6);
- int (*l3mdev_get_saddr6)(struct net_device *dev,
- const struct sock *sk,
- struct flowi6 *fl6);
};
#ifdef CONFIG_NET_L3_MASTER_DEV
@@ -49,6 +44,8 @@ struct l3mdev_ops {
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg);
+void l3mdev_update_flow(struct net *net, struct flowi *fl);
+
int l3mdev_master_ifindex_rcu(const struct net_device *dev);
static inline int l3mdev_master_ifindex(struct net_device *dev)
{
@@ -80,7 +77,7 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
-const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
+struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
{
/* netdev_master_upper_dev_get_rcu calls
* list_first_or_null_rcu to walk the upper dev list.
@@ -89,7 +86,7 @@ const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
* typecast to remove the const
*/
struct net_device *dev = (struct net_device *)_dev;
- const struct net_device *master;
+ struct net_device *master;
if (!dev)
return NULL;
@@ -104,26 +101,6 @@ const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
return master;
}
-/* get index of an interface to use for FIB lookups. For devices
- * enslaved to an L3 master device FIB lookups are based on the
- * master index
- */
-static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
-{
- return l3mdev_master_ifindex_rcu(dev) ? : dev->ifindex;
-}
-
-static inline int l3mdev_fib_oif(struct net_device *dev)
-{
- int oif;
-
- rcu_read_lock();
- oif = l3mdev_fib_oif_rcu(dev);
- rcu_read_unlock();
-
- return oif;
-}
-
u32 l3mdev_fib_table_rcu(const struct net_device *dev);
u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
static inline u32 l3mdev_fib_table(const struct net_device *dev)
@@ -137,15 +114,6 @@ static inline u32 l3mdev_fib_table(const struct net_device *dev)
return tb_id;
}
-static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
- const struct flowi4 *fl4)
-{
- if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rtable)
- return dev->l3mdev_ops->l3mdev_get_rtable(dev, fl4);
-
- return NULL;
-}
-
static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
{
struct net_device *dev;
@@ -165,11 +133,7 @@ static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
return rc;
}
-int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4);
-
-struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6);
-int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
- struct flowi6 *fl6);
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6);
static inline
struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
@@ -199,6 +163,34 @@ struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
return l3mdev_l3_rcv(skb, AF_INET6);
}
+static inline
+struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
+{
+ struct net_device *dev = skb_dst(skb)->dev;
+
+ if (netif_is_l3_slave(dev)) {
+ struct net_device *master;
+
+ master = netdev_master_upper_dev_get_rcu(dev);
+ if (master && master->l3mdev_ops->l3mdev_l3_out)
+ skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
+ skb, proto);
+ }
+
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
+{
+ return l3mdev_l3_out(sk, skb, AF_INET);
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
+{
+ return l3mdev_l3_out(sk, skb, AF_INET6);
+}
#else
static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev)
@@ -216,20 +208,11 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
-const struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
+struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
{
return NULL;
}
-static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
-{
- return dev ? dev->ifindex : 0;
-}
-static inline int l3mdev_fib_oif(struct net_device *dev)
-{
- return dev ? dev->ifindex : 0;
-}
-
static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev)
{
return 0;
@@ -243,43 +226,37 @@ static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
return 0;
}
-static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
- const struct flowi4 *fl4)
-{
- return NULL;
-}
-
static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
{
return false;
}
-static inline int l3mdev_get_saddr(struct net *net, int ifindex,
- struct flowi4 *fl4)
+static inline
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6)
{
- return 0;
+ return NULL;
}
static inline
-struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
{
- return NULL;
+ return skb;
}
-static inline int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
- struct flowi6 *fl6)
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
{
- return 0;
+ return skb;
}
static inline
-struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
+struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
{
return skb;
}
static inline
-struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
{
return skb;
}
@@ -290,6 +267,10 @@ int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
{
return 1;
}
+static inline
+void l3mdev_update_flow(struct net *net, struct flowi *fl)
+{
+}
#endif
#endif /* _NET_L3MDEV_H_ */
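
A sketch of a VRF-like master device advertising the new output hook next to the existing ones; example_fib_table() and example_l3_rcv() are hypothetical callbacks.

static struct sk_buff *example_l3_out(struct net_device *master,
				      struct sock *sk, struct sk_buff *skb,
				      u16 proto)
{
	/* e.g. retarget skb->dev / dst at the master device before output */
	return skb;
}

static const struct l3mdev_ops example_l3mdev_ops = {
	.l3mdev_fib_table	= example_fib_table,
	.l3mdev_l3_rcv		= example_l3_rcv,
	.l3mdev_l3_out		= example_l3_out,
};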
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index e9f116e29c22..ea3f80f58fd6 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -13,6 +13,13 @@
/* lw tunnel state flags */
#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0)
#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
+#define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2)
+
+enum {
+ LWTUNNEL_XMIT_DONE,
+ LWTUNNEL_XMIT_CONTINUE,
+};
+
struct lwtunnel_state {
__u16 type;
@@ -21,6 +28,7 @@ struct lwtunnel_state {
int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*orig_input)(struct sk_buff *);
int len;
+ __u16 headroom;
__u8 data[0];
};
@@ -34,6 +42,7 @@ struct lwtunnel_encap_ops {
struct lwtunnel_state *lwtstate);
int (*get_encap_size)(struct lwtunnel_state *lwtstate);
int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+ int (*xmit)(struct sk_buff *skb);
};
#ifdef CONFIG_LWTUNNEL
@@ -75,6 +84,24 @@ static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
return false;
}
+
+static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
+{
+ if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_XMIT_REDIRECT))
+ return true;
+
+ return false;
+}
+
+static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
+ unsigned int mtu)
+{
+ if (lwtunnel_xmit_redirect(lwtstate) && lwtstate->headroom < mtu)
+ return lwtstate->headroom;
+
+ return 0;
+}
+
int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num);
int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
@@ -90,6 +117,7 @@ struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int lwtunnel_input(struct sk_buff *skb);
+int lwtunnel_xmit(struct sk_buff *skb);
#else
@@ -117,6 +145,17 @@ static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
return false;
}
+static inline bool lwtunnel_xmit_redirect(struct lwtunnel_state *lwtstate)
+{
+ return false;
+}
+
+static inline unsigned int lwtunnel_headroom(struct lwtunnel_state *lwtstate,
+ unsigned int mtu)
+{
+ return 0;
+}
+
static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num)
{
@@ -170,6 +209,11 @@ static inline int lwtunnel_input(struct sk_buff *skb)
return -EOPNOTSUPP;
}
+static inline int lwtunnel_xmit(struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_LWTUNNEL */
#define MODULE_ALIAS_RTNL_LWT(encap_type) MODULE_ALIAS("rtnl-lwt-" __stringify(encap_type))
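
A sketch of how an output path would consult the new xmit redirect state before handing the packet to its normal finish function; example_finish_output() is hypothetical.

#include <net/dst.h>
#include <net/lwtunnel.h>

static int example_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	return example_finish_output(net, sk, skb);
}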
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cca510a585c3..a810dfcb83c2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -715,6 +715,7 @@ enum mac80211_tx_info_flags {
* frame (PS-Poll or uAPSD).
* @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
* @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
+ * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
*
* These flags are used in tx_info->control.flags.
*/
@@ -723,6 +724,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
IEEE80211_TX_CTRL_AMSDU = BIT(3),
+ IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
};
/*
@@ -1735,6 +1737,9 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @max_rx_aggregation_subframes: maximum number of frames in a single A-MPDU
+ * that this station is allowed to transmit to us.
+ * Can be modified by the driver.
* @wme: indicates whether the STA supports QoS/WME (if local devices does,
* otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
@@ -1775,6 +1780,7 @@ struct ieee80211_sta {
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ u8 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
@@ -2014,6 +2020,11 @@ struct ieee80211_txq {
* @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list
* skbs, needed for zero-copy software A-MSDU.
*
+ * @IEEE80211_HW_REPORTS_LOW_ACK: The driver (or firmware) reports low-ack
+ * events via ieee80211_report_low_ack() based on its own algorithm. For
+ * such drivers, the mac80211 packet-loss mechanism is not triggered and
+ * the driver depends entirely on firmware events for station kickout.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2054,6 +2065,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_USES_RSS,
IEEE80211_HW_TX_AMSDU,
IEEE80211_HW_TX_FRAG_LIST,
+ IEEE80211_HW_REPORTS_LOW_ACK,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2141,6 +2153,14 @@ enum ieee80211_hw_flags {
* the default is _GI | _BANDWIDTH.
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
*
+ * @radiotap_timestamp: Information for the radiotap timestamp field; if the
+ * 'units_pos' member is set to a non-negative value it must be set to
+ * a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
+ * IEEE80211_RADIOTAP_TIMESTAMP_SPOS_* value, and then the timestamp
+ * field will be added and populated from the &struct ieee80211_rx_status
+ * device_timestamp. If the 'accuracy' member is non-negative, it's put
+ * into the accuracy radiotap field and the accuracy known flag is set.
+ *
* @netdev_features: netdev features to be set in each netdev created
* from this HW. Note that not all features are usable with mac80211,
* other features will be rejected during HW registration.
@@ -2159,6 +2179,8 @@ enum ieee80211_hw_flags {
* @n_cipher_schemes: a size of an array of cipher schemes definitions.
* @cipher_schemes: a pointer to an array of cipher scheme definitions
* supported by HW.
+ * @max_nan_de_entries: maximum number of NAN DE functions supported by the
+ * device.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -2184,11 +2206,16 @@ struct ieee80211_hw {
u8 offchannel_tx_hw_queue;
u8 radiotap_mcs_details;
u16 radiotap_vht_details;
+ struct {
+ int units_pos;
+ s16 accuracy;
+ } radiotap_timestamp;
netdev_features_t netdev_features;
u8 uapsd_queues;
u8 uapsd_max_sp_len;
u8 n_cipher_schemes;
const struct ieee80211_cipher_scheme *cipher_schemes;
+ u8 max_nan_de_entries;
};
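
A sketch of a driver filling the new members before ieee80211_register_hw(); the accuracy value is illustrative and is placed in the radiotap accuracy field in the selected units.

static void example_setup_hw(struct ieee80211_hw *hw)
{
	hw->radiotap_timestamp.units_pos =
		IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
		IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
	hw->radiotap_timestamp.accuracy = 22;	/* +/- 22 us, device specific */

	hw->max_nan_de_entries = 16;	/* NAN DE functions the device can track */
}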
static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
@@ -3085,11 +3112,8 @@ enum ieee80211_reconfig_type {
*
* @sta_add_debugfs: Drivers can use this callback to add debugfs files
* when a station is added to mac80211's station list. This callback
- * and @sta_remove_debugfs should be within a CONFIG_MAC80211_DEBUGFS
- * conditional. This callback can sleep.
- *
- * @sta_remove_debugfs: Remove the debugfs files which were added using
- * @sta_add_debugfs. This callback can sleep.
+ * should be within a CONFIG_MAC80211_DEBUGFS conditional. This
+ * callback can sleep.
*
* @sta_notify: Notifies low level driver about power state transition of an
* associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating
@@ -3147,6 +3171,12 @@ enum ieee80211_reconfig_type {
* required function.
* The callback can sleep.
*
+ * @offset_tsf: Offset the TSF timer by the specified value in the
+ * firmware/hardware. Preferred to set_tsf as it avoids delay between
+ * calling set_tsf() and hardware getting programmed, which will show up
+ * as TSF delay. Is not a required function.
+ * The callback can sleep.
+ *
* @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize
* with other STAs in the IBSS. This is only used in IBSS mode. This
* function is optional if the firmware/hardware takes full care of
@@ -3401,6 +3431,21 @@ enum ieee80211_reconfig_type {
* synchronization which is needed in case driver has in its RSS queues
* pending frames that were received prior to the control path action
* currently taken (e.g. disassociation) but are not processed yet.
+ *
+ * @start_nan: join an existing NAN cluster, or create a new one.
+ * @stop_nan: leave the NAN cluster.
+ * @nan_change_conf: change the NAN configuration. The data in cfg80211_nan_conf
+ * contains the full new configuration and @changes specifies which
+ * parameters changed with respect to the last NAN config.
+ * The driver gets both the full configuration and the changed parameters
+ * since some devices may need the full configuration while others need
+ * only the changed parameters.
+ * @add_nan_func: Add a NAN function. Returns 0 on success. The data in
+ * cfg80211_nan_func must not be referenced outside the scope of
+ * this call.
+ * @del_nan_func: Remove a NAN function. The driver must call
+ * ieee80211_nan_func_terminated() with
+ * NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3485,10 +3530,6 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
- void (*sta_remove_debugfs)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct dentry *dir);
#endif
void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd, struct ieee80211_sta *sta);
@@ -3516,6 +3557,8 @@ struct ieee80211_ops {
u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u64 tsf);
+ void (*offset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ s64 offset);
void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int (*tx_last_beacon)(struct ieee80211_hw *hw);
int (*ampdu_action)(struct ieee80211_hw *hw,
@@ -3640,6 +3683,21 @@ struct ieee80211_ops {
void (*wake_tx_queue)(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
void (*sync_rx_queues)(struct ieee80211_hw *hw);
+
+ int (*start_nan)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf);
+ int (*stop_nan)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ int (*nan_change_conf)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf, u32 changes);
+ int (*add_nan_func)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_nan_func *nan_func);
+ void (*del_nan_func)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 instance_id);
};
/**
@@ -5713,4 +5771,36 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
unsigned long *frame_cnt,
unsigned long *byte_cnt);
+
+/**
+ * ieee80211_nan_func_terminated - notify about NAN function termination.
+ *
+ * This function is used to notify mac80211 about NAN function termination.
+ * Note that this function can't be called from hard irq.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @inst_id: the local instance id
+ * @reason: termination reason (one of the NL80211_NAN_FUNC_TERM_REASON_*)
+ * @gfp: allocation flags
+ */
+void ieee80211_nan_func_terminated(struct ieee80211_vif *vif,
+ u8 inst_id,
+ enum nl80211_nan_func_term_reason reason,
+ gfp_t gfp);
+
+/**
+ * ieee80211_nan_func_match - notify about NAN function match event.
+ *
+ * This function is used to notify mac80211 about a NAN function match. The
+ * cookie inside the match struct will be assigned by mac80211.
+ * Note that this function can't be called from hard irq.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @match: match event information
+ * @gfp: allocation flags
+ */
+void ieee80211_nan_func_match(struct ieee80211_vif *vif,
+ struct cfg80211_nan_match_params *match,
+ gfp_t gfp);
+
#endif /* MAC80211_H */
diff --git a/include/net/mpls.h b/include/net/mpls.h
index 5b3b5addfb08..1dbc669b770e 100644
--- a/include/net/mpls.h
+++ b/include/net/mpls.h
@@ -19,21 +19,18 @@
#define MPLS_HLEN 4
+struct mpls_shim_hdr {
+ __be32 label_stack_entry;
+};
+
static inline bool eth_p_mpls(__be16 eth_type)
{
return eth_type == htons(ETH_P_MPLS_UC) ||
eth_type == htons(ETH_P_MPLS_MC);
}
-/*
- * For non-MPLS skbs this will correspond to the network header.
- * For MPLS skbs it will be before the network_header as the MPLS
- * label stack lies between the end of the mac header and the network
- * header. That is, for MPLS skbs the end of the mac header
- * is the top of the MPLS label stack.
- */
-static inline unsigned char *skb_mpls_header(struct sk_buff *skb)
+static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
{
- return skb_mac_header(skb) + skb->mac_len;
+ return (struct mpls_shim_hdr *)skb_network_header(skb);
}
#endif
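
A sketch of decoding the top label with the new accessor; it assumes skb_network_header() points at the label stack, as described above.

#include <linux/mpls.h>
#include <net/mpls.h>

static u32 example_top_label(const struct sk_buff *skb)
{
	u32 entry = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);

	return (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
}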
diff --git a/include/net/ncsi.h b/include/net/ncsi.h
index 1dbf42f79750..68680baac0fd 100644
--- a/include/net/ncsi.h
+++ b/include/net/ncsi.h
@@ -31,6 +31,7 @@ struct ncsi_dev {
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
void (*notifier)(struct ncsi_dev *nd));
int ncsi_start_dev(struct ncsi_dev *nd);
+void ncsi_stop_dev(struct ncsi_dev *nd);
void ncsi_unregister_dev(struct ncsi_dev *nd);
#else /* !CONFIG_NET_NCSI */
static inline struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
@@ -44,6 +45,10 @@ static inline int ncsi_start_dev(struct ncsi_dev *nd)
return -ENOTTY;
}
+static inline void ncsi_stop_dev(struct ncsi_dev *nd)
+{
+}
+
static inline void ncsi_unregister_dev(struct ncsi_dev *nd)
{
}
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 0933c7455a30..fc4f757107df 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -60,6 +60,7 @@ struct net {
struct list_head exit_list; /* Use only net_mutex */
struct user_namespace *user_ns; /* Owning user namespace */
+ struct ucounts *ucounts;
spinlock_t nsid_lock;
struct idr netns_ids;
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index e8d1448425a7..0b0c35c37125 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -15,6 +15,12 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
void nf_bridge_update_protocol(struct sk_buff *skb);
+int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct net *, struct sock *,
+ struct sk_buff *));
+
static inline struct nf_bridge_info *
nf_bridge_info_get(const struct sk_buff *skb)
{
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 445b019c2078..50418052a520 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -42,7 +42,6 @@ union nf_conntrack_expect_proto {
#include <linux/types.h>
#include <linux/skbuff.h>
-#include <linux/timer.h>
#ifdef CONFIG_NETFILTER_DEBUG
#define NF_CT_ASSERT(x) WARN_ON(!(x))
@@ -73,7 +72,7 @@ struct nf_conn_help {
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
struct nf_conn {
- /* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
+ /* Usage count in here is 1 for hash table, 1 per skb,
* plus 1 for any connection(s) we are `master' for
*
* Hint, SKB address this struct and refcnt via skb->nfct and
@@ -96,8 +95,8 @@ struct nf_conn {
/* Have we seen traffic both ways yet? (bitset) */
unsigned long status;
- /* Timer function; drops refcnt when it goes off. */
- struct timer_list timeout;
+ /* jiffies32 when this ct is considered dead */
+ u32 timeout;
possible_net_t ct_net;
@@ -220,21 +219,14 @@ static inline void nf_ct_refresh(struct nf_conn *ct,
__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
}
-bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb, int do_acct);
-
/* kill conntrack and do accounting */
-static inline bool nf_ct_kill_acct(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb)
-{
- return __nf_ct_kill_acct(ct, ctinfo, skb, 1);
-}
+bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb);
/* kill conntrack without accounting */
static inline bool nf_ct_kill(struct nf_conn *ct)
{
- return __nf_ct_kill_acct(ct, 0, NULL, 0);
+ return nf_ct_delete(ct, 0, 0);
}
/* These are for NAT. Icky. */
@@ -291,21 +283,55 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}
+#define nfct_time_stamp ((u32)(jiffies))
+
/* jiffies until ct expires, 0 if already expired */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
- long timeout = (long)ct->timeout.expires - (long)jiffies;
+ s32 timeout = ct->timeout - nfct_time_stamp;
return timeout > 0 ? timeout : 0;
}
+static inline bool nf_ct_is_expired(const struct nf_conn *ct)
+{
+ return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
+}
+
+/* use after obtaining a reference count */
+static inline bool nf_ct_should_gc(const struct nf_conn *ct)
+{
+ return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
+ !nf_ct_is_dying(ct);
+}
+
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
int nf_conntrack_hash_resize(unsigned int hashsize);
+
+extern struct hlist_nulls_head *nf_conntrack_hash;
extern unsigned int nf_conntrack_htable_size;
+extern seqcount_t nf_conntrack_generation;
extern unsigned int nf_conntrack_max;
+/* must be called with rcu read lock held */
+static inline void
+nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
+{
+ struct hlist_nulls_head *hptr;
+ unsigned int sequence, hsz;
+
+ do {
+ sequence = read_seqcount_begin(&nf_conntrack_generation);
+ hsz = nf_conntrack_htable_size;
+ hptr = nf_conntrack_hash;
+ } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+ *hash = hptr;
+ *hsize = hsz;
+}
+
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
gfp_t flags);
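/*
 * Hedged sketch (not part of the patch): with the per-conntrack timer gone,
 * arming and checking a timeout is plain jiffies arithmetic on ct->timeout.
 * Assumes the caller already holds a reference, as the comment on
 * nf_ct_should_gc() requires; the 30 * HZ value and the pr_debug() call are
 * illustrative only.
 */
#include <net/netfilter/nf_conntrack.h>
static void example_ct_timeout(struct nf_conn *ct)
{
	ct->timeout = nfct_time_stamp + 30 * HZ;	/* expire in ~30 seconds */
	if (nf_ct_should_gc(ct))	/* expired, confirmed and not dying */
		pr_debug("conntrack %p is ready for garbage collection\n", ct);
}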
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 79d7ac5c9740..62e17d1319ff 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -51,8 +51,6 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
-void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize);
-
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net,
@@ -83,7 +81,6 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
#define CONNTRACK_LOCKS 1024
-extern struct hlist_nulls_head *nf_conntrack_hash;
extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
void nf_conntrack_lock(spinlock_t *lock);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index fa36447371c6..12d967b58726 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -12,12 +12,19 @@
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/netfilter/nf_conntrack_extend.h>
+enum nf_ct_ecache_state {
+ NFCT_ECACHE_UNKNOWN, /* destroy event not sent */
+ NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
+ NFCT_ECACHE_DESTROY_SENT, /* sent destroy event after failure */
+};
+
struct nf_conntrack_ecache {
- unsigned long cache; /* bitops want long */
- unsigned long missed; /* missed events */
- u16 ctmask; /* bitmask of ct events to be delivered */
- u16 expmask; /* bitmask of expect events to be delivered */
- u32 portid; /* netlink portid of destroyer */
+ unsigned long cache; /* bitops want long */
+ unsigned long missed; /* missed events */
+ u16 ctmask; /* bitmask of ct events to be delivered */
+ u16 expmask; /* bitmask of expect events to be delivered */
+ u32 portid; /* netlink portid of destroyer */
+ enum nf_ct_ecache_state state; /* ecache state */
};
static inline struct nf_conntrack_ecache *
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index cdc920b4c4c2..8992e4229da9 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -63,10 +63,6 @@ struct nf_conntrack_l3proto {
size_t nla_size;
-#ifdef CONFIG_SYSCTL
- const char *ctl_table_path;
-#endif /* CONFIG_SYSCTL */
-
/* Init l3proto pernet data */
int (*init_net)(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 1a5fb36f165f..de629f1520df 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -134,14 +134,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
-static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
-{
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- kfree(pn->ctl_compat_table);
- pn->ctl_compat_table = NULL;
-#endif
-}
-
/* Generic netlink helpers */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple);
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index 6793614e6502..e6937318546c 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -27,6 +27,20 @@ static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
#endif
}
+static inline bool nf_ct_add_synproxy(struct nf_conn *ct,
+ const struct nf_conn *tmpl)
+{
+ if (tmpl && nfct_synproxy(tmpl)) {
+ if (!nfct_seqadj_ext_add(ct))
+ return false;
+
+ if (!nfct_synproxy_ext_add(ct))
+ return false;
+ }
+
+ return true;
+}
+
struct synproxy_stats {
unsigned int syn_received;
unsigned int cookie_invalid;
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 83d855ba6af1..309cd267be4f 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -2,15 +2,10 @@
#define _NF_LOG_H
#include <linux/netfilter.h>
+#include <linux/netfilter/nf_log.h>
-/* those NF_LOG_* defines and struct nf_loginfo are legacy definitios that will
- * disappear once iptables is replaced with pkttables. Please DO NOT use them
- * for any new code! */
-#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
-#define NF_LOG_TCPOPT 0x02 /* Log TCP options */
-#define NF_LOG_IPOPT 0x04 /* Log IP options */
-#define NF_LOG_UID 0x08 /* Log UID owning local socket */
-#define NF_LOG_MASK 0x0f
+/* Log tcp sequence, tcp options, ip options and uid owning local socket */
+#define NF_LOG_DEFAULT_MASK 0x0f
/* This flag indicates that copy_len field in nf_loginfo is set */
#define NF_LOG_F_COPY_LEN 0x1
@@ -60,8 +55,7 @@ struct nf_logger {
int nf_log_register(u_int8_t pf, struct nf_logger *logger);
void nf_log_unregister(struct nf_logger *logger);
-void nf_log_set(struct net *net, u_int8_t pf,
- const struct nf_logger *logger);
+int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger);
void nf_log_unset(struct net *net, const struct nf_logger *logger);
int nf_log_bind_pf(struct net *net, u_int8_t pf,
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 0dbce55437f2..2280cfe86c56 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -11,7 +11,6 @@ struct nf_queue_entry {
struct sk_buff *skb;
unsigned int id;
- struct nf_hook_ops *elem;
struct nf_hook_state state;
u16 size; /* sizeof(entry) + saved route keys */
@@ -22,10 +21,10 @@ struct nf_queue_entry {
/* Packet queuing */
struct nf_queue_handler {
- int (*outfn)(struct nf_queue_entry *entry,
- unsigned int queuenum);
- void (*nf_hook_drop)(struct net *net,
- struct nf_hook_ops *ops);
+ int (*outfn)(struct nf_queue_entry *entry,
+ unsigned int queuenum);
+ void (*nf_hook_drop)(struct net *net,
+ const struct nf_hook_entry *hooks);
};
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
@@ -41,23 +40,19 @@ static inline void init_hashrandom(u32 *jhash_initval)
*jhash_initval = prandom_u32();
}
-static inline u32 hash_v4(const struct sk_buff *skb, u32 jhash_initval)
+static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
{
- const struct iphdr *iph = ip_hdr(skb);
-
/* packets in either direction go into same queue */
if ((__force u32)iph->saddr < (__force u32)iph->daddr)
return jhash_3words((__force u32)iph->saddr,
- (__force u32)iph->daddr, iph->protocol, jhash_initval);
+ (__force u32)iph->daddr, iph->protocol, initval);
return jhash_3words((__force u32)iph->daddr,
- (__force u32)iph->saddr, iph->protocol, jhash_initval);
+ (__force u32)iph->saddr, iph->protocol, initval);
}
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
+static inline u32 hash_v6(const struct ipv6hdr *ip6h, u32 initval)
{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
u32 a, b, c;
if ((__force u32)ip6h->saddr.s6_addr32[3] <
@@ -75,20 +70,50 @@ static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
else
c = (__force u32) ip6h->daddr.s6_addr32[1];
- return jhash_3words(a, b, c, jhash_initval);
+ return jhash_3words(a, b, c, initval);
+}
+
+static inline u32 hash_bridge(const struct sk_buff *skb, u32 initval)
+{
+ struct ipv6hdr *ip6h, _ip6h;
+ struct iphdr *iph, _iph;
+
+ switch (eth_hdr(skb)->h_proto) {
+ case htons(ETH_P_IP):
+ iph = skb_header_pointer(skb, skb_network_offset(skb),
+ sizeof(*iph), &_iph);
+ if (iph)
+ return hash_v4(iph, initval);
+ break;
+ case htons(ETH_P_IPV6):
+ ip6h = skb_header_pointer(skb, skb_network_offset(skb),
+ sizeof(*ip6h), &_ip6h);
+ if (ip6h)
+ return hash_v6(ip6h, initval);
+ break;
+ }
+
+ return 0;
}
-#endif
static inline u32
nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
- u32 jhash_initval)
+ u32 initval)
{
- if (family == NFPROTO_IPV4)
- queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32;
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
- else if (family == NFPROTO_IPV6)
- queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32;
-#endif
+ switch (family) {
+ case NFPROTO_IPV4:
+ queue += reciprocal_scale(hash_v4(ip_hdr(skb), initval),
+ queues_total);
+ break;
+ case NFPROTO_IPV6:
+ queue += reciprocal_scale(hash_v6(ipv6_hdr(skb), initval),
+ queues_total);
+ break;
+ case NFPROTO_BRIDGE:
+ queue += reciprocal_scale(hash_bridge(skb, initval),
+ queues_total);
+ break;
+ }
return queue;
}
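/*
 * Hedged note (not part of the patch): reciprocal_scale() from
 * <linux/kernel.h> maps a 32-bit hash onto [0, ep_ro) without a modulo,
 * reciprocal_scale(h, n) == (u32)(((u64)h * n) >> 32), so for example
 * reciprocal_scale(0x80000000, 8) == 4.  The switch above therefore
 * spreads IPv4, IPv6 and (now) bridged flows evenly across queues_total
 * queues starting at the caller-supplied base queue number.
 */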
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index f2f13399ce44..5031e072567b 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -19,6 +19,7 @@ struct nft_pktinfo {
const struct net_device *out;
u8 pf;
u8 hook;
+ bool tprot_set;
u8 tprot;
/* for x_tables compatibility */
struct xt_action_param xt;
@@ -36,6 +37,23 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
pkt->pf = pkt->xt.family = state->pf;
}
+static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
+{
+ pkt->tprot_set = false;
+ pkt->tprot = 0;
+ pkt->xt.thoff = 0;
+ pkt->xt.fragoff = 0;
+}
+
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+}
+
/**
* struct nft_verdict - nf_tables verdict
*
@@ -127,6 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
}
+unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
unsigned int nft_parse_register(const struct nlattr *attr);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
@@ -251,7 +270,8 @@ struct nft_set_ops {
int (*insert)(const struct net *net,
const struct nft_set *set,
- const struct nft_set_elem *elem);
+ const struct nft_set_elem *elem,
+ struct nft_set_ext **ext);
void (*activate)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem);
diff --git a/include/net/netfilter/nf_tables_bridge.h b/include/net/netfilter/nf_tables_bridge.h
deleted file mode 100644
index 511fb79f6dad..000000000000
--- a/include/net/netfilter/nf_tables_bridge.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _NET_NF_TABLES_BRIDGE_H
-#define _NET_NF_TABLES_BRIDGE_H
-
-int nft_bridge_iphdr_validate(struct sk_buff *skb);
-int nft_bridge_ip6hdr_validate(struct sk_buff *skb);
-
-#endif /* _NET_NF_TABLES_BRIDGE_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index a9060dd99db7..00f4f6b1b1ba 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -28,6 +28,9 @@ extern const struct nft_expr_ops nft_cmp_fast_ops;
int nft_cmp_module_init(void);
void nft_cmp_module_exit(void);
+int nft_range_module_init(void);
+void nft_range_module_exit(void);
+
int nft_lookup_module_init(void);
void nft_lookup_module_exit(void);
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index ca6ef6bf775e..25e33aee91e7 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -14,11 +14,53 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
nft_set_pktinfo(pkt, skb, state);
ip = ip_hdr(pkt->skb);
+ pkt->tprot_set = true;
pkt->tprot = ip->protocol;
pkt->xt.thoff = ip_hdrlen(pkt->skb);
pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
+static inline int
+__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct iphdr *iph, _iph;
+ u32 len, thoff;
+
+ iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
+ &_iph);
+ if (!iph)
+ return -1;
+
+ if (iph->ihl < 5 || iph->version != 4)
+ return -1;
+
+ len = ntohs(iph->tot_len);
+ thoff = iph->ihl * 4;
+ if (skb->len < len)
+ return -1;
+ else if (len < thoff)
+ return -1;
+
+ pkt->tprot_set = true;
+ pkt->tprot = iph->protocol;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+}
+
+static inline void
+nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0)
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+}
+
extern struct nft_af_info nft_af_ipv4;
#endif
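/*
 * Hedged sketch (not part of the patch): a hook that runs before the IPv4
 * stack has sanity-checked the header (netdev ingress or bridge) would use
 * the _validate variant, which falls back to the "proto unspec" state on
 * malformed packets.  The hook function below is illustrative only.
 */
static unsigned int example_ipv4_ingress_hook(void *priv, struct sk_buff *skb,
					      const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;
	nft_set_pktinfo_ipv4_validate(&pkt, skb, state);
	return nft_do_chain(&pkt, priv);
}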
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 8ad39a6a5fe1..d150b5066201 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -4,7 +4,7 @@
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/ipv6.h>
-static inline int
+static inline void
nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -15,15 +15,64 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
nft_set_pktinfo(pkt, skb, state);
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
- /* If malformed, drop it */
+ if (protohdr < 0) {
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+ return;
+ }
+
+ pkt->tprot_set = true;
+ pkt->tprot = protohdr;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = frag_off;
+}
+
+static inline int
+__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ip6h, _ip6h;
+ unsigned int thoff = 0;
+ unsigned short frag_off;
+ int protohdr;
+ u32 pkt_len;
+
+ ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
+ &_ip6h);
+ if (!ip6h)
+ return -1;
+
+ if (ip6h->version != 6)
+ return -1;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > skb->len)
+ return -1;
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
if (protohdr < 0)
return -1;
+ pkt->tprot_set = true;
pkt->tprot = protohdr;
pkt->xt.thoff = thoff;
pkt->xt.fragoff = frag_off;
return 0;
+#else
+ return -1;
+#endif
+}
+
+static inline void
+nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0)
+ nft_set_pktinfo_proto_unspec(pkt, skb);
}
extern struct nft_af_info nft_af_ipv6;
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index d27588c8dbd9..1139cde0fdc5 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -36,4 +36,8 @@ void nft_meta_set_eval(const struct nft_expr *expr,
void nft_meta_set_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr);
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data);
+
#endif
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 60fa1530006b..02e28c529b29 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -8,6 +8,10 @@ struct nft_reject {
extern const struct nla_policy nft_reject_policy[];
+int nft_reject_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data);
+
int nft_reject_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[]);
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 38b1a80517f0..e469e85de3f9 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -15,10 +15,6 @@ struct nf_proto_net {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *ctl_table_header;
struct ctl_table *ctl_table;
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- struct ctl_table_header *ctl_compat_header;
- struct ctl_table *ctl_compat_table;
-#endif
#endif
unsigned int users;
};
@@ -58,10 +54,6 @@ struct nf_ip_net {
struct nf_udp_net udp;
struct nf_icmp_net icmp;
struct nf_icmp_net icmpv6;
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
- struct ctl_table_header *ctl_table_header;
- struct ctl_table *ctl_table;
-#endif
};
struct ct_pcpu {
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d061ffeb1e71..7adf4386ac8f 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,7 +40,6 @@ struct netns_ipv4 {
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
bool fib_has_custom_rules;
- struct fib_table __rcu *fib_local;
struct fib_table __rcu *fib_main;
struct fib_table __rcu *fib_default;
#endif
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 36d723579af2..58487b1cc99a 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -16,6 +16,6 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
#endif
- struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+ struct nf_hook_entry __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
};
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 24cd3949a9a4..27bb9633c69d 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -11,7 +11,7 @@
struct ctl_table_header;
struct xfrm_policy_hash {
- struct hlist_head *table;
+ struct hlist_head __rcu *table;
unsigned int hmask;
u8 dbits4;
u8 sbits4;
@@ -38,14 +38,12 @@ struct netns_xfrm {
* mode. Also, it can be used by ah/esp icmp error handler to find
* offending SA.
*/
- struct hlist_head *state_bydst;
- struct hlist_head *state_bysrc;
- struct hlist_head *state_byspi;
+ struct hlist_head __rcu *state_bydst;
+ struct hlist_head __rcu *state_bysrc;
+ struct hlist_head __rcu *state_byspi;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
- struct hlist_head state_gc_list;
- struct work_struct state_gc_work;
struct list_head policy_all;
struct hlist_head *policy_byidx;
@@ -73,7 +71,7 @@ struct netns_xfrm {
struct dst_ops xfrm6_dst_ops;
#endif
spinlock_t xfrm_state_lock;
- rwlock_t xfrm_policy_lock;
+ spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
/* flow cache part */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index c99508d426cc..767b03a3fe67 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -69,17 +69,19 @@ struct tcf_exts {
int police;
};
-static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police)
+static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
exts->type = 0;
exts->nr_actions = 0;
exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
GFP_KERNEL);
- WARN_ON(!exts->actions); /* TODO: propagate the error to callers */
+ if (!exts->actions)
+ return -ENOMEM;
#endif
exts->action = action;
exts->police = police;
+ return 0;
}
/**
@@ -121,7 +123,7 @@ static inline void tcf_exts_to_list(const struct tcf_exts *exts,
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- list_add(&a->list, actions);
+ list_add_tail(&a->list, actions);
}
#endif
}
@@ -484,4 +486,20 @@ struct tc_cls_matchall_offload {
unsigned long cookie;
};
+enum tc_clsbpf_command {
+ TC_CLSBPF_ADD,
+ TC_CLSBPF_REPLACE,
+ TC_CLSBPF_DESTROY,
+ TC_CLSBPF_STATS,
+};
+
+struct tc_cls_bpf_offload {
+ enum tc_clsbpf_command command;
+ struct tcf_exts *exts;
+ struct bpf_prog *prog;
+ const char *name;
+ bool exts_integrated;
+ u32 gen_flags;
+};
+
#endif
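/*
 * Hedged sketch (not part of the patch): with tcf_exts_init() returning an
 * error, classifier ->change() paths are expected to check it instead of
 * relying on the old WARN_ON().  Roughly how a classifier such as cls_fw
 * would call it now; the error handling shown is illustrative.
 */
static int example_cls_init_exts(struct tcf_exts *exts)
{
	int err;
	err = tcf_exts_init(exts, TCA_FW_ACT, TCA_FW_POLICE);
	if (err < 0)
		return err;	/* -ENOMEM when the action array cannot be allocated */
	/* ... parse actions; call tcf_exts_destroy() on later failures ... */
	return 0;
}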
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 7caa99b482c6..cd334c9584e9 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -90,8 +90,8 @@ int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
-void qdisc_list_add(struct Qdisc *q);
-void qdisc_list_del(struct Qdisc *q);
+void qdisc_hash_add(struct Qdisc *q);
+void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
diff --git a/include/net/pptp.h b/include/net/pptp.h
new file mode 100644
index 000000000000..92e9f1fe2628
--- /dev/null
+++ b/include/net/pptp.h
@@ -0,0 +1,23 @@
+#ifndef _NET_PPTP_H
+#define _NET_PPTP_H
+
+#define PPP_LCP_ECHOREQ 0x09
+#define PPP_LCP_ECHOREP 0x0A
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+#define MISSING_WINDOW 20
+#define WRAPPED(curseq, lastseq)\
+ ((((curseq) & 0xffffff00) == 0) &&\
+ (((lastseq) & 0xffffff00) == 0xffffff00))
+
+#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
+struct pptp_gre_header {
+ struct gre_base_hdr gre_hd;
+ __be16 payload_len;
+ __be16 call_id;
+ __be32 seq;
+ __be32 ack;
+} __packed;
+
+
+#endif
diff --git a/include/net/route.h b/include/net/route.h
index ad777d79af94..0429d47cad25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -29,7 +29,6 @@
#include <net/flow.h>
#include <net/inet_sock.h>
#include <net/ip_fib.h>
-#include <net/l3mdev.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
@@ -285,15 +284,6 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
sport, dport, sk);
- if (!src && oif) {
- int rc;
-
- rc = l3mdev_get_saddr(net, oif, fl4);
- if (rc < 0)
- return ERR_PTR(rc);
-
- src = fl4->saddr;
- }
if (!dst || !src) {
rt = __ip_route_output_key(net, fl4);
if (IS_ERR(rt))
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 909aff2db2b3..e6aa0a249672 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,6 +36,14 @@ struct qdisc_size_table {
u16 data[];
};
+/* similar to sk_buff_head, but skb->prev pointer is undefined. */
+struct qdisc_skb_head {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ __u32 qlen;
+ spinlock_t lock;
+};
+
struct Qdisc {
int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
@@ -61,7 +69,7 @@ struct Qdisc {
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
- struct list_head list;
+ struct hlist_node hash;
u32 handle;
u32 parent;
void *u32_node;
@@ -76,7 +84,7 @@ struct Qdisc {
* For performance sake on SMP, we put highly modified fields at the end
*/
struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
- struct sk_buff_head q;
+ struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
@@ -592,7 +600,7 @@ static inline void qdisc_qstats_drop(struct Qdisc *sch)
static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
- qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
+ this_cpu_inc(sch->cpu_qstats->drops);
}
static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
@@ -600,10 +608,27 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
sch->qstats.overlimits++;
}
+static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
+{
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
+}
+
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff_head *list)
+ struct qdisc_skb_head *qh)
{
- __skb_queue_tail(list, skb);
+ struct sk_buff *last = qh->tail;
+
+ if (last) {
+ skb->next = NULL;
+ last->next = skb;
+ qh->tail = skb;
+ } else {
+ qh->tail = skb;
+ qh->head = skb;
+ }
+ qh->qlen++;
qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
@@ -614,14 +639,16 @@ static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
return __qdisc_enqueue_tail(skb, sch, &sch->q);
}
-static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
- struct sk_buff_head *list)
+static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
- struct sk_buff *skb = __skb_dequeue(list);
+ struct sk_buff *skb = qh->head;
if (likely(skb != NULL)) {
- qdisc_qstats_backlog_dec(sch, skb);
- qdisc_bstats_update(sch, skb);
+ qh->head = skb->next;
+ qh->qlen--;
+ if (qh->head == NULL)
+ qh->tail = NULL;
+ skb->next = NULL;
}
return skb;
@@ -629,7 +656,14 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
- return __qdisc_dequeue_head(sch, &sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+
+ if (likely(skb != NULL)) {
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ }
+
+ return skb;
}
/* Instead of calling kfree_skb() while root qdisc lock is held,
@@ -642,10 +676,10 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
}
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff_head *list,
+ struct qdisc_skb_head *qh,
struct sk_buff **to_free)
{
- struct sk_buff *skb = __skb_dequeue(list);
+ struct sk_buff *skb = __qdisc_dequeue_head(qh);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
@@ -666,7 +700,9 @@ static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
- return skb_peek(&sch->q);
+ const struct qdisc_skb_head *qh = &sch->q;
+
+ return qh->head;
}
/* generic pseudo peek method for non-work-conserving qdisc */
@@ -701,15 +737,19 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
return skb;
}
-static inline void __qdisc_reset_queue(struct sk_buff_head *list)
+static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
/*
* We do not know the backlog in bytes of this list, it
* is up to the caller to correct it
*/
- if (!skb_queue_empty(list)) {
- rtnl_kfree_skbs(list->next, list->prev);
- __skb_queue_head_init(list);
+ ASSERT_RTNL();
+ if (qh->qlen) {
+ rtnl_kfree_skbs(qh->head, qh->tail);
+
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
}
}
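/*
 * Hedged sketch (not part of the patch): the new qdisc_skb_head is a bare
 * singly-linked FIFO, so a simple work-conserving qdisc keeps pairing the
 * enqueue/dequeue helpers as before.  The example_* names are illustrative,
 * but the pattern mirrors what pfifo-style qdiscs do.
 */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);	/* links via skb->next only */
	return qdisc_drop(skb, sch, to_free);
}
static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	return qdisc_dequeue_head(sch);	/* also updates backlog and bstats */
}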
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 632e205ca54b..87a7f42e7639 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -83,9 +83,9 @@
#endif
/* Round an int up to the next multiple of 4. */
-#define WORD_ROUND(s) (((s)+3)&~3)
+#define SCTP_PAD4(s) (((s)+3)&~3)
/* Truncate to the previous multiple of 4. */
-#define WORD_TRUNC(s) ((s)&~3)
+#define SCTP_TRUNC4(s) ((s)&~3)
/*
* Function declarations.
@@ -433,7 +433,7 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
- frag = WORD_TRUNC(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
+ frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
return frag;
}
@@ -462,7 +462,7 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
for (pos.v = chunk->member;\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
- pos.v += WORD_ROUND(ntohs(pos.p->length)))
+ pos.v += SCTP_PAD4(ntohs(pos.p->length)))
#define sctp_walk_errors(err, chunk_hdr)\
_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
@@ -472,7 +472,7 @@ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
- err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
+ err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
#define sctp_walk_fwdtsn(pos, chunk)\
_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
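/*
 * Hedged note (not part of the patch): the renamed macros keep the old
 * WORD_ROUND/WORD_TRUNC semantics, e.g.
 *
 *	SCTP_PAD4(5)   == 8	round a chunk/parameter length up to 4 bytes
 *	SCTP_PAD4(8)   == 8
 *	SCTP_TRUNC4(5) == 4	truncate a fragmentation point down to 4 bytes
 *
 * so the parameter/error walkers above still advance by padded TLV lengths.
 */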
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index efc01743b9d6..ca6c971dd74a 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -307,85 +307,27 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
}
/* Compare two TSNs */
+#define TSN_lt(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
-/* RFC 1982 - Serial Number Arithmetic
- *
- * 2. Comparison
- * Then, s1 is said to be equal to s2 if and only if i1 is equal to i2,
- * in all other cases, s1 is not equal to s2.
- *
- * s1 is said to be less than s2 if, and only if, s1 is not equal to s2,
- * and
- *
- * (i1 < i2 and i2 - i1 < 2^(SERIAL_BITS - 1)) or
- * (i1 > i2 and i1 - i2 > 2^(SERIAL_BITS - 1))
- *
- * s1 is said to be greater than s2 if, and only if, s1 is not equal to
- * s2, and
- *
- * (i1 < i2 and i2 - i1 > 2^(SERIAL_BITS - 1)) or
- * (i1 > i2 and i1 - i2 < 2^(SERIAL_BITS - 1))
- */
-
-/*
- * RFC 2960
- * 1.6 Serial Number Arithmetic
- *
- * Comparisons and arithmetic on TSNs in this document SHOULD use Serial
- * Number Arithmetic as defined in [RFC1982] where SERIAL_BITS = 32.
- */
-
-enum {
- TSN_SIGN_BIT = (1<<31)
-};
-
-static inline int TSN_lt(__u32 s, __u32 t)
-{
- return ((s) - (t)) & TSN_SIGN_BIT;
-}
-
-static inline int TSN_lte(__u32 s, __u32 t)
-{
- return ((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT);
-}
+#define TSN_lte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) <= 0))
/* Compare two SSNs */
-
-/*
- * RFC 2960
- * 1.6 Serial Number Arithmetic
- *
- * Comparisons and arithmetic on Stream Sequence Numbers in this document
- * SHOULD use Serial Number Arithmetic as defined in [RFC1982] where
- * SERIAL_BITS = 16.
- */
-enum {
- SSN_SIGN_BIT = (1<<15)
-};
-
-static inline int SSN_lt(__u16 s, __u16 t)
-{
- return ((s) - (t)) & SSN_SIGN_BIT;
-}
-
-static inline int SSN_lte(__u16 s, __u16 t)
-{
- return ((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT);
-}
-
-/*
- * ADDIP 3.1.1
- * The valid range of Serial Number is from 0 to 4294967295 (2**32 - 1). Serial
- * Numbers wrap back to 0 after reaching 4294967295.
- */
-enum {
- ADDIP_SERIAL_SIGN_BIT = (1<<31)
-};
-
-static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t)
-{
- return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT);
-}
+#define SSN_lt(a,b) \
+ (typecheck(__u16, a) && \
+ typecheck(__u16, b) && \
+ ((__s16)((a) - (b)) < 0))
+
+/* ADDIP 3.1.1 */
+#define ADDIP_SERIAL_gte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((b) - (a)) <= 0))
/* Check VTAG of the packet matches the sender's own tag. */
static inline int
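/*
 * Hedged note (not part of the patch): the macro form keeps RFC 1982
 * serial-number semantics by testing the signed difference, so comparisons
 * survive 32-bit wrap, e.g.
 *
 *	TSN_lt(0xffffffff, 1) is true, since
 *	(__s32)(0xffffffff - 1) == -2 < 0
 *
 * while the added typecheck() rejects callers passing the wrong width.
 */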
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ce93c4b10d26..11c3bf262a85 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -537,6 +537,7 @@ struct sctp_datamsg {
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
struct sctp_sndrcvinfo *,
struct iov_iter *);
+void sctp_datamsg_free(struct sctp_datamsg *);
void sctp_datamsg_put(struct sctp_datamsg *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
int sctp_chunk_abandoned(struct sctp_chunk *);
@@ -554,6 +555,9 @@ struct sctp_chunk {
atomic_t refcnt;
+ /* How many times this chunk has been sent, for prsctp RTX policy */
+ int sent_count;
+
/* This is our link to the per-transport transmitted list. */
struct list_head transmitted_list;
@@ -603,16 +607,6 @@ struct sctp_chunk {
/* This needs to be recoverable for SCTP_SEND_FAILED events. */
struct sctp_sndrcvinfo sinfo;
- /* We use this field to record param for prsctp policies,
- * for TTL policy, it is the time_to_drop of this chunk,
- * for RTX policy, it is the max_sent_count of this chunk,
- * for PRIO policy, it is the priority of this chunk.
- */
- unsigned long prsctp_param;
-
- /* How many times this chunk have been sent, for prsctp RTX policy */
- int sent_count;
-
/* Which association does this belong to? */
struct sctp_association *asoc;
@@ -1076,7 +1070,7 @@ struct sctp_outq {
void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
void sctp_outq_teardown(struct sctp_outq *);
void sctp_outq_free(struct sctp_outq*);
-int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
+void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
int sctp_outq_is_empty(const struct sctp_outq *);
void sctp_outq_restart(struct sctp_outq *);
@@ -1084,7 +1078,7 @@ void sctp_outq_restart(struct sctp_outq *);
void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
sctp_retransmit_reason_t);
void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
-int sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
+void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
void sctp_prsctp_prune(struct sctp_association *asoc,
struct sctp_sndrcvinfo *sinfo, int msg_len);
/* Uncork and flush an outqueue. */
diff --git a/include/net/sock.h b/include/net/sock.h
index ff5be7e8ddea..ebf75db08e06 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1020,7 +1020,6 @@ struct proto {
void (*unhash)(struct sock *sk);
void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
- void (*clear_sk)(struct sock *sk, int size);
/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
@@ -1114,6 +1113,16 @@ static inline bool sk_stream_is_writeable(const struct sock *sk)
sk_stream_memory_free(sk);
}
+static inline int sk_under_cgroup_hierarchy(struct sock *sk,
+ struct cgroup *ancestor)
+{
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
+ ancestor);
+#else
+ return -ENOTSUPP;
+#endif
+}
static inline bool sk_has_memory_pressure(const struct sock *sk)
{
@@ -1232,8 +1241,6 @@ static inline int __sk_prot_rehash(struct sock *sk)
return sk->sk_prot->hash(sk);
}
-void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
-
/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)
@@ -1332,6 +1339,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+
+ /* Avoid a possible overflow.
+ * TCP send queues can make this happen, if sk_mem_reclaim()
+ * is not called and more than 2 GBytes are released at once.
+ *
+ * If we reach 2 MBytes, reclaim 1 MByte right now, there is
+ * no need to hold that much forward allocation anyway.
+ */
+ if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+ __sk_mem_reclaim(sk, 1 << 20);
}
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
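/*
 * Hedged sketch (not part of the patch): sk_under_cgroup_hierarchy() is
 * meant for cgroup-aware matching on the socket that owns an skb, e.g.
 * from a BPF or netfilter helper.  The wrapper below is illustrative only;
 * the "== 1" keeps the -ENOTSUPP case (no CONFIG_SOCK_CGROUP_DATA) from
 * counting as a match.
 */
static bool example_skb_in_cgroup(const struct sk_buff *skb,
				  struct cgroup *ancestor)
{
	struct sock *sk = skb->sk;
	return sk && sk_fullsock(sk) &&
	       sk_under_cgroup_hierarchy(sk, ancestor) == 1;
}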
diff --git a/include/net/strparser.h b/include/net/strparser.h
new file mode 100644
index 000000000000..0c28ad97c52f
--- /dev/null
+++ b/include/net/strparser.h
@@ -0,0 +1,142 @@
+/*
+ * Stream Parser
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __NET_STRPARSER_H_
+#define __NET_STRPARSER_H_
+
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
+#define STRP_STATS_ADD(stat, count) ((stat) += (count))
+#define STRP_STATS_INCR(stat) ((stat)++)
+
+struct strp_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned int rx_mem_fail;
+ unsigned int rx_need_more_hdr;
+ unsigned int rx_msg_too_big;
+ unsigned int rx_msg_timeouts;
+ unsigned int rx_bad_hdr_len;
+};
+
+struct strp_aggr_stats {
+ unsigned long long rx_msgs;
+ unsigned long long rx_bytes;
+ unsigned int rx_mem_fail;
+ unsigned int rx_need_more_hdr;
+ unsigned int rx_msg_too_big;
+ unsigned int rx_msg_timeouts;
+ unsigned int rx_bad_hdr_len;
+ unsigned int rx_aborts;
+ unsigned int rx_interrupted;
+ unsigned int rx_unrecov_intr;
+};
+
+struct strparser;
+
+/* Callbacks are called with lock held for the attached socket */
+struct strp_callbacks {
+ int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
+ void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
+ int (*read_sock_done)(struct strparser *strp, int err);
+ void (*abort_parser)(struct strparser *strp, int err);
+};
+
+struct strp_rx_msg {
+ int full_len;
+ int offset;
+};
+
+static inline struct strp_rx_msg *strp_rx_msg(struct sk_buff *skb)
+{
+ return (struct strp_rx_msg *)((void *)skb->cb +
+ offsetof(struct qdisc_skb_cb, data));
+}
+
+/* Structure for an attached lower socket */
+struct strparser {
+ struct sock *sk;
+
+ u32 rx_stopped : 1;
+ u32 rx_paused : 1;
+ u32 rx_aborted : 1;
+ u32 rx_interrupted : 1;
+ u32 rx_unrecov_intr : 1;
+
+ struct sk_buff **rx_skb_nextp;
+ struct timer_list rx_msg_timer;
+ struct sk_buff *rx_skb_head;
+ unsigned int rx_need_bytes;
+ struct delayed_work rx_delayed_work;
+ struct work_struct rx_work;
+ struct strp_stats stats;
+ struct strp_callbacks cb;
+};
+
+/* Must be called with lock held for attached socket */
+static inline void strp_pause(struct strparser *strp)
+{
+ strp->rx_paused = 1;
+}
+
+/* May be called without holding lock for attached socket */
+void strp_unpause(struct strparser *strp);
+
+static inline void save_strp_stats(struct strparser *strp,
+ struct strp_aggr_stats *agg_stats)
+{
+ /* Save psock statistics in the mux when the psock is detached. */
+
+#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += \
+ strp->stats._stat)
+ SAVE_PSOCK_STATS(rx_msgs);
+ SAVE_PSOCK_STATS(rx_bytes);
+ SAVE_PSOCK_STATS(rx_mem_fail);
+ SAVE_PSOCK_STATS(rx_need_more_hdr);
+ SAVE_PSOCK_STATS(rx_msg_too_big);
+ SAVE_PSOCK_STATS(rx_msg_timeouts);
+ SAVE_PSOCK_STATS(rx_bad_hdr_len);
+#undef SAVE_PSOCK_STATS
+
+ if (strp->rx_aborted)
+ agg_stats->rx_aborts++;
+ if (strp->rx_interrupted)
+ agg_stats->rx_interrupted++;
+ if (strp->rx_unrecov_intr)
+ agg_stats->rx_unrecov_intr++;
+}
+
+static inline void aggregate_strp_stats(struct strp_aggr_stats *stats,
+ struct strp_aggr_stats *agg_stats)
+{
+#define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat)
+ SAVE_PSOCK_STATS(rx_msgs);
+ SAVE_PSOCK_STATS(rx_bytes);
+ SAVE_PSOCK_STATS(rx_mem_fail);
+ SAVE_PSOCK_STATS(rx_need_more_hdr);
+ SAVE_PSOCK_STATS(rx_msg_too_big);
+ SAVE_PSOCK_STATS(rx_msg_timeouts);
+ SAVE_PSOCK_STATS(rx_bad_hdr_len);
+ SAVE_PSOCK_STATS(rx_aborts);
+ SAVE_PSOCK_STATS(rx_interrupted);
+ SAVE_PSOCK_STATS(rx_unrecov_intr);
+#undef SAVE_PSOCK_STATS
+
+}
+
+void strp_done(struct strparser *strp);
+void strp_stop(struct strparser *strp);
+void strp_check_rcv(struct strparser *strp);
+int strp_init(struct strparser *strp, struct sock *csk,
+ struct strp_callbacks *cb);
+void strp_data_ready(struct strparser *strp);
+
+#endif /* __NET_STRPARSER_H_ */
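/*
 * Hedged sketch (not part of the patch): a minimal strparser user for a
 * framing where every message starts with a 2-byte big-endian length that
 * includes the header itself.  Callback semantics assumed here: parse_msg
 * returns the full message length, 0 when more data is needed or a
 * negative error; rcv_msg then gets the complete message.
 */
static int example_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_rx_msg *rxm = strp_rx_msg(skb);
	__be16 len;
	if (skb_copy_bits(skb, rxm->offset, &len, sizeof(len)))
		return 0;		/* length header not complete yet */
	return ntohs(len);		/* whole frame, header included */
}
static void example_rcv_msg(struct strparser *strp, struct sk_buff *skb)
{
	kfree_skb(skb);			/* a real user would queue or process it */
}
static struct strp_callbacks example_cb = {
	.parse_msg = example_parse_msg,
	.rcv_msg   = example_rcv_msg,
};
/* then: strp_init(&strp, sock->sk, &example_cb); strp_check_rcv(&strp); */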
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 62f6a967a1b7..eba80c4fc56f 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -68,7 +68,6 @@ struct switchdev_attr {
enum switchdev_obj_id {
SWITCHDEV_OBJ_ID_UNDEFINED,
SWITCHDEV_OBJ_ID_PORT_VLAN,
- SWITCHDEV_OBJ_ID_IPV4_FIB,
SWITCHDEV_OBJ_ID_PORT_FDB,
SWITCHDEV_OBJ_ID_PORT_MDB,
};
@@ -92,21 +91,6 @@ struct switchdev_obj_port_vlan {
#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
container_of(obj, struct switchdev_obj_port_vlan, obj)
-/* SWITCHDEV_OBJ_ID_IPV4_FIB */
-struct switchdev_obj_ipv4_fib {
- struct switchdev_obj obj;
- u32 dst;
- int dst_len;
- struct fib_info *fi;
- u8 tos;
- u8 type;
- u32 nlflags;
- u32 tb_id;
-};
-
-#define SWITCHDEV_OBJ_IPV4_FIB(obj) \
- container_of(obj, struct switchdev_obj_ipv4_fib, obj)
-
/* SWITCHDEV_OBJ_ID_PORT_FDB */
struct switchdev_obj_port_fdb {
struct switchdev_obj obj;
@@ -209,11 +193,6 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags);
int switchdev_port_bridge_dellink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags);
-int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 nlflags, u32 tb_id);
-int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id);
-void switchdev_fib_ipv4_abort(struct fib_info *fi);
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
u16 vid, u16 nlm_flags);
@@ -222,7 +201,7 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
u16 vid);
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
- struct net_device *filter_dev, int idx);
+ struct net_device *filter_dev, int *idx);
void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
@@ -304,25 +283,6 @@ static inline int switchdev_port_bridge_dellink(struct net_device *dev,
return -EOPNOTSUPP;
}
-static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type,
- u32 nlflags, u32 tb_id)
-{
- return 0;
-}
-
-static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id)
-{
- return 0;
-}
-
-static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
-{
-}
-
static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
@@ -342,15 +302,9 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev,
- int idx)
-{
- return idx;
-}
-
-static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
- struct net_device *group_dev,
- bool joining)
+ int *idx)
{
+ return *idx;
}
static inline bool switchdev_port_same_parent_id(struct net_device *a,
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index 5164bd7a38fb..9fd2bea0a6e0 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -50,9 +50,11 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
+int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi);
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
int ife_validate_meta_u32(void *val, int len);
int ife_validate_meta_u16(void *val, int len);
+int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi);
void ife_release_meta_gen(struct tcf_meta_info *mi);
int register_ife_op(struct tcf_meta_ops *mops);
int unregister_ife_op(struct tcf_meta_ops *mops);
diff --git a/include/net/tc_act/tc_skbmod.h b/include/net/tc_act/tc_skbmod.h
new file mode 100644
index 000000000000..644a2116b47b
--- /dev/null
+++ b/include/net/tc_act/tc_skbmod.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#ifndef __NET_TC_SKBMOD_H
+#define __NET_TC_SKBMOD_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_skbmod.h>
+
+struct tcf_skbmod_params {
+ struct rcu_head rcu;
+ u64 flags; /* up to 64 types of operations; extend if needed */
+ u8 eth_dst[ETH_ALEN];
+ u16 eth_type;
+ u8 eth_src[ETH_ALEN];
+};
+
+struct tcf_skbmod {
+ struct tc_action common;
+ struct tcf_skbmod_params __rcu *skbmod_p;
+};
+#define to_skbmod(a) ((struct tcf_skbmod *)a)
+
+#endif /* __NET_TC_SKBMOD_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000000..253f8da6c2a6
--- /dev/null
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __NET_TC_TUNNEL_KEY_H
+#define __NET_TC_TUNNEL_KEY_H
+
+#include <net/act_api.h>
+
+struct tcf_tunnel_key_params {
+ struct rcu_head rcu;
+ int tcft_action;
+ int action;
+ struct metadata_dst *tcft_enc_metadata;
+};
+
+struct tcf_tunnel_key {
+ struct tc_action common;
+ struct tcf_tunnel_key_params __rcu *params;
+};
+
+#define to_tunnel_key(a) ((struct tcf_tunnel_key *)a)
+
+#endif /* __NET_TC_TUNNEL_KEY_H */
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index e29f52e8bdf1..48cca321ee6c 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -11,6 +11,7 @@
#define __NET_TC_VLAN_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_vlan.h>
#define VLAN_F_POP 0x1
#define VLAN_F_PUSH 0x2
@@ -20,7 +21,32 @@ struct tcf_vlan {
int tcfv_action;
u16 tcfv_push_vid;
__be16 tcfv_push_proto;
+ u8 tcfv_push_prio;
};
#define to_vlan(a) ((struct tcf_vlan *)a)
+static inline bool is_tcf_vlan(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->type == TCA_ACT_VLAN)
+ return true;
+#endif
+ return false;
+}
+
+static inline u32 tcf_vlan_action(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_action;
+}
+
+static inline u16 tcf_vlan_push_vid(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_vid;
+}
+
+static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_proto;
+}
+
#endif /* __NET_TC_VLAN_H */
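/*
 * Hedged sketch (not part of the patch): the new accessors let drivers
 * translate a tc vlan action for hardware offload.  The helper below only
 * extracts the parameters and is illustrative; TCA_VLAN_ACT_* come from
 * the uapi header included above.
 */
static int example_read_vlan_action(const struct tc_action *a, u16 *vid,
				    __be16 *proto, bool *push)
{
	if (!is_tcf_vlan(a))
		return -EOPNOTSUPP;
	switch (tcf_vlan_action(a)) {
	case TCA_VLAN_ACT_PUSH:
		*push = true;
		*vid = tcf_vlan_push_vid(a);
		*proto = tcf_vlan_push_proto(a);
		return 0;
	case TCA_VLAN_ACT_POP:
		*push = false;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}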
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7717302cab91..f83b7f220a65 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -227,10 +227,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TFO_SERVER_COOKIE_NOT_REQD 0x200
/* Force enable TFO on all listeners, i.e., not requiring the
- * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
+ * TCP_FASTOPEN socket option.
*/
#define TFO_SERVER_WO_SOCKOPT1 0x400
-#define TFO_SERVER_WO_SOCKOPT2 0x800
extern struct inet_timewait_death_row tcp_death_row;
@@ -534,6 +533,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
+u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
+ int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
bool tcp_may_send_now(struct sock *sk);
@@ -604,8 +605,6 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
-typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
- unsigned int, size_t);
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
@@ -643,7 +642,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (skb_queue_empty(&tp->out_of_order_queue) &&
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
tp->rcv_wnd &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
@@ -674,7 +673,7 @@ static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
- return tp->rtt_min[0].rtt;
+ return minmax_get(&tp->rtt_min);
}
/* Compute the actual receive window we are currently advertising.
@@ -766,8 +765,16 @@ struct tcp_skb_cb {
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
- /* There is space for up to 20 bytes */
- __u32 in_flight;/* Bytes in flight when packet sent */
+ /* There is space for up to 24 bytes */
+ __u32 in_flight:30,/* Bytes in flight at transmit */
+ is_app_limited:1, /* cwnd not fully used? */
+ unused:1;
+ /* pkts S/ACKed so far upon tx of skb, incl retrans: */
+ __u32 delivered;
+ /* start of send pipeline phase */
+ struct skb_mstamp first_tx_mstamp;
+ /* when we reached the "delivered" count */
+ struct skb_mstamp delivered_mstamp;
} tx; /* only used for outgoing skbs */
union {
struct inet_skb_parm h4;
@@ -863,6 +870,27 @@ struct ack_sample {
u32 in_flight;
};
+/* A rate sample measures the number of (original/retransmitted) data
+ * packets delivered "delivered" over an interval of time "interval_us".
+ * The tcp_rate.c code fills in the rate sample, and congestion
+ * control modules that define a cong_control function to run at the end
+ * of ACK processing can optionally chose to consult this sample when
+ * of ACK processing can optionally choose to consult this sample when
+ * A sample is invalid if "delivered" or "interval_us" is negative.
+ */
+struct rate_sample {
+ struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
+ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ s32 delivered; /* number of packets delivered over interval */
+ long interval_us; /* time for tp->delivered to incr "delivered" */
+ long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
+ int losses; /* number of packets marked lost upon ACK */
+ u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
+ u32 prior_in_flight; /* in flight before this ACK */
+ bool is_app_limited; /* is sample from packet with bubble in pipe? */
+ bool is_retrans; /* is sample from retransmission? */
+};
+
struct tcp_congestion_ops {
struct list_head list;
u32 key;
@@ -887,6 +915,14 @@ struct tcp_congestion_ops {
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
+ /* suggest number of segments for each skb to transmit (optional) */
+ u32 (*tso_segs_goal)(struct sock *sk);
+ /* returns the multiplier used in tcp_sndbuf_expand (optional) */
+ u32 (*sndbuf_expand)(struct sock *sk);
+ /* call when packets are delivered to update cwnd and pacing rate,
+ * after all the ca_state processing. (optional)
+ */
+ void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
/* get info for inet_diag (optional) */
size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info);
@@ -949,6 +985,14 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
icsk->icsk_ca_ops->cwnd_event(sk, event);
}
+/* From tcp_rate.c */
+void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
+void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+ struct rate_sample *rs);
+void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+ struct skb_mstamp *now, struct rate_sample *rs);
+void tcp_rate_check_app_limited(struct sock *sk);
+
/* These functions determine how the current flow behaves in respect of SACK
* handling. SACK is negotiated with the peer, and therefore it can vary
* between different flows.
@@ -1164,6 +1208,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
}
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
#undef STATE_TRACE
@@ -1853,6 +1898,8 @@ static inline int tcp_inq(struct sock *sk)
return answ;
}
+int tcp_peek_len(struct socket *sock);
+
static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
u16 segs_in;
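/*
 * Hedged sketch (not part of the patch): a cong_control hook can turn a
 * rate_sample into a delivery-rate estimate.  The computation below is
 * illustrative and not how any particular in-tree module behaves.
 */
static void example_cong_control(struct sock *sk, const struct rate_sample *rs)
{
	u64 rate_pps;
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return;		/* invalid sample, ignore */
	/* packets per second delivered over the sampled interval */
	rate_pps = div_u64((u64)rs->delivered * USEC_PER_SEC,
			   (u32)rs->interval_us);
	if (!rs->is_app_limited)
		pr_debug("%llu pkts/s, %u newly (S)ACKed\n",
			 (unsigned long long)rate_pps, rs->acked_sacked);
}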
diff --git a/include/net/udp.h b/include/net/udp.h
index 8894d7144189..ea53a87d880f 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -251,6 +251,7 @@ int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
void udp_err(struct sk_buff *, u32);
+int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index b96d0360c095..0255613a54a4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -350,24 +350,6 @@ static inline __be32 vxlan_vni_field(__be32 vni)
#endif
}
-static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id)
-{
-#if defined(__BIG_ENDIAN)
- return (__force __be32)tun_id;
-#else
- return (__force __be32)((__force u64)tun_id >> 32);
-#endif
-}
-
-static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
-{
-#if defined(__BIG_ENDIAN)
- return (__force __be64)vni;
-#else
- return (__force __be64)((u64)(__force u32)vni << 32);
-#endif
-}
-
static inline size_t vxlan_rco_start(__be32 vni_field)
{
return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index adfebd6f243c..31947b9c21d6 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -187,7 +187,7 @@ struct xfrm_state {
struct xfrm_replay_state_esn *preplay_esn;
/* The functions for replay detection. */
- struct xfrm_replay *repl;
+ const struct xfrm_replay *repl;
/* internal flag that only holds state for delayed aevent at the
* moment
@@ -1540,8 +1540,10 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
int xfrm6_extract_header(struct sk_buff *skb);
int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
+ struct ip6_tnl *t);
int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
int xfrm6_rcv(struct sk_buff *skb);
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
xfrm_address_t *saddr, u8 proto);
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
new file mode 100644
index 000000000000..408439fe911e
--- /dev/null
+++ b/include/rdma/ib_hdrs.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef IB_HDRS_H
+#define IB_HDRS_H
+
+#include <linux/types.h>
+#include <asm/unaligned.h>
+#include <rdma/ib_verbs.h>
+
+#define IB_SEQ_NAK (3 << 29)
+
+/* AETH NAK opcode values */
+#define IB_RNR_NAK 0x20
+#define IB_NAK_PSN_ERROR 0x60
+#define IB_NAK_INVALID_REQUEST 0x61
+#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
+#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
+#define IB_NAK_INVALID_RD_REQUEST 0x64
+
+#define IB_BTH_REQ_ACK BIT(31)
+#define IB_BTH_SOLICITED BIT(23)
+#define IB_BTH_MIG_REQ BIT(22)
+
+#define IB_GRH_VERSION 6
+#define IB_GRH_VERSION_MASK 0xF
+#define IB_GRH_VERSION_SHIFT 28
+#define IB_GRH_TCLASS_MASK 0xFF
+#define IB_GRH_TCLASS_SHIFT 20
+#define IB_GRH_FLOW_MASK 0xFFFFF
+#define IB_GRH_FLOW_SHIFT 0
+#define IB_GRH_NEXT_HDR 0x1B
+
+struct ib_reth {
+ __be64 vaddr; /* potentially unaligned */
+ __be32 rkey;
+ __be32 length;
+} __packed;
+
+struct ib_atomic_eth {
+ __be64 vaddr; /* potentially unaligned */
+ __be32 rkey;
+ __be64 swap_data; /* potentially unaligned */
+ __be64 compare_data; /* potentially unaligned */
+} __packed;
+
+union ib_ehdrs {
+ struct {
+ __be32 deth[2];
+ __be32 imm_data;
+ } ud;
+ struct {
+ struct ib_reth reth;
+ __be32 imm_data;
+ } rc;
+ struct {
+ __be32 aeth;
+ __be64 atomic_ack_eth; /* potentially unaligned */
+ } __packed at;
+ __be32 imm_data;
+ __be32 aeth;
+ __be32 ieth;
+ struct ib_atomic_eth atomic_eth;
+} __packed;
+
+struct ib_other_headers {
+ __be32 bth[3];
+ union ib_ehdrs u;
+} __packed;
+
+struct ib_header {
+ __be16 lrh[4];
+ union {
+ struct {
+ struct ib_grh grh;
+ struct ib_other_headers oth;
+ } l;
+ struct ib_other_headers oth;
+ } u;
+} __packed;
+
+/* accessors for unaligned __be64 items */
+
+static inline u64 ib_u64_get(__be64 *p)
+{
+ return get_unaligned_be64(p);
+}
+
+static inline void ib_u64_put(u64 val, __be64 *p)
+{
+ put_unaligned_be64(val, p);
+}
+
+static inline u64 get_ib_reth_vaddr(struct ib_reth *reth)
+{
+ return ib_u64_get(&reth->vaddr);
+}
+
+static inline void put_ib_reth_vaddr(u64 val, struct ib_reth *reth)
+{
+ ib_u64_put(val, &reth->vaddr);
+}
+
+static inline u64 get_ib_ateth_vaddr(struct ib_atomic_eth *ateth)
+{
+ return ib_u64_get(&ateth->vaddr);
+}
+
+static inline void put_ib_ateth_vaddr(u64 val, struct ib_atomic_eth *ateth)
+{
+ ib_u64_put(val, &ateth->vaddr);
+}
+
+static inline u64 get_ib_ateth_swap(struct ib_atomic_eth *ateth)
+{
+ return ib_u64_get(&ateth->swap_data);
+}
+
+static inline void put_ib_ateth_swap(u64 val, struct ib_atomic_eth *ateth)
+{
+ ib_u64_put(val, &ateth->swap_data);
+}
+
+static inline u64 get_ib_ateth_compare(struct ib_atomic_eth *ateth)
+{
+ return ib_u64_get(&ateth->compare_data);
+}
+
+static inline void put_ib_ateth_compare(u64 val, struct ib_atomic_eth *ateth)
+{
+ ib_u64_put(val, &ateth->compare_data);
+}
+
+#endif /* IB_HDRS_H */
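The accessor pairs above exist because the __be64 fields in these wire structures are only 32-bit aligned, so get_unaligned_be64()/put_unaligned_be64() must be used instead of direct loads and stores. A minimal, hedged sketch of how a consumer might use them together with the GRH constants follows; the function names and the host-order GRH word are illustrative only, not part of this header.

	/* Illustrative sketch only: fill a RETH using the unaligned-safe helpers. */
	static void example_fill_reth(struct ib_reth *reth, u64 remote_addr,
				      u32 rkey, u32 dma_len)
	{
		put_ib_reth_vaddr(remote_addr, reth);	/* safe store to the unaligned vaddr */
		reth->rkey = cpu_to_be32(rkey);
		reth->length = cpu_to_be32(dma_len);
	}

	/* Illustrative sketch only: compose the first GRH word in host order. */
	static u32 example_grh_word0(u8 tclass, u32 flow_label)
	{
		return (IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
		       ((tclass & IB_GRH_TCLASS_MASK) << IB_GRH_TCLASS_SHIFT) |
		       ((flow_label & IB_GRH_FLOW_MASK) << IB_GRH_FLOW_SHIFT);
	}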
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e1f96737c2a1..5ad43a487745 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -261,6 +261,16 @@ struct ib_odp_caps {
} per_transport_caps;
};
+struct ib_rss_caps {
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_UD
+ */
+ u32 supported_qpts;
+ u32 max_rwq_indirection_tables;
+ u32 max_rwq_indirection_table_size;
+};
+
enum ib_cq_creation_flags {
IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
@@ -318,6 +328,8 @@ struct ib_device_attr {
struct ib_odp_caps odp_caps;
uint64_t timestamp_mask;
uint64_t hca_core_clock; /* in KHZ */
+ struct ib_rss_caps rss_caps;
+ u32 max_wq_type_rq;
};
enum ib_mtu {
@@ -525,9 +537,11 @@ enum ib_device_modify_flags {
IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};
+#define IB_DEVICE_NODE_DESC_MAX 64
+
struct ib_device_modify {
u64 sys_image_guid;
- char node_desc[64];
+ char node_desc[IB_DEVICE_NODE_DESC_MAX];
};
enum ib_port_modify_flags {
@@ -1370,10 +1384,17 @@ struct ib_udata {
struct ib_pd {
u32 local_dma_lkey;
+ u32 flags;
struct ib_device *device;
struct ib_uobject *uobject;
atomic_t usecnt; /* count all resources */
- struct ib_mr *local_mr;
+
+ u32 unsafe_global_rkey;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct ib_mr *__internal_mr;
};
struct ib_xrcd {
@@ -1604,6 +1625,8 @@ struct ib_flow_eth_filter {
u8 src_mac[6];
__be16 ether_type;
__be16 vlan_tag;
+ /* Must be last */
+ u8 real_sz[0];
};
struct ib_flow_spec_eth {
@@ -1616,6 +1639,8 @@ struct ib_flow_spec_eth {
struct ib_flow_ib_filter {
__be16 dlid;
__u8 sl;
+ /* Must be last */
+ u8 real_sz[0];
};
struct ib_flow_spec_ib {
@@ -1625,9 +1650,22 @@ struct ib_flow_spec_ib {
struct ib_flow_ib_filter mask;
};
+/* IPv4 header flags */
+enum ib_ipv4_flags {
+ IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
+ IB_IPV4_MORE_FRAG = 0x4 /* All fragmented packets except the
+ last one have this flag set */
+};
+
struct ib_flow_ipv4_filter {
__be32 src_ip;
__be32 dst_ip;
+ u8 proto;
+ u8 tos;
+ u8 ttl;
+ u8 flags;
+ /* Must be last */
+ u8 real_sz[0];
};
struct ib_flow_spec_ipv4 {
@@ -1640,6 +1678,12 @@ struct ib_flow_spec_ipv4 {
struct ib_flow_ipv6_filter {
u8 src_ip[16];
u8 dst_ip[16];
+ __be32 flow_label;
+ u8 next_hdr;
+ u8 traffic_class;
+ u8 hop_limit;
+ /* Must be last */
+ u8 real_sz[0];
};
struct ib_flow_spec_ipv6 {
@@ -1652,6 +1696,8 @@ struct ib_flow_spec_ipv6 {
struct ib_flow_tcp_udp_filter {
__be16 dst_port;
__be16 src_port;
+ /* Must be last */
+ u8 real_sz[0];
};
struct ib_flow_spec_tcp_udp {
@@ -1739,6 +1785,14 @@ struct ib_dma_mapping_ops {
void (*unmap_sg)(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction);
+ int (*map_sg_attrs)(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long attrs);
+ void (*unmap_sg_attrs)(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long attrs);
void (*sync_single_for_cpu)(struct ib_device *dev,
u64 dma_handle,
size_t size,
@@ -2033,7 +2087,7 @@ struct ib_device {
u64 uverbs_cmd_mask;
u64 uverbs_ex_cmd_mask;
- char node_desc[64];
+ char node_desc[IB_DEVICE_NODE_DESC_MAX];
__be64 node_guid;
u32 local_dma_lkey;
u16 is_switch:1;
@@ -2497,8 +2551,23 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
-struct ib_pd *ib_alloc_pd(struct ib_device *device);
+enum ib_pd_flags {
+ /*
+ * Create a memory registration for all memory in the system and place
+ * the rkey for it into pd->unsafe_global_rkey. This can be used by
+ * ULPs to avoid the overhead of dynamic MRs.
+ *
+ * This flag is generally considered unsafe and must only be used in
+ * extremely trusted environments. Every use of it will log a warning
+ * in the kernel log.
+ */
+ IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
+};
+struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
+ const char *caller);
+#define ib_alloc_pd(device, flags) \
+ __ib_alloc_pd((device), (flags), __func__)
void ib_dealloc_pd(struct ib_pd *pd);
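Since drivers now pass allocation flags and ib_alloc_pd() expands to __ib_alloc_pd() with the caller name, a short usage sketch may help; the wrapper below is illustrative, and the ERR_PTR error convention is assumed from existing RDMA core behaviour. Passing IB_PD_UNSAFE_GLOBAL_RKEY instead of 0 would additionally populate pd->unsafe_global_rkey, with the warning described above.

	/* Illustrative sketch only: allocate a PD with the new flags-based interface. */
	static int example_pd_setup(struct ib_device *device, struct ib_pd **pdp)
	{
		struct ib_pd *pd = ib_alloc_pd(device, 0);	/* no unsafe global rkey */

		if (IS_ERR(pd))
			return PTR_ERR(pd);
		*pdp = pd;
		return 0;
	}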
/**
@@ -2852,18 +2921,6 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
}
/**
- * ib_get_dma_mr - Returns a memory region for system memory that is
- * usable for DMA.
- * @pd: The protection domain associated with the memory region.
- * @mr_access_flags: Specifies the memory access rights.
- *
- * Note that the ib_dma_*() functions defined below must be used
- * to create/destroy addresses used with the Lkey or Rkey returned
- * by ib_get_dma_mr().
- */
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
-
-/**
* ib_dma_mapping_error - check a DMA addr for error
* @dev: The device for which the dma_addr was created
* @dma_addr: The DMA address to check
@@ -3000,8 +3057,12 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
enum dma_data_direction direction,
unsigned long dma_attrs)
{
- return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
- dma_attrs);
+ if (dev->dma_ops)
+ return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
+ dma_attrs);
+ else
+ return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
}
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3009,7 +3070,12 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
enum dma_data_direction direction,
unsigned long dma_attrs)
{
- dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
+ if (dev->dma_ops)
+ return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
+ dma_attrs);
+ else
+ dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
}
/**
* ib_sg_dma_address - Return the DMA address from a scatter/gather entry
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index bd34d0b56bf7..2c5183ef0243 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -466,6 +466,25 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
}
/**
+ * rvt_get_qp - get a QP reference
+ * @qp - the QP to hold
+ */
+static inline void rvt_get_qp(struct rvt_qp *qp)
+{
+ atomic_inc(&qp->refcount);
+}
+
+/**
+ * rvt_put_qp - release a QP reference
+ * @qp - the QP to release
+ */
+static inline void rvt_put_qp(struct rvt_qp *qp)
+{
+ if (qp && atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
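These two helpers give rvt-based drivers a symmetric hold/release idiom around the QP reference count; the sketch below is illustrative only, with the deferred-work detail elided.

	/* Illustrative sketch only: keep the QP alive while work is outstanding. */
	static void example_qp_deferred_work(struct rvt_qp *qp)
	{
		rvt_get_qp(qp);		/* take a reference before handing off */
		/* ... queue or perform asynchronous processing on qp ... */
		rvt_put_qp(qp);		/* wakes &qp->wait when the last ref drops */
	}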
+
+/**
* rvt_qp_wqe_reserve - reserve operation
* @qp - the rvt qp
* @wqe - the send wqe
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index b2017440b765..703a64b4681a 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -24,6 +24,7 @@ typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */
*/
struct rxrpc_wire_header {
__be32 epoch; /* client boot timestamp */
+#define RXRPC_RANDOM_EPOCH 0x80000000 /* Random if set, date-based if not */
__be32 cid; /* connection and channel ID */
#define RXRPC_MAXCALLS 4 /* max active calls per conn */
@@ -33,8 +34,6 @@ struct rxrpc_wire_header {
#define RXRPC_CID_INC (1 << RXRPC_CIDSHIFT) /* connection ID increment */
__be32 callNumber; /* call ID (0 for connection-level packets) */
-#define RXRPC_PROCESS_MAXCALLS (1<<2) /* maximum number of active calls per conn (power of 2) */
-
__be32 seq; /* sequence number of pkt in call stream */
__be32 serial; /* serial number of pkt sent to network */
@@ -92,10 +91,14 @@ struct rxrpc_wire_header {
struct rxrpc_jumbo_header {
uint8_t flags; /* packet flags (as per rxrpc_header) */
uint8_t pad;
- __be16 _rsvd; /* reserved (used by kerberos security as cksum) */
+ union {
+ __be16 _rsvd; /* reserved */
+ __be16 cksum; /* kerberos security checksum */
+ };
};
#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
+#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
/*****************************************************************************/
/*
@@ -120,6 +123,7 @@ struct rxrpc_ackpacket {
#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */
#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */
#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */
+#define RXRPC_ACK__INVALID 10 /* Representation of invalid ACK reason */
uint8_t nAcks; /* number of ACKs */
#define RXRPC_MAXACKS 255
@@ -130,6 +134,13 @@ struct rxrpc_ackpacket {
} __packed;
+/* Some ACKs refer to specific packets and some are general and can be updated. */
+#define RXRPC_ACK_UPDATEABLE ((1 << RXRPC_ACK_REQUESTED) | \
+ (1 << RXRPC_ACK_PING_RESPONSE) | \
+ (1 << RXRPC_ACK_DELAY) | \
+ (1 << RXRPC_ACK_IDLE))
+
+
/*
* ACK packets can have a further piece of information tagged on the end
*/
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 0dee7afa93d6..7e4cd53139ed 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -771,12 +771,9 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
shost->tmf_in_progress;
}
-extern bool scsi_use_blk_mq;
-
static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
{
- return scsi_use_blk_mq;
-
+ return shost->use_blk_mq;
}
extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 13c0b2ba1b6c..73d870918939 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -11,12 +11,12 @@ struct sas_rphy;
struct request;
#if !IS_ENABLED(CONFIG_SCSI_SAS_ATTRS)
-static inline int is_sas_attached(struct scsi_device *sdev)
+static inline int scsi_is_sas_rphy(const struct device *sdev)
{
return 0;
}
#else
-extern int is_sas_attached(struct scsi_device *sdev);
+extern int scsi_is_sas_rphy(const struct device *);
#endif
static inline int sas_protocol_ata(enum sas_protocol proto)
@@ -202,7 +202,6 @@ extern int sas_rphy_add(struct sas_rphy *);
extern void sas_rphy_remove(struct sas_rphy *);
extern void sas_rphy_delete(struct sas_rphy *);
extern void sas_rphy_unlink(struct sas_rphy *);
-extern int scsi_is_sas_rphy(const struct device *);
struct sas_port *sas_port_alloc(struct device *, int);
struct sas_port *sas_port_alloc_num(struct device *);
diff --git a/include/soc/at91/atmel-sfr.h b/include/soc/at91/atmel-sfr.h
index 2f9bb984a4df..506ea8ffda19 100644
--- a/include/soc/at91/atmel-sfr.h
+++ b/include/soc/at91/atmel-sfr.h
@@ -13,6 +13,20 @@
#ifndef _LINUX_MFD_SYSCON_ATMEL_SFR_H
#define _LINUX_MFD_SYSCON_ATMEL_SFR_H
+#define AT91_SFR_DDRCFG 0x04 /* DDR Configuration Register */
+/* 0x08 ~ 0x0c: Reserved */
+#define AT91_SFR_OHCIICR 0x10 /* OHCI INT Configuration Register */
+#define AT91_SFR_OHCIISR 0x14 /* OHCI INT Status Register */
#define AT91_SFR_I2SCLKSEL 0x90 /* I2SC Register */
+/* Field definitions */
+#define AT91_OHCIICR_SUSPEND_A BIT(8)
+#define AT91_OHCIICR_SUSPEND_B BIT(9)
+#define AT91_OHCIICR_SUSPEND_C BIT(10)
+
+#define AT91_OHCIICR_USB_SUSPEND (AT91_OHCIICR_SUSPEND_A | \
+ AT91_OHCIICR_SUSPEND_B | \
+ AT91_OHCIICR_SUSPEND_C)
+
+
#endif /* _LINUX_MFD_SYSCON_ATMEL_SFR_H */
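The SFR block is normally reached through a syscon regmap in Linux, so a driver holding such a handle could gate all three OHCI ports with the combined mask; the sketch below is illustrative only and assumes <linux/regmap.h> plus an already-acquired regmap.

	/* Illustrative sketch only: suspend OHCI ports A-C via the SFR OHCIICR register. */
	static int example_sfr_usb_suspend(struct regmap *sfr)
	{
		return regmap_update_bits(sfr, AT91_SFR_OHCIICR,
					  AT91_OHCIICR_USB_SUSPEND,
					  AT91_OHCIICR_USB_SUSPEND);
	}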
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
new file mode 100644
index 000000000000..eaaf56df4086
--- /dev/null
+++ b/include/soc/fsl/bman.h
@@ -0,0 +1,129 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_BMAN_H
+#define __FSL_BMAN_H
+
+/* wrapper for 48-bit buffers */
+struct bm_buffer {
+ union {
+ struct {
+ __be16 bpid; /* hi 8-bits reserved */
+ __be16 hi; /* High 16-bits of 48-bit address */
+ __be32 lo; /* Low 32-bits of 48-bit address */
+ };
+ __be64 data;
+ };
+} __aligned(8);
+/*
+ * Restore the 48 bit address previously stored in BMan
+ * hardware pools as a dma_addr_t
+ */
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+ return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
+}
+
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+ return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
+}
+
+static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr)
+{
+ buf->hi = cpu_to_be16(upper_32_bits(addr));
+ buf->lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf)
+{
+ return be16_to_cpu(buf->bpid) & 0xff;
+}
+
+static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid)
+{
+ buf->bpid = cpu_to_be16(bpid & 0xff);
+}
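The setters above split a 48-bit bus address into big-endian hi/lo halves and mask the BPID to 8 bits; a hedged sketch of preparing one entry follows (the helper name is illustrative).

	/* Illustrative sketch only: encode one buffer entry for pool 'bpid'. */
	static void example_fill_buffer(struct bm_buffer *buf, dma_addr_t dma, int bpid)
	{
		bm_buffer_set64(buf, dma);	/* stores upper 16 / lower 32 bits big-endian */
		bm_buffer_set_bpid(buf, bpid);	/* low 8 bits only */
	}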
+
+/* Managed portal, high-level i/face */
+
+/* Portal and Buffer Pools */
+struct bman_portal;
+struct bman_pool;
+
+#define BM_POOL_MAX 64 /* max # of buffer pools */
+
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ *
+ * Creates a pool object, and returns a reference to it or NULL on error.
+ */
+struct bman_pool *bman_new_pool(void);
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_bpid - Returns a pool object's BPID.
+ * @pool: the pool object
+ *
+ * The returned value is the index of the encapsulated buffer pool,
+ * in the range of [0, @BM_POOL_MAX-1].
+ */
+int bman_get_bpid(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ *
+ * Adds the given buffers to RCR entries. If the RCR ring is unresponsive,
+ * the function will return -ETIMEDOUT. Otherwise, it returns zero.
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered. In
+ * the latter case, the content of @bufs is undefined.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
+
+#endif /* __FSL_BMAN_H */
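Putting the declarations above together, a hedged sketch of a pool round-trip follows; the error handling is trimmed and the seed address is assumed to be a DMA-mapped buffer owned by the caller.

	/* Illustrative sketch only: create a pool, seed one buffer, take it back. */
	static int example_bman_roundtrip(dma_addr_t seed_addr)
	{
		struct bman_pool *pool = bman_new_pool();
		struct bm_buffer buf;
		int ret;

		if (!pool)
			return -ENODEV;

		bm_buffer_set64(&buf, seed_addr);
		bm_buffer_set_bpid(&buf, bman_get_bpid(pool));

		ret = bman_release(pool, &buf, 1);	/* 0 on success, -ETIMEDOUT if RCR stalls */
		if (!ret)
			ret = bman_acquire(pool, &buf, 1); /* >0: number of buffers obtained */

		bman_free_pool(pool);
		return ret < 0 ? ret : 0;
	}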
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
new file mode 100644
index 000000000000..37f3eb001a16
--- /dev/null
+++ b/include/soc/fsl/qman.h
@@ -0,0 +1,1074 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#include <linux/bitops.h>
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+extern u16 qm_channel_pool1;
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
+#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
+#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
+/*
+ * This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing.
+ */
+#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+ QM_PIRQ_MRI)
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+ return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
+
+/* --- QMan data structures (and associated constants) --- */
+
+/* "Frame Descriptor (FD)" */
+struct qm_fd {
+ union {
+ struct {
+ u8 cfg8b_w1;
+ u8 bpid; /* Buffer Pool ID */
+ u8 cfg8b_w3;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ __be32 addr_lo; /* low 32-bits of 40-bit address */
+ } __packed;
+ __be64 data;
+ };
+ __be32 cfg; /* format, offset, length / congestion */
+ union {
+ __be32 cmd;
+ __be32 status;
+ };
+} __aligned(8);
+
+#define QM_FD_FORMAT_SG BIT(31)
+#define QM_FD_FORMAT_LONG BIT(30)
+#define QM_FD_FORMAT_COMPOUND BIT(29)
+#define QM_FD_FORMAT_MASK GENMASK(31, 29)
+#define QM_FD_OFF_SHIFT 20
+#define QM_FD_OFF_MASK GENMASK(28, 20)
+#define QM_FD_LEN_MASK GENMASK(19, 0)
+#define QM_FD_LEN_BIG_MASK GENMASK(28, 0)
+
+enum qm_fd_format {
+ /*
+ * 'contig' implies a contiguous buffer, whereas 'sg' implies a
+ * scatter-gather table. 'big' implies a 29-bit length with no offset
+ * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+ * implies a s/g-like table, where each entry itself represents a frame
+ * (contiguous or scatter-gather) and the 29-bit "length" is
+ * interpreted purely for congestion calculations, ie. a "congestion
+ * weight".
+ */
+ qm_fd_contig = 0,
+ qm_fd_contig_big = QM_FD_FORMAT_LONG,
+ qm_fd_sg = QM_FD_FORMAT_SG,
+ qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+ qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+ return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
+
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+ return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
+
+static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
+{
+ fd->addr_hi = upper_32_bits(addr);
+ fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+/*
+ * The 'format' field indicates the interpretation of the remaining
+ * 29 bits of the 32-bit word.
+ * If 'format' is _contig or _sg, 20b length and 9b offset.
+ * If 'format' is _contig_big or _sg_big, 29b length.
+ * If 'format' is _compound, 29b "congestion weight".
+ */
+static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
+{
+ return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
+}
+
+static inline int qm_fd_get_offset(const struct qm_fd *fd)
+{
+ return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
+}
+
+static inline int qm_fd_get_length(const struct qm_fd *fd)
+{
+ return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
+}
+
+static inline int qm_fd_get_len_big(const struct qm_fd *fd)
+{
+ return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
+}
+
+static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
+ int off, int len)
+{
+ fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
+ ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
+}
+
+#define qm_fd_set_contig(fd, off, len) \
+ qm_fd_set_param(fd, qm_fd_contig, off, len)
+#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
+#define qm_fd_set_contig_big(fd, len) \
+ qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
+#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
+
+static inline void qm_fd_clear_fd(struct qm_fd *fd)
+{
+ fd->data = 0;
+ fd->cfg = 0;
+ fd->cmd = 0;
+}
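A hedged sketch of how the FD helpers compose: clear the descriptor, point it at a DMA buffer, then record the contiguous format with its 9-bit offset and 20-bit length. The wrapper and its arguments are illustrative only.

	/* Illustrative sketch only: describe a contiguous frame of 'len' bytes at 'dma'. */
	static void example_build_fd(struct qm_fd *fd, dma_addr_t dma,
				     unsigned int off, unsigned int len)
	{
		qm_fd_clear_fd(fd);
		qm_fd_addr_set64(fd, dma);
		qm_fd_set_contig(fd, off, len);	/* format, offset and length in 'cfg' */
	}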
+
+/* Scatter/Gather table entry */
+struct qm_sg_entry {
+ union {
+ struct {
+ u8 __reserved1[3];
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ __be32 addr_lo; /* low 32-bits of 40-bit address */
+ };
+ __be64 data;
+ };
+ __be32 cfg; /* E bit, F bit, length */
+ u8 __reserved2;
+ u8 bpid;
+ __be16 offset; /* 13-bit, _res[13-15]*/
+} __packed;
+
+#define QM_SG_LEN_MASK GENMASK(29, 0)
+#define QM_SG_OFF_MASK GENMASK(12, 0)
+#define QM_SG_FIN BIT(30)
+#define QM_SG_EXT BIT(31)
+
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+ return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+ return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
+static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
+{
+ sg->addr_hi = upper_32_bits(addr);
+ sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->cfg) & QM_SG_FIN;
+}
+
+static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->cfg) & QM_SG_EXT;
+}
+
+static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
+}
+
+static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
+{
+ sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
+}
+
+static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
+{
+ sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
+}
+
+static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
+}
+
+/* "Frame Dequeue Response" */
+struct qm_dqrr_entry {
+ u8 verb;
+ u8 stat;
+ u16 seqnum; /* 15-bit */
+ u8 tok;
+ u8 __reserved2[3];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ struct qm_fd fd;
+ u8 __reserved4[32];
+} __packed;
+#define QM_DQRR_VERB_VBIT 0x80
+#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
+
+/* "ERN Message Response" */
+/* "FQ State Change Notification" */
+union qm_mr_entry {
+ struct {
+ u8 verb;
+ u8 __reserved[63];
+ };
+ struct {
+ u8 verb;
+ u8 dca;
+ u16 seqnum;
+ u8 rc; /* Rej Code: 8-bit */
+ u8 orp_hi; /* ORP: 24-bit */
+ u16 orp_lo;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ u8 __reserved1[32];
+ } __packed ern;
+ struct {
+ u8 verb;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[6];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ u8 __reserved2[48];
+ } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+};
+#define QM_MR_VERB_VBIT 0x80
+/*
+ * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
+ * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
+ * from the other MR types by noting if the 0x20 bit is unset.
+ */
+#define QM_MR_VERB_TYPE_MASK 0x27
+#define QM_MR_VERB_DC_ERN 0x20
+#define QM_MR_VERB_FQRN 0x21
+#define QM_MR_VERB_FQRNI 0x22
+#define QM_MR_VERB_FQRL 0x23
+#define QM_MR_VERB_FQPN 0x24
+#define QM_MR_RC_MASK 0xf0 /* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP 0x00
+#define QM_MR_RC_WRED 0x10
+#define QM_MR_RC_ERROR 0x20
+#define QM_MR_RC_ORPWINDOW_EARLY 0x30
+#define QM_MR_RC_ORPWINDOW_LATE 0x40
+#define QM_MR_RC_FQ_TAILDROP 0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
+#define QM_MR_RC_ORP_ZERO 0x70
+#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+
+/*
+ * An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out; the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation.
+ */
+struct qm_fqd_stashing {
+ /* See QM_STASHING_EXCL_<...> */
+ u8 exclusive;
+ /* Numbers of cachelines */
+ u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
+};
+
+struct qm_fqd_oac {
+ /* "Overhead Accounting Control", see QM_OAC_<...> */
+ u8 oac; /* oac[6-7], _res[0-5] */
+ /* Two's-complement value (-128 to +127) */
+ s8 oal; /* "Overhead Accounting Length" */
+};
+
+struct qm_fqd {
+ /* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
+ u8 orpc;
+ u8 cgid;
+ __be16 fq_ctrl; /* See QM_FQCTRL_<...> */
+ __be16 dest_wq; /* channel[3-15], wq[0-2] */
+ __be16 ics_cred; /* 15-bit */
+ /*
+ * For "Initialize Frame Queue" commands, the write-enable mask
+ * determines whether 'td' or 'oac_init' is observed. For query
+ * commands, this field is always 'td', and 'oac_query' (below) reflects
+ * the Overhead ACcounting values.
+ */
+ union {
+ __be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
+ struct qm_fqd_oac oac_init;
+ };
+ __be32 context_b;
+ union {
+ /* Treat it as 64-bit opaque */
+ __be64 opaque;
+ struct {
+ __be32 hi;
+ __be32 lo;
+ };
+ /* Treat it as s/w portal stashing config */
+ /* see "FQD Context_A field used for [...]" */
+ struct {
+ struct qm_fqd_stashing stashing;
+ /*
+ * 48-bit address of FQ context to
+ * stash, must be cacheline-aligned
+ */
+ __be16 context_hi;
+ __be32 context_lo;
+ } __packed;
+ } context_a;
+ struct qm_fqd_oac oac_query;
+} __packed;
+
+#define QM_FQD_CHAN_OFF 3
+#define QM_FQD_WQ_MASK GENMASK(2, 0)
+#define QM_FQD_TD_EXP_MASK GENMASK(4, 0)
+#define QM_FQD_TD_MANT_OFF 5
+#define QM_FQD_TD_MANT_MASK GENMASK(12, 5)
+#define QM_FQD_TD_MAX 0xe0000000
+#define QM_FQD_TD_MANT_MAX 0xff
+#define QM_FQD_OAC_OFF 6
+#define QM_FQD_AS_OFF 4
+#define QM_FQD_DS_OFF 2
+#define QM_FQD_XS_MASK 0x3
+
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+ return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
+}
+
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+ return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
+}
+
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+ return qm_fqd_stashing_get64(fqd);
+}
+
+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.context_hi = upper_32_bits(addr);
+ fqd->context_a.context_lo = lower_32_bits(addr);
+}
+
+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr));
+ fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
+ int roundup)
+{
+ u32 e = 0;
+ int td, oddbit = 0;
+
+ if (val > QM_FQD_TD_MAX)
+ return -ERANGE;
+
+ while (val > QM_FQD_TD_MANT_MAX) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+
+ td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
+ td |= (e & QM_FQD_TD_EXP_MASK);
+ fqd->td = cpu_to_be16(td);
+ return 0;
+}
+/* and the other direction */
+static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
+{
+ int td = be16_to_cpu(fqd->td);
+
+ return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
+ << (td & QM_FQD_TD_EXP_MASK);
+}
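As a worked example of this mant+exp encoding: a requested threshold of 74565 frames does not fit in the 8-bit mantissa, so it is halved (with optional round-up) until it does, giving mant = 146 and exp = 9 with round-up; the decoded threshold is then 146 << 9 = 74752, slightly above the request (74240, slightly below, without round-up). A hedged snippet using the helpers:

	/* Illustrative sketch only: request a taildrop threshold of 74565 frames. */
	static int example_set_taildrop(struct qm_fqd *fqd)
	{
		int err = qm_fqd_set_taildrop(fqd, 74565, 1);	/* stores mant=146, exp=9 */

		if (err)
			return err;
		/* qm_fqd_get_taildrop(fqd) now returns 146 << 9 = 74752 */
		return 0;
	}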
+
+static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
+{
+ struct qm_fqd_stashing *st = &fqd->context_a.stashing;
+
+ st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
+ ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
+ (cs & QM_FQD_XS_MASK);
+}
+
+static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
+{
+ return fqd->context_a.stashing.cl;
+}
+
+static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
+{
+ fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
+}
+
+static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
+{
+ fqd->oac_init.oal = val;
+}
+
+static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
+{
+ fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
+ (wq & QM_FQD_WQ_MASK));
+}
+
+static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
+{
+ return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
+}
+
+static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
+{
+ return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
+}
+
+/* See "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
+#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
+#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION 0x04
+#define QM_STASHING_EXCL_DATA 0x02
+#define QM_STASHING_EXCL_CTX 0x01
+
+/* See "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
+
+/*
+ * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ * MaxTH = MA * (2 ^ Mn)
+ * Slope = SA / (2 ^ Sn)
+ * MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+ /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
+ u32 word;
+};
+/*
+ * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ * CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+ /* _res[13-15], TA[5-12], Tn[0-4] */
+ u16 word;
+};
+/*
+ * This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It's suctioned out here into its own
+ * struct.
+ */
+struct __qm_mc_cgr {
+ struct qm_cgr_wr_parm wr_parm_g;
+ struct qm_cgr_wr_parm wr_parm_y;
+ struct qm_cgr_wr_parm wr_parm_r;
+ u8 wr_en_g; /* boolean, use QM_CGR_EN */
+ u8 wr_en_y; /* boolean, use QM_CGR_EN */
+ u8 wr_en_r; /* boolean, use QM_CGR_EN */
+ u8 cscn_en; /* boolean, use QM_CGR_EN */
+ union {
+ struct {
+ u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+ };
+ u32 cscn_targ; /* use QM_CGR_TARG_* */
+ };
+ u8 cstd_en; /* boolean, use QM_CGR_EN */
+ u8 cs; /* boolean, only used in query response */
+ struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
+ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+ return ((th->word >> 5) & 0xff) << (th->word & 0x1f);
+}
+
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ th->word = ((val & 0xff) << 5) | (e & 0x1f);
+ return 0;
+}
+
+/* "Initialize FQ" */
+struct qm_mcc_initfq {
+ u8 __reserved1[2];
+ u16 we_mask; /* Write Enable Mask */
+ u32 fqid; /* 24-bit */
+ u16 count; /* Initialises 'count+1' FQDs */
+ struct qm_fqd fqd; /* the FQD fields go here */
+ u8 __reserved2[30];
+} __packed;
+/* "Initialize/Modify CGR" */
+struct qm_mcc_initcgr {
+ u8 __reserve1[2];
+ u16 we_mask; /* Write Enable Mask */
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[2];
+ u8 cgid;
+ u8 __reserved3[32];
+} __packed;
+
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC 0x0100
+#define QM_INITFQ_WE_ORPC 0x0080
+#define QM_INITFQ_WE_CGID 0x0040
+#define QM_INITFQ_WE_FQCTRL 0x0020
+#define QM_INITFQ_WE_DESTWQ 0x0010
+#define QM_INITFQ_WE_ICSCRED 0x0008
+#define QM_INITFQ_WE_TDTHRESH 0x0004
+#define QM_INITFQ_WE_CONTEXTB 0x0002
+#define QM_INITFQ_WE_CONTEXTA 0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G 0x0400
+#define QM_CGR_WE_WR_PARM_Y 0x0200
+#define QM_CGR_WE_WR_PARM_R 0x0100
+#define QM_CGR_WE_WR_EN_G 0x0080
+#define QM_CGR_WE_WR_EN_Y 0x0040
+#define QM_CGR_WE_WR_EN_R 0x0020
+#define QM_CGR_WE_CSCN_EN 0x0010
+#define QM_CGR_WE_CSCN_TARG 0x0008
+#define QM_CGR_WE_CSTD_EN 0x0004
+#define QM_CGR_WE_CS_THRES 0x0002
+#define QM_CGR_WE_MODE 0x0001
+
+#define QMAN_CGR_FLAG_USE_INIT 0x00000001
+
+ /* Portal and Frame Queues */
+/* Represents a managed portal */
+struct qman_portal;
+
+/*
+ * This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down.
+ */
+struct qman_fq;
+
+/*
+ * This object type represents a QMan congestion group, it is defined further
+ * down.
+ */
+struct qman_cgr;
+
+/*
+ * This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because contextB is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.
+ */
+enum qman_cb_dqrr_result {
+ /* DQRR entry can be consumed */
+ qman_cb_dqrr_consume,
+ /* Like _consume, but requests parking - FQ must be held-active */
+ qman_cb_dqrr_park,
+ /* Does not consume, for DCA mode only. */
+ qman_cb_dqrr_defer,
+ /*
+ * Stop processing without consuming this ring entry. Exits the current
+ * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
+ * an interrupt handler, the callback would typically call
+ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+ * otherwise the interrupt will reassert immediately.
+ */
+ qman_cb_dqrr_stop,
+ /* Like qman_cb_dqrr_stop, but consumes the current entry. */
+ qman_cb_dqrr_consume_stop
+};
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr);
+
+/*
+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns.
+ */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+ const union qm_mr_entry *msg);
+
+/*
+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". Things like "retired" will not
+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
+ * then, to indicate it's completing and to gate attempts to retry the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked".
+ */
+enum qman_fq_state {
+ qman_fq_state_oos,
+ qman_fq_state_parked,
+ qman_fq_state_sched,
+ qman_fq_state_retired
+};
+
+#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
+
+/*
+ * Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ * // myfq is allocated and driver_fq callbacks filled in;
+ * struct my_fq {
+ * struct qman_fq base;
+ * int an_extra_field;
+ * [ ... add other fields to be associated with each FQ ...]
+ * } *myfq = some_my_fq_allocator();
+ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ * // in a dequeue callback, access extra fields from 'fq' via a cast;
+ * struct my_fq *myfq = (struct my_fq *)fq;
+ * do_something_with(myfq->an_extra_field);
+ * [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify how ever
+ * many cachelines are required to stash 'struct my_fq', to accelerate not
+ * only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+ qman_cb_dqrr dqrr; /* for dequeued frames */
+ qman_cb_mr ern; /* for s/w ERNs */
+ qman_cb_mr fqs; /* frame-queue state changes*/
+};
+
+struct qman_fq {
+ /* Caller of qman_create_fq() provides these demux callbacks */
+ struct qman_fq_cb cb;
+ /*
+ * These are internal to the driver, don't touch. In particular, they
+ * may change, be removed, or extended (so you shouldn't rely on
+ * sizeof(qman_fq) being a constant).
+ */
+ u32 fqid, idx;
+ unsigned long flags;
+ enum qman_fq_state state;
+ int cgr_groupid;
+};
+
+/*
+ * This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+ struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+ /* Set these prior to qman_create_cgr() */
+ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+ qman_cb_cgr cb;
+ /* These are private to the driver */
+ u16 chan; /* portal channel this object is created on */
+ struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
+
+ /* Portal Management */
+/**
+ * qman_p_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions).
+ */
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_p_irqsource_remove - remove processing sources from being int-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions.
+ */
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *qman_affine_cpus(void);
+
+/**
+ * qman_affine_channel - return the channel ID of a portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the mask returned from qman_affine_cpus().
+ */
+u16 qman_affine_channel(int cpu);
+
+/**
+ * qman_get_affine_portal - return the portal pointer affine to cpu
+ * @cpu: the cpu whose affine portal is the subject of the query
+ */
+struct qman_portal *qman_get_affine_portal(int cpu);
+
+/**
+ * qman_p_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * The return value represents the number of DQRR entries processed.
+ */
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
+
+/**
+ * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
+
+ /* FQ management */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with, it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qm_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' or in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ * initialised to a value used by the driver for demux.
+ * - if context_b is initialised for demux, so is context_a in case stashing
+ * is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
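A hedged sketch of the create/init sequence documented above for a software-consumed frame queue; the dynamic FQID flag, the HOLDACTIVE control and the callback wiring are placeholders chosen for illustration, and the endianness of each assignment simply follows the field types declared earlier.

	/* Illustrative sketch only: create an FQ with a dynamic FQID and schedule it. */
	static int example_setup_rx_fq(struct qman_fq *fq, qman_cb_dqrr dqrr_cb)
	{
		struct qm_mcc_initfq opts = { };
		int err;

		fq->cb.dqrr = dqrr_cb;		/* demux callback for dequeued frames */
		err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
		if (err)
			return err;

		opts.we_mask = QM_INITFQ_WE_FQCTRL;		/* only write fq_ctrl */
		opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
		return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	}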
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked; this takes it to
+ * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ *
+ * Fills an entry in the EQCR of portal @qm to enqueue the frame described by
+ * @fd. The descriptor details are copied from @fd to the EQCR entry; the 'pid'
+ * field is ignored. The return value is non-zero on error, such as ring full.
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count);
+#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)
+
+/**
+ * qman_release_fqid - Release the specified frame queue ID
+ * @fqid: the FQID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * FQID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_fqid(u32 fqid);
+
+ /* Pool-channel management */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count);
+#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)
+
+/**
+ * qman_release_pool - Release the specified pool-channel ID
+ * @id: the pool-chan ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * pool-channel ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_pool(u32 id);
+
+ /* CGR management */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the cpu on which this API is executed. If opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
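A hedged sketch of registering a CGR with congestion-state tail drop enabled and a threshold set via the converter defined earlier; the 64k threshold, the callback and the wrapper itself are placeholders.

	/* Illustrative sketch only: register a CGR with a 64k-frame CS threshold. */
	static int example_setup_cgr(struct qman_cgr *cgr, u32 cgrid, qman_cb_cgr cb)
	{
		struct qm_mcc_initcgr opts = { };

		cgr->cgrid = cgrid;
		cgr->cb = cb;

		opts.we_mask = QM_CGR_WE_CS_THRES | QM_CGR_WE_CSTD_EN;
		opts.cgr.cstd_en = QM_CGR_EN;
		qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 64 * 1024, 1);

		return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	}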
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be executed on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
+ * @cgr: the 'cgr' object to deregister
+ *
+ * This will select the proper CPU and run qman_delete_cgr() there.
+ */
+void qman_delete_cgr_safe(struct qman_cgr *cgr);
+
+/**
+ * qman_query_cgr_congested - Queries CGR's congestion status
+ * @cgr: the 'cgr' object to query
+ * @result: returns @cgr's congestion status, 1 (true) if congested
+ */
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count);
+#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)
+
+/**
+ * qman_release_cgrid - Release the specified CGR ID
+ * @id: the CGR ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * CGR ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_cgrid(u32 id);
+
+#endif /* __FSL_QMAN_H */
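For orientation, a minimal sketch of how the FQID allocator declared above is meant to be used; the frame-queue setup in between is omitted and the example is illustrative only, not taken from the patch:

	/* Sketch: allocate one FQID, use it, then return it to the pool. */
	static int example_grab_and_return_fqid(void)
	{
		u32 fqid;
		int err;

		err = qman_alloc_fqid(&fqid);	/* same as qman_alloc_fqid_range(&fqid, 1) */
		if (err)
			return err;

		/* ... create and use a frame queue with this FQID here ... */

		return qman_release_fqid(fqid);	/* hand the ID back to the pool */
	}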
diff --git a/include/soc/rockchip/rockchip_sip.h b/include/soc/rockchip/rockchip_sip.h
new file mode 100644
index 000000000000..7e28092c4d3d
--- /dev/null
+++ b/include/soc/rockchip/rockchip_sip.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
+ * Author: Lin Huang <hl@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef __SOC_ROCKCHIP_SIP_H
+#define __SOC_ROCKCHIP_SIP_H
+
+#define ROCKCHIP_SIP_DRAM_FREQ 0x82000008
+#define ROCKCHIP_SIP_CONFIG_DRAM_INIT 0x00
+#define ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE 0x01
+#define ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE 0x02
+#define ROCKCHIP_SIP_CONFIG_DRAM_SET_AT_SR 0x03
+#define ROCKCHIP_SIP_CONFIG_DRAM_GET_BW 0x04
+#define ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE 0x05
+#define ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ 0x06
+#define ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM 0x07
+
+#endif
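As a rough illustration only: a caller would typically reach this SIP service through the standard SMCCC helper, roughly as below. The exact argument layout (which register carries the sub-command, and how status/result come back in res.a0/a1) is not defined by this header and is assumed here.

	#include <linux/arm-smccc.h>
	#include <soc/rockchip/rockchip_sip.h>

	/* Illustrative sketch: query the current DRAM rate from firmware.
	 * Placing the sub-command in a3 and the payload in res.a1 are
	 * assumptions, not guarantees made by this header.
	 */
	static unsigned long example_dram_get_rate(void)
	{
		struct arm_smccc_res res;

		arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
			      ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE,
			      0, 0, 0, 0, &res);

		return res.a0 ? 0 : res.a1;
	}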
diff --git a/include/sound/da7219.h b/include/sound/da7219.h
index 02876acdc840..409ef1397fd3 100644
--- a/include/sound/da7219.h
+++ b/include/sound/da7219.h
@@ -34,6 +34,8 @@ enum da7219_mic_amp_in_sel {
struct da7219_aad_pdata;
struct da7219_pdata {
+ bool wakeup_source;
+
/* Mic */
enum da7219_micbias_voltage micbias_lvl;
enum da7219_mic_amp_in_sel mic_amp_in_sel;
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index ff1aecf325e8..0013063db7f2 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -89,6 +89,19 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_SD_BDLPL 0x18
#define AZX_REG_SD_BDLPU 0x1c
+/* GTS registers */
+#define AZX_REG_LLCH 0x14
+
+#define AZX_REG_GTS_BASE 0x520
+
+#define AZX_REG_GTSCC (AZX_REG_GTS_BASE + 0x00)
+#define AZX_REG_WALFCC (AZX_REG_GTS_BASE + 0x04)
+#define AZX_REG_TSCCL (AZX_REG_GTS_BASE + 0x08)
+#define AZX_REG_TSCCU (AZX_REG_GTS_BASE + 0x0C)
+#define AZX_REG_LLPFOC (AZX_REG_GTS_BASE + 0x14)
+#define AZX_REG_LLPCL (AZX_REG_GTS_BASE + 0x18)
+#define AZX_REG_LLPCU (AZX_REG_GTS_BASE + 0x1C)
+
/* Haswell/Broadwell display HD-A controller Extended Mode registers */
#define AZX_REG_HSW_EM4 0x100c
#define AZX_REG_HSW_EM5 0x1010
@@ -242,6 +255,29 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
/* Interval used to calculate the iterating register offset */
#define AZX_DRSM_INTERVAL 0x08
+/* Global time synchronization registers */
+#define GTSCC_TSCCD_MASK 0x80000000
+#define GTSCC_TSCCD_SHIFT BIT(31)
+#define GTSCC_TSCCI_MASK 0x20
+#define GTSCC_CDMAS_DMA_DIR_SHIFT 4
+
+#define WALFCC_CIF_MASK 0x1FF
+#define WALFCC_FN_SHIFT 9
+#define HDA_CLK_CYCLES_PER_FRAME 512
+
+/*
+ * An error can occur near frame "rollover". The clocks-in-frame value
+ * indicates whether this error may have occurred. Here we use a value of 10;
+ * see the errata for the exact number (which is < 10).
+ */
+#define HDA_MAX_CYCLE_VALUE 499
+#define HDA_MAX_CYCLE_OFFSET 10
+#define HDA_MAX_CYCLE_READ_RETRY 10
+
+#define TSCCU_CCU_SHIFT 32
+#define LLPC_CCU_SHIFT 32
+
+
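The *_CCU_SHIFT values above exist to splice the upper and lower 32-bit register halves back into one 64-bit count; a minimal sketch (the raw register reads from LLPCL/LLPCU themselves are left out):

	/* Sketch: combine the link-position counter halves into a 64-bit value. */
	static u64 example_combine_llp(u32 llpcl, u32 llpcu)
	{
		return ((u64)llpcu << LLPC_CCU_SHIFT) | llpcl;
	}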
/*
* helpers to read the stream position
*/
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 93e63c56f48f..56004ec8d441 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -245,6 +245,12 @@ struct hdac_rb {
/*
* HD-audio bus base driver
+ *
+ * @ppcap: pp capabilities pointer
+ * @spbcap: SPIB capabilities pointer
+ * @mlcap: MultiLink capabilities pointer
+ * @gtscap: gts capabilities pointer
+ * @drsmcap: dma resume capabilities pointer
*/
struct hdac_bus {
struct device *dev;
@@ -256,6 +262,12 @@ struct hdac_bus {
void __iomem *remap_addr;
int irq;
+ void __iomem *ppcap;
+ void __iomem *spbcap;
+ void __iomem *mlcap;
+ void __iomem *gtscap;
+ void __iomem *drsmcap;
+
/* codec linked list */
struct list_head codec_list;
unsigned int num_codecs;
@@ -335,6 +347,7 @@ static inline void snd_hdac_codec_link_down(struct hdac_device *codec)
int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val);
int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
unsigned int *res);
+int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus);
int snd_hdac_link_power(struct hdac_device *codec, bool enable);
bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset);
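With the capability pointers now living in struct hdac_bus, a controller driver would call the bus-level parser during probe and then test the pointers it fills in. A hedged sketch (a negative return is assumed to indicate failure):

	static int example_parse_caps(struct hdac_bus *bus)
	{
		int err = snd_hdac_bus_parse_capabilities(bus);

		if (err < 0)
			return err;

		/* The pointers are filled in, or left NULL, by the parser. */
		pr_debug("ppcap: %s, gtscap: %s, drsmcap: %s\n",
			 bus->ppcap ? "yes" : "no",
			 bus->gtscap ? "yes" : "no",
			 bus->drsmcap ? "yes" : "no");
		return 0;
	}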
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index b9593b201599..8660a7f10851 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -8,11 +8,6 @@
*
* @bus: hdac bus
* @num_streams: streams supported
- * @ppcap: pp capabilities pointer
- * @spbcap: SPIB capabilities pointer
- * @mlcap: MultiLink capabilities pointer
- * @gtscap: gts capabilities pointer
- * @drsmcap: dma resume capabilities pointer
* @hlink_list: link list of HDA links
* @lock: lock for link mgmt
* @cmd_dma_state: state of cmd DMAs: CORB and RIRB
@@ -22,12 +17,6 @@ struct hdac_ext_bus {
int num_streams;
int idx;
- void __iomem *ppcap;
- void __iomem *spbcap;
- void __iomem *mlcap;
- void __iomem *gtscap;
- void __iomem *drsmcap;
-
struct list_head hlink_list;
struct mutex lock;
@@ -54,7 +43,6 @@ void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
#define HDA_CODEC_EXT_ENTRY(_vid, _revid, _name, _drv_data) \
HDA_CODEC_REV_EXT_ENTRY(_vid, _revid, _name, _drv_data)
-int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *sbus);
void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *chip, bool enable);
void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *chip, bool enable);
diff --git a/include/sound/l3.h b/include/sound/l3.h
index 423a08f0f1b0..1471da22adad 100644
--- a/include/sound/l3.h
+++ b/include/sound/l3.h
@@ -2,9 +2,15 @@
#define _L3_H_ 1
struct l3_pins {
- void (*setdat)(int);
- void (*setclk)(int);
- void (*setmode)(int);
+ void (*setdat)(struct l3_pins *, int);
+ void (*setclk)(struct l3_pins *, int);
+ void (*setmode)(struct l3_pins *, int);
+
+ int gpio_data;
+ int gpio_clk;
+ int gpio_mode;
+ int use_gpios;
+
int data_hold;
int data_setup;
int clock_high;
@@ -13,6 +19,9 @@ struct l3_pins {
int mode_setup;
};
+struct device;
+
int l3_write(struct l3_pins *adap, u8 addr, u8 *data, int len);
+int l3_set_gpio_ops(struct device *dev, struct l3_pins *adap);
#endif
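Because the callbacks now receive the l3_pins pointer, a board driver can keep its GPIO numbers inside the same structure instead of in globals. A hedged sketch of such callbacks using the legacy GPIO helpers (whether l3_set_gpio_ops() installs equivalent ones internally is not shown by this header):

	#include <linux/gpio.h>
	#include <sound/l3.h>

	/* Sketch: struct-aware callbacks driving the GPIOs stored in l3_pins. */
	static void example_setdat(struct l3_pins *adap, int val)
	{
		gpio_set_value(adap->gpio_data, val);
	}

	static void example_setclk(struct l3_pins *adap, int val)
	{
		gpio_set_value(adap->gpio_clk, val);
	}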
diff --git a/include/sound/rt5660.h b/include/sound/rt5660.h
new file mode 100644
index 000000000000..065f83a24db6
--- /dev/null
+++ b/include/sound/rt5660.h
@@ -0,0 +1,31 @@
+/*
+ * linux/sound/rt5660.h -- Platform data for RT5660
+ *
+ * Copyright 2016 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT5660_H
+#define __LINUX_SND_RT5660_H
+
+enum rt5660_dmic1_data_pin {
+ RT5660_DMIC1_NULL,
+ RT5660_DMIC1_DATA_GPIO2,
+ RT5660_DMIC1_DATA_IN1P,
+};
+
+struct rt5660_platform_data {
+ /* IN1 & IN3 can optionally be differential */
+ bool in1_diff;
+ bool in3_diff;
+ bool use_ldo2;
+ bool poweroff_codec_in_suspend;
+
+ enum rt5660_dmic1_data_pin dmic1_data_pin;
+};
+
+#endif
diff --git a/include/sound/s3c24xx_uda134x.h b/include/sound/s3c24xx_uda134x.h
index 33df4cb909d3..ffaf1f098c8e 100644
--- a/include/sound/s3c24xx_uda134x.h
+++ b/include/sound/s3c24xx_uda134x.h
@@ -7,7 +7,6 @@ struct s3c24xx_uda134x_platform_data {
int l3_clk;
int l3_mode;
int l3_data;
- void (*power) (int);
int model;
};
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 86088aed9002..fd6412551145 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -27,10 +27,45 @@ int asoc_simple_card_parse_daifmt(struct device *dev,
struct device_node *codec,
char *prefix,
unsigned int *retfmt);
+__printf(3, 4)
int asoc_simple_card_set_dailink_name(struct device *dev,
struct snd_soc_dai_link *dai_link,
const char *fmt, ...);
int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
char *prefix);
+#define asoc_simple_card_parse_clk_cpu(node, dai_link, simple_dai) \
+ asoc_simple_card_parse_clk(node, dai_link->cpu_of_node, simple_dai)
+#define asoc_simple_card_parse_clk_codec(node, dai_link, simple_dai) \
+ asoc_simple_card_parse_clk(node, dai_link->codec_of_node, simple_dai)
+int asoc_simple_card_parse_clk(struct device_node *node,
+ struct device_node *dai_of_node,
+ struct asoc_simple_dai *simple_dai);
+
+#define asoc_simple_card_parse_cpu(node, dai_link, \
+ list_name, cells_name, is_single_link) \
+ asoc_simple_card_parse_dai(node, &dai_link->cpu_of_node, \
+ &dai_link->cpu_dai_name, list_name, cells_name, is_single_link)
+#define asoc_simple_card_parse_codec(node, dai_link, list_name, cells_name) \
+ asoc_simple_card_parse_dai(node, &dai_link->codec_of_node, \
+ &dai_link->codec_dai_name, list_name, cells_name, NULL)
+#define asoc_simple_card_parse_platform(node, dai_link, list_name, cells_name) \
+ asoc_simple_card_parse_dai(node, &dai_link->platform_of_node, \
+ NULL, list_name, cells_name, NULL)
+int asoc_simple_card_parse_dai(struct device_node *node,
+ struct device_node **endpoint_np,
+ const char **dai_name,
+ const char *list_name,
+ const char *cells_name,
+ int *is_single_links);
+
+int asoc_simple_card_init_dai(struct snd_soc_dai *dai,
+ struct asoc_simple_dai *simple_dai);
+
+int asoc_simple_card_canonicalize_dailink(struct snd_soc_dai_link *dai_link);
+void asoc_simple_card_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
+ int is_single_links);
+
+int asoc_simple_card_clean_reference(struct snd_soc_card *card);
+
#endif /* __SIMPLE_CARD_CORE_H */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 6144882cc96a..4f1c784e44f6 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -898,14 +898,6 @@ struct snd_soc_codec_driver {
int (*resume)(struct snd_soc_codec *);
struct snd_soc_component_driver component_driver;
- /* Default control and setup, added after probe() is run */
- const struct snd_kcontrol_new *controls;
- int num_controls;
- const struct snd_soc_dapm_widget *dapm_widgets;
- int num_dapm_widgets;
- const struct snd_soc_dapm_route *dapm_routes;
- int num_dapm_routes;
-
/* codec wide operations */
int (*set_sysclk)(struct snd_soc_codec *codec,
int clk_id, int source, unsigned int freq, int dir);
@@ -1547,17 +1539,6 @@ static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platfo
return snd_soc_component_get_drvdata(&platform->component);
}
-static inline void snd_soc_pcm_set_drvdata(struct snd_soc_pcm_runtime *rtd,
- void *data)
-{
- dev_set_drvdata(rtd->dev, data);
-}
-
-static inline void *snd_soc_pcm_get_drvdata(struct snd_soc_pcm_runtime *rtd)
-{
- return dev_get_drvdata(rtd->dev);
-}
-
static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
{
INIT_LIST_HEAD(&card->codec_dev_list);
diff --git a/include/sound/tlv.h b/include/sound/tlv.h
index df97d1966468..3677ebb928d5 100644
--- a/include/sound/tlv.h
+++ b/include/sound/tlv.h
@@ -22,67 +22,39 @@
*
*/
-/*
- * TLV structure is right behind the struct snd_ctl_tlv:
- * unsigned int type - see SNDRV_CTL_TLVT_*
- * unsigned int length
- * .... data aligned to sizeof(unsigned int), use
- * block_length = (length + (sizeof(unsigned int) - 1)) &
- * ~(sizeof(unsigned int) - 1)) ....
- */
-
#include <uapi/sound/tlv.h>
-#define TLV_ITEM(type, ...) \
- (type), TLV_LENGTH(__VA_ARGS__), __VA_ARGS__
-#define TLV_LENGTH(...) \
- ((unsigned int)sizeof((const unsigned int[]) { __VA_ARGS__ }))
+/* For historical reasons, these macros are aliases to the ones in UAPI. */
+#define TLV_ITEM SNDRV_CTL_TLVD_ITEM
+#define TLV_LENGTH SNDRV_CTL_TLVD_LENGTH
+
+#define TLV_CONTAINER_ITEM SNDRV_CTL_TLVD_CONTAINER_ITEM
+#define DECLARE_TLV_CONTAINER SNDRV_CTL_TLVD_DECLARE_CONTAINER
-#define TLV_CONTAINER_ITEM(...) \
- TLV_ITEM(SNDRV_CTL_TLVT_CONTAINER, __VA_ARGS__)
-#define DECLARE_TLV_CONTAINER(name, ...) \
- unsigned int name[] = { TLV_CONTAINER_ITEM(__VA_ARGS__) }
+#define TLV_DB_SCALE_MASK SNDRV_CTL_TLVD_DB_SCALE_MASK
+#define TLV_DB_SCALE_MUTE SNDRV_CTL_TLVD_DB_SCALE_MUTE
+#define TLV_DB_SCALE_ITEM SNDRV_CTL_TLVD_DB_SCALE_ITEM
+#define DECLARE_TLV_DB_SCALE SNDRV_CTL_TLVD_DECLARE_DB_SCALE
-#define TLV_DB_SCALE_MASK 0xffff
-#define TLV_DB_SCALE_MUTE 0x10000
-#define TLV_DB_SCALE_ITEM(min, step, mute) \
- TLV_ITEM(SNDRV_CTL_TLVT_DB_SCALE, \
- (min), \
- ((step) & TLV_DB_SCALE_MASK) | \
- ((mute) ? TLV_DB_SCALE_MUTE : 0))
-#define DECLARE_TLV_DB_SCALE(name, min, step, mute) \
- unsigned int name[] = { TLV_DB_SCALE_ITEM(min, step, mute) }
+#define TLV_DB_MINMAX_ITEM SNDRV_CTL_TLVD_DB_MINMAX_ITEM
+#define TLV_DB_MINMAX_MUTE_ITEM SNDRV_CTL_TLVD_DB_MINMAX_MUTE_ITEM
+#define DECLARE_TLV_DB_MINMAX SNDRV_CTL_TLVD_DECLARE_DB_MINMAX
+#define DECLARE_TLV_DB_MINMAX_MUTE SNDRV_CTL_TLVD_DECLARE_DB_MINMAX_MUTE
-/* dB scale specified with min/max values instead of step */
-#define TLV_DB_MINMAX_ITEM(min_dB, max_dB) \
- TLV_ITEM(SNDRV_CTL_TLVT_DB_MINMAX, (min_dB), (max_dB))
-#define TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
- TLV_ITEM(SNDRV_CTL_TLVT_DB_MINMAX_MUTE, (min_dB), (max_dB))
-#define DECLARE_TLV_DB_MINMAX(name, min_dB, max_dB) \
- unsigned int name[] = { TLV_DB_MINMAX_ITEM(min_dB, max_dB) }
-#define DECLARE_TLV_DB_MINMAX_MUTE(name, min_dB, max_dB) \
- unsigned int name[] = { TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) }
+#define TLV_DB_LINEAR_ITEM SNDRV_CTL_TLVD_DB_LINEAR_ITEM
+#define DECLARE_TLV_DB_LINEAR SNDRV_CTL_TLVD_DECLARE_DB_LINEAR
-/* linear volume between min_dB and max_dB (.01dB unit) */
-#define TLV_DB_LINEAR_ITEM(min_dB, max_dB) \
- TLV_ITEM(SNDRV_CTL_TLVT_DB_LINEAR, (min_dB), (max_dB))
-#define DECLARE_TLV_DB_LINEAR(name, min_dB, max_dB) \
- unsigned int name[] = { TLV_DB_LINEAR_ITEM(min_dB, max_dB) }
+#define TLV_DB_RANGE_ITEM SNDRV_CTL_TLVD_DB_RANGE_ITEM
+#define DECLARE_TLV_DB_RANGE SNDRV_CTL_TLVD_DECLARE_DB_RANGE
-/* dB range container:
- * Items in dB range container must be ordered by their values and by their
- * dB values. This implies that larger values must correspond with larger
- * dB values (which is also required for all other mixer controls).
+#define TLV_DB_GAIN_MUTE SNDRV_CTL_TLVD_DB_GAIN_MUTE
+
+/*
+ * The below assumes that each item TLV is 4 words like DB_SCALE or LINEAR.
+ * This is old-fashioned and was obsoleted by commit bf1d1c9b6179 ("ALSA: tlv:
+ * add DECLARE_TLV_DB_RANGE()").
*/
-/* Each item is: <min> <max> <TLV> */
-#define TLV_DB_RANGE_ITEM(...) \
- TLV_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__)
-#define DECLARE_TLV_DB_RANGE(name, ...) \
- unsigned int name[] = { TLV_DB_RANGE_ITEM(__VA_ARGS__) }
-/* The below assumes that each item TLV is 4 words like DB_SCALE or LINEAR */
-#define TLV_DB_RANGE_HEAD(num) \
+#define TLV_DB_RANGE_HEAD(num) \
SNDRV_CTL_TLVT_DB_RANGE, 6 * (num) * sizeof(unsigned int)
-#define TLV_DB_GAIN_MUTE -9999999
-
#endif /* __SOUND_TLV_H */
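Since the old names are now straight aliases to the UAPI macros, existing driver declarations keep compiling unchanged; for example (values are illustrative):

	#include <sound/tlv.h>

	/* A -63.50 dB .. 0 dB scale in 0.5 dB steps, with the lowest step muting. */
	static const DECLARE_TLV_DB_SCALE(example_out_tlv, -6350, 50, 1);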
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
new file mode 100644
index 000000000000..ab68640a18d0
--- /dev/null
+++ b/include/trace/events/cgroup.h
@@ -0,0 +1,163 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cgroup
+
+#if !defined(_TRACE_CGROUP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CGROUP_H
+
+#include <linux/cgroup.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(cgroup_root,
+
+ TP_PROTO(struct cgroup_root *root),
+
+ TP_ARGS(root),
+
+ TP_STRUCT__entry(
+ __field( int, root )
+ __field( u16, ss_mask )
+ __string( name, root->name )
+ ),
+
+ TP_fast_assign(
+ __entry->root = root->hierarchy_id;
+ __entry->ss_mask = root->subsys_mask;
+ __assign_str(name, root->name);
+ ),
+
+ TP_printk("root=%d ss_mask=%#x name=%s",
+ __entry->root, __entry->ss_mask, __get_str(name))
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_setup_root,
+
+ TP_PROTO(struct cgroup_root *root),
+
+ TP_ARGS(root)
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_destroy_root,
+
+ TP_PROTO(struct cgroup_root *root),
+
+ TP_ARGS(root)
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_remount,
+
+ TP_PROTO(struct cgroup_root *root),
+
+ TP_ARGS(root)
+);
+
+DECLARE_EVENT_CLASS(cgroup,
+
+ TP_PROTO(struct cgroup *cgrp),
+
+ TP_ARGS(cgrp),
+
+ TP_STRUCT__entry(
+ __field( int, root )
+ __field( int, id )
+ __field( int, level )
+ __dynamic_array(char, path,
+ cgrp->kn ? cgroup_path(cgrp, NULL, 0) + 1
+ : strlen("(null)"))
+ ),
+
+ TP_fast_assign(
+ __entry->root = cgrp->root->hierarchy_id;
+ __entry->id = cgrp->id;
+ __entry->level = cgrp->level;
+ if (cgrp->kn)
+ cgroup_path(cgrp, __get_dynamic_array(path),
+ __get_dynamic_array_len(path));
+ else
+ __assign_str(path, "(null)");
+ ),
+
+ TP_printk("root=%d id=%d level=%d path=%s",
+ __entry->root, __entry->id, __entry->level, __get_str(path))
+);
+
+DEFINE_EVENT(cgroup, cgroup_mkdir,
+
+ TP_PROTO(struct cgroup *cgroup),
+
+ TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_rmdir,
+
+ TP_PROTO(struct cgroup *cgroup),
+
+ TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_release,
+
+ TP_PROTO(struct cgroup *cgroup),
+
+ TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_rename,
+
+ TP_PROTO(struct cgroup *cgroup),
+
+ TP_ARGS(cgroup)
+);
+
+DECLARE_EVENT_CLASS(cgroup_migrate,
+
+ TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+ TP_ARGS(dst_cgrp, task, threadgroup),
+
+ TP_STRUCT__entry(
+ __field( int, dst_root )
+ __field( int, dst_id )
+ __field( int, dst_level )
+ __dynamic_array(char, dst_path,
+ dst_cgrp->kn ? cgroup_path(dst_cgrp, NULL, 0) + 1
+ : strlen("(null)"))
+ __field( int, pid )
+ __string( comm, task->comm )
+ ),
+
+ TP_fast_assign(
+ __entry->dst_root = dst_cgrp->root->hierarchy_id;
+ __entry->dst_id = dst_cgrp->id;
+ __entry->dst_level = dst_cgrp->level;
+ if (dst_cgrp->kn)
+ cgroup_path(dst_cgrp, __get_dynamic_array(dst_path),
+ __get_dynamic_array_len(dst_path));
+ else
+ __assign_str(dst_path, "(null)");
+ __entry->pid = task->pid;
+ __assign_str(comm, task->comm);
+ ),
+
+ TP_printk("dst_root=%d dst_id=%d dst_level=%d dst_path=%s pid=%d comm=%s",
+ __entry->dst_root, __entry->dst_id, __entry->dst_level,
+ __get_str(dst_path), __entry->pid, __get_str(comm))
+);
+
+DEFINE_EVENT(cgroup_migrate, cgroup_attach_task,
+
+ TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+ TP_ARGS(dst_cgrp, task, threadgroup)
+);
+
+DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks,
+
+ TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+ TP_ARGS(dst_cgrp, task, threadgroup)
+);
+
+#endif /* _TRACE_CGROUP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c2ba402ab256..cbdb90b6b308 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -13,7 +13,7 @@
EM( COMPACT_SKIPPED, "skipped") \
EM( COMPACT_DEFERRED, "deferred") \
EM( COMPACT_CONTINUE, "continue") \
- EM( COMPACT_PARTIAL, "partial") \
+ EM( COMPACT_SUCCESS, "success") \
EM( COMPACT_PARTIAL_SKIPPED, "partial_skipped") \
EM( COMPACT_COMPLETE, "complete") \
EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \
diff --git a/include/trace/events/cpuhp.h b/include/trace/events/cpuhp.h
index a72bd93ec7e5..996953db91d7 100644
--- a/include/trace/events/cpuhp.h
+++ b/include/trace/events/cpuhp.h
@@ -33,6 +33,34 @@ TRACE_EVENT(cpuhp_enter,
__entry->cpu, __entry->target, __entry->idx, __entry->fun)
);
+TRACE_EVENT(cpuhp_multi_enter,
+
+ TP_PROTO(unsigned int cpu,
+ int target,
+ int idx,
+ int (*fun)(unsigned int, struct hlist_node *),
+ struct hlist_node *node),
+
+ TP_ARGS(cpu, target, idx, fun, node),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, cpu )
+ __field( int, target )
+ __field( int, idx )
+ __field( void *, fun )
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->target = target;
+ __entry->idx = idx;
+ __entry->fun = fun;
+ ),
+
+ TP_printk("cpu: %04u target: %3d step: %3d (%pf)",
+ __entry->cpu, __entry->target, __entry->idx, __entry->fun)
+);
+
TRACE_EVENT(cpuhp_exit,
TP_PROTO(unsigned int cpu,
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index ff95fd02116f..903a09165bb1 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -58,16 +58,12 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
#define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | WRITE_FLUSH_FUA))
#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
-#define show_bio_type(op, op_flags) show_bio_op(op), \
- show_bio_op_flags(op_flags), show_bio_extra(op_flags)
-
-#define show_bio_op(op) \
- __print_symbolic(op, \
- { READ, "READ" }, \
- { WRITE, "WRITE" })
+#define show_bio_type(op_flags) show_bio_op_flags(op_flags), \
+ show_bio_extra(op_flags)
#define show_bio_op_flags(flags) \
__print_symbolic(F2FS_BIO_FLAG_MASK(flags), \
+ { 0, "WRITE" }, \
{ REQ_RAHEAD, "READAHEAD" }, \
{ READ_SYNC, "READ_SYNC" }, \
{ WRITE_SYNC, "WRITE_SYNC" }, \
@@ -754,12 +750,12 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
),
TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
- "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%si%s, type = %s",
+ "oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s%s, type = %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
(unsigned long long)__entry->old_blkaddr,
(unsigned long long)__entry->new_blkaddr,
- show_bio_type(__entry->op, __entry->op_flags),
+ show_bio_type(__entry->op_flags),
show_block_type(__entry->type))
);
@@ -806,9 +802,9 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
__entry->size = bio->bi_iter.bi_size;
),
- TP_printk("dev = (%d,%d), %s%s%s, %s, sector = %lld, size = %u",
+ TP_printk("dev = (%d,%d), rw = %s%s, %s, sector = %lld, size = %u",
show_dev(__entry),
- show_bio_type(__entry->op, __entry->op_flags),
+ show_bio_type(__entry->op_flags),
show_block_type(__entry->type),
(unsigned long long)__entry->sector,
__entry->size)
diff --git a/include/trace/events/intel_ish.h b/include/trace/events/intel_ish.h
new file mode 100644
index 000000000000..92f7d5b23177
--- /dev/null
+++ b/include/trace/events/intel_ish.h
@@ -0,0 +1,30 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM intel_ish
+
+#if !defined(_TRACE_INTEL_ISH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INTEL_ISH_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(ishtp_dump,
+
+ TP_PROTO(const char *message),
+
+ TP_ARGS(message),
+
+ TP_STRUCT__entry(
+ __string(message, message)
+ ),
+
+ TP_fast_assign(
+ __assign_str(message, message);
+ ),
+
+ TP_printk("%s", __get_str(message))
+);
+
+
+#endif /* _TRACE_INTEL_ISH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
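Like any TRACE_EVENT, this expands to a trace_ishtp_dump() hook once exactly one compilation unit defines CREATE_TRACE_POINTS before including the header; a minimal sketch:

	#define CREATE_TRACE_POINTS
	#include <trace/events/intel_ish.h>

	/* Sketch: emit a free-form message into the intel_ish trace buffer. */
	static void example_note(const char *msg)
	{
		trace_ishtp_dump(msg);
	}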
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index f95f25e786ef..1c41b74581f7 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -75,7 +75,7 @@ TRACE_EVENT(irq_handler_entry,
* @ret: return value
*
* If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
- * @action->handler scuccessully handled this irq. Otherwise, the irq might be
+ * @action->handler successfully handled this irq. Otherwise, the irq might be
* a shared irq line, or the irq was not handled successfully. Can be used in
* conjunction with the irq_handler_entry to understand irq handler latencies.
*/
diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h
index 4cbbcef6baa8..70f02149808c 100644
--- a/include/trace/events/mce.h
+++ b/include/trace/events/mce.h
@@ -20,6 +20,8 @@ TRACE_EVENT(mce_record,
__field( u64, status )
__field( u64, addr )
__field( u64, misc )
+ __field( u64, synd )
+ __field( u64, ipid )
__field( u64, ip )
__field( u64, tsc )
__field( u64, walltime )
@@ -38,6 +40,8 @@ TRACE_EVENT(mce_record,
__entry->status = m->status;
__entry->addr = m->addr;
__entry->misc = m->misc;
+ __entry->synd = m->synd;
+ __entry->ipid = m->ipid;
__entry->ip = m->ip;
__entry->tsc = m->tsc;
__entry->walltime = m->time;
@@ -50,11 +54,12 @@ TRACE_EVENT(mce_record,
__entry->cpuvendor = m->cpuvendor;
),
- TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x",
+ TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR/MISC/SYND: %016Lx/%016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x",
__entry->cpu,
__entry->mcgcap, __entry->mcgstatus,
__entry->bank, __entry->status,
- __entry->addr, __entry->misc,
+ __entry->ipid,
+ __entry->addr, __entry->misc, __entry->synd,
__entry->cs, __entry->ip,
__entry->tsc,
__entry->cpuvendor, __entry->cpuid,
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
new file mode 100644
index 000000000000..0383e5e9a0f3
--- /dev/null
+++ b/include/trace/events/rxrpc.h
@@ -0,0 +1,625 @@
+/* AF_RXRPC tracepoints
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rxrpc
+
+#if !defined(_TRACE_RXRPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RXRPC_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rxrpc_conn,
+ TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
+ int usage, const void *where),
+
+ TP_ARGS(conn, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_connection *, conn )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+ ),
+
+ TP_printk("C=%p %s u=%d sp=%pSR",
+ __entry->conn,
+ rxrpc_conn_traces[__entry->op],
+ __entry->usage,
+ __entry->where)
+ );
+
+TRACE_EVENT(rxrpc_client,
+ TP_PROTO(struct rxrpc_connection *conn, int channel,
+ enum rxrpc_client_trace op),
+
+ TP_ARGS(conn, channel, op),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_connection *, conn )
+ __field(u32, cid )
+ __field(int, channel )
+ __field(int, usage )
+ __field(enum rxrpc_client_trace, op )
+ __field(enum rxrpc_conn_cache_state, cs )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn;
+ __entry->channel = channel;
+ __entry->usage = atomic_read(&conn->usage);
+ __entry->op = op;
+ __entry->cid = conn->proto.cid;
+ __entry->cs = conn->cache_state;
+ ),
+
+ TP_printk("C=%p h=%2d %s %s i=%08x u=%d",
+ __entry->conn,
+ __entry->channel,
+ rxrpc_client_traces[__entry->op],
+ rxrpc_conn_cache_states[__entry->cs],
+ __entry->cid,
+ __entry->usage)
+ );
+
+TRACE_EVENT(rxrpc_call,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
+ int usage, const void *where, const void *aux),
+
+ TP_ARGS(call, op, usage, where, aux),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
+ __field(const void *, aux )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+ __entry->aux = aux;
+ ),
+
+ TP_printk("c=%p %s u=%d sp=%pSR a=%p",
+ __entry->call,
+ rxrpc_call_traces[__entry->op],
+ __entry->usage,
+ __entry->where,
+ __entry->aux)
+ );
+
+TRACE_EVENT(rxrpc_skb,
+ TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
+ int usage, int mod_count, const void *where),
+
+ TP_ARGS(skb, op, usage, mod_count, where),
+
+ TP_STRUCT__entry(
+ __field(struct sk_buff *, skb )
+ __field(enum rxrpc_skb_trace, op )
+ __field(int, usage )
+ __field(int, mod_count )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->skb = skb;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->mod_count = mod_count;
+ __entry->where = where;
+ ),
+
+ TP_printk("s=%p %s u=%d m=%d p=%pSR",
+ __entry->skb,
+ rxrpc_skb_traces[__entry->op],
+ __entry->usage,
+ __entry->mod_count,
+ __entry->where)
+ );
+
+TRACE_EVENT(rxrpc_rx_packet,
+ TP_PROTO(struct rxrpc_skb_priv *sp),
+
+ TP_ARGS(sp),
+
+ TP_STRUCT__entry(
+ __field_struct(struct rxrpc_host_header, hdr )
+ ),
+
+ TP_fast_assign(
+ memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
+ ),
+
+ TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s",
+ __entry->hdr.epoch, __entry->hdr.cid,
+ __entry->hdr.callNumber, __entry->hdr.serviceId,
+ __entry->hdr.serial, __entry->hdr.seq,
+ __entry->hdr.type, __entry->hdr.flags,
+ __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ );
+
+TRACE_EVENT(rxrpc_rx_done,
+ TP_PROTO(int result, int abort_code),
+
+ TP_ARGS(result, abort_code),
+
+ TP_STRUCT__entry(
+ __field(int, result )
+ __field(int, abort_code )
+ ),
+
+ TP_fast_assign(
+ __entry->result = result;
+ __entry->abort_code = abort_code;
+ ),
+
+ TP_printk("r=%d a=%d", __entry->result, __entry->abort_code)
+ );
+
+TRACE_EVENT(rxrpc_abort,
+ TP_PROTO(const char *why, u32 cid, u32 call_id, rxrpc_seq_t seq,
+ int abort_code, int error),
+
+ TP_ARGS(why, cid, call_id, seq, abort_code, error),
+
+ TP_STRUCT__entry(
+ __array(char, why, 4 )
+ __field(u32, cid )
+ __field(u32, call_id )
+ __field(rxrpc_seq_t, seq )
+ __field(int, abort_code )
+ __field(int, error )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->why, why, 4);
+ __entry->cid = cid;
+ __entry->call_id = call_id;
+ __entry->abort_code = abort_code;
+ __entry->error = error;
+ __entry->seq = seq;
+ ),
+
+ TP_printk("%08x:%08x s=%u a=%d e=%d %s",
+ __entry->cid, __entry->call_id, __entry->seq,
+ __entry->abort_code, __entry->error, __entry->why)
+ );
+
+TRACE_EVENT(rxrpc_transmit,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why),
+
+ TP_ARGS(call, why),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_transmit_trace, why )
+ __field(rxrpc_seq_t, tx_hard_ack )
+ __field(rxrpc_seq_t, tx_top )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->tx_hard_ack = call->tx_hard_ack;
+ __entry->tx_top = call->tx_top;
+ ),
+
+ TP_printk("c=%p %s f=%08x n=%u",
+ __entry->call,
+ rxrpc_transmit_traces[__entry->why],
+ __entry->tx_hard_ack + 1,
+ __entry->tx_top - __entry->tx_hard_ack)
+ );
+
+TRACE_EVENT(rxrpc_rx_ack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t first, u8 reason, u8 n_acks),
+
+ TP_ARGS(call, first, reason, n_acks),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, first )
+ __field(u8, reason )
+ __field(u8, n_acks )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->first = first;
+ __entry->reason = reason;
+ __entry->n_acks = n_acks;
+ ),
+
+ TP_printk("c=%p %s f=%08x n=%u",
+ __entry->call,
+ rxrpc_ack_names[__entry->reason],
+ __entry->first,
+ __entry->n_acks)
+ );
+
+TRACE_EVENT(rxrpc_tx_data,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
+
+ TP_ARGS(call, seq, serial, flags, retrans, lose),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, flags )
+ __field(bool, retrans )
+ __field(bool, lose )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->serial = serial;
+ __entry->flags = flags;
+ __entry->retrans = retrans;
+ __entry->lose = lose;
+ ),
+
+ TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s",
+ __entry->call,
+ __entry->serial,
+ __entry->seq,
+ __entry->flags,
+ __entry->retrans ? " *RETRANS*" : "",
+ __entry->lose ? " *LOSE*" : "")
+ );
+
+TRACE_EVENT(rxrpc_tx_ack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
+ u8 reason, u8 n_acks),
+
+ TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, ack_first )
+ __field(rxrpc_serial_t, ack_serial )
+ __field(u8, reason )
+ __field(u8, n_acks )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->serial = serial;
+ __entry->ack_first = ack_first;
+ __entry->ack_serial = ack_serial;
+ __entry->reason = reason;
+ __entry->n_acks = n_acks;
+ ),
+
+ TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u",
+ __entry->call,
+ __entry->serial,
+ rxrpc_ack_names[__entry->reason],
+ __entry->ack_first,
+ __entry->ack_serial,
+ __entry->n_acks)
+ );
+
+TRACE_EVENT(rxrpc_receive,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_receive_trace why,
+ rxrpc_serial_t serial, rxrpc_seq_t seq),
+
+ TP_ARGS(call, why, serial, seq),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_receive_trace, why )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, top )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->seq = seq;
+ __entry->hard_ack = call->rx_hard_ack;
+ __entry->top = call->rx_top;
+ ),
+
+ TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x",
+ __entry->call,
+ rxrpc_receive_traces[__entry->why],
+ __entry->serial,
+ __entry->seq,
+ __entry->hard_ack,
+ __entry->top)
+ );
+
+TRACE_EVENT(rxrpc_recvmsg,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why,
+ rxrpc_seq_t seq, unsigned int offset, unsigned int len,
+ int ret),
+
+ TP_ARGS(call, why, seq, offset, len, ret),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_recvmsg_trace, why )
+ __field(rxrpc_seq_t, seq )
+ __field(unsigned int, offset )
+ __field(unsigned int, len )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->seq = seq;
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d",
+ __entry->call,
+ rxrpc_recvmsg_traces[__entry->why],
+ __entry->seq,
+ __entry->offset,
+ __entry->len,
+ __entry->ret)
+ );
+
+TRACE_EVENT(rxrpc_rtt_tx,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
+ rxrpc_serial_t send_serial),
+
+ TP_ARGS(call, why, send_serial),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_rtt_tx_trace, why )
+ __field(rxrpc_serial_t, send_serial )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->send_serial = send_serial;
+ ),
+
+ TP_printk("c=%p %s sr=%08x",
+ __entry->call,
+ rxrpc_rtt_tx_traces[__entry->why],
+ __entry->send_serial)
+ );
+
+TRACE_EVENT(rxrpc_rtt_rx,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
+ s64 rtt, u8 nr, s64 avg),
+
+ TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_rtt_rx_trace, why )
+ __field(u8, nr )
+ __field(rxrpc_serial_t, send_serial )
+ __field(rxrpc_serial_t, resp_serial )
+ __field(s64, rtt )
+ __field(u64, avg )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->send_serial = send_serial;
+ __entry->resp_serial = resp_serial;
+ __entry->rtt = rtt;
+ __entry->nr = nr;
+ __entry->avg = avg;
+ ),
+
+ TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
+ __entry->call,
+ rxrpc_rtt_rx_traces[__entry->why],
+ __entry->send_serial,
+ __entry->resp_serial,
+ __entry->rtt,
+ __entry->nr,
+ __entry->avg)
+ );
+
+TRACE_EVENT(rxrpc_timer,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+ ktime_t now, unsigned long now_j),
+
+ TP_ARGS(call, why, now, now_j),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_timer_trace, why )
+ __field_struct(ktime_t, now )
+ __field_struct(ktime_t, expire_at )
+ __field_struct(ktime_t, ack_at )
+ __field_struct(ktime_t, resend_at )
+ __field(unsigned long, now_j )
+ __field(unsigned long, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->now = now;
+ __entry->expire_at = call->expire_at;
+ __entry->ack_at = call->ack_at;
+ __entry->resend_at = call->resend_at;
+ __entry->now_j = now_j;
+ __entry->timer = call->timer.expires;
+ ),
+
+ TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+ __entry->call,
+ rxrpc_timer_traces[__entry->why],
+ ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
+ ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
+ ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
+ __entry->timer - __entry->now_j)
+ );
+
+TRACE_EVENT(rxrpc_rx_lose,
+ TP_PROTO(struct rxrpc_skb_priv *sp),
+
+ TP_ARGS(sp),
+
+ TP_STRUCT__entry(
+ __field_struct(struct rxrpc_host_header, hdr )
+ ),
+
+ TP_fast_assign(
+ memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
+ ),
+
+ TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s *LOSE*",
+ __entry->hdr.epoch, __entry->hdr.cid,
+ __entry->hdr.callNumber, __entry->hdr.serviceId,
+ __entry->hdr.serial, __entry->hdr.seq,
+ __entry->hdr.type, __entry->hdr.flags,
+ __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ );
+
+TRACE_EVENT(rxrpc_propose_ack,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
+ u8 ack_reason, rxrpc_serial_t serial, bool immediate,
+ bool background, enum rxrpc_propose_ack_outcome outcome),
+
+ TP_ARGS(call, why, ack_reason, serial, immediate, background,
+ outcome),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_propose_ack_trace, why )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, ack_reason )
+ __field(bool, immediate )
+ __field(bool, background )
+ __field(enum rxrpc_propose_ack_outcome, outcome )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->ack_reason = ack_reason;
+ __entry->immediate = immediate;
+ __entry->background = background;
+ __entry->outcome = outcome;
+ ),
+
+ TP_printk("c=%p %s %s r=%08x i=%u b=%u%s",
+ __entry->call,
+ rxrpc_propose_ack_traces[__entry->why],
+ rxrpc_ack_names[__entry->ack_reason],
+ __entry->serial,
+ __entry->immediate,
+ __entry->background,
+ rxrpc_propose_ack_outcomes[__entry->outcome])
+ );
+
+TRACE_EVENT(rxrpc_retransmit,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, u8 annotation,
+ s64 expiry),
+
+ TP_ARGS(call, seq, annotation, expiry),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(u8, annotation )
+ __field(s64, expiry )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->annotation = annotation;
+ __entry->expiry = expiry;
+ ),
+
+ TP_printk("c=%p q=%x a=%02x xp=%lld",
+ __entry->call,
+ __entry->seq,
+ __entry->annotation,
+ __entry->expiry)
+ );
+
+TRACE_EVENT(rxrpc_congest,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
+ rxrpc_serial_t ack_serial, enum rxrpc_congest_change change),
+
+ TP_ARGS(call, summary, ack_serial, change),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_congest_change, change )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, top )
+ __field(rxrpc_seq_t, lowest_nak )
+ __field(rxrpc_serial_t, ack_serial )
+ __field_struct(struct rxrpc_ack_summary, sum )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->change = change;
+ __entry->hard_ack = call->tx_hard_ack;
+ __entry->top = call->tx_top;
+ __entry->lowest_nak = call->acks_lowest_nak;
+ __entry->ack_serial = ack_serial;
+ memcpy(&__entry->sum, summary, sizeof(__entry->sum));
+ ),
+
+ TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ __entry->call,
+ __entry->ack_serial,
+ rxrpc_ack_names[__entry->sum.ack_reason],
+ __entry->hard_ack,
+ rxrpc_congest_modes[__entry->sum.mode],
+ __entry->sum.cwnd,
+ __entry->sum.ssthresh,
+ __entry->sum.nr_acks, __entry->sum.nr_nacks,
+ __entry->sum.nr_new_acks, __entry->sum.nr_new_nacks,
+ __entry->sum.nr_rot_new_acks,
+ __entry->top - __entry->hard_ack,
+ __entry->sum.cumulative_acks,
+ __entry->sum.dup_acks,
+ __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
+ rxrpc_congest_changes[__entry->change],
+ __entry->sum.retrans_timeo ? " rTxTo" : "")
+ );
+
+#endif /* _TRACE_RXRPC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 58274382a616..8c27db0c5c08 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -72,4 +72,9 @@
#define MAP_HUGE_SHIFT 26
#define MAP_HUGE_MASK 0x3f
+#define PKEY_DISABLE_ACCESS 0x1
+#define PKEY_DISABLE_WRITE 0x2
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE)
+
#endif /* __ASM_GENERIC_MMAN_COMMON_H */
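A hedged userspace sketch of how these bits pair with the new pkey syscalls wired up further down in this patch: glibc wrappers may not exist yet, so raw syscall() is used, and SYS_pkey_alloc/SYS_pkey_mprotect are assumed to resolve to the numbers added in unistd.h below.

	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Sketch: allocate a key that forbids writes, then tag a mapping with it. */
	static int example_protect_read_mostly(void *addr, size_t len)
	{
		long pkey = syscall(SYS_pkey_alloc, 0, PKEY_DISABLE_WRITE);

		if (pkey < 0)
			return -1;

		return syscall(SYS_pkey_mprotect, addr, len,
			       PROT_READ | PROT_WRITE, pkey);
	}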
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index a26415b5151c..dbfee7e86ba6 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -724,9 +724,19 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
#define __NR_pwritev2 287
__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 288
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 289
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 290
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_pkey_get 291
+//__SYSCALL(__NR_pkey_get, sys_pkey_get)
+#define __NR_pkey_set 292
+//__SYSCALL(__NR_pkey_set, sys_pkey_set)
#undef __NR_syscalls
-#define __NR_syscalls 288
+#define __NR_syscalls 291
/*
* All syscalls below here should go away really,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 462246aa200e..d6b5a21f3d3c 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -77,6 +77,10 @@ extern "C" {
#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS (1 << 1)
/* Flag that USWC attributes should be used for GTT */
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC (1 << 2)
+/* Flag that the memory should be in VRAM and cleared */
+#define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3)
+/* Flag that create shadow bo(GTT) while allocating vram bo */
+#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -481,6 +485,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_DEV_INFO 0x16
/* visible vram usage */
#define AMDGPU_INFO_VIS_VRAM_USAGE 0x17
+/* number of TTM buffer evictions */
+#define AMDGPU_INFO_NUM_EVICTIONS 0x18
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -643,6 +649,7 @@ struct drm_amdgpu_info_hw_ip {
* Supported GPU families
*/
#define AMDGPU_FAMILY_UNKNOWN 0
+#define AMDGPU_FAMILY_SI 110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
#define AMDGPU_FAMILY_CI 120 /* Bonaire, Hawaii */
#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 452675fb55d9..b2c52843bc70 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -646,6 +646,7 @@ struct drm_gem_open {
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+#define DRM_CAP_PAGE_FLIP_TARGET 0x11
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 49a72659b801..df0e3504c349 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -520,7 +520,13 @@ struct drm_color_lut {
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
-#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
+#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
+#define DRM_MODE_PAGE_FLIP_TARGET_RELATIVE 0x8
+#define DRM_MODE_PAGE_FLIP_TARGET (DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE | \
+ DRM_MODE_PAGE_FLIP_TARGET_RELATIVE)
+#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT | \
+ DRM_MODE_PAGE_FLIP_ASYNC | \
+ DRM_MODE_PAGE_FLIP_TARGET)
/*
* Request a page flip on the specified crtc.
@@ -543,8 +549,7 @@ struct drm_color_lut {
* 'as soon as possible', meaning that it not delay waiting for vblank.
* This may cause tearing on the screen.
*
- * The reserved field must be zero until we figure out something
- * clever to use it for.
+ * The reserved field must be zero.
*/
struct drm_mode_crtc_page_flip {
@@ -555,6 +560,34 @@ struct drm_mode_crtc_page_flip {
__u64 user_data;
};
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * Same as struct drm_mode_crtc_page_flip, but supports new flags and
+ * re-purposes the reserved field:
+ *
+ * The sequence field must be zero unless either of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is specified. When
+ * the ABSOLUTE flag is specified, the sequence field denotes the absolute
+ * vblank sequence when the flip should take effect. When the RELATIVE
+ * flag is specified, the sequence field denotes the relative (to the
+ * current one when the ioctl is called) vblank sequence when the flip
+ * should take effect. NOTE: DRM_IOCTL_WAIT_VBLANK must still be used to
+ * make sure the vblank sequence before the target one has passed before
+ * calling this ioctl. The purpose of the
+ * DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags is merely to clarify
+ * the target for when code dealing with a page flip runs during a
+ * vertical blank period.
+ */
+
+struct drm_mode_crtc_page_flip_target {
+ __u32 crtc_id;
+ __u32 fb_id;
+ __u32 flags;
+ __u32 sequence;
+ __u64 user_data;
+};
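A hedged userspace sketch of filling the extended struct: issuing it through the existing DRM_IOCTL_MODE_PAGE_FLIP request is an assumption based on the comment that only the reserved field is re-purposed, and drmIoctl() is the usual libdrm wrapper.

	#include <stdint.h>
	#include <xf86drm.h>

	/* Sketch: request a flip to fb_id two vblanks from now (RELATIVE target). */
	static int example_flip_in_two_vblanks(int fd, uint32_t crtc_id,
					       uint32_t fb_id, void *priv)
	{
		struct drm_mode_crtc_page_flip_target flip = {
			.crtc_id   = crtc_id,
			.fb_id     = fb_id,
			.flags     = DRM_MODE_PAGE_FLIP_EVENT |
				     DRM_MODE_PAGE_FLIP_TARGET_RELATIVE,
			.sequence  = 2,
			.user_data = (uint64_t)(uintptr_t)priv,
		};

		return drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
	}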
+
/* create a dumb scanout buffer */
struct drm_mode_create_dumb {
__u32 height;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index d7e81a3886fd..03725fe89859 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -62,6 +62,30 @@ extern "C" {
#define I915_ERROR_UEVENT "ERROR"
#define I915_RESET_UEVENT "RESET"
+/*
+ * MOCS indexes used for GPU surfaces, defining the cacheability of the
+ * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
+ */
+enum i915_mocs_table_index {
+ /*
+ * Not cached anywhere, coherency between CPU and GPU accesses is
+ * guaranteed.
+ */
+ I915_MOCS_UNCACHED,
+ /*
+ * Cacheability and coherency controlled by the kernel automatically
+ * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
+ * usage of the surface (used for display scanout or not).
+ */
+ I915_MOCS_PTE,
+ /*
+ * Cached in all GPU caches available on the platform.
+ * Coherency between CPU and GPU accesses to the surface is not
+ * guaranteed without extra synchronization.
+ */
+ I915_MOCS_CACHED,
+};
+
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -363,6 +387,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_SOFTPIN 37
#define I915_PARAM_HAS_POOLED_EU 38
#define I915_PARAM_MIN_EU_IN_POOL 39
+#define I915_PARAM_MMAP_GTT_VERSION 40
typedef struct drm_i915_getparam {
__s32 param;
@@ -698,15 +723,20 @@ struct drm_i915_gem_exec_object2 {
*/
__u64 offset;
-#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
-#define EXEC_OBJECT_NEEDS_GTT (1<<1)
-#define EXEC_OBJECT_WRITE (1<<2)
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT (1<<1)
+#define EXEC_OBJECT_WRITE (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
-#define EXEC_OBJECT_PINNED (1<<4)
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1)
+#define EXEC_OBJECT_PINNED (1<<4)
+#define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
+/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PAD_TO_SIZE<<1)
__u64 flags;
- __u64 rsvd1;
+ union {
+ __u64 rsvd1;
+ __u64 pad_to_size;
+ };
__u64 rsvd2;
};
@@ -826,7 +856,16 @@ struct drm_i915_gem_busy {
* having flushed any pending activity), and a non-zero return that
* the object is still in-flight on the GPU. (The GPU has not yet
* signaled completion for all pending requests that reference the
- * object.)
+ * object.) An object is guaranteed to become idle eventually (so
+ * long as no new GPU commands are executed upon it). Due to the
+ * asynchronous nature of the hardware, an object reported
+ * as busy may become idle before the ioctl is completed.
+ *
+ * Furthermore, if the object is busy, which engine is busy is only
+ * provided as a guide. There are race conditions which prevent the
+ * report of which engines are busy from being always accurate.
+ * However, the converse is not true. If the object is idle, the
+ * result of the ioctl, that all engines are idle, is accurate.
*
* The returned dword is split into two fields to indicate both
* the engines on which the object is being read, and the
@@ -849,6 +888,11 @@ struct drm_i915_gem_busy {
* execution engines, e.g. multiple media engines, which are
* mapped to the same identifier in the EXECBUFFER2 ioctl and
* so are not separately reported for busyness.
+ *
+ * Caveat emptor:
+ * Only the boolean result of this query is reliable; that is whether
+ * the object is idle or busy. The report of which engines are busy
+ * should be only used as a heuristic.
*/
__u32 busy;
};
@@ -897,6 +941,7 @@ struct drm_i915_gem_caching {
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
+#define I915_TILING_LAST I915_TILING_Y
#define I915_BIT_6_SWIZZLE_NONE 0
#define I915_BIT_6_SWIZZLE_9 1
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 49f778de8e06..8c51e8a0df89 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -42,6 +42,15 @@ extern "C" {
#define MSM_PIPE_2D1 0x02
#define MSM_PIPE_3D0 0x10
+/* The pipe-id just uses the lower bits, so can be OR'd with flags in
+ * the upper 16 bits (which could be extended further, if needed, maybe
+ * we extend/overload the pipe-id some day to deal with multiple rings,
+ * but even then I don't think we need the full lower 16 bits).
+ */
+#define MSM_PIPE_ID_MASK 0xffff
+#define MSM_PIPE_ID(x) ((x) & MSM_PIPE_ID_MASK)
+#define MSM_PIPE_FLAGS(x) ((x) & ~MSM_PIPE_ID_MASK)
+
/* timeouts are specified in clock-monotonic absolute times (to simplify
* restarting interrupted ioctls). The following struct is logically the
* same as 'struct timespec' but 32/64b ABI safe.
@@ -175,17 +184,28 @@ struct drm_msm_gem_submit_bo {
__u64 presumed; /* in/out, presumed buffer address */
};
+/* Valid submit ioctl flags: */
+#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
+#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
+#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
+#define MSM_SUBMIT_FLAGS ( \
+ MSM_SUBMIT_NO_IMPLICIT | \
+ MSM_SUBMIT_FENCE_FD_IN | \
+ MSM_SUBMIT_FENCE_FD_OUT | \
+ 0)
+
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
*/
struct drm_msm_gem_submit {
- __u32 pipe; /* in, MSM_PIPE_x */
+ __u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
__u32 fence; /* out */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
__u64 __user bos; /* in, ptr to array of submit_bo's */
__u64 __user cmds; /* in, ptr to array of submit_cmd's */
+ __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
};
/* The normal way to synchronize with the GPU is just to CPU_PREP on
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 185f8ea2702f..6965d0909554 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -71,6 +71,7 @@ header-y += binfmts.h
header-y += blkpg.h
header-y += blktrace_api.h
header-y += bpf_common.h
+header-y += bpf_perf_event.h
header-y += bpf.h
header-y += bpqether.h
header-y += bsg.h
@@ -335,6 +336,8 @@ header-y += pkt_cls.h
header-y += pkt_sched.h
header-y += pmu.h
header-y += poll.h
+header-y += posix_acl.h
+header-y += posix_acl_xattr.h
header-y += posix_types.h
header-y += ppdev.h
header-y += ppp-comp.h
@@ -396,6 +399,7 @@ header-y += string.h
header-y += suspend_ioctls.h
header-y += swab.h
header-y += synclink.h
+header-y += sync_file.h
header-y += sysctl.h
header-y += sysinfo.h
header-y += target_core_user.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 82e8aa59446b..208df7b44e90 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -329,9 +329,11 @@ enum {
#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001
#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002
#define AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH 0x00000004
+#define AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND 0x00000008
#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME | \
- AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH)
+ AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH | \
+ AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND)
/* deprecated: AUDIT_VERSION_* */
#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL
diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h
new file mode 100644
index 000000000000..021ed331dd71
--- /dev/null
+++ b/include/uapi/linux/auto_dev-ioctl.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2008 Red Hat, Inc. All rights reserved.
+ * Copyright 2008 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ */
+
+#ifndef _UAPI_LINUX_AUTO_DEV_IOCTL_H
+#define _UAPI_LINUX_AUTO_DEV_IOCTL_H
+
+#include <linux/auto_fs.h>
+#include <linux/string.h>
+
+#define AUTOFS_DEVICE_NAME "autofs"
+
+#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
+#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
+
+#define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl)
+
+/*
+ * An ioctl interface for autofs mount point control.
+ */
+
+struct args_protover {
+ __u32 version;
+};
+
+struct args_protosubver {
+ __u32 sub_version;
+};
+
+struct args_openmount {
+ __u32 devid;
+};
+
+struct args_ready {
+ __u32 token;
+};
+
+struct args_fail {
+ __u32 token;
+ __s32 status;
+};
+
+struct args_setpipefd {
+ __s32 pipefd;
+};
+
+struct args_timeout {
+ __u64 timeout;
+};
+
+struct args_requester {
+ __u32 uid;
+ __u32 gid;
+};
+
+struct args_expire {
+ __u32 how;
+};
+
+struct args_askumount {
+ __u32 may_umount;
+};
+
+struct args_ismountpoint {
+ union {
+ struct args_in {
+ __u32 type;
+ } in;
+ struct args_out {
+ __u32 devid;
+ __u32 magic;
+ } out;
+ };
+};
+
+/*
+ * All the ioctls use this structure.
+ * When sending a path size must account for the total length
+ * of the chunk of memory otherwise is is the size of the
+ * structure.
+ */
+
+struct autofs_dev_ioctl {
+ __u32 ver_major;
+ __u32 ver_minor;
+ __u32 size; /* total size of data passed in
+ * including this struct */
+ __s32 ioctlfd; /* automount command fd */
+
+ /* Command parameters */
+
+ union {
+ struct args_protover protover;
+ struct args_protosubver protosubver;
+ struct args_openmount openmount;
+ struct args_ready ready;
+ struct args_fail fail;
+ struct args_setpipefd setpipefd;
+ struct args_timeout timeout;
+ struct args_requester requester;
+ struct args_expire expire;
+ struct args_askumount askumount;
+ struct args_ismountpoint ismountpoint;
+ };
+
+ char path[0];
+};
+
+static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
+{
+ memset(in, 0, sizeof(struct autofs_dev_ioctl));
+ in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
+ in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
+ in->size = sizeof(struct autofs_dev_ioctl);
+ in->ioctlfd = -1;
+}
+
+/*
+ * If you change this make sure you make the corresponding change
+ * to autofs-dev-ioctl.c:lookup_ioctl()
+ */
+enum {
+ /* Get various version info */
+ AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
+ AUTOFS_DEV_IOCTL_PROTOVER_CMD,
+ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
+
+ /* Open mount ioctl fd */
+ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
+
+ /* Close mount ioctl fd */
+ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
+
+ /* Mount/expire status returns */
+ AUTOFS_DEV_IOCTL_READY_CMD,
+ AUTOFS_DEV_IOCTL_FAIL_CMD,
+
+ /* Activate/deactivate autofs mount */
+ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
+ AUTOFS_DEV_IOCTL_CATATONIC_CMD,
+
+ /* Expiry timeout */
+ AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
+
+ /* Get mount last requesting uid and gid */
+ AUTOFS_DEV_IOCTL_REQUESTER_CMD,
+
+ /* Check for eligible expire candidates */
+ AUTOFS_DEV_IOCTL_EXPIRE_CMD,
+
+ /* Request busy status */
+ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
+
+ /* Check if path is a mountpoint */
+ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
+};
+
+#define AUTOFS_IOCTL 0x93
+
+#define AUTOFS_DEV_IOCTL_VERSION \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOVER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_OPENMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_READY \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_FAIL \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_SETPIPEFD \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CATATONIC \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_TIMEOUT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_REQUESTER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_EXPIRE \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
+
+#endif /* _UAPI_LINUX_AUTO_DEV_IOCTL_H */
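Taken together, the argument structure, the init helper and the _IOWR definitions above form a complete control interface. A minimal userspace sketch of driving it; the /dev/autofs path and the bare-bones error handling are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/auto_dev-ioctl.h>

int main(void)
{
	struct autofs_dev_ioctl arg;
	int fd = open("/dev/autofs", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/autofs");
		return 1;
	}

	init_autofs_dev_ioctl(&arg);	/* sets version fields, size, ioctlfd = -1 */
	if (ioctl(fd, AUTOFS_DEV_IOCTL_VERSION, &arg) == 0)
		printf("autofs dev ioctl version %u.%u\n",
		       arg.ver_major, arg.ver_minor);
	else
		perror("AUTOFS_DEV_IOCTL_VERSION");

	close(fd);
	return 0;
}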
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index 9175a1b4dc69..1bfc3ed8b284 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -12,6 +12,7 @@
#define _UAPI_LINUX_AUTO_FS_H
#include <linux/types.h>
+#include <linux/limits.h>
#ifndef __KERNEL__
#include <sys/ioctl.h>
#endif /* __KERNEL__ */
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 0fbf6fd4711b..734fe83ab645 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -23,6 +23,42 @@
#define BATADV_NL_MCAST_GROUP_TPMETER "tpmeter"
/**
+ * enum batadv_tt_client_flags - TT client specific flags
+ * @BATADV_TT_CLIENT_DEL: the client has to be deleted from the table
+ * @BATADV_TT_CLIENT_ROAM: the client roamed to/from another node and the new
+ * update telling its new real location has not been received/sent yet
+ * @BATADV_TT_CLIENT_WIFI: this client is connected through a wifi interface.
+ * This information is used by the "AP Isolation" feature
+ * @BATADV_TT_CLIENT_ISOLA: this client is considered "isolated". This
+ * information is used by the Extended Isolation feature
+ * @BATADV_TT_CLIENT_NOPURGE: this client should never be removed from the table
+ * @BATADV_TT_CLIENT_NEW: this client has been added to the local table but has
+ * not been announced yet
+ * @BATADV_TT_CLIENT_PENDING: this client is marked for removal but it is kept
+ * in the table for one more originator interval for consistency purposes
+ * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be part of
+ * the network but no node has announced it yet
+ *
+ * Bits from 0 to 7 are called _remote flags_ because they are sent on the wire.
+ * Bits from 8 to 15 are called _local flags_ because they are used for local
+ * computations only.
+ *
+ * Bits from 4 to 7 - a subset of remote flags - are ensured to be in sync with
+ * the other nodes in the network. To achieve this goal these flags are included
+ * in the TT CRC computation.
+ */
+enum batadv_tt_client_flags {
+ BATADV_TT_CLIENT_DEL = (1 << 0),
+ BATADV_TT_CLIENT_ROAM = (1 << 1),
+ BATADV_TT_CLIENT_WIFI = (1 << 4),
+ BATADV_TT_CLIENT_ISOLA = (1 << 5),
+ BATADV_TT_CLIENT_NOPURGE = (1 << 8),
+ BATADV_TT_CLIENT_NEW = (1 << 9),
+ BATADV_TT_CLIENT_PENDING = (1 << 10),
+ BATADV_TT_CLIENT_TEMP = (1 << 11),
+};
+
+/**
* enum batadv_nl_attrs - batman-adv netlink attributes
*
* @BATADV_ATTR_UNSPEC: unspecified attribute to catch errors
@@ -40,6 +76,26 @@
* @BATADV_ATTR_TPMETER_BYTES: amount of acked bytes during run
* @BATADV_ATTR_TPMETER_COOKIE: session cookie to match tp_meter session
* @BATADV_ATTR_PAD: attribute used for padding for 64-bit alignment
+ * @BATADV_ATTR_ACTIVE: Flag indicating if the hard interface is active
+ * @BATADV_ATTR_TT_ADDRESS: Client MAC address
+ * @BATADV_ATTR_TT_TTVN: Translation table version
+ * @BATADV_ATTR_TT_LAST_TTVN: Previous translation table version
+ * @BATADV_ATTR_TT_CRC32: CRC32 over translation table
+ * @BATADV_ATTR_TT_VID: VLAN ID
+ * @BATADV_ATTR_TT_FLAGS: Translation table client flags
+ * @BATADV_ATTR_FLAG_BEST: Flags indicating entry is the best
+ * @BATADV_ATTR_LAST_SEEN_MSECS: Time in milliseconds since last seen
+ * @BATADV_ATTR_NEIGH_ADDRESS: Neighbour MAC address
+ * @BATADV_ATTR_TQ: TQ to neighbour
+ * @BATADV_ATTR_THROUGHPUT: Estimated throughput to Neighbour
+ * @BATADV_ATTR_BANDWIDTH_UP: Reported uplink bandwidth
+ * @BATADV_ATTR_BANDWIDTH_DOWN: Reported downlink bandwidth
+ * @BATADV_ATTR_ROUTER: Gateway router MAC address
+ * @BATADV_ATTR_BLA_OWN: Flag indicating own originator
+ * @BATADV_ATTR_BLA_ADDRESS: Bridge loop avoidance claim MAC address
+ * @BATADV_ATTR_BLA_VID: BLA VLAN ID
+ * @BATADV_ATTR_BLA_BACKBONE: BLA gateway originator MAC address
+ * @BATADV_ATTR_BLA_CRC: BLA CRC
* @__BATADV_ATTR_AFTER_LAST: internal use
* @NUM_BATADV_ATTR: total number of batadv_nl_attrs available
* @BATADV_ATTR_MAX: highest attribute number currently defined
@@ -60,6 +116,26 @@ enum batadv_nl_attrs {
BATADV_ATTR_TPMETER_BYTES,
BATADV_ATTR_TPMETER_COOKIE,
BATADV_ATTR_PAD,
+ BATADV_ATTR_ACTIVE,
+ BATADV_ATTR_TT_ADDRESS,
+ BATADV_ATTR_TT_TTVN,
+ BATADV_ATTR_TT_LAST_TTVN,
+ BATADV_ATTR_TT_CRC32,
+ BATADV_ATTR_TT_VID,
+ BATADV_ATTR_TT_FLAGS,
+ BATADV_ATTR_FLAG_BEST,
+ BATADV_ATTR_LAST_SEEN_MSECS,
+ BATADV_ATTR_NEIGH_ADDRESS,
+ BATADV_ATTR_TQ,
+ BATADV_ATTR_THROUGHPUT,
+ BATADV_ATTR_BANDWIDTH_UP,
+ BATADV_ATTR_BANDWIDTH_DOWN,
+ BATADV_ATTR_ROUTER,
+ BATADV_ATTR_BLA_OWN,
+ BATADV_ATTR_BLA_ADDRESS,
+ BATADV_ATTR_BLA_VID,
+ BATADV_ATTR_BLA_BACKBONE,
+ BATADV_ATTR_BLA_CRC,
/* add attributes above here, update the policy in netlink.c */
__BATADV_ATTR_AFTER_LAST,
NUM_BATADV_ATTR = __BATADV_ATTR_AFTER_LAST,
@@ -73,6 +149,15 @@ enum batadv_nl_attrs {
* @BATADV_CMD_GET_MESH_INFO: Query basic information about batman-adv device
* @BATADV_CMD_TP_METER: Start a tp meter session
* @BATADV_CMD_TP_METER_CANCEL: Cancel a tp meter session
+ * @BATADV_CMD_GET_ROUTING_ALGOS: Query the list of routing algorithms.
+ * @BATADV_CMD_GET_HARDIFS: Query list of hard interfaces
+ * @BATADV_CMD_GET_TRANSTABLE_LOCAL: Query list of local translations
+ * @BATADV_CMD_GET_TRANSTABLE_GLOBAL: Query list of global translations
+ * @BATADV_CMD_GET_ORIGINATORS: Query list of originators
+ * @BATADV_CMD_GET_NEIGHBORS: Query list of neighbours
+ * @BATADV_CMD_GET_GATEWAYS: Query list of gateways
+ * @BATADV_CMD_GET_BLA_CLAIM: Query list of bridge loop avoidance claims
+ * @BATADV_CMD_GET_BLA_BACKBONE: Query list of bridge loop avoidance backbones
* @__BATADV_CMD_AFTER_LAST: internal use
* @BATADV_CMD_MAX: highest used command number
*/
@@ -81,6 +166,15 @@ enum batadv_nl_commands {
BATADV_CMD_GET_MESH_INFO,
BATADV_CMD_TP_METER,
BATADV_CMD_TP_METER_CANCEL,
+ BATADV_CMD_GET_ROUTING_ALGOS,
+ BATADV_CMD_GET_HARDIFS,
+ BATADV_CMD_GET_TRANSTABLE_LOCAL,
+ BATADV_CMD_GET_TRANSTABLE_GLOBAL,
+ BATADV_CMD_GET_ORIGINATORS,
+ BATADV_CMD_GET_NEIGHBORS,
+ BATADV_CMD_GET_GATEWAYS,
+ BATADV_CMD_GET_BLA_CLAIM,
+ BATADV_CMD_GET_BLA_BACKBONE,
/* add new commands above here */
__BATADV_CMD_AFTER_LAST,
BATADV_CMD_MAX = __BATADV_CMD_AFTER_LAST - 1
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 9e5fc168c8a3..f09c70b97eca 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -95,6 +95,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SCHED_ACT,
BPF_PROG_TYPE_TRACEPOINT,
BPF_PROG_TYPE_XDP,
+ BPF_PROG_TYPE_PERF_EVENT,
};
#define BPF_PSEUDO_MAP_FD 1
@@ -375,6 +376,56 @@ enum bpf_func_id {
*/
BPF_FUNC_probe_write_user,
+ /**
+ * bpf_current_task_under_cgroup(map, index) - Check cgroup2 membership of current task
+ * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+ * @index: index of the cgroup in the bpf_map
+ * Return:
+ * == 0 current failed the cgroup2 descendant test
+ * == 1 current succeeded the cgroup2 descendant test
+ * < 0 error
+ */
+ BPF_FUNC_current_task_under_cgroup,
+
+ /**
+ * bpf_skb_change_tail(skb, len, flags)
+ * The helper will resize the skb to the given new size,
+ * to be used f.e. with control messages.
+ * @skb: pointer to skb
+ * @len: new skb length
+ * @flags: reserved
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_change_tail,
+
+ /**
+ * bpf_skb_pull_data(skb, len)
+ * The helper will pull in non-linear data in case the
+ * skb is non-linear and not all of len is part of the
+ * linear section. Only needed for read/write with direct
+ * packet access.
+ * @skb: pointer to skb
+ * @len: len to make read/writeable
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_pull_data,
+
+ /**
+ * bpf_csum_update(skb, csum)
+ * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+ * @skb: pointer to skb
+ * @csum: csum to add
+ * Return: csum on success or negative error
+ */
+ BPF_FUNC_csum_update,
+
+ /**
+ * bpf_set_hash_invalid(skb)
+ * Invalidate current skb->hash.
+ * @skb: pointer to skb
+ */
+ BPF_FUNC_set_hash_invalid,
+
__BPF_FUNC_MAX_ID,
};
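The bpf_skb_pull_data() description above implies a specific pattern for direct packet access: pull first, then re-check bounds. A hedged sketch of a tc classifier fragment following that pattern; the hand-written helper declaration and the "classifier" section name follow the convention used by the kernel's BPF samples and are assumptions here:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

/* Hand-declared helper, as the kernel's BPF samples do. */
static int (*bpf_skb_pull_data)(void *skb, __u32 len) =
	(void *) BPF_FUNC_skb_pull_data;

__attribute__((section("classifier"), used))
int pull_then_parse(struct __sk_buff *skb)
{
	void *data, *data_end;

	/* Linearize the first 64 bytes; non-zero means it could not be done. */
	if (bpf_skb_pull_data(skb, 64))
		return TC_ACT_OK;

	/* Packet pointers must be re-loaded after the helper call. */
	data = (void *)(long)skb->data;
	data_end = (void *)(long)skb->data_end;
	if (data + 64 > data_end)
		return TC_ACT_OK;

	/* ... direct reads within data[0..63] are now verifier-safe ... */
	return TC_ACT_OK;
}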
diff --git a/include/uapi/linux/bpf_perf_event.h b/include/uapi/linux/bpf_perf_event.h
new file mode 100644
index 000000000000..067427259820
--- /dev/null
+++ b/include/uapi/linux/bpf_perf_event.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
+#define _UAPI__LINUX_BPF_PERF_EVENT_H__
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct bpf_perf_event_data {
+ struct pt_regs regs;
+ __u64 sample_period;
+};
+
+#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
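A sketch of how a BPF_PROG_TYPE_PERF_EVENT program would consume this context structure; the section name is merely the loader convention and the meaning of the return value depends on how the program is attached, so both are assumptions:

#include <linux/types.h>
#include <linux/bpf_perf_event.h>

__attribute__((section("perf_event"), used))
int sample_period_filter(struct bpf_perf_event_data *ctx)
{
	/* ctx->regs holds the sampled pt_regs; its field names are
	 * architecture-specific, so only the period is inspected here. */
	if (ctx->sample_period > 1000000)
		return 1;
	return 0;
}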
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index ac5eacd3055b..db4c253f8011 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -239,7 +239,17 @@ struct btrfs_ioctl_fs_info_args {
* Used by:
* struct btrfs_ioctl_feature_flags
*/
-#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0)
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0)
+/*
+ * Older kernels (< 4.9) on big-endian systems produced broken free space tree
+ * bitmaps, and btrfs-progs also used to corrupt the free space tree (versions
+ * < 4.7.3). If this bit is clear, then the free space tree cannot be trusted.
+ * btrfs-progs can also intentionally clear this bit to ask the kernel to
+ * rebuild the free space tree, however this might not work on older kernels
+ * that do not know about this bit. If not sure, clear the cache manually on
+ * first mount when booting older kernel versions.
+ */
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID (1ULL << 1)
#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0)
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
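A hedged userspace sketch of acting on the new compat_ro bit: read the feature flags through the existing BTRFS_IOC_GET_FEATURES ioctl and warn when the free space tree exists but is not marked valid. The mount point path is a placeholder:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_feature_flags feat;
	int fd = open("/mnt/btrfs", O_RDONLY);	/* any file/dir on the fs */

	if (fd < 0 || ioctl(fd, BTRFS_IOC_GET_FEATURES, &feat) < 0) {
		perror("BTRFS_IOC_GET_FEATURES");
		return 1;
	}

	if ((feat.compat_ro_flags & BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE) &&
	    !(feat.compat_ro_flags & BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID))
		printf("free space tree present but not marked valid\n");

	close(fd);
	return 0;
}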
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
index 49392564f9d6..260f033a5b54 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/include/uapi/linux/dvb/video.h
@@ -206,7 +206,8 @@ typedef __u16 video_attributes_t;
/* 6 line 21-2 data present in GOP (1=yes, 0=no) */
/* 5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 */
/* 2 source letterboxed (1=yes, 0=no) */
-/* 0 film/camera mode (0=camera, 1=film (625/50 only)) */
+/* 0 film/camera mode (0=camera,
+ * 1=film (625/50 only)) */
/* bit definitions for capabilities: */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index b8f38e84d93a..099a4200732c 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1362,7 +1362,14 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
- ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
+
/* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
* 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1371,7 +1378,7 @@ enum ethtool_link_mode_bit_indices {
*/
__ETHTOOL_LINK_MODE_LAST
- = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
};
#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/include/uapi/linux/falloc.h b/include/uapi/linux/falloc.h
index 3e445a760f14..b075f601919b 100644
--- a/include/uapi/linux/falloc.h
+++ b/include/uapi/linux/falloc.h
@@ -58,4 +58,22 @@
*/
#define FALLOC_FL_INSERT_RANGE 0x20
+/*
+ * FALLOC_FL_UNSHARE_RANGE is used to unshare shared blocks within the
+ * file size without overwriting any existing data. The purpose of this
+ * call is to preemptively reallocate any blocks that are subject to
+ * copy-on-write.
+ *
+ * Different filesystems may implement different limitations on the
+ * granularity of the operation. Most will limit operations to filesystem
+ * block size boundaries, but this boundary may be larger or smaller
+ * depending on the filesystem and/or the configuration of the filesystem
+ * or file.
+ *
+ * This flag can only be used with allocate-mode fallocate, which is
+ * to say that it cannot be used with the punch, zero, collapse, or
+ * insert range modes.
+ */
+#define FALLOC_FL_UNSHARE_RANGE 0x40
+
#endif /* _UAPI_FALLOC_H_ */
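A short sketch of the allocate-mode call described above, preemptively unsharing the first 1 MiB of a reflinked file; the path and length are placeholders:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("/path/to/reflinked-file", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Allocate-mode call: this flag is not combined with the punch, zero,
	 * collapse or insert range modes. */
	if (fallocate(fd, FALLOC_FL_UNSHARE_RANGE, 0, 1 << 20) < 0)
		perror("fallocate(FALLOC_FL_UNSHARE_RANGE)");
	close(fd);
	return 0;
}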
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 3b00f7c8943f..acb2b6152ba0 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -132,6 +132,7 @@ struct inodes_stat_t {
#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
/* These sb flags are internal to the kernel */
+#define MS_NOREMOTELOCK (1<<27)
#define MS_NOSEC (1<<28)
#define MS_BORN (1<<29)
#define MS_ACTIVE (1<<30)
@@ -157,7 +158,8 @@ struct fsxattr {
__u32 fsx_extsize; /* extsize field value (get/set)*/
__u32 fsx_nextents; /* nextents field value (get) */
__u32 fsx_projid; /* project identifier (get/set) */
- unsigned char fsx_pad[12];
+ __u32 fsx_cowextsize; /* CoW extsize field value (get/set)*/
+ unsigned char fsx_pad[8];
};
/*
@@ -178,6 +180,7 @@ struct fsxattr {
#define FS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */
#define FS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */
#define FS_XFLAG_DAX 0x00008000 /* use DAX for IO */
+#define FS_XFLAG_COWEXTSIZE 0x00010000 /* CoW extent size allocator hint */
#define FS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
/* the read-only stuff doesn't really belong here, but any other place is
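A sketch of how the new fsx_cowextsize field and FS_XFLAG_COWEXTSIZE flag would be driven from userspace through the existing FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR ioctls; the path and the 1 MiB hint are placeholders, and whether the hint is honoured is filesystem-specific:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	struct fsxattr fsx;
	int fd = open("/path/to/file", O_RDWR);

	if (fd < 0 || ioctl(fd, FS_IOC_FSGETXATTR, &fsx) < 0) {
		perror("FS_IOC_FSGETXATTR");
		return 1;
	}

	fsx.fsx_xflags |= FS_XFLAG_COWEXTSIZE;
	fsx.fsx_cowextsize = 1 << 20;	/* hint in bytes */

	if (ioctl(fd, FS_IOC_FSSETXATTR, &fsx) < 0)
		perror("FS_IOC_FSSETXATTR");
	close(fd);
	return 0;
}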
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 27e17363263a..42fa977e3b14 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -108,6 +108,10 @@
*
* 7.25
* - add FUSE_PARALLEL_DIROPS
+ *
+ * 7.26
+ * - add FUSE_HANDLE_KILLPRIV
+ * - add FUSE_POSIX_ACL
*/
#ifndef _LINUX_FUSE_H
@@ -143,7 +147,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 25
+#define FUSE_KERNEL_MINOR_VERSION 26
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -238,6 +242,8 @@ struct fuse_file_lock {
* FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
* FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
* FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
+ * FUSE_HANDLE_KILLPRIV: fs handles killing suid/sgid/cap on write/chown/trunc
+ * FUSE_POSIX_ACL: filesystem supports posix acls
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -258,6 +264,8 @@ struct fuse_file_lock {
#define FUSE_WRITEBACK_CACHE (1 << 16)
#define FUSE_NO_OPEN_SUPPORT (1 << 17)
#define FUSE_PARALLEL_DIROPS (1 << 18)
+#define FUSE_HANDLE_KILLPRIV (1 << 19)
+#define FUSE_POSIX_ACL (1 << 20)
/**
* CUSE INIT request/reply flags
diff --git a/include/uapi/linux/hsi/hsi_char.h b/include/uapi/linux/hsi/hsi_char.h
index 76160b4f455d..c00a463d55f9 100644
--- a/include/uapi/linux/hsi/hsi_char.h
+++ b/include/uapi/linux/hsi/hsi_char.h
@@ -20,10 +20,11 @@
* 02110-1301 USA
*/
-
#ifndef __HSI_CHAR_H
#define __HSI_CHAR_H
+#include <linux/types.h>
+
#define HSI_CHAR_MAGIC 'k'
#define HSC_IOW(num, dtype) _IOW(HSI_CHAR_MAGIC, num, dtype)
#define HSC_IOR(num, dtype) _IOR(HSI_CHAR_MAGIC, num, dtype)
@@ -48,16 +49,16 @@
#define HSC_ARB_PRIO 1
struct hsc_rx_config {
- uint32_t mode;
- uint32_t flow;
- uint32_t channels;
+ __u32 mode;
+ __u32 flow;
+ __u32 channels;
};
struct hsc_tx_config {
- uint32_t mode;
- uint32_t channels;
- uint32_t speed;
- uint32_t arb_mode;
+ __u32 mode;
+ __u32 channels;
+ __u32 speed;
+ __u32 arb_mode;
};
#endif /* __HSI_CHAR_H */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index c186f64fffca..ab92bca6d448 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -140,7 +140,7 @@ struct bridge_vlan_xstats {
__u64 tx_bytes;
__u64 tx_packets;
__u16 vid;
- __u16 pad1;
+ __u16 flags;
__u32 pad2;
};
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index a1b5202c5f6b..b4fba662cd32 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -318,6 +318,7 @@ enum {
IFLA_BRPORT_FLUSH,
IFLA_BRPORT_MULTICAST_ROUTER,
IFLA_BRPORT_PAD,
+ IFLA_BRPORT_MCAST_FLOOD,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -463,6 +464,7 @@ enum {
enum ipvlan_mode {
IPVLAN_MODE_L2 = 0,
IPVLAN_MODE_L3,
+ IPVLAN_MODE_L3S,
IPVLAN_MODE_MAX
};
@@ -617,7 +619,7 @@ enum {
enum {
IFLA_VF_UNSPEC,
IFLA_VF_MAC, /* Hardware queue specific attributes */
- IFLA_VF_VLAN,
+ IFLA_VF_VLAN, /* VLAN ID and QoS */
IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */
IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
@@ -629,6 +631,7 @@ enum {
IFLA_VF_TRUST, /* Trust VF */
IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
+ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */
__IFLA_VF_MAX,
};
@@ -645,6 +648,22 @@ struct ifla_vf_vlan {
__u32 qos;
};
+enum {
+ IFLA_VF_VLAN_INFO_UNSPEC,
+ IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */
+ __IFLA_VF_VLAN_INFO_MAX,
+};
+
+#define IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1)
+#define MAX_VLAN_LIST_LEN 1
+
+struct ifla_vf_vlan_info {
+ __u32 vf;
+ __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
+ __u32 qos;
+ __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */
+};
+
struct ifla_vf_tx_rate {
__u32 vf;
__u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
@@ -825,6 +844,7 @@ enum {
IFLA_STATS_LINK_64,
IFLA_STATS_LINK_XSTATS,
IFLA_STATS_LINK_XSTATS_SLAVE,
+ IFLA_STATS_LINK_OFFLOAD_XSTATS,
__IFLA_STATS_MAX,
};
@@ -844,6 +864,14 @@ enum {
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
+/* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */
+enum {
+ IFLA_OFFLOAD_XSTATS_UNSPEC,
+ IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
+ __IFLA_OFFLOAD_XSTATS_MAX
+};
+#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
+
/* XDP section */
enum {
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 777b6cdb1b7b..92f3c8677523 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -27,9 +27,23 @@
#define GRE_SEQ __cpu_to_be16(0x1000)
#define GRE_STRICT __cpu_to_be16(0x0800)
#define GRE_REC __cpu_to_be16(0x0700)
-#define GRE_FLAGS __cpu_to_be16(0x00F8)
+#define GRE_ACK __cpu_to_be16(0x0080)
+#define GRE_FLAGS __cpu_to_be16(0x0078)
#define GRE_VERSION __cpu_to_be16(0x0007)
+#define GRE_IS_CSUM(f) ((f) & GRE_CSUM)
+#define GRE_IS_ROUTING(f) ((f) & GRE_ROUTING)
+#define GRE_IS_KEY(f) ((f) & GRE_KEY)
+#define GRE_IS_SEQ(f) ((f) & GRE_SEQ)
+#define GRE_IS_STRICT(f) ((f) & GRE_STRICT)
+#define GRE_IS_REC(f) ((f) & GRE_REC)
+#define GRE_IS_ACK(f) ((f) & GRE_ACK)
+
+#define GRE_VERSION_0 __cpu_to_be16(0x0000)
+#define GRE_VERSION_1 __cpu_to_be16(0x0001)
+#define GRE_PROTO_PPP __cpu_to_be16(0x880b)
+#define GRE_PPTP_KEY_MASK __cpu_to_be32(0xffff)
+
struct ip_tunnel_parm {
char name[IFNAMSIZ];
int link;
@@ -60,6 +74,7 @@ enum {
IFLA_IPTUN_ENCAP_FLAGS,
IFLA_IPTUN_ENCAP_SPORT,
IFLA_IPTUN_ENCAP_DPORT,
+ IFLA_IPTUN_COLLECT_METADATA,
__IFLA_IPTUN_MAX,
};
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
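The new GRE_IS_*() test macros and the version/protocol constants read naturally in flag-parsing code. A sketch that classifies a GRE flag word and detects the PPTP variant; the 4-byte base header layout declared here is an assumption, since the kernel's internal gre.h is not exported:

#include <stdbool.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/if_tunnel.h>

/* Assumed layout of the GRE base header: flags/version word, then protocol. */
struct gre_base_hdr {
	__be16 flags;
	__be16 protocol;
};

static bool is_pptp_gre(const struct gre_base_hdr *gre)
{
	if ((gre->flags & GRE_VERSION) != GRE_VERSION_1)
		return false;
	if (gre->protocol != GRE_PROTO_PPP)
		return false;
	/* PPTP "enhanced GRE" carries a key; checksum/routing are absent. */
	return GRE_IS_KEY(gre->flags) &&
	       !GRE_IS_CSUM(gre->flags) && !GRE_IS_ROUTING(gre->flags);
}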
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index abbd1dc5d683..509cd961068d 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -73,6 +73,7 @@ enum {
INET_DIAG_BC_S_COND,
INET_DIAG_BC_D_COND,
INET_DIAG_BC_DEV_COND, /* u32 ifindex */
+ INET_DIAG_BC_MARK_COND,
};
struct inet_diag_hostcond {
@@ -82,6 +83,11 @@ struct inet_diag_hostcond {
__be32 addr[0];
};
+struct inet_diag_markcond {
+ __u32 mark;
+ __u32 mask;
+};
+
/* Base info structure. It contains socket identity (addrs/ports/cookie)
* and, alas, the information shown by netstat. */
struct inet_diag_msg {
@@ -117,6 +123,8 @@ enum {
INET_DIAG_LOCALS,
INET_DIAG_PEERS,
INET_DIAG_PAD,
+ INET_DIAG_MARK,
+ INET_DIAG_BBRINFO,
__INET_DIAG_MAX,
};
@@ -150,8 +158,20 @@ struct tcp_dctcp_info {
__u32 dctcp_ab_tot;
};
+/* INET_DIAG_BBRINFO */
+
+struct tcp_bbr_info {
+ /* u64 bw: max-filtered BW (app throughput) estimate in Byte per sec: */
+ __u32 bbr_bw_lo; /* lower 32 bits of bw */
+ __u32 bbr_bw_hi; /* upper 32 bits of bw */
+ __u32 bbr_min_rtt; /* min-filtered RTT in uSec */
+ __u32 bbr_pacing_gain; /* pacing gain shifted left 8 bits */
+ __u32 bbr_cwnd_gain; /* cwnd gain shifted left 8 bits */
+};
+
union tcp_cc_info {
struct tcpvegas_info vegas;
struct tcp_dctcp_info dctcp;
+ struct tcp_bbr_info bbr;
};
#endif /* _UAPI_INET_DIAG_H_ */
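A small sketch of consuming struct tcp_bbr_info once an INET_DIAG_BBRINFO attribute has been received (the netlink plumbing to fetch it is omitted): the bandwidth estimate is split across two u32 fields and the gains are fixed-point values shifted left by 8 bits.

#include <stdio.h>
#include <linux/inet_diag.h>

static void print_bbr_info(const struct tcp_bbr_info *bbr)
{
	__u64 bw = ((__u64)bbr->bbr_bw_hi << 32) | bbr->bbr_bw_lo;

	printf("bw %llu B/s, min_rtt %u us, pacing_gain %.2f, cwnd_gain %.2f\n",
	       (unsigned long long)bw, bbr->bbr_min_rtt,
	       bbr->bbr_pacing_gain / 256.0,	/* gain fields are << 8 */
	       bbr->bbr_cwnd_gain / 256.0);
}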
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index c51494119817..e794f7bee22f 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -248,6 +248,7 @@ struct input_mask {
#define BUS_SPI 0x1C
#define BUS_RMI 0x1D
#define BUS_CEC 0x1E
+#define BUS_INTEL_ISHTP 0x1F
/*
* MT_TOOL types
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 395876060f50..8c2772340c3f 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -177,6 +177,7 @@ enum {
DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
DEVCONF_DROP_UNSOLICITED_NA,
DEVCONF_KEEP_ADDR_ON_DOWN,
+ DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index e398beac67b8..9bd559472c92 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -65,6 +65,7 @@
#define V9FS_MAGIC 0x01021997
#define BDEVFS_MAGIC 0x62646576
+#define DAXFS_MAGIC 0x64646178
#define BINFMTFS_MAGIC 0x42494e4d
#define DEVPTS_SUPER_MAGIC 0x1cd1
#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 190d491d5b13..2168759c1287 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -97,7 +97,7 @@
#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
-/* Bayer - next is 0x3019 */
+/* Bayer - next is 0x3021 */
#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
#define MEDIA_BUS_FMT_SGBRG8_1X8 0x3013
#define MEDIA_BUS_FMT_SGRBG8_1X8 0x3002
@@ -122,6 +122,14 @@
#define MEDIA_BUS_FMT_SGBRG12_1X12 0x3010
#define MEDIA_BUS_FMT_SGRBG12_1X12 0x3011
#define MEDIA_BUS_FMT_SRGGB12_1X12 0x3012
+#define MEDIA_BUS_FMT_SBGGR14_1X14 0x3019
+#define MEDIA_BUS_FMT_SGBRG14_1X14 0x301a
+#define MEDIA_BUS_FMT_SGRBG14_1X14 0x301b
+#define MEDIA_BUS_FMT_SRGGB14_1X14 0x301c
+#define MEDIA_BUS_FMT_SBGGR16_1X16 0x301d
+#define MEDIA_BUS_FMT_SGBRG16_1X16 0x301e
+#define MEDIA_BUS_FMT_SGRBG16_1X16 0x301f
+#define MEDIA_BUS_FMT_SRGGB16_1X16 0x3020
/* JPEG compressed formats - next is 0x4002 */
#define MEDIA_BUS_FMT_JPEG_1X8 0x4001
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 7acf0f634f70..4890787731b8 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -307,6 +307,7 @@ struct media_links_enum {
#define MEDIA_INTF_T_V4L_RADIO (MEDIA_INTF_T_V4L_BASE + 2)
#define MEDIA_INTF_T_V4L_SUBDEV (MEDIA_INTF_T_V4L_BASE + 3)
#define MEDIA_INTF_T_V4L_SWRADIO (MEDIA_INTF_T_V4L_BASE + 4)
+#define MEDIA_INTF_T_V4L_TOUCH (MEDIA_INTF_T_V4L_BASE + 5)
#define MEDIA_INTF_T_ALSA_PCM_CAPTURE (MEDIA_INTF_T_ALSA_BASE)
#define MEDIA_INTF_T_ALSA_PCM_PLAYBACK (MEDIA_INTF_T_ALSA_BASE + 1)
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index 237fac4bc17b..15d8510cdae0 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -48,6 +48,7 @@
#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
#define BMCR_RESET 0x8000 /* Reset to default state */
+#define BMCR_SPEED10 0x0000 /* Select 10Mbps */
/* Basic mode status register. */
#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index ba5a8c79652a..ede5c6a62164 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -21,14 +21,16 @@ struct nd_cmd_smart {
} __packed;
#define ND_SMART_HEALTH_VALID (1 << 0)
-#define ND_SMART_TEMP_VALID (1 << 1)
-#define ND_SMART_SPARES_VALID (1 << 2)
-#define ND_SMART_ALARM_VALID (1 << 3)
-#define ND_SMART_USED_VALID (1 << 4)
-#define ND_SMART_SHUTDOWN_VALID (1 << 5)
-#define ND_SMART_VENDOR_VALID (1 << 6)
-#define ND_SMART_TEMP_TRIP (1 << 0)
-#define ND_SMART_SPARE_TRIP (1 << 1)
+#define ND_SMART_SPARES_VALID (1 << 1)
+#define ND_SMART_USED_VALID (1 << 2)
+#define ND_SMART_TEMP_VALID (1 << 3)
+#define ND_SMART_CTEMP_VALID (1 << 4)
+#define ND_SMART_ALARM_VALID (1 << 9)
+#define ND_SMART_SHUTDOWN_VALID (1 << 10)
+#define ND_SMART_VENDOR_VALID (1 << 11)
+#define ND_SMART_SPARE_TRIP (1 << 0)
+#define ND_SMART_TEMP_TRIP (1 << 1)
+#define ND_SMART_CTEMP_TRIP (1 << 2)
#define ND_SMART_NON_CRITICAL_HEALTH (1 << 0)
#define ND_SMART_CRITICAL_HEALTH (1 << 1)
#define ND_SMART_FATAL_HEALTH (1 << 2)
@@ -37,14 +39,15 @@ struct nd_smart_payload {
__u32 flags;
__u8 reserved0[4];
__u8 health;
- __u16 temperature;
__u8 spares;
- __u8 alarm_flags;
__u8 life_used;
+ __u8 alarm_flags;
+ __u16 temperature;
+ __u16 ctrl_temperature;
+ __u8 reserved1[15];
__u8 shutdown_state;
- __u8 reserved1;
__u32 vendor_size;
- __u8 vendor_data[108];
+ __u8 vendor_data[92];
} __packed;
struct nd_cmd_smart_threshold {
@@ -53,7 +56,8 @@ struct nd_cmd_smart_threshold {
} __packed;
struct nd_smart_threshold_payload {
- __u16 alarm_control;
+ __u8 alarm_control;
+ __u8 reserved0;
__u16 temperature;
__u8 spares;
__u8 reserved[3];
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
new file mode 100644
index 000000000000..8be21e02387d
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -0,0 +1,12 @@
+#ifndef _NETFILTER_NF_LOG_H
+#define _NETFILTER_NF_LOG_H
+
+#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
+#define NF_LOG_TCPOPT 0x02 /* Log TCP options */
+#define NF_LOG_IPOPT 0x04 /* Log IP options */
+#define NF_LOG_UID 0x08 /* Log UID owning local socket */
+#define NF_LOG_NFLOG 0x10 /* Unsupported, don't reuse */
+#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
+#define NF_LOG_MASK 0x2f
+
+#endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index c674ba2563b7..c6c4477c136b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -546,6 +546,35 @@ enum nft_cmp_attributes {
};
#define NFTA_CMP_MAX (__NFTA_CMP_MAX - 1)
+/**
+ * enum nft_range_ops - nf_tables range operator
+ *
+ * @NFT_RANGE_EQ: equal
+ * @NFT_RANGE_NEQ: not equal
+ */
+enum nft_range_ops {
+ NFT_RANGE_EQ,
+ NFT_RANGE_NEQ,
+};
+
+/**
+ * enum nft_range_attributes - nf_tables range expression netlink attributes
+ *
+ * @NFTA_RANGE_SREG: source register of data to compare (NLA_U32: nft_registers)
+ * @NFTA_RANGE_OP: cmp operation (NLA_U32: nft_cmp_ops)
+ * @NFTA_RANGE_FROM_DATA: data range from (NLA_NESTED: nft_data_attributes)
+ * @NFTA_RANGE_TO_DATA: data range to (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_range_attributes {
+ NFTA_RANGE_UNSPEC,
+ NFTA_RANGE_SREG,
+ NFTA_RANGE_OP,
+ NFTA_RANGE_FROM_DATA,
+ NFTA_RANGE_TO_DATA,
+ __NFTA_RANGE_MAX
+};
+#define NFTA_RANGE_MAX (__NFTA_RANGE_MAX - 1)
+
enum nft_lookup_flags {
NFT_LOOKUP_F_INV = (1 << 0),
};
@@ -575,6 +604,10 @@ enum nft_dynset_ops {
NFT_DYNSET_OP_UPDATE,
};
+enum nft_dynset_flags {
+ NFT_DYNSET_F_INV = (1 << 0),
+};
+
/**
* enum nft_dynset_attributes - dynset expression attributes
*
@@ -585,6 +618,7 @@ enum nft_dynset_ops {
* @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32)
* @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
* @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_DYNSET_FLAGS: flags (NLA_U32)
*/
enum nft_dynset_attributes {
NFTA_DYNSET_UNSPEC,
@@ -596,6 +630,7 @@ enum nft_dynset_attributes {
NFTA_DYNSET_TIMEOUT,
NFTA_DYNSET_EXPR,
NFTA_DYNSET_PAD,
+ NFTA_DYNSET_FLAGS,
__NFTA_DYNSET_MAX,
};
#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
@@ -724,6 +759,28 @@ enum nft_meta_keys {
};
/**
+ * enum nft_hash_attributes - nf_tables hash expression netlink attributes
+ *
+ * @NFTA_HASH_SREG: source register (NLA_U32)
+ * @NFTA_HASH_DREG: destination register (NLA_U32)
+ * @NFTA_HASH_LEN: source data length (NLA_U32)
+ * @NFTA_HASH_MODULUS: modulus value (NLA_U32)
+ * @NFTA_HASH_SEED: seed value (NLA_U32)
+ * @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32)
+ */
+enum nft_hash_attributes {
+ NFTA_HASH_UNSPEC,
+ NFTA_HASH_SREG,
+ NFTA_HASH_DREG,
+ NFTA_HASH_LEN,
+ NFTA_HASH_MODULUS,
+ NFTA_HASH_SEED,
+ NFTA_HASH_OFFSET,
+ __NFTA_HASH_MAX,
+};
+#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
+
+/**
* enum nft_meta_attributes - nf_tables meta expression netlink attributes
*
* @NFTA_META_DREG: destination register (NLA_U32)
@@ -866,12 +923,14 @@ enum nft_log_attributes {
* @NFTA_QUEUE_NUM: netlink queue to send messages to (NLA_U16)
* @NFTA_QUEUE_TOTAL: number of queues to load balance packets on (NLA_U16)
* @NFTA_QUEUE_FLAGS: various flags (NLA_U16)
+ * @NFTA_QUEUE_SREG_QNUM: source register of queue number (NLA_U32: nft_registers)
*/
enum nft_queue_attributes {
NFTA_QUEUE_UNSPEC,
NFTA_QUEUE_NUM,
NFTA_QUEUE_TOTAL,
NFTA_QUEUE_FLAGS,
+ NFTA_QUEUE_SREG_QNUM,
__NFTA_QUEUE_MAX
};
#define NFTA_QUEUE_MAX (__NFTA_QUEUE_MAX - 1)
@@ -880,6 +939,25 @@ enum nft_queue_attributes {
#define NFT_QUEUE_FLAG_CPU_FANOUT 0x02 /* use current CPU (no hashing) */
#define NFT_QUEUE_FLAG_MASK 0x03
+enum nft_quota_flags {
+ NFT_QUOTA_F_INV = (1 << 0),
+};
+
+/**
+ * enum nft_quota_attributes - nf_tables quota expression netlink attributes
+ *
+ * @NFTA_QUOTA_BYTES: quota in bytes (NLA_U64)
+ * @NFTA_QUOTA_FLAGS: flags (NLA_U32)
+ */
+enum nft_quota_attributes {
+ NFTA_QUOTA_UNSPEC,
+ NFTA_QUOTA_BYTES,
+ NFTA_QUOTA_FLAGS,
+ NFTA_QUOTA_PAD,
+ __NFTA_QUOTA_MAX
+};
+#define NFTA_QUOTA_MAX (__NFTA_QUOTA_MAX - 1)
+
/**
* enum nft_reject_types - nf_tables reject expression reject types
*
@@ -1051,7 +1129,7 @@ enum nft_gen_attributes {
* @NFTA_TRACE_NFPROTO: nf protocol processed (NLA_U32)
* @NFTA_TRACE_POLICY: policy that decided fate of packet (NLA_U32)
*/
-enum nft_trace_attibutes {
+enum nft_trace_attributes {
NFTA_TRACE_UNSPEC,
NFTA_TRACE_TABLE,
NFTA_TRACE_CHAIN,
@@ -1082,4 +1160,30 @@ enum nft_trace_types {
__NFT_TRACETYPE_MAX
};
#define NFT_TRACETYPE_MAX (__NFT_TRACETYPE_MAX - 1)
+
+/**
+ * enum nft_ng_attributes - nf_tables number generator expression netlink attributes
+ *
+ * @NFTA_NG_DREG: destination register (NLA_U32)
+ * @NFTA_NG_MODULUS: maximum counter value (NLA_U32)
+ * @NFTA_NG_TYPE: operation type (NLA_U32)
+ * @NFTA_NG_OFFSET: offset to be added to the counter (NLA_U32)
+ */
+enum nft_ng_attributes {
+ NFTA_NG_UNSPEC,
+ NFTA_NG_DREG,
+ NFTA_NG_MODULUS,
+ NFTA_NG_TYPE,
+ NFTA_NG_OFFSET,
+ __NFTA_NG_MAX
+};
+#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
+
+enum nft_ng_types {
+ NFT_NG_INCREMENTAL,
+ NFT_NG_RANDOM,
+ __NFT_NG_MAX
+};
+#define NFT_NG_MAX (__NFT_NG_MAX - 1)
+
#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 9df789709abe..6deb8867c5fc 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -231,13 +231,13 @@ enum ctattr_secctx {
enum ctattr_stats_cpu {
CTA_STATS_UNSPEC,
- CTA_STATS_SEARCHED,
+ CTA_STATS_SEARCHED, /* no longer used */
CTA_STATS_FOUND,
- CTA_STATS_NEW,
+ CTA_STATS_NEW, /* no longer used */
CTA_STATS_INVALID,
CTA_STATS_IGNORE,
- CTA_STATS_DELETE,
- CTA_STATS_DELETE_LIST,
+ CTA_STATS_DELETE, /* no longer used */
+ CTA_STATS_DELETE_LIST, /* no longer used */
CTA_STATS_INSERT,
CTA_STATS_INSERT_FAILED,
CTA_STATS_DROP,
diff --git a/include/uapi/linux/netfilter/xt_hashlimit.h b/include/uapi/linux/netfilter/xt_hashlimit.h
index 6db90372f09c..3efc0ca18345 100644
--- a/include/uapi/linux/netfilter/xt_hashlimit.h
+++ b/include/uapi/linux/netfilter/xt_hashlimit.h
@@ -6,6 +6,7 @@
/* timings are in milliseconds. */
#define XT_HASHLIMIT_SCALE 10000
+#define XT_HASHLIMIT_SCALE_v2 1000000llu
/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
* seconds, or one packet every 59 hours.
*/
@@ -63,6 +64,20 @@ struct hashlimit_cfg1 {
__u8 srcmask, dstmask;
};
+struct hashlimit_cfg2 {
+ __u64 avg; /* Average secs between packets * scale */
+ __u64 burst; /* Period multiplier for upper limit. */
+ __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */
+
+ /* user specified */
+ __u32 size; /* how many buckets */
+ __u32 max; /* max number of entries */
+ __u32 gc_interval; /* gc interval */
+ __u32 expire; /* when do entries expire? */
+
+ __u8 srcmask, dstmask;
+};
+
struct xt_hashlimit_mtinfo1 {
char name[IFNAMSIZ];
struct hashlimit_cfg1 cfg;
@@ -71,4 +86,12 @@ struct xt_hashlimit_mtinfo1 {
struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
};
+struct xt_hashlimit_mtinfo2 {
+ char name[NAME_MAX];
+ struct hashlimit_cfg2 cfg;
+
+ /* Used internally by the kernel */
+ struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
+};
+
#endif /* _UAPI_XT_HASHLIMIT_H */
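A sketch of filling the new revision-2 configuration for "100 packets per second, burst 5", hashed on source address. The conversion avg = scale / pps follows the field comment above; treating zero size/max as "let the kernel choose" is an assumption:

#include <limits.h>		/* NAME_MAX, used by the header below */
#include <string.h>
#include <linux/netfilter/xt_hashlimit.h>

static void fill_hashlimit_cfg2(struct hashlimit_cfg2 *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->avg = XT_HASHLIMIT_SCALE_v2 / 100;	/* avg = scale / pkts per sec */
	cfg->burst = 5;
	cfg->mode = XT_HASHLIMIT_HASH_SIP;	/* hash on source address */
	cfg->size = 0;				/* 0: let the kernel choose */
	cfg->max = 0;
	cfg->gc_interval = 1000;		/* milliseconds */
	cfg->expire = 10000;			/* milliseconds */
	cfg->srcmask = 32;
	cfg->dstmask = 32;
}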
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 2b871e0858d9..4ae62796bfde 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -39,8 +39,9 @@
#define NFS4_FH_VOL_MIGRATION 0x0004
#define NFS4_FH_VOL_RENAME 0x0008
-#define NFS4_OPEN_RESULT_CONFIRM 0x0002
-#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
+#define NFS4_OPEN_RESULT_CONFIRM 0x0002
+#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
+#define NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK 0x0020
#define NFS4_SHARE_ACCESS_MASK 0x000F
#define NFS4_SHARE_ACCESS_READ 0x0001
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 220694151434..56368e9b4622 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -48,6 +48,7 @@
#define NL80211_MULTICAST_GROUP_REG "regulatory"
#define NL80211_MULTICAST_GROUP_MLME "mlme"
#define NL80211_MULTICAST_GROUP_VENDOR "vendor"
+#define NL80211_MULTICAST_GROUP_NAN "nan"
#define NL80211_MULTICAST_GROUP_TESTMODE "testmode"
/**
@@ -838,6 +839,41 @@
* not running. The driver indicates the status of the scan through
* cfg80211_scan_done().
*
+ * @NL80211_CMD_START_NAN: Start NAN operation, identified by its
+ * %NL80211_ATTR_WDEV interface. This interface must have been previously
+ * created with %NL80211_CMD_NEW_INTERFACE. After it has been started, the
+ * NAN interface will create or join a cluster. This command must have a
+ * valid %NL80211_ATTR_NAN_MASTER_PREF attribute and an optional
+ * %NL80211_ATTR_NAN_DUAL attribute.
+ * After this command NAN functions can be added.
+ * @NL80211_CMD_STOP_NAN: Stop the NAN operation, identified by
+ * its %NL80211_ATTR_WDEV interface.
+ * @NL80211_CMD_ADD_NAN_FUNCTION: Add a NAN function. The function is defined
+ * with %NL80211_ATTR_NAN_FUNC nested attribute. When called, this
+ * operation returns the strictly positive and unique instance id
+ * (%NL80211_ATTR_NAN_FUNC_INST_ID) and a cookie (%NL80211_ATTR_COOKIE)
+ * of the function upon success.
+ * Since instance IDs can be re-used, this cookie is the right
+ * way to identify the function. This will avoid races when a termination
+ * event is handled by the user space after it has already added a new
+ * function that got the same instance id from the kernel as the one
+ * which just terminated.
+ * This cookie may be used in NAN events even before the command
+ * returns, so userspace shouldn't process NAN events until it processes
+ * the response to this command.
+ * Look at %NL80211_ATTR_SOCKET_OWNER as well.
+ * @NL80211_CMD_DEL_NAN_FUNCTION: Delete a NAN function by cookie.
+ * This command is also used as a notification sent when a NAN function is
+ * terminated. This will contain a %NL80211_ATTR_NAN_FUNC_INST_ID
+ * and %NL80211_ATTR_COOKIE attributes.
+ * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN configuration. NAN
+ * must be operational (%NL80211_CMD_START_NAN was executed).
+ * It must contain at least one of the following attributes:
+ * %NL80211_ATTR_NAN_MASTER_PREF, %NL80211_ATTR_NAN_DUAL.
+ * @NL80211_CMD_NAN_FUNC_MATCH: Notification sent when a match is reported.
+ * This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and
+ * %NL80211_ATTR_COOKIE.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1026,6 +1062,13 @@ enum nl80211_commands {
NL80211_CMD_ABORT_SCAN,
+ NL80211_CMD_START_NAN,
+ NL80211_CMD_STOP_NAN,
+ NL80211_CMD_ADD_NAN_FUNCTION,
+ NL80211_CMD_DEL_NAN_FUNCTION,
+ NL80211_CMD_CHANGE_NAN_CONFIG,
+ NL80211_CMD_NAN_MATCH,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1343,7 +1386,13 @@ enum nl80211_commands {
* enum nl80211_band value is used as the index (nla_type() of the nested
* data. If a band is not included, it will be configured to allow all
* rates based on negotiated supported rates information. This attribute
- * is used with %NL80211_CMD_SET_TX_BITRATE_MASK.
+ * is used with %NL80211_CMD_SET_TX_BITRATE_MASK and with starting AP,
+ * and joining mesh networks (not IBSS yet). In the latter case, it must
+ * specify just a single bitrate, which is to be used for the beacon.
+ * The driver must also specify support for this with the extended
+ * features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+ * NL80211_EXT_FEATURE_BEACON_RATE_HT and
+ * NL80211_EXT_FEATURE_BEACON_RATE_VHT.
*
* @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
* at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
@@ -1733,6 +1782,12 @@ enum nl80211_commands {
* regulatory indoor configuration would be owned by the netlink socket
* that configured the indoor setting, and the indoor operation would be
* cleared when the socket is closed.
+ * If set during NAN interface creation, the interface will be destroyed
+ * if the socket is closed just like any other interface. Moreover, only
+ * the netlink socket that created the interface will be allowed to add
+ * and remove functions. NAN notifications will be sent in unicast to that
+ * socket. Without this attribute, any socket can add functions and the
+ * notifications will be sent to the %NL80211_MCGRP_NAN multicast group.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -1867,6 +1922,21 @@ enum nl80211_commands {
* @NL80211_ATTR_MESH_PEER_AID: Association ID for the mesh peer (u16). This is
* used to pull the stored data for mesh peer in power save state.
*
+ * @NL80211_ATTR_NAN_MASTER_PREF: the master preference to be used by
+ * %NL80211_CMD_START_NAN and optionally with
+ * %NL80211_CMD_CHANGE_NAN_CONFIG. Its type is u8 and it can't be 0.
+ * Also, values 1 and 255 are reserved for certification purposes and
+ * should not be used during a normal device operation.
+ * @NL80211_ATTR_NAN_DUAL: NAN dual band operation config (see
+ * &enum nl80211_nan_dual_band_conf). This attribute is used with
+ * %NL80211_CMD_START_NAN and optionally with
+ * %NL80211_CMD_CHANGE_NAN_CONFIG.
+ * @NL80211_ATTR_NAN_FUNC: a function that can be added to NAN. See
+ * &enum nl80211_nan_func_attributes for description of this nested
+ * attribute.
+ * @NL80211_ATTR_NAN_MATCH: used to report a match. This is a nested attribute.
+ * See &enum nl80211_nan_match_attributes.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2261,6 +2331,11 @@ enum nl80211_attrs {
NL80211_ATTR_MESH_PEER_AID,
+ NL80211_ATTR_NAN_MASTER_PREF,
+ NL80211_ATTR_NAN_DUAL,
+ NL80211_ATTR_NAN_FUNC,
+ NL80211_ATTR_NAN_MATCH,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2339,6 +2414,7 @@ enum nl80211_attrs {
* commands to create and destroy one
* @NL80211_IFTYPE_OCB: Outside Context of a BSS
* This mode corresponds to the MIB variable dot11OCBActivated=true
+ * @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev)
* @NL80211_IFTYPE_MAX: highest interface type number currently defined
* @NUM_NL80211_IFTYPES: number of defined interface types
*
@@ -2359,6 +2435,7 @@ enum nl80211_iftype {
NL80211_IFTYPE_P2P_GO,
NL80211_IFTYPE_P2P_DEVICE,
NL80211_IFTYPE_OCB,
+ NL80211_IFTYPE_NAN,
/* keep last */
NUM_NL80211_IFTYPES,
@@ -4551,6 +4628,12 @@ enum nl80211_feature_flags {
* (if available).
* @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of
* channel dwell time.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate
+ * configuration (AP/mesh), supporting a legacy (non HT/VHT) rate.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate
+ * configuration (AP/mesh) with HT rates.
+ * @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
+ * configuration (AP/mesh) with VHT rates.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4562,6 +4645,9 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_SCAN_START_TIME,
NL80211_EXT_FEATURE_BSS_PARENT_TSF,
NL80211_EXT_FEATURE_SET_SCAN_DWELL,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
+ NL80211_EXT_FEATURE_BEACON_RATE_HT,
+ NL80211_EXT_FEATURE_BEACON_RATE_VHT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -4855,4 +4941,186 @@ enum nl80211_bss_select_attr {
NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1
};
+/**
+ * enum nl80211_nan_dual_band_conf - NAN dual band configuration
+ *
+ * Defines the NAN dual band mode of operation
+ *
+ * @NL80211_NAN_BAND_DEFAULT: device default mode
+ * @NL80211_NAN_BAND_2GHZ: 2.4GHz mode
+ * @NL80211_NAN_BAND_5GHZ: 5GHz mode
+ */
+enum nl80211_nan_dual_band_conf {
+ NL80211_NAN_BAND_DEFAULT = 1 << 0,
+ NL80211_NAN_BAND_2GHZ = 1 << 1,
+ NL80211_NAN_BAND_5GHZ = 1 << 2,
+};
+
+/**
+ * enum nl80211_nan_function_type - NAN function type
+ *
+ * Defines the function type of a NAN function
+ *
+ * @NL80211_NAN_FUNC_PUBLISH: function is publish
+ * @NL80211_NAN_FUNC_SUBSCRIBE: function is subscribe
+ * @NL80211_NAN_FUNC_FOLLOW_UP: function is follow-up
+ */
+enum nl80211_nan_function_type {
+ NL80211_NAN_FUNC_PUBLISH,
+ NL80211_NAN_FUNC_SUBSCRIBE,
+ NL80211_NAN_FUNC_FOLLOW_UP,
+
+ /* keep last */
+ __NL80211_NAN_FUNC_TYPE_AFTER_LAST,
+ NL80211_NAN_FUNC_MAX_TYPE = __NL80211_NAN_FUNC_TYPE_AFTER_LAST - 1,
+};
+
+/**
+ * enum nl80211_nan_publish_type - NAN publish tx type
+ *
+ * Defines how to send publish Service Discovery Frames
+ *
+ * @NL80211_NAN_SOLICITED_PUBLISH: publish function is solicited
+ * @NL80211_NAN_UNSOLICITED_PUBLISH: publish function is unsolicited
+ */
+enum nl80211_nan_publish_type {
+ NL80211_NAN_SOLICITED_PUBLISH = 1 << 0,
+ NL80211_NAN_UNSOLICITED_PUBLISH = 1 << 1,
+};
+
+/**
+ * enum nl80211_nan_func_term_reason - NAN functions termination reason
+ *
+ * Defines termination reasons of a NAN function
+ *
+ * @NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST: requested by user
+ * @NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED: timeout
+ * @NL80211_NAN_FUNC_TERM_REASON_ERROR: errored
+ */
+enum nl80211_nan_func_term_reason {
+ NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST,
+ NL80211_NAN_FUNC_TERM_REASON_TTL_EXPIRED,
+ NL80211_NAN_FUNC_TERM_REASON_ERROR,
+};
+
+#define NL80211_NAN_FUNC_SERVICE_ID_LEN 6
+#define NL80211_NAN_FUNC_SERVICE_SPEC_INFO_MAX_LEN 0xff
+#define NL80211_NAN_FUNC_SRF_MAX_LEN 0xff
+
+/**
+ * enum nl80211_nan_func_attributes - NAN function attributes
+ * @__NL80211_NAN_FUNC_INVALID: invalid
+ * @NL80211_NAN_FUNC_TYPE: &enum nl80211_nan_function_type (u8).
+ * @NL80211_NAN_FUNC_SERVICE_ID: 6 bytes of the service ID hash as
+ * specified in NAN spec. This is a binary attribute.
+ * @NL80211_NAN_FUNC_PUBLISH_TYPE: relevant if the function's type is
+ * publish. Defines the transmission type for the publish Service Discovery
+ * Frame, see &enum nl80211_nan_publish_type. Its type is u8.
+ * @NL80211_NAN_FUNC_PUBLISH_BCAST: relevant if the function is a solicited
+ * publish. Should the solicited publish Service Discovery Frame be sent to
+ * the NAN Broadcast address. This is a flag.
+ * @NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE: relevant if the function's type is
+ * subscribe. Is the subscribe active. This is a flag.
+ * @NL80211_NAN_FUNC_FOLLOW_UP_ID: relevant if the function's type is follow up.
+ * The instance ID for the follow up Service Discovery Frame. This is u8.
+ * @NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID: relevant if the function's type
+ * is follow up. This is a u8.
+ * The requestor instance ID for the follow up Service Discovery Frame.
+ * @NL80211_NAN_FUNC_FOLLOW_UP_DEST: the MAC address of the recipient of the
+ * follow up Service Discovery Frame. This is a binary attribute.
+ * @NL80211_NAN_FUNC_CLOSE_RANGE: is this function limited for devices in a
+ * close range. The range itself (RSSI) is defined by the device.
+ * This is a flag.
+ * @NL80211_NAN_FUNC_TTL: strictly positive number of DWs this function should
+ * stay active. If not present infinite TTL is assumed. This is a u32.
+ * @NL80211_NAN_FUNC_SERVICE_INFO: array of bytes describing the service
+ * specific info. This is a binary attribute.
+ * @NL80211_NAN_FUNC_SRF: Service Receive Filter. This is a nested attribute.
+ * See &enum nl80211_nan_srf_attributes.
+ * @NL80211_NAN_FUNC_RX_MATCH_FILTER: Receive Matching filter. This is a nested
+ * attribute. It is a list of binary values.
+ * @NL80211_NAN_FUNC_TX_MATCH_FILTER: Transmit Matching filter. This is a
+ * nested attribute. It is a list of binary values.
+ * @NL80211_NAN_FUNC_INSTANCE_ID: The instance ID of the function.
+ * Its type is u8 and it cannot be 0.
+ * @NL80211_NAN_FUNC_TERM_REASON: NAN function termination reason.
+ * See &enum nl80211_nan_func_term_reason.
+ *
+ * @NUM_NL80211_NAN_FUNC_ATTR: internal
+ * @NL80211_NAN_FUNC_ATTR_MAX: highest NAN function attribute
+ */
+enum nl80211_nan_func_attributes {
+ __NL80211_NAN_FUNC_INVALID,
+ NL80211_NAN_FUNC_TYPE,
+ NL80211_NAN_FUNC_SERVICE_ID,
+ NL80211_NAN_FUNC_PUBLISH_TYPE,
+ NL80211_NAN_FUNC_PUBLISH_BCAST,
+ NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE,
+ NL80211_NAN_FUNC_FOLLOW_UP_ID,
+ NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID,
+ NL80211_NAN_FUNC_FOLLOW_UP_DEST,
+ NL80211_NAN_FUNC_CLOSE_RANGE,
+ NL80211_NAN_FUNC_TTL,
+ NL80211_NAN_FUNC_SERVICE_INFO,
+ NL80211_NAN_FUNC_SRF,
+ NL80211_NAN_FUNC_RX_MATCH_FILTER,
+ NL80211_NAN_FUNC_TX_MATCH_FILTER,
+ NL80211_NAN_FUNC_INSTANCE_ID,
+ NL80211_NAN_FUNC_TERM_REASON,
+
+ /* keep last */
+ NUM_NL80211_NAN_FUNC_ATTR,
+ NL80211_NAN_FUNC_ATTR_MAX = NUM_NL80211_NAN_FUNC_ATTR - 1
+};
+
+/**
+ * enum nl80211_nan_srf_attributes - NAN Service Response filter attributes
+ * @__NL80211_NAN_SRF_INVALID: invalid
+ * @NL80211_NAN_SRF_INCLUDE: present if the include bit of the SRF is set.
+ * This is a flag.
+ * @NL80211_NAN_SRF_BF: Bloom Filter. Present if and only if
+ * &NL80211_NAN_SRF_MAC_ADDRS isn't present. This attribute is binary.
+ * @NL80211_NAN_SRF_BF_IDX: index of the Bloom Filter. Mandatory if
+ * &NL80211_NAN_SRF_BF is present. This is a u8.
+ * @NL80211_NAN_SRF_MAC_ADDRS: list of MAC addresses for the SRF. Present if
+ * and only if &NL80211_NAN_SRF_BF isn't present. This is a nested
+ * attribute. Each nested attribute is a MAC address.
+ * @NUM_NL80211_NAN_SRF_ATTR: internal
+ * @NL80211_NAN_SRF_ATTR_MAX: highest NAN SRF attribute
+ */
+enum nl80211_nan_srf_attributes {
+ __NL80211_NAN_SRF_INVALID,
+ NL80211_NAN_SRF_INCLUDE,
+ NL80211_NAN_SRF_BF,
+ NL80211_NAN_SRF_BF_IDX,
+ NL80211_NAN_SRF_MAC_ADDRS,
+
+ /* keep last */
+ NUM_NL80211_NAN_SRF_ATTR,
+ NL80211_NAN_SRF_ATTR_MAX = NUM_NL80211_NAN_SRF_ATTR - 1,
+};
+
+/**
+ * enum nl80211_nan_match_attributes - NAN match attributes
+ * @__NL80211_NAN_MATCH_INVALID: invalid
+ * @NL80211_NAN_MATCH_FUNC_LOCAL: the local function that had the
+ * match. This is a nested attribute.
+ * See &enum nl80211_nan_func_attributes.
+ * @NL80211_NAN_MATCH_FUNC_PEER: the peer function
+ * that caused the match. This is a nested attribute.
+ * See &enum nl80211_nan_func_attributes.
+ *
+ * @NUM_NL80211_NAN_MATCH_ATTR: internal
+ * @NL80211_NAN_MATCH_ATTR_MAX: highest NAN match attribute
+ */
+enum nl80211_nan_match_attributes {
+ __NL80211_NAN_MATCH_INVALID,
+ NL80211_NAN_MATCH_FUNC_LOCAL,
+ NL80211_NAN_MATCH_FUNC_PEER,
+
+ /* keep last */
+ NUM_NL80211_NAN_MATCH_ATTR,
+ NL80211_NAN_MATCH_ATTR_MAX = NUM_NL80211_NAN_MATCH_ATTR - 1
+};
+
#endif /* __LINUX_NL80211_H */
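A small sketch that checks NAN start parameters against the rules documented above: the master preference is a non-zero u8 with 1 and 255 avoided as reserved values, and the dual-band configuration is restricted to the defined bits. Building and sending the actual NL80211_CMD_START_NAN netlink message is left out:

#include <stdbool.h>
#include <linux/nl80211.h>

static bool nan_start_args_valid(unsigned int master_pref, unsigned int dual)
{
	/* master preference: u8, non-zero, 1 and 255 reserved */
	if (master_pref == 0 || master_pref == 1 || master_pref >= 255)
		return false;
	/* dual band config: bitmask of the defined values only */
	if (dual & ~(NL80211_NAN_BAND_DEFAULT |
		     NL80211_NAN_BAND_2GHZ |
		     NL80211_NAN_BAND_5GHZ))
		return false;
	return true;
}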
diff --git a/include/uapi/linux/nsfs.h b/include/uapi/linux/nsfs.h
new file mode 100644
index 000000000000..3af617230d1b
--- /dev/null
+++ b/include/uapi/linux/nsfs.h
@@ -0,0 +1,13 @@
+#ifndef __LINUX_NSFS_H
+#define __LINUX_NSFS_H
+
+#include <linux/ioctl.h>
+
+#define NSIO 0xb7
+
+/* Returns a file descriptor that refers to an owning user namespace */
+#define NS_GET_USERNS _IO(NSIO, 0x1)
+/* Returns a file descriptor that refers to a parent namespace */
+#define NS_GET_PARENT _IO(NSIO, 0x2)
+
+#endif /* __LINUX_NSFS_H */
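A sketch of the two new ioctls in use: starting from the current PID namespace, obtain descriptors for its parent namespace and for the user namespace that owns it. NS_GET_PARENT is expected to fail from the initial namespace:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nsfs.h>

int main(void)
{
	int parent, owner;
	int ns = open("/proc/self/ns/pid", O_RDONLY);

	if (ns < 0) {
		perror("open");
		return 1;
	}

	parent = ioctl(ns, NS_GET_PARENT);	/* fails from the initial ns */
	owner = ioctl(ns, NS_GET_USERNS);

	printf("parent ns fd: %d, owning userns fd: %d\n", parent, owner);

	if (parent >= 0)
		close(parent);
	if (owner >= 0)
		close(owner);
	close(ns);
	return 0;
}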
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 54c3b4f4aceb..59ed3992c760 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -605,13 +605,13 @@ struct ovs_action_push_mpls {
* @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set
* (but it will not be set in the 802.1Q header that is pushed).
*
- * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID
- * values are those that the kernel module also parses as 802.1Q headers, to
- * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN
- * from having surprising results.
+ * The @vlan_tpid value is typically %ETH_P_8021Q or %ETH_P_8021AD.
+ * The only acceptable TPID values are those that the kernel module also parses
+ * as 802.1Q or 802.1AD headers, to prevent %OVS_ACTION_ATTR_PUSH_VLAN followed
+ * by %OVS_ACTION_ATTR_POP_VLAN from having surprising results.
*/
struct ovs_action_push_vlan {
- __be16 vlan_tpid; /* 802.1Q TPID. */
+ __be16 vlan_tpid; /* 802.1Q or 802.1ad TPID. */
__be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */
};
@@ -721,9 +721,10 @@ enum ovs_nat_attr {
* is copied from the value to the packet header field, rest of the bits are
* left unchanged. The non-masked value bits must be passed in as zeroes.
* Masking is not supported for the %OVS_KEY_ATTR_TUNNEL attribute.
- * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
- * packet.
- * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
+ * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q or 802.1ad header
+ * onto the packet.
+ * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q or 802.1ad header
+ * from the packet.
* @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
* the nested %OVS_SAMPLE_ATTR_* attributes.
* @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 404095124ae2..e5a2e68b2236 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -612,6 +612,8 @@
*/
#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
+#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
+#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* Atomic 64-bit compare */
#define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */
#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
@@ -619,6 +621,7 @@
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
#define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */
#define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */
#define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */
@@ -671,7 +674,8 @@
#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DPC
+#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -964,4 +968,13 @@
#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
+/* Precision Time Measurement */
+#define PCI_PTM_CAP 0x04 /* PTM Capability */
+#define PCI_PTM_CAP_REQ 0x00000001 /* Requester capable */
+#define PCI_PTM_CAP_ROOT 0x00000004 /* Root capable */
+#define PCI_PTM_GRANULARITY_MASK 0x0000FF00 /* Clock granularity */
+#define PCI_PTM_CTRL 0x08 /* PTM Control */
+#define PCI_PTM_CTRL_ENABLE 0x00000001 /* PTM enable */
+#define PCI_PTM_CTRL_ROOT 0x00000002 /* Root select */
+
#endif /* LINUX_PCI_REGS_H */
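A kernel-side sketch of how the new Precision Time Measurement register definitions would be used: find the PTM extended capability and test the requester bit. The accessors are the long-standing PCI config helpers; error handling is abbreviated:

#include <linux/pci.h>

static bool ptm_requester_capable(struct pci_dev *dev)
{
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
	u32 cap;

	if (!pos)
		return false;
	pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
	return cap & PCI_PTM_CAP_REQ;
}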
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index d1c1ccaba787..8fd715f806a2 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -396,6 +396,7 @@ enum {
TCA_BPF_FD,
TCA_BPF_NAME,
TCA_BPF_FLAGS,
+ TCA_BPF_FLAGS_GEN,
__TCA_BPF_MAX,
};
@@ -428,6 +429,24 @@ enum {
TCA_FLOWER_KEY_UDP_DST, /* be16 */
TCA_FLOWER_FLAGS,
+ TCA_FLOWER_KEY_VLAN_ID, /* be16 */
+ TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_KEY_ID, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
+ TCA_FLOWER_KEY_ENC_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
+
+ TCA_FLOWER_KEY_TCP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */
__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 2382eed50278..df7451d35131 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -792,6 +792,8 @@ enum {
TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
+ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+
__TCA_FQ_MAX
};
@@ -809,7 +811,7 @@ struct tc_fq_qd_stats {
__u32 flows;
__u32 inactive_flows;
__u32 throttled_flows;
- __u32 pad;
+ __u32 unthrottle_latency_ns;
};
/* Heavy-Hitter Filter */
diff --git a/include/uapi/linux/posix_acl.h b/include/uapi/linux/posix_acl.h
new file mode 100644
index 000000000000..1037cb19aa17
--- /dev/null
+++ b/include/uapi/linux/posix_acl.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2002 Andreas Gruenbacher <a.gruenbacher@computer.org>
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ */
+
+#ifndef __UAPI_POSIX_ACL_H
+#define __UAPI_POSIX_ACL_H
+
+#define ACL_UNDEFINED_ID (-1)
+
+/* a_type field in acl_user_posix_entry_t */
+#define ACL_TYPE_ACCESS (0x8000)
+#define ACL_TYPE_DEFAULT (0x4000)
+
+/* e_tag entry in struct posix_acl_entry */
+#define ACL_USER_OBJ (0x01)
+#define ACL_USER (0x02)
+#define ACL_GROUP_OBJ (0x04)
+#define ACL_GROUP (0x08)
+#define ACL_MASK (0x10)
+#define ACL_OTHER (0x20)
+
+/* permissions in the e_perm field */
+#define ACL_READ (0x04)
+#define ACL_WRITE (0x02)
+#define ACL_EXECUTE (0x01)
+
+#endif /* __UAPI_POSIX_ACL_H */
diff --git a/include/uapi/linux/posix_acl_xattr.h b/include/uapi/linux/posix_acl_xattr.h
new file mode 100644
index 000000000000..8b579844109b
--- /dev/null
+++ b/include/uapi/linux/posix_acl_xattr.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2002 Andreas Gruenbacher <a.gruenbacher@computer.org>
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ */
+
+#ifndef __UAPI_POSIX_ACL_XATTR_H
+#define __UAPI_POSIX_ACL_XATTR_H
+
+#include <linux/types.h>
+
+/* Supported ACL a_version fields */
+#define POSIX_ACL_XATTR_VERSION 0x0002
+
+/* An undefined entry e_id value */
+#define ACL_UNDEFINED_ID (-1)
+
+struct posix_acl_xattr_entry {
+ __le16 e_tag;
+ __le16 e_perm;
+ __le32 e_id;
+};
+
+struct posix_acl_xattr_header {
+ __le32 a_version;
+};
+
+#endif /* __UAPI_POSIX_ACL_XATTR_H */
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index 1e5ac4e776da..b4c04842a8c0 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -376,5 +376,13 @@
#define UART_EXAR_TXTRG 0x0a /* Tx FIFO trigger level write-only */
#define UART_EXAR_RXTRG 0x0b /* Rx FIFO trigger level write-only */
+/*
+ * These are definitions for the Altera ALTR_16550_F32/F64/F128
+ * Normalized from 0x100 to 0x40 because of shift by 2 (32 bit regs).
+ */
+#define UART_ALTR_AFR 0x40 /* Additional Features Register */
+#define UART_ALTR_EN_TXFIFO_LW 0x01 /* Enable the TX FIFO Low Watermark */
+#define UART_ALTR_TX_LOW 0x41 /* Tx FIFO Low Watermark */
+
#endif /* _LINUX_SERIAL_REG_H */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 25a9ad8bcef1..e7a31f830690 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -235,6 +235,7 @@ enum
LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */
LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */
LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */
+ LINUX_MIB_TCPMD5FAILURE, /* TCPMD5Failure */
LINUX_MIB_SACKSHIFTED,
LINUX_MIB_SACKMERGED,
LINUX_MIB_SACKSHIFTFALLBACK,
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index 413303d37b56..5b287d6970b3 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -85,15 +85,12 @@ struct sync_file_info {
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ * DOC: SYNC_IOC_FILE_INFO - get detailed information on a sync_file
*
- * Takes a struct sync_file_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_file_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ * Takes a struct sync_file_info. If num_fences is 0, the field is updated
+ * with the actual number of fences. If num_fences is > 0, the system will
+ * use the pointer provided in sync_fence_info to return up to num_fences of
+ * struct sync_fence_info, with detailed fence information.
*/
#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
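
A sketch of the two-step query described above, for an already-open sync_file descriptor; error handling is elided and the calloc/ioctl return values should of course be checked in real code.

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/sync_file.h>

    static struct sync_fence_info *get_fence_info(int fd, __u32 *count)
    {
            struct sync_file_info info = { 0 };
            struct sync_fence_info *fences;

            /* First pass: num_fences == 0, kernel fills in the fence count */
            ioctl(fd, SYNC_IOC_FILE_INFO, &info);

            fences = calloc(info.num_fences, sizeof(*fences));
            info.sync_fence_info = (__u64)(uintptr_t)fences;

            /* Second pass: kernel copies up to num_fences entries */
            ioctl(fd, SYNC_IOC_FILE_INFO, &info);

            *count = info.num_fences;
            return fences;
    }
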
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
index 4ece02a77b9a..cd18360eca24 100644
--- a/include/uapi/linux/tc_act/tc_ife.h
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -32,8 +32,9 @@ enum {
#define IFE_META_HASHID 2
#define IFE_META_PRIO 3
#define IFE_META_QMAP 4
+#define IFE_META_TCINDEX 5
/*Can be overridden at runtime by module option*/
-#define __IFE_META_MAX 5
+#define __IFE_META_MAX 6
#define IFE_META_MAX (__IFE_META_MAX - 1)
#endif
diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h
new file mode 100644
index 000000000000..10fc07da6c69
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_skbmod.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#ifndef __LINUX_TC_SKBMOD_H
+#define __LINUX_TC_SKBMOD_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_SKBMOD 15
+
+#define SKBMOD_F_DMAC 0x1
+#define SKBMOD_F_SMAC 0x2
+#define SKBMOD_F_ETYPE 0x4
+#define SKBMOD_F_SWAPMAC 0x8
+
+struct tc_skbmod {
+ tc_gen;
+ __u64 flags;
+};
+
+enum {
+ TCA_SKBMOD_UNSPEC,
+ TCA_SKBMOD_TM,
+ TCA_SKBMOD_PARMS,
+ TCA_SKBMOD_DMAC,
+ TCA_SKBMOD_SMAC,
+ TCA_SKBMOD_ETYPE,
+ TCA_SKBMOD_PAD,
+ __TCA_SKBMOD_MAX
+};
+#define TCA_SKBMOD_MAX (__TCA_SKBMOD_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000000..890106ff16e6
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_TUNNEL_KEY_H
+#define __LINUX_TC_TUNNEL_KEY_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_TUNNEL_KEY 17
+
+#define TCA_TUNNEL_KEY_ACT_SET 1
+#define TCA_TUNNEL_KEY_ACT_RELEASE 2
+
+struct tc_tunnel_key {
+ tc_gen;
+ int t_action;
+};
+
+enum {
+ TCA_TUNNEL_KEY_UNSPEC,
+ TCA_TUNNEL_KEY_TM,
+ TCA_TUNNEL_KEY_PARMS,
+ TCA_TUNNEL_KEY_ENC_IPV4_SRC, /* be32 */
+ TCA_TUNNEL_KEY_ENC_IPV4_DST, /* be32 */
+ TCA_TUNNEL_KEY_ENC_IPV6_SRC, /* struct in6_addr */
+ TCA_TUNNEL_KEY_ENC_IPV6_DST, /* struct in6_addr */
+ TCA_TUNNEL_KEY_ENC_KEY_ID, /* be64 */
+ TCA_TUNNEL_KEY_PAD,
+ __TCA_TUNNEL_KEY_MAX,
+};
+
+#define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index 31151ff6264f..bddb272b843f 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -16,6 +16,7 @@
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
+#define TCA_VLAN_ACT_MODIFY 3
struct tc_vlan {
tc_gen;
@@ -29,6 +30,7 @@ enum {
TCA_VLAN_PUSH_VLAN_ID,
TCA_VLAN_PUSH_VLAN_PROTOCOL,
TCA_VLAN_PAD,
+ TCA_VLAN_PUSH_VLAN_PRIORITY,
__TCA_VLAN_MAX,
};
#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 482898fc433a..73ac0db487f8 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -167,6 +167,7 @@ struct tcp_info {
__u8 tcpi_backoff;
__u8 tcpi_options;
__u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
+ __u8 tcpi_delivery_rate_app_limited:1;
__u32 tcpi_rto;
__u32 tcpi_ato;
@@ -211,6 +212,8 @@ struct tcp_info {
__u32 tcpi_min_rtt;
__u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */
__u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */
+
+ __u64 tcpi_delivery_rate;
};
/* for TCP_MD5SIG socket option */
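
To show how the new fields are reached from userspace, a small sketch using getsockopt(TCP_INFO); it assumes the program is built against headers that already carry tcpi_delivery_rate and the app-limited bit.

    #include <stdio.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static void print_delivery_rate(int sock)
    {
            struct tcp_info ti;
            socklen_t len = sizeof(ti);

            if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &ti, &len))
                    return;
            printf("delivery rate: %llu B/s%s\n",
                   (unsigned long long)ti.tcpi_delivery_rate,
                   ti.tcpi_delivery_rate_app_limited ? " (app limited)" : "");
    }
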
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 5f3f6d09fb79..f9edd20fe9ba 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -59,6 +59,9 @@ enum {
TIPC_NL_MON_SET,
TIPC_NL_MON_GET,
TIPC_NL_MON_PEER_GET,
+ TIPC_NL_PEER_REMOVE,
+ TIPC_NL_BEARER_ADD,
+ TIPC_NL_UDP_GET_REMOTEIP,
__TIPC_NL_CMD_MAX,
TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1
@@ -98,6 +101,7 @@ enum {
TIPC_NLA_UDP_UNSPEC,
TIPC_NLA_UDP_LOCAL, /* sockaddr_storage */
TIPC_NLA_UDP_REMOTE, /* sockaddr_storage */
+ TIPC_NLA_UDP_MULTI_REMOTEIP, /* flag */
__TIPC_NLA_UDP_MAX,
TIPC_NLA_UDP_MAX = __TIPC_NLA_UDP_MAX - 1
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 108dd7997014..acc63697a0cc 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -21,6 +21,8 @@ enum functionfs_flags {
FUNCTIONFS_HAS_MS_OS_DESC = 8,
FUNCTIONFS_VIRTUAL_ADDR = 16,
FUNCTIONFS_EVENTFD = 32,
+ FUNCTIONFS_ALL_CTRL_RECIP = 64,
+ FUNCTIONFS_CONFIG0_SETUP = 128,
};
 /* Descriptor of a non-audio endpoint */
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 086168e18ca8..f31957166337 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -934,4 +934,16 @@
V4L2_DV_FL_REDUCED_BLANKING) \
}
+/* SDI timings definitions */
+
+/* SMPTE-125M */
+#define V4L2_DV_BT_SDI_720X487I60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(720, 487, 1, \
+ V4L2_DV_HSYNC_POS_POL, \
+ 13500000, 16, 121, 0, 0, 19, 0, 0, 19, 0, \
+ V4L2_DV_BT_STD_SDI, \
+ V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) \
+}
+
#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 724f43e69d03..94f123f3e04e 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -292,13 +292,11 @@ enum v4l2_ycbcr_encoding {
* various colorspaces:
*
* V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
- * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_ADOBERGB and
- * V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
+ * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_SRGB,
+ * V4L2_COLORSPACE_ADOBERGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
*
* V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709
*
- * V4L2_COLORSPACE_SRGB: V4L2_YCBCR_ENC_SYCC
- *
* V4L2_COLORSPACE_BT2020: V4L2_YCBCR_ENC_BT2020
*
* V4L2_COLORSPACE_SMPTE240M: V4L2_YCBCR_ENC_SMPTE240M
@@ -317,8 +315,14 @@ enum v4l2_ycbcr_encoding {
/* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */
V4L2_YCBCR_ENC_XV709 = 4,
- /* sYCC (Y'CbCr encoding of sRGB) */
+#ifndef __KERNEL__
+ /*
+ * sYCC (Y'CbCr encoding of sRGB), identical to ENC_601. It was added
+ * originally due to a misunderstanding of the sYCC standard. It should
+ * not be used, instead use V4L2_YCBCR_ENC_601.
+ */
V4L2_YCBCR_ENC_SYCC = 5,
+#endif
/* BT.2020 Non-constant Luminance Y'CbCr */
V4L2_YCBCR_ENC_BT2020 = 6,
@@ -345,8 +349,8 @@ enum v4l2_quantization {
/*
* The default for R'G'B' quantization is always full range, except
* for the BT2020 colorspace. For Y'CbCr the quantization is always
- * limited range, except for COLORSPACE_JPEG, SYCC, XV601 or XV709:
- * those are full range.
+ * limited range, except for COLORSPACE_JPEG, SRGB, ADOBERGB,
+ * XV601 or XV709: those are full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -361,7 +365,8 @@ enum v4l2_quantization {
#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, colsp, ycbcr_enc) \
(((is_rgb) && (colsp) == V4L2_COLORSPACE_BT2020) ? V4L2_QUANTIZATION_LIM_RANGE : \
(((is_rgb) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
- (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+ (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) || \
+ (colsp) == V4L2_COLORSPACE_ADOBERGB || (colsp) == V4L2_COLORSPACE_SRGB ? \
V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
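
Two worked cases for the updated mapping, evaluated purely from the macro above; the initializers are compile-time constants and are shown only as a sketch to make the new sRGB/AdobeRGB arm visible.

    #include <linux/videodev2.h>

    /* Y'CbCr in the sRGB colorspace: the new "|| V4L2_COLORSPACE_SRGB" arm
     * now yields full range */
    static const int q_ycbcr_srgb =
            V4L2_MAP_QUANTIZATION_DEFAULT(0, V4L2_COLORSPACE_SRGB, V4L2_YCBCR_ENC_601);

    /* Y'CbCr Rec. 709: unchanged, still limited range */
    static const int q_ycbcr_709 =
            V4L2_MAP_QUANTIZATION_DEFAULT(0, V4L2_COLORSPACE_REC709, V4L2_YCBCR_ENC_709);
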
enum v4l2_priority {
@@ -440,6 +445,8 @@ struct v4l2_capability {
#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
+#define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */
+
#define V4L2_CAP_DEVICE_CAPS 0x80000000 /* sets device capabilities field */
/*
@@ -635,6 +642,12 @@ struct v4l2_pix_format {
#define V4L2_SDR_FMT_CS14LE v4l2_fourcc('C', 'S', '1', '4') /* complex s14le */
#define V4L2_SDR_FMT_RU12LE v4l2_fourcc('R', 'U', '1', '2') /* real u12le */
+/* Touch formats - used for Touch devices */
+#define V4L2_TCH_FMT_DELTA_TD16 v4l2_fourcc('T', 'D', '1', '6') /* 16-bit signed deltas */
+#define V4L2_TCH_FMT_DELTA_TD08 v4l2_fourcc('T', 'D', '0', '8') /* 8-bit signed deltas */
+#define V4L2_TCH_FMT_TU16 v4l2_fourcc('T', 'U', '1', '6') /* 16-bit unsigned touch data */
+#define V4L2_TCH_FMT_TU08 v4l2_fourcc('T', 'U', '0', '8') /* 8-bit unsigned touch data */
+
 /* priv field value to indicate that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -1261,6 +1274,7 @@ struct v4l2_bt_timings {
#define V4L2_DV_BT_STD_DMT (1 << 1) /* VESA Discrete Monitor Timings */
#define V4L2_DV_BT_STD_CVT (1 << 2) /* VESA Coordinated Video Timings */
#define V4L2_DV_BT_STD_GTF (1 << 3) /* VESA Generalized Timings Formula */
+#define V4L2_DV_BT_STD_SDI (1 << 4) /* SDI Timings */
/* Flags */
@@ -1292,6 +1306,11 @@ struct v4l2_bt_timings {
* use the range 16-235) as opposed to 0-255. All formats defined in CEA-861
* except for the 640x480 format are CE formats. */
#define V4L2_DV_FL_IS_CE_VIDEO (1 << 4)
+/* Some formats like SMPTE-125M have an interlaced signal with an odd
+ * total height. For these formats, if this flag is set, the first
+ * field has the extra line. If not, it is the second field.
+ */
+#define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE (1 << 5)
/* A few useful defines to calculate the total blanking and frame sizes */
#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
@@ -1401,6 +1420,7 @@ struct v4l2_input {
/* Values for the 'type' field */
#define V4L2_INPUT_TYPE_TUNER 1
#define V4L2_INPUT_TYPE_CAMERA 2
+#define V4L2_INPUT_TYPE_TOUCH 3
/* field 'status' - general */
#define V4L2_IN_ST_NO_POWER 0x00000001 /* Attached device is off */
@@ -1415,6 +1435,8 @@ struct v4l2_input {
/* field 'status' - analog */
#define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */
#define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */
+#define V4L2_IN_ST_NO_V_LOCK 0x00000400 /* No vertical sync lock */
+#define V4L2_IN_ST_NO_STD_LOCK 0x00000800 /* No standard format lock */
/* field 'status' - digital */
#define V4L2_IN_ST_NO_SYNC 0x00010000 /* No synchronization lock */
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 143338978b48..1fc62b239f1b 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -298,7 +298,7 @@ enum xfrm_attr_type_t {
XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */
XFRMA_MARK, /* struct xfrm_mark */
XFRMA_TFCPAD, /* __u32 */
- XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_esn */
+ XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_state_esn */
XFRMA_SA_EXTRA_FLAGS, /* __u32 */
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 4edb0f2b4f9f..f14ab7ff5fee 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -7,3 +7,10 @@ header-y += rdma_netlink.h
header-y += rdma_user_cm.h
header-y += hfi/
header-y += rdma_user_rxe.h
+header-y += cxgb3-abi.h
+header-y += cxgb4-abi.h
+header-y += mlx4-abi.h
+header-y += mlx5-abi.h
+header-y += mthca-abi.h
+header-y += nes-abi.h
+header-y += ocrdma-abi.h
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
new file mode 100644
index 000000000000..48a19bda071b
--- /dev/null
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef CXGB3_ABI_USER_H
+#define CXGB3_ABI_USER_H
+
+#include <linux/types.h>
+
+#define IWCH_UVERBS_ABI_VERSION 1
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+struct iwch_create_cq_req {
+ __u64 user_rptr_addr;
+};
+
+struct iwch_create_cq_resp_v0 {
+ __u64 key;
+ __u32 cqid;
+ __u32 size_log2;
+};
+
+struct iwch_create_cq_resp {
+ __u64 key;
+ __u32 cqid;
+ __u32 size_log2;
+ __u32 memsize;
+ __u32 reserved;
+};
+
+struct iwch_create_qp_resp {
+ __u64 key;
+ __u64 db_key;
+ __u32 qpid;
+ __u32 size_log2;
+ __u32 sq_size_log2;
+ __u32 rq_size_log2;
+};
+
+struct iwch_reg_user_mr_resp {
+ __u32 pbl_addr;
+};
+#endif /* CXGB3_ABI_USER_H */
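
The layout note above ("pass pointers in __u64") looks like the following userspace sketch in practice; the installed header path and the uintptr_t detour are assumptions, not part of the ABI itself.

    #include <stdint.h>
    #include <rdma/cxgb3-abi.h>

    static void fill_create_cq_req(struct iwch_create_cq_req *req, void *user_rptr)
    {
            /* widen through uintptr_t so 32-bit userspace and 64-bit kernels agree */
            req->user_rptr_addr = (__u64)(uintptr_t)user_rptr;
    }
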
diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h
new file mode 100644
index 000000000000..472b15990894
--- /dev/null
+++ b/include/uapi/rdma/cxgb4-abi.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef CXGB4_ABI_USER_H
+#define CXGB4_ABI_USER_H
+
+#include <linux/types.h>
+
+#define C4IW_UVERBS_ABI_VERSION 3
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+struct c4iw_create_cq_resp {
+ __u64 key;
+ __u64 gts_key;
+ __u64 memsize;
+ __u32 cqid;
+ __u32 size;
+ __u32 qid_mask;
+ __u32 reserved; /* explicit padding (optional for i386) */
+};
+
+enum {
+ C4IW_QPF_ONCHIP = (1 << 0)
+};
+
+struct c4iw_create_qp_resp {
+ __u64 ma_sync_key;
+ __u64 sq_key;
+ __u64 rq_key;
+ __u64 sq_db_gts_key;
+ __u64 rq_db_gts_key;
+ __u64 sq_memsize;
+ __u64 rq_memsize;
+ __u32 sqid;
+ __u32 rqid;
+ __u32 sq_size;
+ __u32 rq_size;
+ __u32 qid_mask;
+ __u32 flags;
+};
+
+struct c4iw_alloc_ucontext_resp {
+ __u64 status_page_key;
+ __u32 status_page_size;
+ __u32 reserved; /* explicit padding (optional for i386) */
+};
+#endif /* CXGB4_ABI_USER_H */
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 7f035f4b53b0..25225ebbc7d5 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -224,6 +224,17 @@ struct ib_uverbs_odp_caps {
__u32 reserved;
};
+struct ib_uverbs_rss_caps {
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_UD
+ */
+ __u32 supported_qpts;
+ __u32 max_rwq_indirection_tables;
+ __u32 max_rwq_indirection_table_size;
+ __u32 reserved;
+};
+
struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_query_device_resp base;
__u32 comp_mask;
@@ -232,6 +243,9 @@ struct ib_uverbs_ex_query_device_resp {
__u64 timestamp_mask;
__u64 hca_core_clock; /* in KHZ */
__u64 device_cap_flags_ex;
+ struct ib_uverbs_rss_caps rss_caps;
+ __u32 max_wq_type_rq;
+ __u32 reserved;
};
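
A small sketch of how the supported_qpts bitmap above could be tested after an extended device query; qp_type_supports_rss() is a hypothetical helper, and IB_QPT_UD is the example bit already named in the comment.

    #include <rdma/ib_user_verbs.h>

    static int qp_type_supports_rss(const struct ib_uverbs_ex_query_device_resp *resp,
                                    unsigned int qp_type)
    {
            /* supported_qpts is a bitmap indexed by enum ib_qp_type */
            return !!(resp->rss_caps.supported_qpts & (1u << qp_type));
    }

    /* e.g. qp_type_supports_rss(&resp, IB_QPT_UD) */
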
struct ib_uverbs_query_port {
@@ -834,6 +848,10 @@ struct ib_uverbs_flow_spec_eth {
struct ib_uverbs_flow_ipv4_filter {
__be32 src_ip;
__be32 dst_ip;
+ __u8 proto;
+ __u8 tos;
+ __u8 ttl;
+ __u8 flags;
};
struct ib_uverbs_flow_spec_ipv4 {
@@ -868,8 +886,13 @@ struct ib_uverbs_flow_spec_tcp_udp {
};
struct ib_uverbs_flow_ipv6_filter {
- __u8 src_ip[16];
- __u8 dst_ip[16];
+ __u8 src_ip[16];
+ __u8 dst_ip[16];
+ __be32 flow_label;
+ __u8 next_hdr;
+ __u8 traffic_class;
+ __u8 hop_limit;
+ __u8 reserved;
};
struct ib_uverbs_flow_spec_ipv6 {
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
new file mode 100644
index 000000000000..af431752655c
--- /dev/null
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_ABI_USER_H
+#define MLX4_ABI_USER_H
+
+#include <linux/types.h>
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+
+#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION 3
+#define MLX4_IB_UVERBS_ABI_VERSION 4
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+
+struct mlx4_ib_alloc_ucontext_resp_v3 {
+ __u32 qp_tab_size;
+ __u16 bf_reg_size;
+ __u16 bf_regs_per_page;
+};
+
+struct mlx4_ib_alloc_ucontext_resp {
+ __u32 dev_caps;
+ __u32 qp_tab_size;
+ __u16 bf_reg_size;
+ __u16 bf_regs_per_page;
+ __u32 cqe_size;
+};
+
+struct mlx4_ib_alloc_pd_resp {
+ __u32 pdn;
+ __u32 reserved;
+};
+
+struct mlx4_ib_create_cq {
+ __u64 buf_addr;
+ __u64 db_addr;
+};
+
+struct mlx4_ib_create_cq_resp {
+ __u32 cqn;
+ __u32 reserved;
+};
+
+struct mlx4_ib_resize_cq {
+ __u64 buf_addr;
+};
+
+struct mlx4_ib_create_srq {
+ __u64 buf_addr;
+ __u64 db_addr;
+};
+
+struct mlx4_ib_create_srq_resp {
+ __u32 srqn;
+ __u32 reserved;
+};
+
+struct mlx4_ib_create_qp {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u8 log_sq_bb_count;
+ __u8 log_sq_stride;
+ __u8 sq_no_prefetch;
+ __u8 reserved[5];
+};
+
+#endif /* MLX4_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
new file mode 100644
index 000000000000..f5d0f4e83b59
--- /dev/null
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_ABI_USER_H
+#define MLX5_ABI_USER_H
+
+#include <linux/types.h>
+
+enum {
+ MLX5_QP_FLAG_SIGNATURE = 1 << 0,
+ MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
+};
+
+enum {
+ MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
+};
+
+enum {
+ MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
+};
+
+/* Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define MLX5_IB_UVERBS_ABI_VERSION 1
+
+/* Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+
+struct mlx5_ib_alloc_ucontext_req {
+ __u32 total_num_uuars;
+ __u32 num_low_latency_uuars;
+};
+
+struct mlx5_ib_alloc_ucontext_req_v2 {
+ __u32 total_num_uuars;
+ __u32 num_low_latency_uuars;
+ __u32 flags;
+ __u32 comp_mask;
+ __u8 max_cqe_version;
+ __u8 reserved0;
+ __u16 reserved1;
+ __u32 reserved2;
+};
+
+enum mlx5_ib_alloc_ucontext_resp_mask {
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
+};
+
+enum mlx5_user_cmds_supp_uhw {
+ MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
+};
+
+struct mlx5_ib_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u32 bf_reg_size;
+ __u32 tot_uuars;
+ __u32 cache_line_size;
+ __u16 max_sq_desc_sz;
+ __u16 max_rq_desc_sz;
+ __u32 max_send_wqebb;
+ __u32 max_recv_wr;
+ __u32 max_srq_recv_wr;
+ __u16 num_ports;
+ __u16 reserved1;
+ __u32 comp_mask;
+ __u32 response_length;
+ __u8 cqe_version;
+ __u8 cmds_supp_uhw;
+ __u16 reserved2;
+ __u64 hca_core_clock_offset;
+};
+
+struct mlx5_ib_alloc_pd_resp {
+ __u32 pdn;
+};
+
+struct mlx5_ib_tso_caps {
+ __u32 max_tso; /* Maximum tso payload size in bytes */
+
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_UD
+ */
+ __u32 supported_qpts;
+};
+
+struct mlx5_ib_rss_caps {
+ __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
+ __u8 reserved[7];
+};
+
+struct mlx5_ib_query_device_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ struct mlx5_ib_tso_caps tso_caps;
+ struct mlx5_ib_rss_caps rss_caps;
+};
+
+struct mlx5_ib_create_cq {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 cqe_size;
+ __u32 reserved; /* explicit padding (optional on i386) */
+};
+
+struct mlx5_ib_create_cq_resp {
+ __u32 cqn;
+ __u32 reserved;
+};
+
+struct mlx5_ib_resize_cq {
+ __u64 buf_addr;
+ __u16 cqe_size;
+ __u16 reserved0;
+ __u32 reserved1;
+};
+
+struct mlx5_ib_create_srq {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 flags;
+ __u32 reserved0; /* explicit padding (optional on i386) */
+ __u32 uidx;
+ __u32 reserved1;
+};
+
+struct mlx5_ib_create_srq_resp {
+ __u32 srqn;
+ __u32 reserved;
+};
+
+struct mlx5_ib_create_qp {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 sq_wqe_count;
+ __u32 rq_wqe_count;
+ __u32 rq_wqe_shift;
+ __u32 flags;
+ __u32 uidx;
+ __u32 reserved0;
+ __u64 sq_buf_addr;
+};
+
+/* RX Hash function flags */
+enum mlx5_rx_hash_function_flags {
+ MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
+};
+
+/*
+ * RX Hash flags: these flags select which incoming packet fields participate
+ * in the RX hash. Each flag represents one packet field; when a flag is set,
+ * the corresponding field is included in the RX hash calculation.
+ * Note: the *IPV4 and *IPV6 flags can't be enabled together on the same QP,
+ * and the *TCP and *UDP flags can't be enabled together on the same QP.
+ */
+enum mlx5_rx_hash_fields {
+ MLX5_RX_HASH_SRC_IPV4 = 1 << 0,
+ MLX5_RX_HASH_DST_IPV4 = 1 << 1,
+ MLX5_RX_HASH_SRC_IPV6 = 1 << 2,
+ MLX5_RX_HASH_DST_IPV6 = 1 << 3,
+ MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
+ MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
+ MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
+ MLX5_RX_HASH_DST_PORT_UDP = 1 << 7
+};
+
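
A legal hash-field selection under the constraints noted above (no mixing of IPv4 with IPv6, or TCP with UDP); this is only a sketch of one valid combination for an IPv4/TCP RSS QP, with the installed header path assumed.

    #include <rdma/mlx5-abi.h>

    static const __u64 ipv4_tcp_rx_hash =
            MLX5_RX_HASH_SRC_IPV4 | MLX5_RX_HASH_DST_IPV4 |
            MLX5_RX_HASH_SRC_PORT_TCP | MLX5_RX_HASH_DST_PORT_TCP;

    /* e.g. create_qp_rss.rx_hash_fields_mask = ipv4_tcp_rx_hash; */
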
+struct mlx5_ib_create_qp_rss {
+ __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
+ __u8 rx_key_len; /* valid only for Toeplitz */
+ __u8 reserved[6];
+ __u8 rx_hash_key[128]; /* valid only for Toeplitz */
+ __u32 comp_mask;
+ __u32 reserved1;
+};
+
+struct mlx5_ib_create_qp_resp {
+ __u32 uuar_index;
+};
+
+struct mlx5_ib_alloc_mw {
+ __u32 comp_mask;
+ __u8 num_klms;
+ __u8 reserved1;
+ __u16 reserved2;
+};
+
+struct mlx5_ib_create_wq {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 rq_wqe_count;
+ __u32 rq_wqe_shift;
+ __u32 user_index;
+ __u32 flags;
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
+struct mlx5_ib_create_wq_resp {
+ __u32 response_length;
+ __u32 reserved;
+};
+
+struct mlx5_ib_create_rwq_ind_tbl_resp {
+ __u32 response_length;
+ __u32 reserved;
+};
+
+struct mlx5_ib_modify_wq {
+ __u32 comp_mask;
+ __u32 reserved;
+};
+#endif /* MLX5_ABI_USER_H */
diff --git a/include/uapi/rdma/mthca-abi.h b/include/uapi/rdma/mthca-abi.h
new file mode 100644
index 000000000000..bcbf4ff2f6d1
--- /dev/null
+++ b/include/uapi/rdma/mthca-abi.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MTHCA_ABI_USER_H
+#define MTHCA_ABI_USER_H
+
+#include <linux/types.h>
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define MTHCA_UVERBS_ABI_VERSION 1
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+struct mthca_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u32 uarc_size;
+};
+
+struct mthca_alloc_pd_resp {
+ __u32 pdn;
+ __u32 reserved;
+};
+
+/*
+ * Mark the memory region with a DMA attribute that causes
+ * in-flight DMA to be flushed when the region is written to:
+ */
+#define MTHCA_MR_DMASYNC 0x1
+
+struct mthca_reg_mr {
+ __u32 mr_attrs;
+ __u32 reserved;
+};
+
+struct mthca_create_cq {
+ __u32 lkey;
+ __u32 pdn;
+ __u64 arm_db_page;
+ __u64 set_db_page;
+ __u32 arm_db_index;
+ __u32 set_db_index;
+};
+
+struct mthca_create_cq_resp {
+ __u32 cqn;
+ __u32 reserved;
+};
+
+struct mthca_resize_cq {
+ __u32 lkey;
+ __u32 reserved;
+};
+
+struct mthca_create_srq {
+ __u32 lkey;
+ __u32 db_index;
+ __u64 db_page;
+};
+
+struct mthca_create_srq_resp {
+ __u32 srqn;
+ __u32 reserved;
+};
+
+struct mthca_create_qp {
+ __u32 lkey;
+ __u32 reserved;
+ __u64 sq_db_page;
+ __u64 rq_db_page;
+ __u32 sq_db_index;
+ __u32 rq_db_index;
+};
+#endif /* MTHCA_ABI_USER_H */
diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h
new file mode 100644
index 000000000000..6eb3734394a2
--- /dev/null
+++ b/include/uapi/rdma/nes-abi.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef NES_ABI_USER_H
+#define NES_ABI_USER_H
+
+#include <linux/types.h>
+
+#define NES_ABI_USERSPACE_VER 2
+#define NES_ABI_KERNEL_VER 2
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+
+struct nes_alloc_ucontext_req {
+ __u32 reserved32;
+ __u8 userspace_ver;
+ __u8 reserved8[3];
+};
+
+struct nes_alloc_ucontext_resp {
+ __u32 max_pds; /* maximum pds allowed for this user process */
+ __u32 max_qps; /* maximum qps allowed for this user process */
+ __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
+ __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
+ __u8 kernel_ver;
+ __u8 reserved[2];
+};
+
+struct nes_alloc_pd_resp {
+ __u32 pd_id;
+ __u32 mmap_db_index;
+};
+
+struct nes_create_cq_req {
+ __u64 user_cq_buffer;
+ __u32 mcrqf;
+ __u8 reserved[4];
+};
+
+struct nes_create_qp_req {
+ __u64 user_wqe_buffers;
+ __u64 user_qp_buffer;
+};
+
+enum iwnes_memreg_type {
+ IWNES_MEMREG_TYPE_MEM = 0x0000,
+ IWNES_MEMREG_TYPE_QP = 0x0001,
+ IWNES_MEMREG_TYPE_CQ = 0x0002,
+ IWNES_MEMREG_TYPE_MW = 0x0003,
+ IWNES_MEMREG_TYPE_FMR = 0x0004,
+ IWNES_MEMREG_TYPE_FMEM = 0x0005,
+};
+
+struct nes_mem_reg_req {
+ __u32 reg_type; /* indicates if id is memory, QP or CQ */
+ __u32 reserved;
+};
+
+struct nes_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+ __u32 mmap_db_index;
+ __u32 reserved;
+};
+
+struct nes_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 mmap_sq_db_index;
+ __u32 mmap_rq_db_index;
+ __u32 nes_drv_opt;
+};
+
+#endif /* NES_ABI_USER_H */
diff --git a/include/uapi/rdma/ocrdma-abi.h b/include/uapi/rdma/ocrdma-abi.h
new file mode 100644
index 000000000000..9f28191bef4d
--- /dev/null
+++ b/include/uapi/rdma/ocrdma-abi.h
@@ -0,0 +1,151 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef OCRDMA_ABI_USER_H
+#define OCRDMA_ABI_USER_H
+
+#include <linux/types.h>
+
+#define OCRDMA_ABI_VERSION 2
+#define OCRDMA_BE_ROCE_ABI_VERSION 1
+/* user kernel communication data structures. */
+
+struct ocrdma_alloc_ucontext_resp {
+ __u32 dev_id;
+ __u32 wqe_size;
+ __u32 max_inline_data;
+ __u32 dpp_wqe_size;
+ __u64 ah_tbl_page;
+ __u32 ah_tbl_len;
+ __u32 rqe_size;
+ __u8 fw_ver[32];
+ /* for future use/new features in progress */
+ __u64 rsvd1;
+ __u64 rsvd2;
+};
+
+struct ocrdma_alloc_pd_ureq {
+ __u64 rsvd1;
+};
+
+struct ocrdma_alloc_pd_uresp {
+ __u32 id;
+ __u32 dpp_enabled;
+ __u32 dpp_page_addr_hi;
+ __u32 dpp_page_addr_lo;
+ __u64 rsvd1;
+};
+
+struct ocrdma_create_cq_ureq {
+ __u32 dpp_cq;
+ __u32 rsvd; /* pad */
+};
+
+#define MAX_CQ_PAGES 8
+struct ocrdma_create_cq_uresp {
+ __u32 cq_id;
+ __u32 page_size;
+ __u32 num_pages;
+ __u32 max_hw_cqe;
+ __u64 page_addr[MAX_CQ_PAGES];
+ __u64 db_page_addr;
+ __u32 db_page_size;
+ __u32 phase_change;
+ /* for future use/new features in progress */
+ __u64 rsvd1;
+ __u64 rsvd2;
+};
+
+#define MAX_QP_PAGES 8
+#define MAX_UD_AV_PAGES 8
+
+struct ocrdma_create_qp_ureq {
+ __u8 enable_dpp_cq;
+ __u8 rsvd;
+ __u16 dpp_cq_id;
+ __u32 rsvd1; /* pad */
+};
+
+struct ocrdma_create_qp_uresp {
+ __u16 qp_id;
+ __u16 sq_dbid;
+ __u16 rq_dbid;
+ __u16 resv0; /* pad */
+ __u32 sq_page_size;
+ __u32 rq_page_size;
+ __u32 num_sq_pages;
+ __u32 num_rq_pages;
+ __u64 sq_page_addr[MAX_QP_PAGES];
+ __u64 rq_page_addr[MAX_QP_PAGES];
+ __u64 db_page_addr;
+ __u32 db_page_size;
+ __u32 dpp_credit;
+ __u32 dpp_offset;
+ __u32 num_wqe_allocated;
+ __u32 num_rqe_allocated;
+ __u32 db_sq_offset;
+ __u32 db_rq_offset;
+ __u32 db_shift;
+ __u64 rsvd[11];
+} __packed;
+
+struct ocrdma_create_srq_uresp {
+ __u16 rq_dbid;
+ __u16 resv0; /* pad */
+ __u32 resv1;
+
+ __u32 rq_page_size;
+ __u32 num_rq_pages;
+
+ __u64 rq_page_addr[MAX_QP_PAGES];
+ __u64 db_page_addr;
+
+ __u32 db_page_size;
+ __u32 num_rqe_allocated;
+ __u32 db_rq_offset;
+ __u32 db_shift;
+
+ __u64 rsvd2;
+ __u64 rsvd3;
+};
+
+#endif /* OCRDMA_ABI_USER_H */
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
new file mode 100644
index 000000000000..75c270d839c8
--- /dev/null
+++ b/include/uapi/rdma/qedr-abi.h
@@ -0,0 +1,106 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_USER_H__
+#define __QEDR_USER_H__
+
+#include <linux/types.h>
+
+#define QEDR_ABI_VERSION (8)
+
+/* user kernel communication data structures. */
+
+struct qedr_alloc_ucontext_resp {
+ __u64 db_pa;
+ __u32 db_size;
+
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_srq_wr;
+ __u32 sges_per_send_wr;
+ __u32 sges_per_recv_wr;
+ __u32 sges_per_srq_wr;
+ __u32 max_cqes;
+};
+
+struct qedr_alloc_pd_ureq {
+ __u64 rsvd1;
+};
+
+struct qedr_alloc_pd_uresp {
+ __u32 pd_id;
+};
+
+struct qedr_create_cq_ureq {
+ __u64 addr;
+ __u64 len;
+};
+
+struct qedr_create_cq_uresp {
+ __u32 db_offset;
+ __u16 icid;
+};
+
+struct qedr_create_qp_ureq {
+ __u32 qp_handle_hi;
+ __u32 qp_handle_lo;
+
+ /* SQ */
+ /* user space virtual address of SQ buffer */
+ __u64 sq_addr;
+
+ /* length of SQ buffer */
+ __u64 sq_len;
+
+ /* RQ */
+ /* user space virtual address of RQ buffer */
+ __u64 rq_addr;
+
+ /* length of RQ buffer */
+ __u64 rq_len;
+};
+
+struct qedr_create_qp_uresp {
+ __u32 qp_id;
+ __u32 atomic_supported;
+
+ /* SQ */
+ __u32 sq_db_offset;
+ __u16 sq_icid;
+
+ /* RQ */
+ __u32 rq_db_offset;
+ __u16 rq_icid;
+
+ __u32 rq_db2_offset;
+};
+
+#endif /* __QEDR_USER_H__ */
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
index 2302f3ce5f86..6bf1f8a022b1 100644
--- a/include/uapi/scsi/cxlflash_ioctl.h
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -39,19 +39,28 @@ struct dk_cxlflash_hdr {
* at this time, this provides future flexibility.
*/
#define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL
+#define DK_CXLFLASH_APP_CLOSE_ADAP_FD 0x0000000000000002ULL
/*
- * Notes:
- * -----
+ * General Notes:
+ * -------------
* The 'context_id' field of all ioctl structures contains the context
* identifier for a context in the lower 32-bits (upper 32-bits are not
* to be used when identifying a context to the AFU). That said, the value
* in its entirety (all 64-bits) is to be treated as an opaque cookie and
* should be presented as such when issuing ioctls.
+ */
+
+/*
+ * DK_CXLFLASH_ATTACH Notes:
+ * ------------------------
+ * Read/write access permissions are specified via the O_RDONLY, O_WRONLY,
+ * and O_RDWR flags defined in the fcntl.h header file.
*
- * For DK_CXLFLASH_ATTACH ioctl, user specifies read/write access
- * permissions via the O_RDONLY, O_WRONLY, and O_RDWR flags defined in
- * the fcntl.h header file.
+ * A valid adapter file descriptor (fd >= 0) is only returned on the initial
+ * (successful) attach of a context. When a context is shared (reused), the user
+ * is expected to already 'know' the adapter file descriptor associated with the
+ * context.
*/
#define DK_CXLFLASH_ATTACH_REUSE_CONTEXT 0x8000000000000000ULL
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild
index 691984cb0b91..9578d8bdbf31 100644
--- a/include/uapi/sound/Kbuild
+++ b/include/uapi/sound/Kbuild
@@ -13,3 +13,4 @@ header-y += sb16_csp.h
header-y += sfnt_info.h
header-y += tlv.h
header-y += usb_stream.h
+header-y += snd_sst_tokens.h
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index e4701a3c6331..33d00a4ce656 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -83,7 +83,7 @@
#define SND_SOC_TPLG_NUM_TEXTS 16
/* ABI version */
-#define SND_SOC_TPLG_ABI_VERSION 0x4
+#define SND_SOC_TPLG_ABI_VERSION 0x5
/* Max size of TLV data */
#define SND_SOC_TPLG_TLV_SIZE 32
@@ -105,7 +105,8 @@
#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
#define SND_SOC_TPLG_TYPE_BACKEND_LINK 10
#define SND_SOC_TPLG_TYPE_PDATA 11
-#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA
+#define SND_SOC_TPLG_TYPE_BE_DAI 12
+#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_BE_DAI
/* vendor block IDs - please add new vendor types to end */
#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
@@ -124,6 +125,11 @@
#define SND_SOC_TPLG_TUPLE_TYPE_WORD 4
#define SND_SOC_TPLG_TUPLE_TYPE_SHORT 5
+/* BE DAI flags */
+#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_RATES (1 << 0)
+#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS (1 << 1)
+#define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2)
+
/*
* Block Header.
* This header precedes all object and object arrays below.
@@ -251,6 +257,7 @@ struct snd_soc_tplg_stream_caps {
__le32 period_size_max; /* max period size bytes */
__le32 buffer_size_min; /* min buffer size bytes */
__le32 buffer_size_max; /* max buffer size bytes */
+ __le32 sig_bits; /* number of bits of content */
} __attribute__((packed));
/*
@@ -285,6 +292,8 @@ struct snd_soc_tplg_manifest {
__le32 graph_elems; /* number of graph elements */
__le32 pcm_elems; /* number of PCM elements */
__le32 dai_link_elems; /* number of DAI link elements */
+ __le32 be_dai_elems; /* number of BE DAI elements */
+ __le32 reserved[20]; /* reserved for new ABI element types */
struct snd_soc_tplg_private priv;
} __attribute__((packed));
@@ -450,4 +459,26 @@ struct snd_soc_tplg_link_config {
 	struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX];	/* supported configs for playback and capture */
__le32 num_streams; /* number of streams */
} __attribute__((packed));
+
+/*
+ * Describes SW/FW specific features of BE DAI.
+ *
+ * File block representation for BE DAI :-
+ * +-----------------------------------+-----+
+ * | struct snd_soc_tplg_hdr | 1 |
+ * +-----------------------------------+-----+
+ * | struct snd_soc_tplg_be_dai | N |
+ * +-----------------------------------+-----+
+ */
+struct snd_soc_tplg_be_dai {
+ __le32 size; /* in bytes of this structure */
+ char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* name - used to match */
+ __le32 dai_id; /* unique ID - used to match */
+ __le32 playback; /* supports playback mode */
+ __le32 capture; /* supports capture mode */
+ struct snd_soc_tplg_stream_caps caps[2]; /* playback and capture for DAI */
+ __le32 flag_mask; /* bitmask of flags to configure */
+ __le32 flags; /* SND_SOC_TPLG_DAI_FLGBIT_* */
+ struct snd_soc_tplg_private priv;
+} __attribute__((packed));
#endif
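
As a reading aid for the flag_mask/flags pair in the new BE DAI structure, a small userspace sketch; the helper name and the installed header path are assumptions, and only the SYMMETRIC_RATES bit is checked.

    #include <endian.h>
    #include <stdint.h>
    #include <sound/asoc.h>

    static int be_dai_wants_symmetric_rates(const struct snd_soc_tplg_be_dai *dai)
    {
            uint32_t mask  = le32toh(dai->flag_mask);
            uint32_t flags = le32toh(dai->flags);

            /* a flag bit only applies when the same bit is set in flag_mask */
            return (mask & flags & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_RATES) != 0;
    }
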
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 609cadb8739d..be353a78c303 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -106,9 +106,10 @@ enum {
SNDRV_HWDEP_IFACE_FW_OXFW, /* Oxford OXFW970/971 based device */
SNDRV_HWDEP_IFACE_FW_DIGI00X, /* Digidesign Digi 002/003 family */
SNDRV_HWDEP_IFACE_FW_TASCAM, /* TASCAM FireWire series */
+ SNDRV_HWDEP_IFACE_LINE6, /* Line6 USB processors */
/* Don't forget to change the following: */
- SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_TASCAM
+ SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_LINE6
};
struct snd_hwdep_info {
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
new file mode 100644
index 000000000000..1ee2e943d66a
--- /dev/null
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -0,0 +1,214 @@
+/*
+ * snd_sst_tokens.h - Intel SST tokens definition
+ *
+ * Copyright (C) 2016 Intel Corp
+ * Author: Shreyas NC <shreyas.nc@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __SND_SST_TOKENS_H__
+#define __SND_SST_TOKENS_H__
+
+/**
+ * %SKL_TKN_UUID: Module UUID
+ *
+ * %SKL_TKN_U8_BLOCK_TYPE:      Type of the private data block. Can be:
+ *                              tuples, bytes, shorts and words
+ *
+ * %SKL_TKN_U8_IN_PIN_TYPE:     Input pin type,
+ *                              homogeneous=0, heterogeneous=1
+ *
+ * %SKL_TKN_U8_OUT_PIN_TYPE:    Output pin type,
+ *                              homogeneous=0, heterogeneous=1
+ * %SKL_TKN_U8_DYN_IN_PIN: Configure Input pin dynamically
+ * if true
+ *
+ * %SKL_TKN_U8_DYN_OUT_PIN: Configure Output pin dynamically
+ * if true
+ *
+ * %SKL_TKN_U8_IN_QUEUE_COUNT: Store the number of Input pins
+ *
+ * %SKL_TKN_U8_OUT_QUEUE_COUNT: Store the number of Output pins
+ *
+ * %SKL_TKN_U8_TIME_SLOT: TDM slot number
+ *
+ * %SKL_TKN_U8_CORE_ID:         Stores the module affinity value. Can take
+ * the values:
+ * SKL_AFFINITY_CORE_0 = 0,
+ * SKL_AFFINITY_CORE_1,
+ * SKL_AFFINITY_CORE_MAX
+ *
+ * %SKL_TKN_U8_MOD_TYPE: Module type value.
+ *
+ * %SKL_TKN_U8_CONN_TYPE: Module connection type can be a FE,
+ * BE or NONE as defined :
+ * SKL_PIPE_CONN_TYPE_NONE = 0,
+ * SKL_PIPE_CONN_TYPE_FE = 1 (HOST_DMA)
+ * SKL_PIPE_CONN_TYPE_BE = 2 (LINK_DMA)
+ *
+ * %SKL_TKN_U8_DEV_TYPE: Type of device to which the module is
+ * connected
+ * Can take the values:
+ * SKL_DEVICE_BT = 0x0,
+ * SKL_DEVICE_DMIC = 0x1,
+ * SKL_DEVICE_I2S = 0x2,
+ * SKL_DEVICE_SLIMBUS = 0x3,
+ * SKL_DEVICE_HDALINK = 0x4,
+ * SKL_DEVICE_HDAHOST = 0x5,
+ * SKL_DEVICE_NONE
+ *
+ * %SKL_TKN_U8_HW_CONN_TYPE: Connection type of the HW to which the
+ * module is connected
+ * SKL_CONN_NONE = 0,
+ * SKL_CONN_SOURCE = 1,
+ * SKL_CONN_SINK = 2
+ *
+ * %SKL_TKN_U16_PIN_INST_ID: Stores the pin instance id
+ *
+ * %SKL_TKN_U16_MOD_INST_ID:    Stores the module instance id
+ *
+ * %SKL_TKN_U32_MAX_MCPS: Module max mcps value
+ *
+ * %SKL_TKN_U32_MEM_PAGES: Module resource pages
+ *
+ * %SKL_TKN_U32_OBS: Stores Output Buffer size
+ *
+ * %SKL_TKN_U32_IBS: Stores input buffer size
+ *
+ * %SKL_TKN_U32_VBUS_ID:        Module VBUS_ID. PDM=0, SSP0=0,
+ *                              SSP1=1, SSP2=2,
+ *                              SSP3=3, SSP4=4,
+ *                              SSP5=5, SSP6=6, INVALID
+ *
+ * %SKL_TKN_U32_PARAMS_FIXUP: Module Params fixup mask
+ * %SKL_TKN_U32_CONVERTER: Module params converter mask
+ * %SKL_TKN_U32_PIPE_ID: Stores the pipe id
+ *
+ * %SKL_TKN_U32_PIPE_CONN_TYPE: Type of the connection to which the pipe is
+ *                              connected. It can be
+ * SKL_PIPE_CONN_TYPE_NONE = 0,
+ * SKL_PIPE_CONN_TYPE_FE = 1 (HOST_DMA),
+ * SKL_PIPE_CONN_TYPE_BE = 2 (LINK_DMA),
+ *
+ * %SKL_TKN_U32_PIPE_PRIORITY: Pipe priority value
+ * %SKL_TKN_U32_PIPE_MEM_PGS: Pipe resource pages
+ *
+ * %SKL_TKN_U32_DIR_PIN_COUNT: Value for the direction to set input/output
+ * formats and the pin count.
+ * The first 4 bits have the direction
+ * value and the next 4 have
+ * the pin count value.
+ * SKL_DIR_IN = 0, SKL_DIR_OUT = 1.
+ * The input and output formats
+ * share the same set of tokens
+ * with the distinction between input
+ * and output made by reading direction
+ * token.
+ *
+ * %SKL_TKN_U32_FMT_CH: Supported channel count
+ *
+ * %SKL_TKN_U32_FMT_FREQ: Supported frequency/sample rate
+ *
+ * %SKL_TKN_U32_FMT_BIT_DEPTH: Supported container size
+ *
+ * %SKL_TKN_U32_FMT_SAMPLE_SIZE: Number of samples in the container
+ *
+ * %SKL_TKN_U32_FMT_CH_CONFIG: Supported channel configurations for the
+ * input/output.
+ *
+ * %SKL_TKN_U32_FMT_INTERLEAVE: Interleaving style, which can be per
+ * channel or per sample. The values can be:
+ * SKL_INTERLEAVING_PER_CHANNEL = 0,
+ * SKL_INTERLEAVING_PER_SAMPLE = 1,
+ *
+ * %SKL_TKN_U32_FMT_SAMPLE_TYPE:
+ * Specifies the sample type. Can take the
+ * values: SKL_SAMPLE_TYPE_INT_MSB = 0,
+ * SKL_SAMPLE_TYPE_INT_LSB = 1,
+ * SKL_SAMPLE_TYPE_INT_SIGNED = 2,
+ * SKL_SAMPLE_TYPE_INT_UNSIGNED = 3,
+ * SKL_SAMPLE_TYPE_FLOAT = 4
+ *
+ * %SKL_TKN_U32_FMT_CH_MAP: Channel map values
+ * %SKL_TKN_U32_MOD_SET_PARAMS: It can take these values:
+ * SKL_PARAM_DEFAULT, SKL_PARAM_INIT,
+ * SKL_PARAM_SET, SKL_PARAM_BIND
+ *
+ * %SKL_TKN_U32_MOD_PARAM_ID: ID of the module params
+ *
+ * %SKL_TKN_U32_CAPS_SET_PARAMS:
+ * Set params value
+ *
+ * %SKL_TKN_U32_CAPS_PARAMS_ID: Params ID
+ *
+ * %SKL_TKN_U32_CAPS_SIZE: Caps size
+ *
+ * %SKL_TKN_U32_PROC_DOMAIN: Specify processing domain
+ *
+ * %SKL_TKN_U32_LIB_COUNT: Specifies the number of libraries
+ *
+ * %SKL_TKN_STR_LIB_NAME: Specifies the library name
+ *
+ * module_id and loadable flags don't have tokens, as these values will be
+ * read from the DSP FW manifest.
+ */
+enum SKL_TKNS {
+ SKL_TKN_UUID = 1,
+ SKL_TKN_U8_NUM_BLOCKS,
+ SKL_TKN_U8_BLOCK_TYPE,
+ SKL_TKN_U8_IN_PIN_TYPE,
+ SKL_TKN_U8_OUT_PIN_TYPE,
+ SKL_TKN_U8_DYN_IN_PIN,
+ SKL_TKN_U8_DYN_OUT_PIN,
+ SKL_TKN_U8_IN_QUEUE_COUNT,
+ SKL_TKN_U8_OUT_QUEUE_COUNT,
+ SKL_TKN_U8_TIME_SLOT,
+ SKL_TKN_U8_CORE_ID,
+ SKL_TKN_U8_MOD_TYPE,
+ SKL_TKN_U8_CONN_TYPE,
+ SKL_TKN_U8_DEV_TYPE,
+ SKL_TKN_U8_HW_CONN_TYPE,
+ SKL_TKN_U16_MOD_INST_ID,
+ SKL_TKN_U16_BLOCK_SIZE,
+ SKL_TKN_U32_MAX_MCPS,
+ SKL_TKN_U32_MEM_PAGES,
+ SKL_TKN_U32_OBS,
+ SKL_TKN_U32_IBS,
+ SKL_TKN_U32_VBUS_ID,
+ SKL_TKN_U32_PARAMS_FIXUP,
+ SKL_TKN_U32_CONVERTER,
+ SKL_TKN_U32_PIPE_ID,
+ SKL_TKN_U32_PIPE_CONN_TYPE,
+ SKL_TKN_U32_PIPE_PRIORITY,
+ SKL_TKN_U32_PIPE_MEM_PGS,
+ SKL_TKN_U32_DIR_PIN_COUNT,
+ SKL_TKN_U32_FMT_CH,
+ SKL_TKN_U32_FMT_FREQ,
+ SKL_TKN_U32_FMT_BIT_DEPTH,
+ SKL_TKN_U32_FMT_SAMPLE_SIZE,
+ SKL_TKN_U32_FMT_CH_CONFIG,
+ SKL_TKN_U32_FMT_INTERLEAVE,
+ SKL_TKN_U32_FMT_SAMPLE_TYPE,
+ SKL_TKN_U32_FMT_CH_MAP,
+ SKL_TKN_U32_PIN_MOD_ID,
+ SKL_TKN_U32_PIN_INST_ID,
+ SKL_TKN_U32_MOD_SET_PARAMS,
+ SKL_TKN_U32_MOD_PARAM_ID,
+ SKL_TKN_U32_CAPS_SET_PARAMS,
+ SKL_TKN_U32_CAPS_PARAMS_ID,
+ SKL_TKN_U32_CAPS_SIZE,
+ SKL_TKN_U32_PROC_DOMAIN,
+ SKL_TKN_U32_LIB_COUNT,
+ SKL_TKN_STR_LIB_NAME,
+ SKL_TKN_MAX = SKL_TKN_STR_LIB_NAME,
+};
+
+#endif
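These token IDs reach the Skylake driver as vendor tuples in the ALSA topology binary. A minimal, hedged sketch (not part of this patch) of one word-sized token/value pair built with the vendor-tuple structures from the exported uapi sound/asoc.h; the variable name and the 48 kHz value are illustrative assumptions only:

#include <sound/asoc.h>            /* exported uapi header: vendor tuple structs */
#include <sound/snd_sst_tokens.h>  /* SKL_TKN_* IDs from the new header above */

/* One token/value pair: "this format supports a 48 kHz sample rate".
 * The topology tool normally generates these; byte-order handling
 * (cpu_to_le32) is omitted here for brevity.
 */
static struct snd_soc_tplg_vendor_value_elem rate_tuple = {
        .token = SKL_TKN_U32_FMT_FREQ,
        .value = 48000,
};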
diff --git a/include/uapi/sound/tlv.h b/include/uapi/sound/tlv.h
index ffc4f203146c..b4df440c015b 100644
--- a/include/uapi/sound/tlv.h
+++ b/include/uapi/sound/tlv.h
@@ -28,4 +28,73 @@
#define SNDRV_CTL_TLVT_CHMAP_VAR 0x102 /* channels freely swappable */
#define SNDRV_CTL_TLVT_CHMAP_PAIRED 0x103 /* pair-wise swappable */
+/*
+ * TLV structure is right behind the struct snd_ctl_tlv:
+ * unsigned int type - see SNDRV_CTL_TLVT_*
+ * unsigned int length
+ * .... data aligned to sizeof(unsigned int), use
+ * block_length = (length + (sizeof(unsigned int) - 1)) &
+ * ~(sizeof(unsigned int) - 1) ....
+ */
+#define SNDRV_CTL_TLVD_ITEM(type, ...) \
+ (type), SNDRV_CTL_TLVD_LENGTH(__VA_ARGS__), __VA_ARGS__
+#define SNDRV_CTL_TLVD_LENGTH(...) \
+ ((unsigned int)sizeof((const unsigned int[]) { __VA_ARGS__ }))
+
+#define SNDRV_CTL_TLVD_CONTAINER_ITEM(...) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_CONTAINER, __VA_ARGS__)
+#define SNDRV_CTL_TLVD_DECLARE_CONTAINER(name, ...) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_CONTAINER_ITEM(__VA_ARGS__) \
+ }
+
+#define SNDRV_CTL_TLVD_DB_SCALE_MASK 0xffff
+#define SNDRV_CTL_TLVD_DB_SCALE_MUTE 0x10000
+#define SNDRV_CTL_TLVD_DB_SCALE_ITEM(min, step, mute) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_SCALE, \
+ (min), \
+ ((step) & SNDRV_CTL_TLVD_DB_SCALE_MASK) | \
+ ((mute) ? SNDRV_CTL_TLVD_DB_SCALE_MUTE : 0))
+#define SNDRV_CTL_TLVD_DECLARE_DB_SCALE(name, min, step, mute) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_DB_SCALE_ITEM(min, step, mute) \
+ }
+
+/* dB scale specified with min/max values instead of step */
+#define SNDRV_CTL_TLVD_DB_MINMAX_ITEM(min_dB, max_dB) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_MINMAX, (min_dB), (max_dB))
+#define SNDRV_CTL_TLVD_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_MINMAX_MUTE, (min_dB), (max_dB))
+#define SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(name, min_dB, max_dB) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_DB_MINMAX_ITEM(min_dB, max_dB) \
+ }
+#define SNDRV_CTL_TLVD_DECLARE_DB_MINMAX_MUTE(name, min_dB, max_dB) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
+ }
+
+/* linear volume between min_dB and max_dB (.01dB unit) */
+#define SNDRV_CTL_TLVD_DB_LINEAR_ITEM(min_dB, max_dB) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_LINEAR, (min_dB), (max_dB))
+#define SNDRV_CTL_TLVD_DECLARE_DB_LINEAR(name, min_dB, max_dB) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_DB_LINEAR_ITEM(min_dB, max_dB) \
+ }
+
+/* dB range container:
+ * Items in dB range container must be ordered by their values and by their
+ * dB values. This implies that larger values must correspond with larger
+ * dB values (which is also required for all other mixer controls).
+ */
+/* Each item is: <min> <max> <TLV> */
+#define SNDRV_CTL_TLVD_DB_RANGE_ITEM(...) \
+ SNDRV_CTL_TLVD_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__)
+#define SNDRV_CTL_TLVD_DECLARE_DB_RANGE(name, ...) \
+ unsigned int name[] = { \
+ SNDRV_CTL_TLVD_DB_RANGE_ITEM(__VA_ARGS__) \
+ }
+
+#define SNDRV_CTL_TLVD_DB_GAIN_MUTE -9999999
+
#endif
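The SNDRV_CTL_TLVD_* helpers added above let both kernel drivers and user space build TLV data from designated macros rather than hand-rolled arrays. A minimal sketch of declaring dB metadata with them; the array names and dB figures below are made up for illustration:

#include <sound/tlv.h>  /* in-kernel wrapper that pulls in uapi/sound/tlv.h */

/* Simple scale: -60.00 dB minimum, 0.50 dB per step, lowest step mutes. */
static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(out_vol_tlv, -6000, 50, 1);

/* Range TLV: steps 0-7 and 8-15 use different scales. */
static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(master_tlv,
        0, 7,  SNDRV_CTL_TLVD_DB_SCALE_ITEM(-9000, 600, 1),
        8, 15, SNDRV_CTL_TLVD_DB_SCALE_ITEM(-3000, 300, 0));

The resulting arrays are wired to a control in the usual way, via the kcontrol's .tlv.p pointer with SNDRV_CTL_ELEM_ACCESS_TLV_READ set, just as with the older DECLARE_TLV_DB_* macros.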
diff --git a/include/video/exynos_mipi_dsim.h b/include/video/exynos_mipi_dsim.h
deleted file mode 100644
index 6a578f8a1b3e..000000000000
--- a/include/video/exynos_mipi_dsim.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/* include/video/exynos_mipi_dsim.h
- *
- * Platform data header for Samsung SoC MIPI-DSIM.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _EXYNOS_MIPI_DSIM_H
-#define _EXYNOS_MIPI_DSIM_H
-
-#include <linux/device.h>
-#include <linux/fb.h>
-
-#define PANEL_NAME_SIZE (32)
-
-/*
- * Enumerate display interface type.
- *
- * DSIM_COMMAND means cpu interface and rgb interface for DSIM_VIDEO.
- *
- * P.S. MIPI DSI Master has two display controller intefaces, RGB Interface
- * for main display and CPU Interface(same as I80 Interface) for main
- * and sub display.
- */
-enum mipi_dsim_interface_type {
- DSIM_COMMAND,
- DSIM_VIDEO
-};
-
-enum mipi_dsim_virtual_ch_no {
- DSIM_VIRTUAL_CH_0,
- DSIM_VIRTUAL_CH_1,
- DSIM_VIRTUAL_CH_2,
- DSIM_VIRTUAL_CH_3
-};
-
-enum mipi_dsim_burst_mode_type {
- DSIM_NON_BURST_SYNC_EVENT,
- DSIM_BURST_SYNC_EVENT,
- DSIM_NON_BURST_SYNC_PULSE,
- DSIM_BURST,
- DSIM_NON_VIDEO_MODE
-};
-
-enum mipi_dsim_no_of_data_lane {
- DSIM_DATA_LANE_1,
- DSIM_DATA_LANE_2,
- DSIM_DATA_LANE_3,
- DSIM_DATA_LANE_4
-};
-
-enum mipi_dsim_byte_clk_src {
- DSIM_PLL_OUT_DIV8,
- DSIM_EXT_CLK_DIV8,
- DSIM_EXT_CLK_BYPASS
-};
-
-enum mipi_dsim_pixel_format {
- DSIM_CMD_3BPP,
- DSIM_CMD_8BPP,
- DSIM_CMD_12BPP,
- DSIM_CMD_16BPP,
- DSIM_VID_16BPP_565,
- DSIM_VID_18BPP_666PACKED,
- DSIM_18BPP_666LOOSELYPACKED,
- DSIM_24BPP_888
-};
-
-/*
- * struct mipi_dsim_config - interface for configuring mipi-dsi controller.
- *
- * @auto_flush: enable or disable Auto flush of MD FIFO using VSYNC pulse.
- * @eot_disable: enable or disable EoT packet in HS mode.
- * @auto_vertical_cnt: specifies auto vertical count mode.
- * in Video mode, the vertical line transition uses line counter
- * configured by VSA, VBP, and Vertical resolution.
- * If this bit is set to '1', the line counter does not use VSA and VBP
- * registers.(in command mode, this variable is ignored)
- * @hse: set horizontal sync event mode.
- * In VSYNC pulse and Vporch area, MIPI DSI master transfers only HSYNC
- * start packet to MIPI DSI slave at MIPI DSI spec1.1r02.
- * this bit transfers HSYNC end packet in VSYNC pulse and Vporch area
- * (in mommand mode, this variable is ignored)
- * @hfp: specifies HFP disable mode.
- * if this variable is set, DSI master ignores HFP area in VIDEO mode.
- * (in command mode, this variable is ignored)
- * @hbp: specifies HBP disable mode.
- * if this variable is set, DSI master ignores HBP area in VIDEO mode.
- * (in command mode, this variable is ignored)
- * @hsa: specifies HSA disable mode.
- * if this variable is set, DSI master ignores HSA area in VIDEO mode.
- * (in command mode, this variable is ignored)
- * @cma_allow: specifies the number of horizontal lines, where command packet
- * transmission is allowed after Stable VFP period.
- * @e_interface: specifies interface to be used.(CPU or RGB interface)
- * @e_virtual_ch: specifies virtual channel number that main or
- * sub diaplsy uses.
- * @e_pixel_format: specifies pixel stream format for main or sub display.
- * @e_burst_mode: selects Burst mode in Video mode.
- * in Non-burst mode, RGB data area is filled with RGB data and NULL
- * packets, according to input bandwidth of RGB interface.
- * In Burst mode, RGB data area is filled with RGB data only.
- * @e_no_data_lane: specifies data lane count to be used by Master.
- * @e_byte_clk: select byte clock source. (it must be DSIM_PLL_OUT_DIV8)
- * DSIM_EXT_CLK_DIV8 and DSIM_EXT_CLK_BYPASSS are not supported.
- * @pll_stable_time: specifies the PLL Timer for stability of the ganerated
- * clock(System clock cycle base)
- * if the timer value goes to 0x00000000, the clock stable bit of status
- * and interrupt register is set.
- * @esc_clk: specifies escape clock frequency for getting the escape clock
- * prescaler value.
- * @stop_holding_cnt: specifies the interval value between transmitting
- * read packet(or write "set_tear_on" command) and BTA request.
- * after transmitting read packet or write "set_tear_on" command,
- * BTA requests to D-PHY automatically. this counter value specifies
- * the interval between them.
- * @bta_timeout: specifies the timer for BTA.
- * this register specifies time out from BTA request to change
- * the direction with respect to Tx escape clock.
- * @rx_timeout: specifies the timer for LP Rx mode timeout.
- * this register specifies time out on how long RxValid deasserts,
- * after RxLpdt asserts with respect to Tx escape clock.
- * - RxValid specifies Rx data valid indicator.
- * - RxLpdt specifies an indicator that D-PHY is under RxLpdt mode.
- * - RxValid and RxLpdt specifies signal from D-PHY.
- */
-struct mipi_dsim_config {
- unsigned char auto_flush;
- unsigned char eot_disable;
-
- unsigned char auto_vertical_cnt;
- unsigned char hse;
- unsigned char hfp;
- unsigned char hbp;
- unsigned char hsa;
- unsigned char cmd_allow;
-
- enum mipi_dsim_interface_type e_interface;
- enum mipi_dsim_virtual_ch_no e_virtual_ch;
- enum mipi_dsim_pixel_format e_pixel_format;
- enum mipi_dsim_burst_mode_type e_burst_mode;
- enum mipi_dsim_no_of_data_lane e_no_data_lane;
- enum mipi_dsim_byte_clk_src e_byte_clk;
-
- /*
- * ===========================================
- * | P | M | S | MHz |
- * -------------------------------------------
- * | 3 | 100 | 3 | 100 |
- * | 3 | 100 | 2 | 200 |
- * | 3 | 63 | 1 | 252 |
- * | 4 | 100 | 1 | 300 |
- * | 4 | 110 | 1 | 330 |
- * | 12 | 350 | 1 | 350 |
- * | 3 | 100 | 1 | 400 |
- * | 4 | 150 | 1 | 450 |
- * | 6 | 118 | 1 | 472 |
- * | 3 | 120 | 1 | 480 |
- * | 12 | 250 | 0 | 500 |
- * | 4 | 100 | 0 | 600 |
- * | 3 | 81 | 0 | 648 |
- * | 3 | 88 | 0 | 704 |
- * | 3 | 90 | 0 | 720 |
- * | 3 | 100 | 0 | 800 |
- * | 12 | 425 | 0 | 850 |
- * | 4 | 150 | 0 | 900 |
- * | 12 | 475 | 0 | 950 |
- * | 6 | 250 | 0 | 1000 |
- * -------------------------------------------
- */
-
- /*
- * pms could be calculated as the following.
- * M * 24 / P * 2 ^ S = MHz
- */
- unsigned char p;
- unsigned short m;
- unsigned char s;
-
- unsigned int pll_stable_time;
- unsigned long esc_clk;
-
- unsigned short stop_holding_cnt;
- unsigned char bta_timeout;
- unsigned short rx_timeout;
-};
-
-/*
- * struct mipi_dsim_device - global interface for mipi-dsi driver.
- *
- * @dev: driver model representation of the device.
- * @id: unique device id.
- * @clock: pointer to MIPI-DSI clock of clock framework.
- * @irq: interrupt number to MIPI-DSI controller.
- * @reg_base: base address to memory mapped SRF of MIPI-DSI controller.
- * (virtual address)
- * @lock: the mutex protecting this data structure.
- * @dsim_info: infomation for configuring mipi-dsi controller.
- * @master_ops: callbacks to mipi-dsi operations.
- * @dsim_lcd_dev: pointer to activated ddi device.
- * (it would be registered by mipi-dsi driver.)
- * @dsim_lcd_drv: pointer to activated_ddi driver.
- * (it would be registered by mipi-dsi driver.)
- * @lcd_info: pointer to mipi_lcd_info structure.
- * @state: specifies status of MIPI-DSI controller.
- * the status could be RESET, INIT, STOP, HSCLKEN and ULPS.
- * @data_lane: specifiec enabled data lane number.
- * this variable would be set by driver according to e_no_data_lane
- * automatically.
- * @e_clk_src: select byte clock source.
- * @pd: pointer to MIPI-DSI driver platform data.
- * @phy: pointer to the MIPI-DSI PHY
- */
-struct mipi_dsim_device {
- struct device *dev;
- int id;
- struct clk *clock;
- unsigned int irq;
- void __iomem *reg_base;
- struct mutex lock;
-
- struct mipi_dsim_config *dsim_config;
- struct mipi_dsim_master_ops *master_ops;
- struct mipi_dsim_lcd_device *dsim_lcd_dev;
- struct mipi_dsim_lcd_driver *dsim_lcd_drv;
-
- unsigned int state;
- unsigned int data_lane;
- unsigned int e_clk_src;
- bool suspended;
-
- struct mipi_dsim_platform_data *pd;
- struct phy *phy;
-};
-
-/*
- * struct mipi_dsim_platform_data - interface to platform data
- * for mipi-dsi driver.
- *
- * @lcd_panel_name: specifies lcd panel name registered to mipi-dsi driver.
- * lcd panel driver searched would be actived.
- * @dsim_config: pointer of structure for configuring mipi-dsi controller.
- * @enabled: indicate whether mipi controller got enabled or not.
- * @lcd_panel_info: pointer for lcd panel specific structure.
- * this structure specifies width, height, timing and polarity and so on.
- */
-struct mipi_dsim_platform_data {
- char lcd_panel_name[PANEL_NAME_SIZE];
-
- struct mipi_dsim_config *dsim_config;
- unsigned int enabled;
- void *lcd_panel_info;
-};
-
-/*
- * struct mipi_dsim_master_ops - callbacks to mipi-dsi operations.
- *
- * @cmd_write: transfer command to lcd panel at LP mode.
- * @cmd_read: read command from rx register.
- * @get_dsim_frame_done: get the status that all screen data have been
- * transferred to mipi-dsi.
- * @clear_dsim_frame_done: clear frame done status.
- * @get_fb_frame_done: get frame done status of display controller.
- * @trigger: trigger display controller.
- * - this one would be used only in case of CPU mode.
- * @set_early_blank_mode: set framebuffer blank mode.
- * - this callback should be called prior to fb_blank() by a client driver
- * only if needing.
- * @set_blank_mode: set framebuffer blank mode.
- * - this callback should be called after fb_blank() by a client driver
- * only if needing.
- */
-
-struct mipi_dsim_master_ops {
- int (*cmd_write)(struct mipi_dsim_device *dsim, unsigned int data_id,
- const unsigned char *data0, unsigned int data1);
- int (*cmd_read)(struct mipi_dsim_device *dsim, unsigned int data_id,
- unsigned int data0, unsigned int req_size, u8 *rx_buf);
- int (*get_dsim_frame_done)(struct mipi_dsim_device *dsim);
- int (*clear_dsim_frame_done)(struct mipi_dsim_device *dsim);
-
- int (*get_fb_frame_done)(struct fb_info *info);
- void (*trigger)(struct fb_info *info);
- int (*set_early_blank_mode)(struct mipi_dsim_device *dsim, int power);
- int (*set_blank_mode)(struct mipi_dsim_device *dsim, int power);
-};
-
-/*
- * device structure for mipi-dsi based lcd panel.
- *
- * @name: name of the device to use with this device, or an
- * alias for that name.
- * @dev: driver model representation of the device.
- * @id: id of device to be registered.
- * @bus_id: bus id for identifing connected bus
- * and this bus id should be same as id of mipi_dsim_device.
- * @irq: irq number for signaling when framebuffer transfer of
- * lcd panel module is completed.
- * this irq would be used only for MIPI-DSI based CPU mode lcd panel.
- * @master: pointer to mipi-dsi master device object.
- * @platform_data: lcd panel specific platform data.
- */
-struct mipi_dsim_lcd_device {
- char *name;
- struct device dev;
- int id;
- int bus_id;
- int irq;
- int panel_reverse;
-
- struct mipi_dsim_device *master;
- void *platform_data;
-};
-
-/*
- * driver structure for mipi-dsi based lcd panel.
- *
- * this structure should be registered by lcd panel driver.
- * mipi-dsi driver seeks lcd panel registered through name field
- * and calls these callback functions in appropriate time.
- *
- * @name: name of the driver to use with this device, or an
- * alias for that name.
- * @id: id of driver to be registered.
- * this id would be used for finding device object registered.
- */
-struct mipi_dsim_lcd_driver {
- char *name;
- int id;
-
- void (*power_on)(struct mipi_dsim_lcd_device *dsim_dev, int enable);
- void (*set_sequence)(struct mipi_dsim_lcd_device *dsim_dev);
- int (*probe)(struct mipi_dsim_lcd_device *dsim_dev);
- int (*remove)(struct mipi_dsim_lcd_device *dsim_dev);
- void (*shutdown)(struct mipi_dsim_lcd_device *dsim_dev);
- int (*suspend)(struct mipi_dsim_lcd_device *dsim_dev);
- int (*resume)(struct mipi_dsim_lcd_device *dsim_dev);
-};
-
-/*
- * register mipi_dsim_lcd_device to mipi-dsi master.
- */
-int exynos_mipi_dsi_register_lcd_device(struct mipi_dsim_lcd_device
- *lcd_dev);
-/**
- * register mipi_dsim_lcd_driver object defined by lcd panel driver
- * to mipi-dsi driver.
- */
-int exynos_mipi_dsi_register_lcd_driver(struct mipi_dsim_lcd_driver
- *lcd_drv);
-#endif /* _EXYNOS_MIPI_DSIM_H */
diff --git a/include/video/imx-ipu-image-convert.h b/include/video/imx-ipu-image-convert.h
new file mode 100644
index 000000000000..7b87efc6d77a
--- /dev/null
+++ b/include/video/imx-ipu-image-convert.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ *
+ * i.MX Queued image conversion support, with tiling and rotation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef __IMX_IPU_IMAGE_CONVERT_H__
+#define __IMX_IPU_IMAGE_CONVERT_H__
+
+#include <video/imx-ipu-v3.h>
+
+struct ipu_image_convert_ctx;
+
+/**
+ * struct ipu_image_convert_run - image conversion run request struct
+ *
+ * @ctx: the conversion context
+ * @in_phys: dma addr of input image buffer for this run
+ * @out_phys: dma addr of output image buffer for this run
+ * @status: completion status of this run
+ */
+struct ipu_image_convert_run {
+ struct ipu_image_convert_ctx *ctx;
+
+ dma_addr_t in_phys;
+ dma_addr_t out_phys;
+
+ int status;
+
+ /* internal to image converter, callers don't touch */
+ struct list_head list;
+};
+
+/**
+ * ipu_image_convert_cb_t - conversion callback function prototype
+ *
+ * @run: the completed conversion run pointer
+ * @ctx: a private context pointer for the callback
+ */
+typedef void (*ipu_image_convert_cb_t)(struct ipu_image_convert_run *run,
+ void *ctx);
+
+/**
+ * ipu_image_convert_enum_format() - enumerate the image converter's
+ * supported input and output pixel formats.
+ *
+ * @index: pixel format index
+ * @fourcc: v4l2 fourcc for this index
+ *
+ * Returns 0 with a valid index and fills in v4l2 fourcc, -EINVAL otherwise.
+ *
+ * In V4L2, drivers can call ipu_image_convert_enum_format() in .enum_fmt.
+ */
+int ipu_image_convert_enum_format(int index, u32 *fourcc);
+
+/**
+ * ipu_image_convert_adjust() - adjust input/output images to IPU restrictions.
+ *
+ * @in: input image format, adjusted on return
+ * @out: output image format, adjusted on return
+ * @rot_mode: rotation mode
+ *
+ * In V4L2, drivers can call ipu_image_convert_adjust() in .try_fmt.
+ */
+void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
+ enum ipu_rotate_mode rot_mode);
+
+/**
+ * ipu_image_convert_verify() - verify that input/output image formats
+ * and rotation mode meet IPU restrictions.
+ *
+ * @in: input image format
+ * @out: output image format
+ * @rot_mode: rotation mode
+ *
+ * Returns 0 if the formats and rotation mode meet IPU restrictions,
+ * -EINVAL otherwise.
+ */
+int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
+ enum ipu_rotate_mode rot_mode);
+
+/**
+ * ipu_image_convert_prepare() - prepare a conversion context.
+ *
+ * @ipu: the IPU handle to use for the conversions
+ * @ic_task: the IC task to use for the conversions
+ * @in: input image format
+ * @out: output image format
+ * @rot_mode: rotation mode
+ * @complete: run completion callback
+ * @complete_context: a context pointer for the completion callback
+ *
+ * Returns an opaque conversion context pointer on success, error pointer
+ * on failure. The input/output formats and rotation mode must already meet
+ * IPU restrictions.
+ *
+ * In V4L2, drivers should call ipu_image_convert_prepare() at streamon.
+ */
+struct ipu_image_convert_ctx *
+ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+ struct ipu_image *in, struct ipu_image *out,
+ enum ipu_rotate_mode rot_mode,
+ ipu_image_convert_cb_t complete,
+ void *complete_context);
+
+/**
+ * ipu_image_convert_unprepare() - unprepare a conversion context.
+ *
+ * @ctx: the conversion context pointer to unprepare
+ *
+ * Aborts any active or pending conversions for this context and
+ * frees the context. Any currently active or pending runs belonging
+ * to this context are returned via the completion callback with an
+ * error run status.
+ *
+ * In V4L2, drivers should call ipu_image_convert_unprepare() at
+ * streamoff.
+ */
+void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx);
+
+/**
+ * ipu_image_convert_queue() - queue a conversion run
+ *
+ * @run: the run request pointer
+ *
+ * ipu_image_convert_run must be dynamically allocated (_not_ as a local
+ * var) by callers and filled in with a previously prepared conversion
+ * context handle and the dma addr's of the input and output image buffers
+ * for this conversion run.
+ *
+ * When this conversion completes, the run pointer is returned via the
+ * completion callback. The caller is responsible for freeing the run
+ * object after it completes.
+ *
+ * In V4L2, drivers should call ipu_image_convert_queue() while
+ * streaming to queue the conversion of a received input buffer.
+ * For mem2mem devices, for example, this would be called in .device_run.
+ */
+int ipu_image_convert_queue(struct ipu_image_convert_run *run);
+
+/**
+ * ipu_image_convert_abort() - abort conversions
+ *
+ * @ctx: the conversion context pointer
+ *
+ * This will abort any active or pending conversions for this context.
+ * Any currently active or pending runs belonging to this context are
+ * returned via the completion callback with an error run status.
+ */
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx);
+
+/**
+ * ipu_image_convert() - asynchronous image conversion request
+ *
+ * @ipu: the IPU handle to use for the conversion
+ * @ic_task: the IC task to use for the conversion
+ * @in: input image format
+ * @out: output image format
+ * @rot_mode: rotation mode
+ * @complete: run completion callback
+ * @complete_context: a context pointer for the completion callback
+ *
+ * Request a single image conversion. Returns the run that has been queued.
+ * A conversion context is automatically created and is available in run->ctx.
+ * As with ipu_image_convert_prepare(), the input/output formats and rotation
+ * mode must already meet IPU restrictions.
+ *
+ * On successful return the caller can queue more run requests if needed, using
+ * the prepared context in run->ctx. The caller is responsible for unpreparing
+ * the context when no more conversion requests are needed.
+ */
+struct ipu_image_convert_run *
+ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+ struct ipu_image *in, struct ipu_image *out,
+ enum ipu_rotate_mode rot_mode,
+ ipu_image_convert_cb_t complete,
+ void *complete_context);
+
+/**
+ * ipu_image_convert_sync() - synchronous single image conversion request
+ *
+ * @ipu: the IPU handle to use for the conversion
+ * @ic_task: the IC task to use for the conversion
+ * @in: input image format
+ * @out: output image format
+ * @rot_mode: rotation mode
+ *
+ * Carry out a single image conversion. Returns when the conversion
+ * completes. The input/output formats and rotation mode must already
+ * meet IPU retrictions. The created context is automatically unprepared
+ * and the run freed on return.
+ */
+int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+ struct ipu_image *in, struct ipu_image *out,
+ enum ipu_rotate_mode rot_mode);
+
+
+#endif /* __IMX_IPU_IMAGE_CONVERT_H__ */
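A minimal, hedged sketch of the queued-conversion flow this header describes, roughly what a V4L2 mem2mem client might do; the IC task choice (post-processor), function names, buffer addresses and error unwinding are placeholder assumptions, not part of the header:

#include <linux/err.h>
#include <linux/slab.h>
#include <video/imx-ipu-image-convert.h>

static void my_convert_done(struct ipu_image_convert_run *run, void *ctx)
{
        /* run->status holds the result; the caller owns and frees the run */
        kfree(run);
}

/* Prepare a context (e.g. at streamon) and queue one buffer pair. */
static int my_queue_one(struct ipu_soc *ipu, struct ipu_image *in,
                        struct ipu_image *out, dma_addr_t in_phys,
                        dma_addr_t out_phys)
{
        struct ipu_image_convert_ctx *ictx;
        struct ipu_image_convert_run *run;
        int ret;

        /* formats must already have passed _adjust()/_verify() */
        ictx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR, in, out,
                                         IPU_ROTATE_NONE, my_convert_done,
                                         NULL);
        if (IS_ERR(ictx))
                return PTR_ERR(ictx);

        /* runs must be dynamically allocated, one per buffer pair */
        run = kzalloc(sizeof(*run), GFP_KERNEL);
        if (!run) {
                ipu_image_convert_unprepare(ictx);
                return -ENOMEM;
        }

        run->ctx = ictx;
        run->in_phys = in_phys;
        run->out_phys = out_phys;

        ret = ipu_image_convert_queue(run);
        if (ret)
                kfree(run);
        return ret;
}

At streamoff the client would call ipu_image_convert_unprepare(), which aborts any pending runs and frees the context.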
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 7adeaae06961..173073eb6aaf 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -63,23 +63,41 @@ enum ipu_csi_dest {
/*
* Enumeration of IPU rotation modes
*/
+#define IPU_ROT_BIT_VFLIP (1 << 0)
+#define IPU_ROT_BIT_HFLIP (1 << 1)
+#define IPU_ROT_BIT_90 (1 << 2)
+
enum ipu_rotate_mode {
IPU_ROTATE_NONE = 0,
- IPU_ROTATE_VERT_FLIP,
- IPU_ROTATE_HORIZ_FLIP,
- IPU_ROTATE_180,
- IPU_ROTATE_90_RIGHT,
- IPU_ROTATE_90_RIGHT_VFLIP,
- IPU_ROTATE_90_RIGHT_HFLIP,
- IPU_ROTATE_90_LEFT,
+ IPU_ROTATE_VERT_FLIP = IPU_ROT_BIT_VFLIP,
+ IPU_ROTATE_HORIZ_FLIP = IPU_ROT_BIT_HFLIP,
+ IPU_ROTATE_180 = (IPU_ROT_BIT_VFLIP | IPU_ROT_BIT_HFLIP),
+ IPU_ROTATE_90_RIGHT = IPU_ROT_BIT_90,
+ IPU_ROTATE_90_RIGHT_VFLIP = (IPU_ROT_BIT_90 | IPU_ROT_BIT_VFLIP),
+ IPU_ROTATE_90_RIGHT_HFLIP = (IPU_ROT_BIT_90 | IPU_ROT_BIT_HFLIP),
+ IPU_ROTATE_90_LEFT = (IPU_ROT_BIT_90 |
+ IPU_ROT_BIT_VFLIP | IPU_ROT_BIT_HFLIP),
};
+/* 90-degree rotations require the IRT unit */
+#define ipu_rot_mode_is_irt(m) (((m) & IPU_ROT_BIT_90) != 0)
+
enum ipu_color_space {
IPUV3_COLORSPACE_RGB,
IPUV3_COLORSPACE_YUV,
IPUV3_COLORSPACE_UNKNOWN,
};
+/*
+ * Enumeration of VDI MOTION select
+ */
+enum ipu_motion_sel {
+ MOTION_NONE = 0,
+ LOW_MOTION,
+ MED_MOTION,
+ HIGH_MOTION,
+};
+
struct ipuv3_channel;
enum ipu_channel_irq {
@@ -97,20 +115,42 @@ enum ipu_channel_irq {
#define IPUV3_CHANNEL_CSI2 2
#define IPUV3_CHANNEL_CSI3 3
#define IPUV3_CHANNEL_VDI_MEM_IC_VF 5
+/*
+ * NOTE: channels 6,7 are unused in the IPU and are not IDMAC channels,
+ * but the direct CSI->VDI linking is handled the same way as IDMAC
+ * channel linking in the FSU via the IPU_FS_PROC_FLOW registers, so
+ * these channel names are used to support the direct CSI->VDI link.
+ */
+#define IPUV3_CHANNEL_CSI_DIRECT 6
+#define IPUV3_CHANNEL_CSI_VDI_PREV 7
+#define IPUV3_CHANNEL_MEM_VDI_PREV 8
+#define IPUV3_CHANNEL_MEM_VDI_CUR 9
+#define IPUV3_CHANNEL_MEM_VDI_NEXT 10
#define IPUV3_CHANNEL_MEM_IC_PP 11
#define IPUV3_CHANNEL_MEM_IC_PRP_VF 12
+#define IPUV3_CHANNEL_VDI_MEM_RECENT 13
#define IPUV3_CHANNEL_G_MEM_IC_PRP_VF 14
#define IPUV3_CHANNEL_G_MEM_IC_PP 15
+#define IPUV3_CHANNEL_G_MEM_IC_PRP_VF_ALPHA 17
+#define IPUV3_CHANNEL_G_MEM_IC_PP_ALPHA 18
+#define IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB_ALPHA 19
#define IPUV3_CHANNEL_IC_PRP_ENC_MEM 20
#define IPUV3_CHANNEL_IC_PRP_VF_MEM 21
#define IPUV3_CHANNEL_IC_PP_MEM 22
#define IPUV3_CHANNEL_MEM_BG_SYNC 23
#define IPUV3_CHANNEL_MEM_BG_ASYNC 24
+#define IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB 25
+#define IPUV3_CHANNEL_MEM_VDI_PLANE3_COMB 26
#define IPUV3_CHANNEL_MEM_FG_SYNC 27
#define IPUV3_CHANNEL_MEM_DC_SYNC 28
#define IPUV3_CHANNEL_MEM_FG_ASYNC 29
#define IPUV3_CHANNEL_MEM_FG_SYNC_ALPHA 31
+#define IPUV3_CHANNEL_MEM_FG_ASYNC_ALPHA 33
+#define IPUV3_CHANNEL_DC_MEM_READ 40
#define IPUV3_CHANNEL_MEM_DC_ASYNC 41
+#define IPUV3_CHANNEL_MEM_DC_COMMAND 42
+#define IPUV3_CHANNEL_MEM_DC_COMMAND2 43
+#define IPUV3_CHANNEL_MEM_DC_OUTPUT_MASK 44
#define IPUV3_CHANNEL_MEM_ROT_ENC 45
#define IPUV3_CHANNEL_MEM_ROT_VF 46
#define IPUV3_CHANNEL_MEM_ROT_PP 47
@@ -118,6 +158,8 @@ enum ipu_channel_irq {
#define IPUV3_CHANNEL_ROT_VF_MEM 49
#define IPUV3_CHANNEL_ROT_PP_MEM 50
#define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA 51
+#define IPUV3_CHANNEL_MEM_BG_ASYNC_ALPHA 52
+#define IPUV3_NUM_CHANNELS 64
int ipu_map_irq(struct ipu_soc *ipu, int irq);
int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
@@ -138,6 +180,7 @@ int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
/*
* IPU Common functions
*/
+int ipu_get_num(struct ipu_soc *ipu);
void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2);
void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi);
void ipu_dump(struct ipu_soc *ipu);
@@ -160,6 +203,10 @@ int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel);
bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num);
void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num);
void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num);
+int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch);
+int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch);
+int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink);
+int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink);
/*
* IPU Channel Parameter Memory (cpmem) functions
@@ -184,8 +231,10 @@ void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres);
void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride);
void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch);
void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf);
+void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off);
void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride);
void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id);
+int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch);
void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize);
void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch);
void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
@@ -317,6 +366,19 @@ void ipu_ic_put(struct ipu_ic *ic);
void ipu_ic_dump(struct ipu_ic *ic);
/*
+ * IPU Video De-Interlacer (vdi) functions
+ */
+struct ipu_vdi;
+void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field);
+void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel);
+void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres);
+void ipu_vdi_unsetup(struct ipu_vdi *vdi);
+int ipu_vdi_enable(struct ipu_vdi *vdi);
+int ipu_vdi_disable(struct ipu_vdi *vdi);
+struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu);
+void ipu_vdi_put(struct ipu_vdi *vdi);
+
+/*
* IPU Sensor Multiple FIFO Controller (SMFC) functions
*/
struct ipu_smfc *ipu_smfc_get(struct ipu_soc *ipu, unsigned int chno);
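Because the rotate modes are now bit-encoded (VFLIP, HFLIP, 90), a caller can compose a mode from independent rotation and flip controls instead of using a lookup table. A hedged sketch of such a helper (not part of the header; the function name is made up):

#include <linux/errno.h>
#include <linux/types.h>
#include <video/imx-ipu-v3.h>

/* Compose an ipu_rotate_mode from a rotation in degrees plus flips.
 * 90-degree modes need the IRT, which ipu_rot_mode_is_irt() reports.
 */
static int degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
                               bool hflip, bool vflip)
{
        u32 r90, hf, vf;

        switch (degrees) {
        case 0:   vf = hf = r90 = 0; break;
        case 90:  vf = hf = 0; r90 = 1; break;
        case 180: vf = hf = 1; r90 = 0; break;
        case 270: vf = hf = r90 = 1; break;
        default:
                return -EINVAL;
        }

        /* extra flips simply toggle the flip bits implied by the angle */
        hf ^= hflip;
        vf ^= vflip;

        *mode = (enum ipu_rotate_mode)(r90 * IPU_ROT_BIT_90 |
                                       hf * IPU_ROT_BIT_HFLIP |
                                       vf * IPU_ROT_BIT_VFLIP);
        return 0;
}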
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index f18490985fc8..a4c4d735d781 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -3,6 +3,24 @@
*
* Scheduler state interactions
*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
* Copyright (c) 2005, Keir Fraser <keir@xensource.com>
*/
@@ -12,18 +30,30 @@
#include <xen/interface/event_channel.h>
/*
+ * Guest Scheduler Operations
+ *
+ * The SCHEDOP interface provides mechanisms for a guest to interact
+ * with the scheduler, including yield, blocking and shutting itself
+ * down.
+ */
+
+/*
* The prototype for this hypercall is:
- * long sched_op_new(int cmd, void *arg)
+ * long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...)
+ *
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == Operation-specific extra argument(s), as described below.
+ * ... == Additional Operation-specific extra arguments, described below.
*
- * **NOTE**:
- * Versions of Xen prior to 3.0.2 provide only the following legacy version
+ * Versions of Xen prior to 3.0.2 provided only the following legacy version
* of this hypercall, supporting only the commands yield, block and shutdown:
* long sched_op(int cmd, unsigned long arg)
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
* == SHUTDOWN_* code (SCHEDOP_shutdown)
+ *
+ * This legacy version is available to new guests as:
+ * long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg)
*/
/*
@@ -44,12 +74,17 @@
/*
* Halt execution of this domain (all VCPUs) and notify the system controller.
* @arg == pointer to sched_shutdown structure.
+ *
+ * If the sched_shutdown_t reason is SHUTDOWN_suspend then
+ * x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN
+ * of the guest's start info page. RDX/EDX is the third hypercall
+ * argument.
+ *
+ * In addition, when the reason is SHUTDOWN_suspend, this hypercall
+ * returns 1 if suspend was cancelled or the domain was merely
+ * checkpointed, and 0 if it is resuming in a new domain.
*/
#define SCHEDOP_shutdown 2
-struct sched_shutdown {
- unsigned int reason; /* SHUTDOWN_* */
-};
-DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
/*
* Poll a set of event-channel ports. Return when one or more are pending. An
@@ -57,12 +92,6 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
* @arg == pointer to sched_poll structure.
*/
#define SCHEDOP_poll 3
-struct sched_poll {
- GUEST_HANDLE(evtchn_port_t) ports;
- unsigned int nr_ports;
- uint64_t timeout;
-};
-DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
/*
* Declare a shutdown for another domain. The main use of this function is
@@ -71,15 +100,11 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
* @arg == pointer to sched_remote_shutdown structure.
*/
#define SCHEDOP_remote_shutdown 4
-struct sched_remote_shutdown {
- domid_t domain_id; /* Remote domain ID */
- unsigned int reason; /* SHUTDOWN_xxx reason */
-};
/*
* Latch a shutdown code, so that when the domain later shuts down it
* reports this code to the control tools.
- * @arg == as for SCHEDOP_shutdown.
+ * @arg == sched_shutdown, as for SCHEDOP_shutdown.
*/
#define SCHEDOP_shutdown_code 5
@@ -92,10 +117,47 @@ struct sched_remote_shutdown {
* With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
*/
#define SCHEDOP_watchdog 6
+
+/*
+ * Override the current vcpu affinity by pinning it to one physical cpu or
+ * undo this override restoring the previous affinity.
+ * @arg == pointer to sched_pin_override structure.
+ *
+ * A negative pcpu value will undo a previous pin override and restore the
+ * previous cpu affinity.
+ * This call is allowed for the hardware domain only and requires the cpu
+ * to be part of the domain's cpupool.
+ */
+#define SCHEDOP_pin_override 7
+
+struct sched_shutdown {
+ unsigned int reason; /* SHUTDOWN_* => shutdown reason */
+};
+DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
+
+struct sched_poll {
+ GUEST_HANDLE(evtchn_port_t) ports;
+ unsigned int nr_ports;
+ uint64_t timeout;
+};
+DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
+
+struct sched_remote_shutdown {
+ domid_t domain_id; /* Remote domain ID */
+ unsigned int reason; /* SHUTDOWN_* => shutdown reason */
+};
+DEFINE_GUEST_HANDLE_STRUCT(sched_remote_shutdown);
+
struct sched_watchdog {
uint32_t id; /* watchdog ID */
uint32_t timeout; /* timeout */
};
+DEFINE_GUEST_HANDLE_STRUCT(sched_watchdog);
+
+struct sched_pin_override {
+ int32_t pcpu;
+};
+DEFINE_GUEST_HANDLE_STRUCT(sched_pin_override);
/*
* Reason codes for SCHEDOP_shutdown. These may be interpreted by control
@@ -107,6 +169,7 @@ struct sched_watchdog {
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
+
/*
* Domain asked to perform 'soft reset' for it. The expected behavior is to
* reset internal Xen state for the domain returning it to the point where it
@@ -115,5 +178,6 @@ struct sched_watchdog {
* interfaces again.
*/
#define SHUTDOWN_soft_reset 5
+#define SHUTDOWN_MAX 5 /* Maximum valid shutdown reason. */
#endif /* __XEN_PUBLIC_SCHED_H__ */
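For reference, a hedged sketch of how a Linux x86 guest uses the (unchanged) SCHEDOP_poll command together with the guest-handle structs now grouped at the bottom of the header; the wrappers (HYPERVISOR_sched_op, set_xen_guest_handle) are the ones Linux provides on x86, and the helper name is made up:

#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>
#include <asm/xen/hypercall.h>

/* Block until the given event channel fires or the timeout
 * (absolute Xen system time in ns; 0 means no timeout) expires.
 */
static int poll_one_port(evtchn_port_t port, uint64_t timeout)
{
        struct sched_poll poll = {
                .nr_ports = 1,
                .timeout = timeout,
        };

        set_xen_guest_handle(poll.ports, &port);

        return HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}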
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 0c0e3ef4c45d..f0f0252cff9a 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -38,8 +38,7 @@ extern enum xen_domain_type xen_domain_type;
*/
#include <xen/features.h>
#define xen_pvh_domain() (xen_pv_domain() && \
- xen_feature(XENFEAT_auto_translated_physmap) && \
- xen_have_vector_callback)
+ xen_feature(XENFEAT_auto_translated_physmap))
#else
#define xen_pvh_domain() (0)
#endif