Diffstat (limited to 'drivers'): 102 files changed, 2236 insertions, 603 deletions
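A recurring change across the block and FSI hunks below (drbd, loop, nbd, fsi-sbefifo) is dropping the ITER_KVEC/ITER_BVEC type flags from iov_iter_kvec()/iov_iter_bvec(): after the iov_iter rework that separated iterator type from data direction, the type is implied by the constructor itself and callers pass only the direction (READ or WRITE). Below is a minimal sketch of the new calling convention, modeled on drbd_recv_short() as it appears in this series; the helper name and surrounding context are hypothetical and not part of any patch here.

	#include <linux/net.h>
	#include <linux/socket.h>
	#include <linux/uio.h>

	/* Hypothetical helper showing the post-rework convention: the
	 * iterator type (ITER_KVEC) is implied by iov_iter_kvec() itself,
	 * so the second argument is just the data direction. */
	static int recv_into_kvec(struct socket *sock, void *buf, size_t size)
	{
		struct kvec iov = { .iov_base = buf, .iov_len = size };
		struct msghdr msg = { .msg_flags = MSG_WAITALL | MSG_NOSIGNAL };

		iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);

		return sock_recvmsg(sock, &msg, msg.msg_flags);
	}

The bvec variants change the same way in the hunks below: iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len) rather than passing ITER_BVEC | WRITE.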
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index a7c2673ffd36..824ae985ad93 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -126,6 +126,7 @@ int acpi_device_get_power(struct acpi_device *device, int *state) return 0; } +EXPORT_SYMBOL(acpi_device_get_power); static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state) { diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index 3b25a643058c..21b9b2f2470a 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -155,10 +155,9 @@ struct logical_input { int release_data; } std; struct { /* valid when type == INPUT_TYPE_KBD */ - /* strings can be non null-terminated */ - char press_str[sizeof(void *) + sizeof(int)]; - char repeat_str[sizeof(void *) + sizeof(int)]; - char release_str[sizeof(void *) + sizeof(int)]; + char press_str[sizeof(void *) + sizeof(int)] __nonstring; + char repeat_str[sizeof(void *) + sizeof(int)] __nonstring; + char release_str[sizeof(void *) + sizeof(int)] __nonstring; } kbd; } u; }; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index df8103dd40ac..c18586fccb6f 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -396,15 +396,14 @@ static struct brd_device *brd_alloc(int i) disk->first_minor = i * max_part; disk->fops = &brd_fops; disk->private_data = brd; - disk->queue = brd->brd_queue; disk->flags = GENHD_FL_EXT_DEVT; sprintf(disk->disk_name, "ram%d", i); set_capacity(disk, rd_size * 2); - disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; + brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO; /* Tell the block layer that this is not a rotational device */ - blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); - blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue); return brd; @@ -436,6 +435,7 @@ static struct brd_device *brd_init_one(int i, bool *new) brd = brd_alloc(i); if (brd) { + brd->brd_disk->queue = brd->brd_queue; add_disk(brd->brd_disk); list_add_tail(&brd->brd_list, &brd_devices); } @@ -503,8 +503,14 @@ static int __init brd_init(void) /* point of no return */ - list_for_each_entry(brd, &brd_devices, brd_list) + list_for_each_entry(brd, &brd_devices, brd_list) { + /* + * associate with queue just before adding disk for + * avoiding to mess up failure path + */ + brd->brd_disk->queue = brd->brd_queue; add_disk(brd->brd_disk); + } blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS, THIS_MODULE, brd_probe, NULL, NULL); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 55fd104f1ed4..fa8204214ac0 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1856,7 +1856,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock, /* THINK if (signal_pending) return ... ? */ - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size); + iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size); if (sock == connection->data.socket) { rcu_read_lock(); diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index fc67fd853375..61c392752fe4 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -516,7 +516,7 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag struct msghdr msg = { .msg_flags = (flags ? 
flags : MSG_WAITALL | MSG_NOSIGNAL) }; - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size); + iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size); return sock_recvmsg(sock, &msg, msg.msg_flags); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index abad6d15f956..cb0cc8685076 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -77,7 +77,6 @@ #include <linux/falloc.h> #include <linux/uio.h> #include <linux/ioprio.h> -#include <linux/blk-cgroup.h> #include "loop.h" @@ -269,7 +268,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) struct iov_iter i; ssize_t bw; - iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); + iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len); file_start_write(file); bw = vfs_iter_write(file, &i, ppos, 0); @@ -347,7 +346,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq, ssize_t len; rq_for_each_segment(bvec, rq, iter) { - iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len); + iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len); len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); if (len < 0) return len; @@ -388,7 +387,7 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq, b.bv_offset = 0; b.bv_len = bvec.bv_len; - iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len); + iov_iter_bvec(&i, READ, &b, 1, b.bv_len); len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); if (len < 0) { ret = len; @@ -555,8 +554,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, } atomic_set(&cmd->ref, 2); - iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, - segments, blk_rq_bytes(rq)); + iov_iter_bvec(&iter, rw, bvec, segments, blk_rq_bytes(rq)); iter.iov_offset = offset; cmd->iocb.ki_pos = pos; @@ -1761,8 +1759,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, /* always use the first bio's css */ #ifdef CONFIG_BLK_CGROUP - if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) { - cmd->css = &bio_blkcg(rq->bio)->css; + if (cmd->use_aio && rq->bio && rq->bio->bi_css) { + cmd->css = rq->bio->bi_css; css_get(cmd->css); } else #endif diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index dfc8de6ce525..a7daa8acbab3 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -1942,8 +1942,8 @@ static int exec_drive_taskfile(struct driver_data *dd, dev_warn(&dd->pdev->dev, "data movement but " "sect_count is 0\n"); - err = -EINVAL; - goto abort; + err = -EINVAL; + goto abort; } } } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 14a51254c3db..4d4d6129ff66 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -473,7 +473,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) u32 nbd_cmd_flags = 0; int sent = nsock->sent, skip = 0; - iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request)); switch (req_op(req)) { case REQ_OP_DISCARD: @@ -564,8 +564,7 @@ send_pages: dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", req, bvec.bv_len); - iov_iter_bvec(&from, ITER_BVEC | WRITE, - &bvec, 1, bvec.bv_len); + iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len); if (skip) { if (skip >= iov_iter_count(&from)) { skip -= iov_iter_count(&from); @@ -624,7 +623,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) int ret = 0; reply.magic = 0; - iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); + iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply)); result = 
sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); if (result <= 0) { if (!nbd_disconnected(config)) @@ -678,8 +677,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) struct bio_vec bvec; rq_for_each_segment(bvec, req, iter) { - iov_iter_bvec(&to, ITER_BVEC | READ, - &bvec, 1, bvec.bv_len); + iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len); result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", @@ -1073,7 +1071,7 @@ static void send_disconnects(struct nbd_device *nbd) for (i = 0; i < config->num_connections; i++) { struct nbd_sock *nsock = config->socks[i]; - iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); + iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request)); mutex_lock(&nsock->tx_lock); ret = sock_xmit(nbd, i, 1, &from, 0, NULL); if (ret <= 0) diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index a11f4ba98b05..55c77e44bb2d 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -620,4 +620,22 @@ config RISCV_TIMER is accessed via both the SBI and the rdcycle instruction. This is required for all RISC-V systems. +config CSKY_MP_TIMER + bool "SMP Timer for the C-SKY platform" if COMPILE_TEST + depends on CSKY + select TIMER_OF + help + Say yes here to enable C-SKY SMP timer driver used for C-SKY SMP + system. + csky,mptimer is not only used in SMP system, it also could be used + single core system. It's not a mmio reg and it use mtcr/mfcr instruction. + +config GX6605S_TIMER + bool "Gx6605s SOC system timer driver" if COMPILE_TEST + depends on CSKY + select CLKSRC_MMIO + select TIMER_OF + help + This option enables support for gx6605s SOC's timer. + endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index e33b21d3f9d8..dd9138104568 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -79,3 +79,5 @@ obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o obj-$(CONFIG_X86_NUMACHIP) += numachip.o obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o obj-$(CONFIG_RISCV_TIMER) += riscv_timer.o +obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o +obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o diff --git a/drivers/clocksource/timer-gx6605s.c b/drivers/clocksource/timer-gx6605s.c new file mode 100644 index 000000000000..80d0939d040b --- /dev/null +++ b/drivers/clocksource/timer-gx6605s.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
+ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/sched_clock.h> + +#include "timer-of.h" + +#define CLKSRC_OFFSET 0x40 + +#define TIMER_STATUS 0x00 +#define TIMER_VALUE 0x04 +#define TIMER_CONTRL 0x10 +#define TIMER_CONFIG 0x20 +#define TIMER_DIV 0x24 +#define TIMER_INI 0x28 + +#define GX6605S_STATUS_CLR BIT(0) +#define GX6605S_CONTRL_RST BIT(0) +#define GX6605S_CONTRL_START BIT(1) +#define GX6605S_CONFIG_EN BIT(0) +#define GX6605S_CONFIG_IRQ_EN BIT(1) + +static irqreturn_t gx6605s_timer_interrupt(int irq, void *dev) +{ + struct clock_event_device *ce = dev; + void __iomem *base = timer_of_base(to_timer_of(ce)); + + writel_relaxed(GX6605S_STATUS_CLR, base + TIMER_STATUS); + + ce->event_handler(ce); + + return IRQ_HANDLED; +} + +static int gx6605s_timer_set_oneshot(struct clock_event_device *ce) +{ + void __iomem *base = timer_of_base(to_timer_of(ce)); + + /* reset and stop counter */ + writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL); + + /* enable with irq and start */ + writel_relaxed(GX6605S_CONFIG_EN | GX6605S_CONFIG_IRQ_EN, + base + TIMER_CONFIG); + + return 0; +} + +static int gx6605s_timer_set_next_event(unsigned long delta, + struct clock_event_device *ce) +{ + void __iomem *base = timer_of_base(to_timer_of(ce)); + + /* use reset to pause timer */ + writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL); + + /* config next timeout value */ + writel_relaxed(ULONG_MAX - delta, base + TIMER_INI); + writel_relaxed(GX6605S_CONTRL_START, base + TIMER_CONTRL); + + return 0; +} + +static int gx6605s_timer_shutdown(struct clock_event_device *ce) +{ + void __iomem *base = timer_of_base(to_timer_of(ce)); + + writel_relaxed(0, base + TIMER_CONTRL); + writel_relaxed(0, base + TIMER_CONFIG); + + return 0; +} + +static struct timer_of to = { + .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, + .clkevt = { + .rating = 300, + .features = CLOCK_EVT_FEAT_DYNIRQ | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = gx6605s_timer_shutdown, + .set_state_oneshot = gx6605s_timer_set_oneshot, + .set_next_event = gx6605s_timer_set_next_event, + .cpumask = cpu_possible_mask, + }, + .of_irq = { + .handler = gx6605s_timer_interrupt, + .flags = IRQF_TIMER | IRQF_IRQPOLL, + }, +}; + +static u64 notrace gx6605s_sched_clock_read(void) +{ + void __iomem *base; + + base = timer_of_base(&to) + CLKSRC_OFFSET; + + return (u64)readl_relaxed(base + TIMER_VALUE); +} + +static void gx6605s_clkevt_init(void __iomem *base) +{ + writel_relaxed(0, base + TIMER_DIV); + writel_relaxed(0, base + TIMER_CONFIG); + + clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 2, + ULONG_MAX); +} + +static int gx6605s_clksrc_init(void __iomem *base) +{ + writel_relaxed(0, base + TIMER_DIV); + writel_relaxed(0, base + TIMER_INI); + + writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL); + + writel_relaxed(GX6605S_CONFIG_EN, base + TIMER_CONFIG); + + writel_relaxed(GX6605S_CONTRL_START, base + TIMER_CONTRL); + + sched_clock_register(gx6605s_sched_clock_read, 32, timer_of_rate(&to)); + + return clocksource_mmio_init(base + TIMER_VALUE, "gx6605s", + timer_of_rate(&to), 200, 32, clocksource_mmio_readl_up); +} + +static int __init gx6605s_timer_init(struct device_node *np) +{ + int ret; + + /* + * The timer driver is for nationalchip gx6605s SOC and there are two + * same timer in gx6605s. We use one for clkevt and another for clksrc. + * + * The timer is mmio map to access, so we need give mmio address in dts. 
+ * + * It provides a 32bit countup timer and interrupt will be caused by + * count-overflow. + * So we need set-next-event by ULONG_MAX - delta in TIMER_INI reg. + * + * The counter at 0x0 offset is clock event. + * The counter at 0x40 offset is clock source. + * They are the same in hardware, just different used by driver. + */ + ret = timer_of_init(np, &to); + if (ret) + return ret; + + gx6605s_clkevt_init(timer_of_base(&to)); + + return gx6605s_clksrc_init(timer_of_base(&to) + CLKSRC_OFFSET); +} +TIMER_OF_DECLARE(csky_gx6605s_timer, "csky,gx6605s-timer", gx6605s_timer_init); diff --git a/drivers/clocksource/timer-mp-csky.c b/drivers/clocksource/timer-mp-csky.c new file mode 100644 index 000000000000..a8acc431a774 --- /dev/null +++ b/drivers/clocksource/timer-mp-csky.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/sched_clock.h> +#include <linux/cpu.h> +#include <linux/of_irq.h> +#include <asm/reg_ops.h> + +#include "timer-of.h" + +#define PTIM_CCVR "cr<3, 14>" +#define PTIM_CTLR "cr<0, 14>" +#define PTIM_LVR "cr<6, 14>" +#define PTIM_TSR "cr<1, 14>" + +static int csky_mptimer_irq; + +static int csky_mptimer_set_next_event(unsigned long delta, + struct clock_event_device *ce) +{ + mtcr(PTIM_LVR, delta); + + return 0; +} + +static int csky_mptimer_shutdown(struct clock_event_device *ce) +{ + mtcr(PTIM_CTLR, 0); + + return 0; +} + +static int csky_mptimer_oneshot(struct clock_event_device *ce) +{ + mtcr(PTIM_CTLR, 1); + + return 0; +} + +static int csky_mptimer_oneshot_stopped(struct clock_event_device *ce) +{ + mtcr(PTIM_CTLR, 0); + + return 0; +} + +static DEFINE_PER_CPU(struct timer_of, csky_to) = { + .flags = TIMER_OF_CLOCK, + .clkevt = { + .rating = 300, + .features = CLOCK_EVT_FEAT_PERCPU | + CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = csky_mptimer_shutdown, + .set_state_oneshot = csky_mptimer_oneshot, + .set_state_oneshot_stopped = csky_mptimer_oneshot_stopped, + .set_next_event = csky_mptimer_set_next_event, + }, +}; + +static irqreturn_t csky_timer_interrupt(int irq, void *dev) +{ + struct timer_of *to = this_cpu_ptr(&csky_to); + + mtcr(PTIM_TSR, 0); + + to->clkevt.event_handler(&to->clkevt); + + return IRQ_HANDLED; +} + +/* + * clock event for percpu + */ +static int csky_mptimer_starting_cpu(unsigned int cpu) +{ + struct timer_of *to = per_cpu_ptr(&csky_to, cpu); + + to->clkevt.cpumask = cpumask_of(cpu); + + clockevents_config_and_register(&to->clkevt, timer_of_rate(to), + 2, ULONG_MAX); + + enable_percpu_irq(csky_mptimer_irq, 0); + + return 0; +} + +static int csky_mptimer_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(csky_mptimer_irq); + + return 0; +} + +/* + * clock source + */ +static u64 sched_clock_read(void) +{ + return (u64)mfcr(PTIM_CCVR); +} + +static u64 clksrc_read(struct clocksource *c) +{ + return (u64)mfcr(PTIM_CCVR); +} + +struct clocksource csky_clocksource = { + .name = "csky", + .rating = 400, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .read = clksrc_read, +}; + +static int __init csky_mptimer_init(struct device_node *np) +{ + int ret, cpu, cpu_rollback; + struct timer_of *to = NULL; + + /* + * Csky_mptimer is designed for C-SKY SMP multi-processors and + * every core has it's own private irq and regs for clkevt and + * clksrc. + * + * The regs is accessed by cpu instruction: mfcr/mtcr instead of + * mmio map style. 
So we needn't mmio-address in dts, but we still + * need to give clk and irq number. + * + * We use private irq for the mptimer and irq number is the same + * for every core. So we use request_percpu_irq() in timer_of_init. + */ + csky_mptimer_irq = irq_of_parse_and_map(np, 0); + if (csky_mptimer_irq <= 0) + return -EINVAL; + + ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt, + "csky_mp_timer", &csky_to); + if (ret) + return -EINVAL; + + for_each_possible_cpu(cpu) { + to = per_cpu_ptr(&csky_to, cpu); + ret = timer_of_init(np, to); + if (ret) + goto rollback; + } + + clocksource_register_hz(&csky_clocksource, timer_of_rate(to)); + sched_clock_register(sched_clock_read, 32, timer_of_rate(to)); + + ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING, + "clockevents/csky/timer:starting", + csky_mptimer_starting_cpu, + csky_mptimer_dying_cpu); + if (ret) + return -EINVAL; + + return 0; + +rollback: + for_each_possible_cpu(cpu_rollback) { + if (cpu_rollback == cpu) + break; + + to = per_cpu_ptr(&csky_to, cpu_rollback); + timer_of_cleanup(to); + } + return -EINVAL; +} +TIMER_OF_DECLARE(csky_mptimer, "csky,mptimer", csky_mptimer_init); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index df9467eef32a..41c9ccdd20d6 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -234,6 +234,7 @@ config EDAC_SKX depends on PCI && X86_64 && X86_MCE_INTEL && PCI_MMCONFIG depends on ACPI_NFIT || !ACPI_NFIT # if ACPI_NFIT=m, EDAC_SKX can't be y select DMI + select ACPI_ADXL if ACPI help Support for error detection and correction the Intel Skylake server Integrated Memory Controllers. If your diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c index dd209e0dd9ab..a99ea61dad32 100644 --- a/drivers/edac/skx_edac.c +++ b/drivers/edac/skx_edac.c @@ -26,6 +26,7 @@ #include <linux/bitmap.h> #include <linux/math64.h> #include <linux/mod_devicetable.h> +#include <linux/adxl.h> #include <acpi/nfit.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> @@ -35,6 +36,7 @@ #include "edac_module.h" #define EDAC_MOD_STR "skx_edac" +#define MSG_SIZE 1024 /* * Debug macros @@ -54,6 +56,29 @@ static LIST_HEAD(skx_edac_list); static u64 skx_tolm, skx_tohm; +static char *skx_msg; +static unsigned int nvdimm_count; + +enum { + INDEX_SOCKET, + INDEX_MEMCTRL, + INDEX_CHANNEL, + INDEX_DIMM, + INDEX_MAX +}; + +static const char * const component_names[] = { + [INDEX_SOCKET] = "ProcessorSocketId", + [INDEX_MEMCTRL] = "MemoryControllerId", + [INDEX_CHANNEL] = "ChannelId", + [INDEX_DIMM] = "DimmSlotId", +}; + +static int component_indices[ARRAY_SIZE(component_names)]; +static int adxl_component_count; +static const char * const *adxl_component_names; +static u64 *adxl_values; +static char *adxl_msg; #define NUM_IMC 2 /* memory controllers per socket */ #define NUM_CHANNELS 3 /* channels per memory controller */ @@ -393,6 +418,8 @@ static int get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, u16 flags; u64 size = 0; + nvdimm_count++; + dev_handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc, imc->src_id, 0); @@ -941,12 +968,46 @@ static void teardown_skx_debug(void) } #endif /*CONFIG_EDAC_DEBUG*/ +static bool skx_adxl_decode(struct decoded_addr *res) + +{ + int i, len = 0; + + if (res->addr >= skx_tohm || (res->addr >= skx_tolm && + res->addr < BIT_ULL(32))) { + edac_dbg(0, "Address 0x%llx out of range\n", res->addr); + return false; + } + + if (adxl_decode(res->addr, adxl_values)) { + edac_dbg(0, "Failed to decode 0x%llx\n", res->addr); + return false; + } + + 
res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]]; + res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]]; + res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]]; + res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]]; + + for (i = 0; i < adxl_component_count; i++) { + if (adxl_values[i] == ~0x0ull) + continue; + + len += snprintf(adxl_msg + len, MSG_SIZE - len, " %s:0x%llx", + adxl_component_names[i], adxl_values[i]); + if (MSG_SIZE - len <= 0) + break; + } + + return true; +} + static void skx_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, struct decoded_addr *res) { enum hw_event_mc_err_type tp_event; - char *type, *optype, msg[256]; + char *type, *optype; bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); bool overflow = GET_BITFIELD(m->status, 62, 62); bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); @@ -1007,22 +1068,47 @@ static void skx_mce_output_error(struct mem_ctl_info *mci, break; } } + if (adxl_component_count) { + snprintf(skx_msg, MSG_SIZE, "%s%s err_code:%04x:%04x %s", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) ? " recoverable" : "", + mscod, errcode, adxl_msg); + } else { + snprintf(skx_msg, MSG_SIZE, + "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) ? " recoverable" : "", + mscod, errcode, + res->socket, res->imc, res->rank, + res->bank_group, res->bank_address, res->row, res->column); + } - snprintf(msg, sizeof(msg), - "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x", - overflow ? " OVERFLOW" : "", - (uncorrected_error && recoverable) ? " recoverable" : "", - mscod, errcode, - res->socket, res->imc, res->rank, - res->bank_group, res->bank_address, res->row, res->column); - - edac_dbg(0, "%s\n", msg); + edac_dbg(0, "%s\n", skx_msg); /* Call the helper to output message */ edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, res->channel, res->dimm, -1, - optype, msg); + optype, skx_msg); +} + +static struct mem_ctl_info *get_mci(int src_id, int lmc) +{ + struct skx_dev *d; + + if (lmc > NUM_IMC - 1) { + skx_printk(KERN_ERR, "Bad lmc %d\n", lmc); + return NULL; + } + + list_for_each_entry(d, &skx_edac_list, list) { + if (d->imc[0].src_id == src_id) + return d->imc[lmc].mci; + } + + skx_printk(KERN_ERR, "No mci for src_id %d lmc %d\n", src_id, lmc); + + return NULL; } static int skx_mce_check_error(struct notifier_block *nb, unsigned long val, @@ -1040,10 +1126,23 @@ static int skx_mce_check_error(struct notifier_block *nb, unsigned long val, if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV)) return NOTIFY_DONE; + memset(&res, 0, sizeof(res)); res.addr = mce->addr; - if (!skx_decode(&res)) + + if (adxl_component_count) { + if (!skx_adxl_decode(&res)) + return NOTIFY_DONE; + + mci = get_mci(res.socket, res.imc); + } else { + if (!skx_decode(&res)) + return NOTIFY_DONE; + + mci = res.dev->imc[res.imc].mci; + } + + if (!mci) return NOTIFY_DONE; - mci = res.dev->imc[res.imc].mci; if (mce->mcgstatus & MCG_STATUS_MCIP) type = "Exception"; @@ -1094,6 +1193,62 @@ static void skx_remove(void) } } +static void __init skx_adxl_get(void) +{ + const char * const *names; + int i, j; + + names = adxl_get_component_names(); + if (!names) { + skx_printk(KERN_NOTICE, "No firmware support for address translation."); + skx_printk(KERN_CONT, " Only decoding DDR4 address!\n"); + return; + } + + for (i = 0; i < 
INDEX_MAX; i++) { + for (j = 0; names[j]; j++) { + if (!strcmp(component_names[i], names[j])) { + component_indices[i] = j; + break; + } + } + + if (!names[j]) + goto err; + } + + adxl_component_names = names; + while (*names++) + adxl_component_count++; + + adxl_values = kcalloc(adxl_component_count, sizeof(*adxl_values), + GFP_KERNEL); + if (!adxl_values) { + adxl_component_count = 0; + return; + } + + adxl_msg = kzalloc(MSG_SIZE, GFP_KERNEL); + if (!adxl_msg) { + adxl_component_count = 0; + kfree(adxl_values); + } + + return; +err: + skx_printk(KERN_ERR, "'%s' is not matched from DSM parameters: ", + component_names[i]); + for (j = 0; names[j]; j++) + skx_printk(KERN_CONT, "%s ", names[j]); + skx_printk(KERN_CONT, "\n"); +} + +static void __exit skx_adxl_put(void) +{ + kfree(adxl_values); + kfree(adxl_msg); +} + /* * skx_init: * make sure we are running on the correct cpu model @@ -1158,6 +1313,15 @@ static int __init skx_init(void) } } + skx_msg = kzalloc(MSG_SIZE, GFP_KERNEL); + if (!skx_msg) { + rc = -ENOMEM; + goto fail; + } + + if (nvdimm_count) + skx_adxl_get(); + /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1176,6 +1340,9 @@ static void __exit skx_exit(void) edac_dbg(2, "\n"); mce_unregister_decode_chain(&skx_mce_dec); skx_remove(); + if (nvdimm_count) + skx_adxl_put(); + kfree(skx_msg); teardown_skx_debug(); } diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 3e626fd9bd4e..8061667a6765 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -229,14 +229,6 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor, return 0; } -static inline bool is_compat(void) -{ - if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall()) - return true; - - return false; -} - static void copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src) { @@ -263,7 +255,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) u8 *data; int err; - if (is_compat()) { + if (in_compat_syscall()) { struct compat_efi_variable *compat; if (count != sizeof(*compat)) @@ -324,7 +316,7 @@ efivar_show_raw(struct efivar_entry *entry, char *buf) &entry->var.DataSize, entry->var.Data)) return -EIO; - if (is_compat()) { + if (in_compat_syscall()) { compat = (struct compat_efi_variable *)buf; size = sizeof(*compat); @@ -418,7 +410,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, struct compat_efi_variable *compat = (struct compat_efi_variable *)buf; struct efi_variable *new_var = (struct efi_variable *)buf; struct efivar_entry *new_entry; - bool need_compat = is_compat(); + bool need_compat = in_compat_syscall(); efi_char16_t *name; unsigned long size; u32 attributes; @@ -495,7 +487,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (is_compat()) { + if (in_compat_syscall()) { if (count != sizeof(*compat)) return -EINVAL; diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c index ae861342626e..d92f5b87c251 100644 --- a/drivers/fsi/fsi-sbefifo.c +++ b/drivers/fsi/fsi-sbefifo.c @@ -638,7 +638,7 @@ static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo) } ffdc_iov.iov_base = ffdc; ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE; - iov_iter_kvec(&ffdc_iter, WRITE | ITER_KVEC, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); + iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); cmd[0] = cpu_to_be32(2); cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC); 
rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter); @@ -735,7 +735,7 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len, rbytes = (*resp_len) * sizeof(__be32); resp_iov.iov_base = response; resp_iov.iov_len = rbytes; - iov_iter_kvec(&resp_iter, WRITE | ITER_KVEC, &resp_iov, 1, rbytes); + iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes); /* Perform the command */ mutex_lock(&sbefifo->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 297a5490ad8c..0a4fba196b84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -135,7 +135,8 @@ static int acp_poweroff(struct generic_pm_domain *genpd) * 2. power off the acp tiles * 3. check and enter ulv state */ - if (adev->powerplay.pp_funcs->set_powergating_by_smu) + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true); } return 0; @@ -517,7 +518,8 @@ static int acp_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = state == AMD_PG_STATE_GATE ? true : false; - if (adev->powerplay.pp_funcs->set_powergating_by_smu) + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1e4dd09a5072..30bc345d6fdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1493,8 +1493,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } adev->powerplay.pp_feature = amdgpu_pp_feature_mask; - if (amdgpu_sriov_vf(adev)) - adev->powerplay.pp_feature &= ~PP_GFXOFF_MASK; for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { @@ -1600,7 +1598,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) } } - if (adev->powerplay.pp_funcs->load_firmware) { + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); if (r) { pr_err("firmware loading failed\n"); @@ -3341,7 +3339,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, kthread_park(ring->sched.thread); - if (job && job->base.sched == &ring->sched) + if (job && job->base.sched != &ring->sched) continue; drm_sched_hw_job_reset(&ring->sched, job ? 
&job->base : NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 28781414d71c..943dbf3c5da1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -114,8 +114,8 @@ uint amdgpu_pg_mask = 0xffffffff; uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; -/* OverDrive(bit 14) disabled by default*/ -uint amdgpu_pp_feature_mask = 0xffffbfff; +/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/ +uint amdgpu_pp_feature_mask = 0xfffd3fff; int amdgpu_ngg = 0; int amdgpu_prim_buf_per_se = 0; int amdgpu_pos_buf_per_se = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 790fd5408ddf..1a656b8657f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -392,7 +392,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK)) return; - if (!adev->powerplay.pp_funcs->set_powergating_by_smu) + if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->set_powergating_by_smu) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 94055a485e01..59cc678de8c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -704,7 +704,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, return ret; if (adev->powerplay.pp_funcs->force_clock_level) - amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); + ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); + + if (ret) + return -EINVAL; return count; } @@ -737,7 +740,10 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, return ret; if (adev->powerplay.pp_funcs->force_clock_level) - amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); + ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); + + if (ret) + return -EINVAL; return count; } @@ -770,7 +776,10 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, return ret; if (adev->powerplay.pp_funcs->force_clock_level) - amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); + ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); + + if (ret) + return -EINVAL; return count; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6904d794d60a..352b30409060 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -542,7 +542,8 @@ static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev, struct amdgpu_vm_pt_cursor *cursor) { amdgpu_vm_pt_next(adev, cursor); - while (amdgpu_vm_pt_descendant(adev, cursor)); + if (cursor->pfn != ~0ll) + while (amdgpu_vm_pt_descendant(adev, cursor)); } /** @@ -3234,8 +3235,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va.rb_root, rb) { + /* Don't remove the mapping here, we don't want to trigger a + * rebalance and the tree is about to be destroyed anyway. 
+ */ list_del(&mapping->list); - amdgpu_vm_it_remove(mapping, &vm->va); kfree(mapping); } list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 3d0f277a6523..617b0c8908a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4815,8 +4815,10 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) if (r) goto done; - /* Test KCQs */ - for (i = 0; i < adev->gfx.num_compute_rings; i++) { + /* Test KCQs - reversing the order of rings seems to fix ring test failure + * after GPU reset + */ + for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { ring = &adev->gfx.compute_ring[i]; ring->ready = true; r = amdgpu_ring_test_ring(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 14649f8475f3..fd23ba1226a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -280,7 +280,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, return; if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) { - if (adev->powerplay.pp_funcs->set_powergating_by_smu) + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 04fa3d972636..7a8c9172d30a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1366,7 +1366,8 @@ static int sdma_v4_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu) + if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); sdma_v4_0_init_golden_registers(adev); @@ -1386,7 +1387,8 @@ static int sdma_v4_0_hw_fini(void *handle) sdma_v4_0_ctx_switch_enable(adev, false); sdma_v4_0_enable(adev, false); - if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu) + if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs + && adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true); return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e224f23e2215..b0df6dc9a775 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1524,6 +1524,13 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) { struct amdgpu_display_manager *dm = bl_get_data(bd); + /* + * PWM interperts 0 as 100% rather than 0% because of HW + * limitation for level 0.So limiting minimum brightness level + * to 1. 
+ */ + if (bd->props.brightness < 1) + return 1; if (dc_link_set_backlight_level(dm->backlight_link, bd->props.brightness, 0, 0)) return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 0fab64a2a915..12001a006b2d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -101,7 +101,7 @@ bool dm_pp_apply_display_requirements( adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; } - if (adev->powerplay.pp_funcs->display_configuration_change) + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change) adev->powerplay.pp_funcs->display_configuration_change( adev->powerplay.pp_handle, &adev->pm.pm_display_cfg); @@ -304,7 +304,7 @@ bool dm_pp_get_clock_levels_by_type( struct amd_pp_simple_clock_info validation_clks = { 0 }; uint32_t i; - if (adev->powerplay.pp_funcs->get_clock_by_type) { + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) { if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle, dc_to_pp_clock_type(clk_type), &pp_clks)) { /* Error in pplib. Provide default values. */ @@ -315,7 +315,7 @@ bool dm_pp_get_clock_levels_by_type( pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type); - if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) { + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) { if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks( pp_handle, &validation_clks)) { /* Error in pplib. Provide default values. */ @@ -398,6 +398,9 @@ bool dm_pp_get_clock_levels_by_type_with_voltage( struct pp_clock_levels_with_voltage pp_clk_info = {0}; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage) + return false; + if (pp_funcs->get_clock_by_type_with_voltage(pp_handle, dc_to_pp_clock_type(clk_type), &pp_clk_info)) @@ -438,7 +441,7 @@ bool dm_pp_apply_clock_for_voltage_request( if (!pp_clock_request.clock_type) return false; - if (adev->powerplay.pp_funcs->display_clock_voltage_request) + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request) ret = adev->powerplay.pp_funcs->display_clock_voltage_request( adev->powerplay.pp_handle, &pp_clock_request); @@ -455,7 +458,7 @@ bool dm_pp_get_static_clocks( struct amd_pp_clock_info pp_clk_info = {0}; int ret = 0; - if (adev->powerplay.pp_funcs->get_current_clocks) + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks) ret = adev->powerplay.pp_funcs->get_current_clocks( adev->powerplay.pp_handle, &pp_clk_info); @@ -505,6 +508,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; + if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges) + return; + for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { if (ranges->reader_wm_sets[i].wm_inst > 3) wm_dce_clocks[i].wm_set_id = WM_SET_A; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index de190935f0a4..e3624ca24574 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -568,7 +568,7 @@ static struct input_pixel_processor *dce110_ipp_create( static const struct 
encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 594000, + .max_hdmi_pixel_clock = 300000, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true }; diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index a407892905af..c0d9f332baed 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -40,8 +40,6 @@ #define LITTLEENDIAN_CPU #endif -#undef READ -#undef WRITE #undef FRAME_SIZE #define dm_output_to_console(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__) diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index e8964cae6b93..d6aa1d414320 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -723,11 +723,14 @@ static int pp_dpm_force_clock_level(void *handle, pr_info("%s was not implemented.\n", __func__); return 0; } + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { + pr_info("force clock level is for dpm manual mode only.\n"); + return -EINVAL; + } + mutex_lock(&hwmgr->smu_lock); - if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) - ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); - else - ret = -EINVAL; + ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); mutex_unlock(&hwmgr->smu_lock); return ret; } @@ -963,6 +966,7 @@ static int pp_dpm_switch_power_profile(void *handle, static int pp_set_power_limit(void *handle, uint32_t limit) { struct pp_hwmgr *hwmgr = handle; + uint32_t max_power_limit; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -975,7 +979,13 @@ static int pp_set_power_limit(void *handle, uint32_t limit) if (limit == 0) limit = hwmgr->default_power_limit; - if (limit > hwmgr->default_power_limit) + max_power_limit = hwmgr->default_power_limit; + if (hwmgr->od_enabled) { + max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit); + max_power_limit /= 100; + } + + if (limit > max_power_limit) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -994,8 +1004,13 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) mutex_lock(&hwmgr->smu_lock); - if (default_limit) + if (default_limit) { *limit = hwmgr->default_power_limit; + if (hwmgr->od_enabled) { + *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit); + *limit /= 100; + } + } else *limit = hwmgr->power_limit; @@ -1303,12 +1318,12 @@ static int pp_enable_mgpu_fan_boost(void *handle) { struct pp_hwmgr *hwmgr = handle; - if (!hwmgr || !hwmgr->pm_en) + if (!hwmgr) return -EINVAL; - if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) { + if (!hwmgr->pm_en || + hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) return 0; - } mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 6c99cbf51c08..ed35ec0341e6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3588,9 +3588,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons break; } - if (i >= sclk_table->count) + if (i >= sclk_table->count) { data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { + sclk_table->dpm_levels[i-1].value = sclk; + } else { /* TODO: Check SCLK in DAL's minimum clocks * in case DeepSleep divider update is required. 
*/ @@ -3605,9 +3606,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons break; } - if (i >= mclk_table->count) + if (i >= mclk_table->count) { data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - + mclk_table->dpm_levels[i-1].value = mclk; + } if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index 4714b5b59825..99a33c33a32c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -718,7 +718,7 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table, table->WatermarkRow[1][i].MaxClock = cpu_to_le16((uint16_t) (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) / - 100); + 1000); table->WatermarkRow[1][i].MinUclk = cpu_to_le16((uint16_t) (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) / diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 419a1d77d661..8c4db86bb4b7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -1333,7 +1333,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) hwmgr->platform_descriptor.overdriveLimit.memoryClock = dpm_table->dpm_levels[dpm_table->count-1].value; - vega10_init_dpm_state(&(dpm_table->dpm_state)); data->dpm_table.eclk_table.count = 0; @@ -3249,6 +3248,37 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) { struct vega10_hwmgr *data = hwmgr->backend; + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct vega10_power_state *vega10_ps = + cast_const_phw_vega10_power_state(states->pnew_state); + struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); + uint32_t sclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; + struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); + uint32_t mclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].mem_clock; + uint32_t i; + + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (i >= sclk_table->count) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + sclk_table->dpm_levels[i-1].value = sclk; + } + + for (i = 0; i < mclk_table->count; i++) { + if (mclk == mclk_table->dpm_levels[i].value) + break; + } + + if (i >= mclk_table->count) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + mclk_table->dpm_levels[i-1].value = mclk; + } if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; @@ -4529,11 +4559,13 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) if (vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].gfx_clock > - hwmgr->platform_descriptor.overdriveLimit.engineClock) + hwmgr->platform_descriptor.overdriveLimit.engineClock) { vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].gfx_clock = hwmgr->platform_descriptor.overdriveLimit.engineClock; - 
+ pr_warn("max sclk supported by vbios is %d\n", + hwmgr->platform_descriptor.overdriveLimit.engineClock); + } return 0; } @@ -4581,10 +4613,13 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) if (vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].mem_clock > - hwmgr->platform_descriptor.overdriveLimit.memoryClock) + hwmgr->platform_descriptor.overdriveLimit.memoryClock) { vega10_ps->performance_levels [vega10_ps->performance_level_count - 1].mem_clock = hwmgr->platform_descriptor.overdriveLimit.memoryClock; + pr_warn("max mclk supported by vbios is %d\n", + hwmgr->platform_descriptor.overdriveLimit.memoryClock); + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 9600e2f226e9..74bc37308dc0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -2356,6 +2356,13 @@ static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable) return vega12_disable_gfx_off(hwmgr); } +static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level) +{ + return 0; +} + static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .backend_init = vega12_hwmgr_backend_init, .backend_fini = vega12_hwmgr_backend_fini, @@ -2406,6 +2413,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .register_irq_handlers = smu9_register_irq_handlers, .start_thermal_controller = vega12_start_thermal_controller, .powergate_gfx = vega12_gfx_off_control, + .get_performance_level = vega12_get_performance_level, }; int vega12_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index b4dbbb7c334c..57143d51e3ee 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -1875,38 +1875,20 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, return ret; } -static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq) +static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr, + PPCLK_e clk_id, uint32_t *clk_freq) { - uint32_t gfx_clk = 0; int ret = 0; - *gfx_freq = 0; + *clk_freq = 0; PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0, - "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!", + PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0, + "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!", return ret); - gfx_clk = smum_get_argument(hwmgr); + *clk_freq = smum_get_argument(hwmgr); - *gfx_freq = gfx_clk * 100; - - return 0; -} - -static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq) -{ - uint32_t mem_clk = 0; - int ret = 0; - - *mclk_freq = 0; - - PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0, - "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!", - return ret); - mem_clk = smum_get_argument(hwmgr); - - *mclk_freq = mem_clk * 100; + *clk_freq = *clk_freq * 100; return 0; } @@ -1937,12 +1919,16 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - ret = vega20_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value); 
+ ret = vega20_get_current_clk_freq(hwmgr, + PPCLK_GFXCLK, + (uint32_t *)value); if (!ret) *size = 4; break; case AMDGPU_PP_SENSOR_GFX_MCLK: - ret = vega20_get_current_mclk_freq(hwmgr, (uint32_t *)value); + ret = vega20_get_current_clk_freq(hwmgr, + PPCLK_UCLK, + (uint32_t *)value); if (!ret) *size = 4; break; @@ -2012,7 +1998,6 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { switch (clk_type) { case amd_pp_dcef_clock: - clk_freq = clock_req->clock_freq_in_khz / 100; clk_select = PPCLK_DCEFCLK; break; case amd_pp_disp_clock: @@ -2041,11 +2026,20 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, return result; } +static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level) +{ + return 0; +} + static int vega20_notify_smc_display_config_after_ps_adjustment( struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table = + &data->dpm_table.mem_table; struct PP_Clocks min_clocks = {0}; struct pp_display_clock_request clock_req; int ret = 0; @@ -2063,7 +2057,7 @@ static int vega20_notify_smc_display_config_after_ps_adjustment( if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { clock_req.clock_type = amd_pp_dcef_clock; - clock_req.clock_freq_in_khz = min_clocks.dcefClock; + clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10; if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) { if (data->smu_features[GNLD_DS_DCEFCLK].supported) PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter( @@ -2076,6 +2070,15 @@ static int vega20_notify_smc_display_config_after_ps_adjustment( } } + if (data->smu_features[GNLD_DPM_UCLK].enabled) { + dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinByFreq, + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), + "[SetHardMinFreq] Set hard min uclk failed!", + return ret); + } + return 0; } @@ -2353,7 +2356,7 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr, for (i = 0; i < count; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -2383,7 +2386,7 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr, for (i = 0; i < count; i++) { clocks->data[i].clocks_in_khz = data->mclk_latency_table.entries[i].frequency = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = data->mclk_latency_table.entries[i].latency = vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value); @@ -2408,7 +2411,7 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr, for (i = 0; i < count; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -2431,7 +2434,7 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr, for (i = 0; i < count; i++) { clocks->data[i].clocks_in_khz = - dpm_table->dpm_levels[i].value * 100; + dpm_table->dpm_levels[i].value * 1000; clocks->data[i].latency_in_us = 0; } @@ -2582,11 +2585,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return -EINVAL; } - if (input_clk < clocks.data[0].clocks_in_khz / 100 || + if (input_clk < 
clocks.data[0].clocks_in_khz / 1000 || input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { pr_info("clock freq %d is not within allowed range [%d - %d]\n", input_clk, - clocks.data[0].clocks_in_khz / 100, + clocks.data[0].clocks_in_khz / 1000, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); return -EINVAL; } @@ -2726,7 +2729,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, switch (type) { case PP_SCLK: - ret = vega20_get_current_gfx_clk_freq(hwmgr, &now); + ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now); PP_ASSERT_WITH_CODE(!ret, "Attempt to get current gfx clk Failed!", return ret); @@ -2738,12 +2741,12 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 100, + i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now) ? "*" : ""); break; case PP_MCLK: - ret = vega20_get_current_mclk_freq(hwmgr, &now); + ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now); PP_ASSERT_WITH_CODE(!ret, "Attempt to get current mclk freq Failed!", return ret); @@ -2755,7 +2758,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", - i, clocks.data[i].clocks_in_khz / 100, + i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now) ? "*" : ""); break; @@ -2820,7 +2823,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", - clocks.data[0].clocks_in_khz / 100, + clocks.data[0].clocks_in_khz / 1000, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); } @@ -3476,6 +3479,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { vega20_set_watermarks_for_clocks_ranges, .display_clock_voltage_request = vega20_display_clock_voltage_request, + .get_performance_level = + vega20_get_performance_level, /* UMD pstate, profile related */ .force_dpm_level = vega20_dpm_force_dpm_level, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c index e5f7f8230065..97f8a1a970c3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c @@ -642,8 +642,14 @@ static int check_powerplay_tables( "Unsupported PPTable format!", return -1); PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0, "Invalid PowerPlay Table!", return -1); - PP_ASSERT_WITH_CODE(powerplay_table->smcPPTable.Version == PPTABLE_V20_SMU_VERSION, - "Unmatch PPTable version, vbios update may be needed!", return -1); + + if (powerplay_table->smcPPTable.Version != PPTABLE_V20_SMU_VERSION) { + pr_info("Unmatch PPTable version: " + "pptable from VBIOS is V%d while driver supported is V%d!", + powerplay_table->smcPPTable.Version, + PPTABLE_V20_SMU_VERSION); + return -EINVAL; + } //dump_pptable(&powerplay_table->smcPPTable); @@ -716,10 +722,6 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable "[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!", return -1); - memset(ppsmc_pptable->Padding32, - 0, - sizeof(struct atom_smc_dpm_info_v4_4) - - sizeof(struct atom_common_table_header)); ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx; ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc; @@ -778,22 +780,19 @@ static int 
append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent; ppsmc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq; - if ((smc_dpm_table->table_header.format_revision == 4) && - (smc_dpm_table->table_header.content_revision == 4)) { - for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) { - ppsmc_pptable->I2cControllers[i].Enabled = - smc_dpm_table->i2ccontrollers[i].enabled; - ppsmc_pptable->I2cControllers[i].SlaveAddress = - smc_dpm_table->i2ccontrollers[i].slaveaddress; - ppsmc_pptable->I2cControllers[i].ControllerPort = - smc_dpm_table->i2ccontrollers[i].controllerport; - ppsmc_pptable->I2cControllers[i].ThermalThrottler = - smc_dpm_table->i2ccontrollers[i].thermalthrottler; - ppsmc_pptable->I2cControllers[i].I2cProtocol = - smc_dpm_table->i2ccontrollers[i].i2cprotocol; - ppsmc_pptable->I2cControllers[i].I2cSpeed = - smc_dpm_table->i2ccontrollers[i].i2cspeed; - } + for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) { + ppsmc_pptable->I2cControllers[i].Enabled = + smc_dpm_table->i2ccontrollers[i].enabled; + ppsmc_pptable->I2cControllers[i].SlaveAddress = + smc_dpm_table->i2ccontrollers[i].slaveaddress; + ppsmc_pptable->I2cControllers[i].ControllerPort = + smc_dpm_table->i2ccontrollers[i].controllerport; + ppsmc_pptable->I2cControllers[i].ThermalThrottler = + smc_dpm_table->i2ccontrollers[i].thermalthrottler; + ppsmc_pptable->I2cControllers[i].I2cProtocol = + smc_dpm_table->i2ccontrollers[i].i2cprotocol; + ppsmc_pptable->I2cControllers[i].I2cSpeed = + smc_dpm_table->i2ccontrollers[i].i2cspeed; } return 0; @@ -882,15 +881,10 @@ static int init_powerplay_table_information( if (pptable_information->smc_pptable == NULL) return -ENOMEM; - if (powerplay_table->smcPPTable.Version <= 2) - memcpy(pptable_information->smc_pptable, - &(powerplay_table->smcPPTable), - sizeof(PPTable_t) - - sizeof(I2cControllerConfig_t) * I2C_CONTROLLER_NAME_COUNT); - else - memcpy(pptable_information->smc_pptable, - &(powerplay_table->smcPPTable), - sizeof(PPTable_t)); + memcpy(pptable_information->smc_pptable, + &(powerplay_table->smcPPTable), + sizeof(PPTable_t)); + result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable)); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 2998a49960ed..63d5cf691549 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h @@ -29,7 +29,7 @@ // any structure is changed in this file #define SMU11_DRIVER_IF_VERSION 0x12 -#define PPTABLE_V20_SMU_VERSION 2 +#define PPTABLE_V20_SMU_VERSION 3 #define NUM_GFXCLK_DPM_LEVELS 16 #define NUM_VCLK_DPM_LEVELS 8 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index f836d30fdd44..09b844ec3eab 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c @@ -71,7 +71,11 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) result = PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); if (result != 0) { + /* Read the last message to SMU, to report actual cause */ + uint32_t val = cgs_read_register(hwmgr->device, + mmSMU_MP1_SRBM2P_MSG_0); pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg); + pr_err("SMU still servicing msg (0x%04x)\n", val); return result; } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c 
b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index f8a931cf3665..680566d97adc 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -458,18 +458,6 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge) unsigned int val; int ret; - /* - * FIXME: - * This 70ms was found necessary by experimentation. If it's not - * present, link training fails. It seems like it can go anywhere from - * pre_enable() up to semi-auto link training initiation below. - * - * Neither the datasheet for the bridge nor the panel tested mention a - * delay of this magnitude in the timing requirements. So for now, add - * the mystery delay until someone figures out a better fix. - */ - msleep(70); - /* DSI_A lane config */ val = CHA_DSI_LANES(4 - pdata->dsi->lanes); regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG, @@ -536,7 +524,22 @@ static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge) /* configure bridge ref_clk */ ti_sn_bridge_set_refclk_freq(pdata); - /* in case drm_panel is connected then HPD is not supported */ + /* + * HPD on this bridge chip is a bit useless. This is an eDP bridge + * so the HPD is an internal signal that's only there to signal that + * the panel is done powering up. ...but the bridge chip debounces + * this signal by between 100 ms and 400 ms (depending on process, + * voltage, and temperature--I measured it at about 200 ms). One + * particular panel asserted HPD 84 ms after it was powered on, meaning + * that we saw HPD 284 ms after power on. ...but the same panel said + * that instead of looking at HPD you could just hardcode a delay of + * 200 ms. We'll assume that the panel driver will have the hardcoded + * delay in its prepare and always disable HPD. + * + * If HPD somehow makes sense on some future panel we'll have to + * change this to be conditional on someone specifying that HPD should + * be used. + */ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE, HPD_DISABLE); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 701cb334e1ea..d8b526b7932c 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -308,6 +308,26 @@ update_connector_routing(struct drm_atomic_state *state, return 0; } + crtc_state = drm_atomic_get_new_crtc_state(state, + new_connector_state->crtc); + /* + * For compatibility with legacy users, we want to make sure that + * we allow DPMS On->Off modesets on unregistered connectors. Modesets + * which would result in anything else must be considered invalid, to + * avoid turning on new displays on dead connectors. + * + * Since the connector can be unregistered at any point during an + * atomic check or commit, this is racy. But that's OK: all we care + * about is ensuring that userspace can't do anything but shut off the + * display on a connector that was destroyed after it's been notified, + * not before.
+ */ + if (drm_connector_is_unregistered(connector) && crtc_state->active) { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n", + connector->base.id, connector->name); + return -EINVAL; + } + funcs = connector->helper_private; if (funcs->atomic_best_encoder) @@ -352,7 +372,6 @@ update_connector_routing(struct drm_atomic_state *state, set_best_encoder(state, new_connector_state, new_encoder); - crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc); crtc_state->connectors_changed = true; DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 1e40e5decbe9..4943cef178be 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -379,7 +379,8 @@ void drm_connector_cleanup(struct drm_connector *connector) /* The connector should have been removed from userspace long before * it is finally destroyed. */ - if (WARN_ON(connector->registered)) + if (WARN_ON(connector->registration_state == + DRM_CONNECTOR_REGISTERED)) drm_connector_unregister(connector); if (connector->tile_group) { @@ -436,7 +437,7 @@ int drm_connector_register(struct drm_connector *connector) return 0; mutex_lock(&connector->mutex); - if (connector->registered) + if (connector->registration_state != DRM_CONNECTOR_INITIALIZING) goto unlock; ret = drm_sysfs_connector_add(connector); @@ -456,7 +457,7 @@ int drm_connector_register(struct drm_connector *connector) drm_mode_object_register(connector->dev, &connector->base); - connector->registered = true; + connector->registration_state = DRM_CONNECTOR_REGISTERED; goto unlock; err_debugfs: @@ -478,7 +479,7 @@ EXPORT_SYMBOL(drm_connector_register); void drm_connector_unregister(struct drm_connector *connector) { mutex_lock(&connector->mutex); - if (!connector->registered) { + if (connector->registration_state != DRM_CONNECTOR_REGISTERED) { mutex_unlock(&connector->mutex); return; } @@ -489,7 +490,7 @@ void drm_connector_unregister(struct drm_connector *connector) drm_sysfs_connector_remove(connector); drm_debugfs_connector_remove(connector); - connector->registered = false; + connector->registration_state = DRM_CONNECTOR_UNREGISTERED; mutex_unlock(&connector->mutex); } EXPORT_SYMBOL(drm_connector_unregister); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ff0bfc65a8c1..b506e3622b08 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -122,6 +122,9 @@ static const struct edid_quirk { /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC }, + /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */ + { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC }, + /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3fae4dab295f..13f9b56a9ce7 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -5102,19 +5102,13 @@ intel_dp_long_pulse(struct intel_connector *connector, */ status = connector_status_disconnected; goto out; - } else { - /* - * If display is now connected check links status, - * there has been known issues of link loss triggering - * long pulse. - * - * Some sinks (eg. ASUS PB287Q) seem to perform some - * weird HPD ping pong during modesets. So we can apparently - * end up with HPD going low during a modeset, and then - * going back up soon after. 
And once that happens we must - * retrain the link to get a picture. That's in case no - * userspace component reacted to intermittent HPD dip. - */ + } + + /* + * Some external monitors do not signal loss of link synchronization + * with an IRQ_HPD, so force a link status check. + */ + if (!intel_dp_is_edp(intel_dp)) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; intel_dp_retrain_link(encoder, ctx); diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 7f155b4f1a7d..1b00f8ea145b 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -77,7 +77,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->pbn = mst_pbn; /* Zombie connectors can't have VCPI slots */ - if (READ_ONCE(connector->registered)) { + if (!drm_connector_is_unregistered(connector)) { slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port, @@ -313,7 +313,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) struct edid *edid; int ret; - if (!READ_ONCE(connector->registered)) + if (drm_connector_is_unregistered(connector)) return intel_connector_update_modes(connector, NULL); edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port); @@ -329,7 +329,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force) struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; - if (!READ_ONCE(connector->registered)) + if (drm_connector_is_unregistered(connector)) return connector_status_disconnected; return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port); @@ -372,7 +372,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, int bpp = 24; /* MST uses fixed bpp */ int max_rate, mode_rate, max_lanes, max_link_clock; - if (!READ_ONCE(connector->registered)) + if (drm_connector_is_unregistered(connector)) return MODE_ERROR; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 6bb78076b5b5..6cbbae3f438b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -881,22 +881,16 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector, { struct nv50_head *head = nv50_head(connector_state->crtc); struct nv50_mstc *mstc = nv50_mstc(connector); - if (mstc->port) { - struct nv50_mstm *mstm = mstc->mstm; - return &mstm->msto[head->base.index]->encoder; - } - return NULL; + + return &mstc->mstm->msto[head->base.index]->encoder; } static struct drm_encoder * nv50_mstc_best_encoder(struct drm_connector *connector) { struct nv50_mstc *mstc = nv50_mstc(connector); - if (mstc->port) { - struct nv50_mstm *mstm = mstc->mstm; - return &mstm->msto[0]->encoder; - } - return NULL; + + return &mstc->mstm->msto[0]->encoder; } static enum drm_mode_status diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 97964f7f2ace..a04ffb3b2174 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -56,6 +56,8 @@ struct panel_desc { /** * @prepare: the time (in milliseconds) that it takes for the panel to * become ready and start receiving video data + * @hpd_absent_delay: Add this to the prepare delay if we know Hot + * Plug Detect isn't used. 
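Aside: the drm_connector.c hunks above retire the connector->registered bool in favour of a tri-state registration_state, and the i915 MST code switches from READ_ONCE(connector->registered) to drm_connector_is_unregistered(). A sketch of what that accessor plausibly looks like; the authoritative definition lives in include/drm/drm_connector.h alongside the enum:

enum drm_connector_registration_state {
	DRM_CONNECTOR_INITIALIZING,
	DRM_CONNECTOR_REGISTERED,
	DRM_CONNECTOR_UNREGISTERED,
};

/* True only for a connector that was exposed to userspace and has since
 * been removed; a connector still initializing is not "unregistered",
 * a distinction the old bool could not express. */
static inline bool
drm_connector_is_unregistered(struct drm_connector *connector)
{
	return READ_ONCE(connector->registration_state) ==
		DRM_CONNECTOR_UNREGISTERED;
}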
* @enable: the time (in milliseconds) that it takes for the panel to * display the first valid frame after starting to receive * video data @@ -66,6 +68,7 @@ struct panel_desc { */ struct { unsigned int prepare; + unsigned int hpd_absent_delay; unsigned int enable; unsigned int disable; unsigned int unprepare; @@ -79,6 +82,7 @@ struct panel_simple { struct drm_panel base; bool prepared; bool enabled; + bool no_hpd; const struct panel_desc *desc; @@ -202,6 +206,7 @@ static int panel_simple_unprepare(struct drm_panel *panel) static int panel_simple_prepare(struct drm_panel *panel) { struct panel_simple *p = to_panel_simple(panel); + unsigned int delay; int err; if (p->prepared) @@ -215,8 +220,11 @@ static int panel_simple_prepare(struct drm_panel *panel) gpiod_set_value_cansleep(p->enable_gpio, 1); - if (p->desc->delay.prepare) - msleep(p->desc->delay.prepare); + delay = p->desc->delay.prepare; + if (p->no_hpd) + delay += p->desc->delay.hpd_absent_delay; + if (delay) + msleep(delay); p->prepared = true; @@ -305,6 +313,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) panel->prepared = false; panel->desc = desc; + panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd"); + panel->supply = devm_regulator_get(dev, "power"); if (IS_ERR(panel->supply)) return PTR_ERR(panel->supply); @@ -1363,7 +1373,7 @@ static const struct panel_desc innolux_n156bge_l21 = { }, }; -static const struct drm_display_mode innolux_tv123wam_mode = { +static const struct drm_display_mode innolux_p120zdg_bf1_mode = { .clock = 206016, .hdisplay = 2160, .hsync_start = 2160 + 48, @@ -1377,15 +1387,16 @@ static const struct drm_display_mode innolux_tv123wam_mode = { .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; -static const struct panel_desc innolux_tv123wam = { - .modes = &innolux_tv123wam_mode, +static const struct panel_desc innolux_p120zdg_bf1 = { + .modes = &innolux_p120zdg_bf1_mode, .num_modes = 1, .bpc = 8, .size = { - .width = 259, - .height = 173, + .width = 254, + .height = 169, }, .delay = { + .hpd_absent_delay = 200, .unprepare = 500, }, }; @@ -2445,8 +2456,8 @@ static const struct of_device_id platform_of_match[] = { .compatible = "innolux,n156bge-l21", .data = &innolux_n156bge_l21, }, { - .compatible = "innolux,tv123wam", - .data = &innolux_tv123wam, + .compatible = "innolux,p120zdg-bf1", + .data = &innolux_p120zdg_bf1, }, { .compatible = "innolux,zj070na-01p", .data = &innolux_zj070na_01p, diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c index 566d69a2edbc..add4c9c934c8 100644 --- a/drivers/irqchip/irq-mvebu-sei.c +++ b/drivers/irqchip/irq-mvebu-sei.c @@ -384,9 +384,9 @@ static int mvebu_sei_probe(struct platform_device *pdev) sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sei->base = devm_ioremap_resource(sei->dev, sei->res); - if (!sei->base) { + if (IS_ERR(sei->base)) { dev_err(sei->dev, "Failed to remap SEI resource\n"); - return -ENODEV; + return PTR_ERR(sei->base); } /* Retrieve the SEI capabilities with the interrupt ranges */ diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index b05022f94f18..072bb5e36c18 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -718,8 +718,7 @@ l1oip_socket_thread(void *data) printk(KERN_DEBUG "%s: socket created and open\n", __func__); while (!signal_pending(current)) { - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, - recvbuf_size); + iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, recvbuf_size); recvlen = 
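Aside: the panel-simple hunks above are the counterpart of the earlier ti-sn65dsi86 change. The bridge drops its 70 ms mystery sleep and always disables HPD, and the panel descriptor instead carries hpd_absent_delay (200 ms for the renamed innolux p120zdg-bf1), applied only when the no-hpd DT property is present. A sketch of the resulting prepare-time arithmetic, reusing names from the hunks:

/* Sketch of the delay that panel_simple_prepare() ends up sleeping. */
static unsigned int effective_prepare_delay(const struct panel_desc *desc,
					    bool no_hpd)
{
	unsigned int delay = desc->delay.prepare;

	/* With HPD disabled nobody waits for the panel's ready signal,
	 * so the worst-case power-up time is waited out in full. */
	if (no_hpd)
		delay += desc->delay.hpd_absent_delay;

	return delay;
}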
sock_recvmsg(socket, &msg, 0); if (recvlen > 0) { l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index f3fb5bb8c82a..ac1cffd2a09b 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -542,7 +542,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) !discard_bio) continue; bio_chain(discard_bio, bio); - bio_clone_blkg_association(discard_bio, bio); + bio_clone_blkcg_association(discard_bio, bio); if (mddev->gendisk) trace_block_bio_remap(bdev_get_queue(rdev->bdev), discard_bio, disk_devt(mddev->gendisk), diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 3370a4138e94..951c984de61a 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -8,7 +8,9 @@ lkdtm-$(CONFIG_LKDTM) += perms.o lkdtm-$(CONFIG_LKDTM) += refcount.o lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o lkdtm-$(CONFIG_LKDTM) += usercopy.o +lkdtm-$(CONFIG_LKDTM) += stackleak.o +KASAN_SANITIZE_stackleak.o := n KCOV_INSTRUMENT_rodata.o := n OBJCOPYFLAGS := diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 5a755590d3dc..2837dc77478e 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -184,6 +184,7 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(USERCOPY_STACK_BEYOND), CRASHTYPE(USERCOPY_KERNEL), CRASHTYPE(USERCOPY_KERNEL_DS), + CRASHTYPE(STACKLEAK_ERASING), }; diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 07db641d71d0..3c6fd327e166 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -84,4 +84,7 @@ void lkdtm_USERCOPY_STACK_BEYOND(void); void lkdtm_USERCOPY_KERNEL(void); void lkdtm_USERCOPY_KERNEL_DS(void); +/* lkdtm_stackleak.c */ +void lkdtm_STACKLEAK_ERASING(void); + #endif diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c new file mode 100644 index 000000000000..d5a084475abc --- /dev/null +++ b/drivers/misc/lkdtm/stackleak.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code tests that the current task stack is properly erased (filled + * with STACKLEAK_POISON). + * + * Authors: + * Alexander Popov <alex.popov@linux.com> + * Tycho Andersen <tycho@tycho.ws> + */ + +#include "lkdtm.h" +#include <linux/stackleak.h> + +void lkdtm_STACKLEAK_ERASING(void) +{ + unsigned long *sp, left, found, i; + const unsigned long check_depth = + STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long); + + /* + * For the details about the alignment of the poison values, see + * the comment in stackleak_track_stack(). + */ + sp = PTR_ALIGN(&i, sizeof(unsigned long)); + + left = ((unsigned long)sp & (THREAD_SIZE - 1)) / sizeof(unsigned long); + sp--; + + /* + * One 'long int' at the bottom of the thread stack is reserved + * and not poisoned. + */ + if (left > 1) { + left--; + } else { + pr_err("FAIL: not enough stack space for the test\n"); + return; + } + + pr_info("checking unused part of the thread stack (%lu bytes)...\n", + left * sizeof(unsigned long)); + + /* + * Search for 'check_depth' poison values in a row (just like + * stackleak_erase() does). 
+ */ + for (i = 0, found = 0; i < left && found <= check_depth; i++) { + if (*(sp - i) == STACKLEAK_POISON) + found++; + else + found = 0; + } + + if (found <= check_depth) { + pr_err("FAIL: thread stack is not erased (checked %lu bytes)\n", + i * sizeof(unsigned long)); + return; + } + + pr_info("first %lu bytes are unpoisoned\n", + (i - found) * sizeof(unsigned long)); + + /* The rest of thread stack should be erased */ + for (; i < left; i++) { + if (*(sp - i) != STACKLEAK_POISON) { + pr_err("FAIL: thread stack is NOT properly erased\n"); + return; + } + } + + pr_info("OK: the rest of the thread stack is properly erased\n"); + return; +} diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index bd52f29b4a4e..264f4ed8eef2 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -3030,7 +3030,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, if (!qpair || !buf) return VMCI_ERROR_INVALID_ARGS; - iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size); + iov_iter_kvec(&from, WRITE, &v, 1, buf_size); qp_lock(qpair); @@ -3074,7 +3074,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, if (!qpair || !buf) return VMCI_ERROR_INVALID_ARGS; - iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size); + iov_iter_kvec(&to, READ, &v, 1, buf_size); qp_lock(qpair); @@ -3119,7 +3119,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, if (!qpair || !buf) return VMCI_ERROR_INVALID_ARGS; - iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size); + iov_iter_kvec(&to, READ, &v, 1, buf_size); qp_lock(qpair); diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c index 93ceea4f27d5..e294d3986ba9 100644 --- a/drivers/mtd/ubi/attach.c +++ b/drivers/mtd/ubi/attach.c @@ -1072,6 +1072,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, * be a result of power cut during erasure. 
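Aside: the lkdtm test above only trusts stack erasure after it finds check_depth consecutive STACKLEAK_POISON words, because a single poison-looking value can occur in live stack data. A self-contained userspace simulation of that scan; the poison constant and depth here are invented for the demo:

#include <stdio.h>

#define POISON 0xCAFEBABEUL	/* stand-in for STACKLEAK_POISON */
#define DEPTH  4		/* stand-in for the search depth */

int main(void)
{
	unsigned long stack[16] = { 0 };
	unsigned long i, found;

	for (i = 6; i < 16; i++)	/* fake "unused" stack region */
		stack[i] = POISON;

	/* same idea as the lkdtm loop: count consecutive poison words,
	 * resetting the counter on any non-poison value */
	for (i = 0, found = 0; i < 16 && found <= DEPTH; i++) {
		if (stack[i] == POISON)
			found++;
		else
			found = 0;
	}

	if (found > DEPTH)
		printf("poison run found, %lu leading words live\n",
		       i - found);
	else
		printf("no sufficiently long poison run\n");
	return 0;
}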
*/ ai->maybe_bad_peb_count += 1; + /* fall through */ case UBI_IO_BAD_HDR: /* * If we're facing a bad VID header we have to drop *all* diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index d2a726654ff1..a4e3454133a4 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -1334,8 +1334,10 @@ static int bytes_str_to_int(const char *str) switch (*endp) { case 'G': result *= 1024; + /* fall through */ case 'M': result *= 1024; + /* fall through */ case 'K': result *= 1024; if (endp[1] == 'i' && endp[2] == 'B') diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index b12023bc2cab..a5bab614ff84 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -71,7 +71,6 @@ static unsigned int tx_start = 10; static unsigned int tx_stop = 5; struct ntb_netdev { - struct list_head list; struct pci_dev *pdev; struct net_device *ndev; struct ntb_transport_qp *qp; @@ -81,8 +80,6 @@ struct ntb_netdev { #define NTB_TX_TIMEOUT_MS 1000 #define NTB_RXQ_SIZE 100 -static LIST_HEAD(dev_list); - static void ntb_netdev_event_handler(void *data, int link_is_up) { struct net_device *ndev = data; @@ -236,7 +233,7 @@ static void ntb_netdev_tx_timer(struct timer_list *t) struct net_device *ndev = dev->ndev; if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) { - mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time)); + mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time)); } else { /* Make sure anybody stopping the queue after this sees the new * value of ntb_transport_tx_free_entry() @@ -452,7 +449,7 @@ static int ntb_netdev_probe(struct device *client_dev) if (rc) goto err1; - list_add(&dev->list, &dev_list); + dev_set_drvdata(client_dev, ndev); dev_info(&pdev->dev, "%s created\n", ndev->name); return 0; @@ -465,27 +462,8 @@ err: static void ntb_netdev_remove(struct device *client_dev) { - struct ntb_dev *ntb; - struct net_device *ndev; - struct pci_dev *pdev; - struct ntb_netdev *dev; - bool found = false; - - ntb = dev_ntb(client_dev->parent); - pdev = ntb->pdev; - - list_for_each_entry(dev, &dev_list, list) { - if (dev->pdev == pdev) { - found = true; - break; - } - } - if (!found) - return; - - list_del(&dev->list); - - ndev = dev->ndev; + struct net_device *ndev = dev_get_drvdata(client_dev); + struct ntb_netdev *dev = netdev_priv(ndev); unregister_netdev(ndev); ntb_transport_free_queue(dev->qp); diff --git a/drivers/ntb/hw/idt/Kconfig b/drivers/ntb/hw/idt/Kconfig index b360e5613b9f..f8948cf515ce 100644 --- a/drivers/ntb/hw/idt/Kconfig +++ b/drivers/ntb/hw/idt/Kconfig @@ -1,6 +1,7 @@ config NTB_IDT tristate "IDT PCIe-switch Non-Transparent Bridge support" depends on PCI + select HWMON help This driver supports NTB of capable IDT PCIe-switches. @@ -23,9 +24,7 @@ config NTB_IDT BAR settings of peer NT-functions, the BAR setups can't be done over kernel PCI fixups. That's why the alternative pre-initialization techniques like BIOS using SMBus interface or EEPROM should be - utilized. Additionally if one needs to have temperature sensor - information printed to system log, the corresponding registers must - be initialized within BIOS/EEPROM as well. + utilized. If unsure, say N. diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index dbe72f116017..1dede87dd54f 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -4,7 +4,7 @@ * * GPL LICENSE SUMMARY * - * Copyright (C) 2016 T-Platforms All Rights Reserved. + * Copyright (C) 2016-2018 T-Platforms JSC All Rights Reserved.
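Aside: the ntb_netdev rework above replaces a module-global list, searched linearly by PCI device on removal, with per-device driver data. A hedged sketch of the pattern, error paths trimmed:

static int example_probe(struct device *client_dev)
{
	struct net_device *ndev = alloc_etherdev(sizeof(struct ntb_netdev));

	if (!ndev)
		return -ENOMEM;
	/* ... queue setup and register_netdev() as in the driver ... */
	dev_set_drvdata(client_dev, ndev);	/* remembered for remove() */
	return 0;
}

static void example_remove(struct device *client_dev)
{
	struct net_device *ndev = dev_get_drvdata(client_dev);
	struct ntb_netdev *dev = netdev_priv(ndev);

	unregister_netdev(ndev);
	ntb_transport_free_queue(dev->qp);
	free_netdev(ndev);
}

The same hunk also switches the tx timer to usecs_to_jiffies(), apparently because tx_time is specified in microseconds.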
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -49,11 +49,14 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/aer.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/debugfs.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> #include <linux/ntb.h> #include "ntb_hw_idt.h" @@ -1105,9 +1108,9 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, } /* Allocate memory for memory window descriptors */ - ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, - sizeof(*ret_mws), GFP_KERNEL); - if (IS_ERR_OR_NULL(ret_mws)) + ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws), + GFP_KERNEL); + if (!ret_mws) return ERR_PTR(-ENOMEM); /* Copy the info of detected memory windows */ @@ -1320,7 +1323,7 @@ static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, idt_nt_write(ndev, bar->ltbase, (u32)addr); idt_nt_write(ndev, bar->utbase, (u32)(addr >> 32)); /* Set the custom BAR aperture limit */ - limit = pci_resource_start(ntb->pdev, mw_cfg->bar) + size; + limit = pci_bus_address(ntb->pdev, mw_cfg->bar) + size; idt_nt_write(ndev, bar->limit, (u32)limit); if (IS_FLD_SET(BARSETUP_TYPE, data, 64)) idt_nt_write(ndev, (bar + 1)->limit, (limit >> 32)); @@ -1821,61 +1824,284 @@ static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx, * 7. Temperature sensor operations * * IDT PCIe-switch has an embedded temperature sensor, which can be used to - * warn a user-space of possible chip overheating. Since workload temperature - * can be different on different platforms, temperature thresholds as well as - * general sensor settings must be setup in the framework of BIOS/EEPROM - * initializations. It includes the actual sensor enabling as well. + * check current chip core temperature. Since a workload environment can be + * different on different platforms, an offset and ADC/filter settings can be + * specified. At the moment only the offset configuration is exposed over + * the sysfs hwmon interface; the rest of the settings can be adjusted, + * for instance, by the BIOS/EEPROM firmware. *============================================================================= */ /* + * idt_get_deg() - convert millidegree Celsius value to just degree + * @mdegC: IN - millidegree Celsius value + * + * Return: Degree corresponding to the passed millidegree value + */ +static inline s8 idt_get_deg(long mdegC) +{ + return mdegC / 1000; +} + +/* + * idt_get_deg_frac() - retrieve 0/0.5 fraction of the millidegree Celsius value + * @mdegC: IN - millidegree Celsius value + * + * Return: 0/0.5 degree fraction of the passed millidegree value + */ +static inline u8 idt_get_deg_frac(long mdegC) +{ + return (mdegC % 1000) >= 500 ? 5 : 0; +} + +/* + * idt_temp_get_fmt() - convert millidegree Celsius value to 0:7:1 format + * @mdegC: IN - millidegree Celsius value + * + * Return: 0:7:1 format acceptable by the IDT temperature sensor + */ +static inline u8 idt_temp_get_fmt(long mdegC) +{ + return (idt_get_deg(mdegC) << 1) | (idt_get_deg_frac(mdegC) ? 1 : 0); +} + +/* + * idt_get_temp_sval() - convert temp sample to signed millidegree Celsius + * @data: IN - shifted to LSB 8-bits temperature sample + * + * Return: signed millidegree Celsius + */ +static inline long idt_get_temp_sval(u32 data) +{ + return ((s8)data / 2) * 1000 + (data & 0x1 ?
500 : 0); +} + +/* + * idt_get_temp_uval() - convert temp sample to unsigned millidegree Celsius + * @data: IN - shifted to LSB 8-bits temperature sample + * + * Return: unsigned millidegree Celsius + */ +static inline long idt_get_temp_uval(u32 data) +{ + return (data / 2) * 1000 + (data & 0x1 ? 500 : 0); +} + +/* * idt_read_temp() - read temperature from chip sensor * @ntb: NTB device context. - * @val: OUT - integer value of temperature - * @frac: OUT - fraction + * @type: IN - type of the temperature value to read + * @val: OUT - integer value of temperature in millidegree Celsius */ -static void idt_read_temp(struct idt_ntb_dev *ndev, unsigned char *val, - unsigned char *frac) +static void idt_read_temp(struct idt_ntb_dev *ndev, + const enum idt_temp_val type, long *val) { u32 data; - /* Read the data from TEMP field of the TMPSTS register */ - data = idt_sw_read(ndev, IDT_SW_TMPSTS); - data = GET_FIELD(TMPSTS_TEMP, data); - /* TEMP field has one fractional bit and seven integer bits */ - *val = data >> 1; - *frac = ((data & 0x1) ? 5 : 0); + /* Select the temperature field in accordance with the passed type */ + switch (type) { + case IDT_TEMP_CUR: + data = GET_FIELD(TMPSTS_TEMP, + idt_sw_read(ndev, IDT_SW_TMPSTS)); + break; + case IDT_TEMP_LOW: + data = GET_FIELD(TMPSTS_LTEMP, + idt_sw_read(ndev, IDT_SW_TMPSTS)); + break; + case IDT_TEMP_HIGH: + data = GET_FIELD(TMPSTS_HTEMP, + idt_sw_read(ndev, IDT_SW_TMPSTS)); + break; + case IDT_TEMP_OFFSET: + /* This is the only field with signed 0:7:1 format */ + data = GET_FIELD(TMPADJ_OFFSET, + idt_sw_read(ndev, IDT_SW_TMPADJ)); + *val = idt_get_temp_sval(data); + return; + default: + data = GET_FIELD(TMPSTS_TEMP, + idt_sw_read(ndev, IDT_SW_TMPSTS)); + break; + } + + /* The rest of the fields accept unsigned 0:7:1 format */ + *val = idt_get_temp_uval(data); } /* - * idt_temp_isr() - temperature sensor alarm events ISR - * @ndev: IDT NTB hardware driver descriptor - * @ntint_sts: NT-function interrupt status + * idt_write_temp() - write temperature to the chip sensor register + * @ntb: NTB device context. + * @type: IN - type of the temperature value to change + * @val: IN - integer value of temperature in millidegree Celsius + */ +static void idt_write_temp(struct idt_ntb_dev *ndev, + const enum idt_temp_val type, const long val) +{ + unsigned int reg; + u32 data; + u8 fmt; + + /* Retrieve the properly formatted temperature value */ + fmt = idt_temp_get_fmt(val); + + mutex_lock(&ndev->hwmon_mtx); + switch (type) { + case IDT_TEMP_LOW: + reg = IDT_SW_TMPALARM; + data = SET_FIELD(TMPALARM_LTEMP, idt_sw_read(ndev, reg), fmt) & + ~IDT_TMPALARM_IRQ_MASK; + break; + case IDT_TEMP_HIGH: + reg = IDT_SW_TMPALARM; + data = SET_FIELD(TMPALARM_HTEMP, idt_sw_read(ndev, reg), fmt) & + ~IDT_TMPALARM_IRQ_MASK; + break; + case IDT_TEMP_OFFSET: + reg = IDT_SW_TMPADJ; + data = SET_FIELD(TMPADJ_OFFSET, idt_sw_read(ndev, reg), fmt); + break; + default: + goto inval_spin_unlock; + } + + idt_sw_write(ndev, reg, data); + +inval_spin_unlock: + mutex_unlock(&ndev->hwmon_mtx); +} + +/* + * idt_sysfs_show_temp() - printout corresponding temperature value + * @dev: Pointer to the NTB device structure + * @da: Sensor device attribute structure + * @buf: Buffer to print temperature out + * - * It handles events of temperature crossing alarm thresholds. Since reading - * of TMPALARM register clears it up, the function doesn't analyze the - * read value, instead the current temperature value just warningly printed to - * log.
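Aside: the helpers above pack temperatures into the sensor's 0:7:1 fixed-point format, bits 7:1 holding whole degrees (two's complement in the offset field) and bit 0 a half-degree flag. A worked round trip in plain C mirroring idt_temp_get_fmt() and idt_get_temp_uval():

#include <stdio.h>
#include <stdint.h>

/* encode: millidegrees -> 0:7:1 register format */
static uint8_t temp_to_fmt(long mdegC)
{
	return (uint8_t)(((mdegC / 1000) << 1) |
			 ((mdegC % 1000) >= 500 ? 1 : 0));
}

/* decode: 0:7:1 register format -> unsigned millidegrees */
static long fmt_to_temp(uint8_t data)
{
	return (data / 2) * 1000 + ((data & 0x1) ? 500 : 0);
}

int main(void)
{
	long mdeg = 45500;	/* 45.5 degrees Celsius */
	uint8_t raw = temp_to_fmt(mdeg);

	/* 45.5 C -> (45 << 1) | 1 = 0x5B, and back to 45500 */
	printf("raw=0x%02X back=%ld\n", raw, fmt_to_temp(raw));
	return 0;
}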
- * The method is called from PCIe ISR bottom-half routine. + * Return: Number of written symbols or negative error */ -static void idt_temp_isr(struct idt_ntb_dev *ndev, u32 ntint_sts) +static ssize_t idt_sysfs_show_temp(struct device *dev, + struct device_attribute *da, char *buf) { - unsigned char val, frac; + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct idt_ntb_dev *ndev = dev_get_drvdata(dev); + enum idt_temp_val type = attr->index; + long mdeg; - /* Read the current temperature value */ - idt_read_temp(ndev, &val, &frac); + idt_read_temp(ndev, type, &mdeg); + return sprintf(buf, "%ld\n", mdeg); +} - /* Read the temperature alarm to clean the alarm status out */ - /*(void)idt_sw_read(ndev, IDT_SW_TMPALARM);*/ +/* + * idt_sysfs_set_temp() - set corresponding temperature value + * @dev: Pointer to the NTB device structure + * @da: Sensor device attribute structure + * @buf: Buffer to print temperature out + * @count: Size of the passed buffer + * + * Return: Number of written symbols or negative error + */ +static ssize_t idt_sysfs_set_temp(struct device *dev, + struct device_attribute *da, const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct idt_ntb_dev *ndev = dev_get_drvdata(dev); + enum idt_temp_val type = attr->index; + long mdeg; + int ret; - /* Clean the corresponding interrupt bit */ - idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_TMPSENSOR); + ret = kstrtol(buf, 10, &mdeg); + if (ret) + return ret; + + /* Clamp the passed value in accordance with the type */ + if (type == IDT_TEMP_OFFSET) + mdeg = clamp_val(mdeg, IDT_TEMP_MIN_OFFSET, + IDT_TEMP_MAX_OFFSET); + else + mdeg = clamp_val(mdeg, IDT_TEMP_MIN_MDEG, IDT_TEMP_MAX_MDEG); + + idt_write_temp(ndev, type, mdeg); + + return count; +} + +/* + * idt_sysfs_reset_hist() - reset temperature history + * @dev: Pointer to the NTB device structure + * @da: Sensor device attribute structure + * @buf: Buffer to print temperature out + * @count: Size of the passed buffer + * + * Return: Number of written symbols or negative error + */ +static ssize_t idt_sysfs_reset_hist(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) +{ + struct idt_ntb_dev *ndev = dev_get_drvdata(dev); + + /* Just set the maximal value to the lowest temperature field and + * minimal value to the highest temperature field + */ + idt_write_temp(ndev, IDT_TEMP_LOW, IDT_TEMP_MAX_MDEG); + idt_write_temp(ndev, IDT_TEMP_HIGH, IDT_TEMP_MIN_MDEG); - dev_dbg(&ndev->ntb.pdev->dev, - "Temp sensor IRQ detected %#08x", ntint_sts); + return count; +} + +/* + * Hwmon IDT sysfs attributes + */ +static SENSOR_DEVICE_ATTR(temp1_input, 0444, idt_sysfs_show_temp, NULL, + IDT_TEMP_CUR); +static SENSOR_DEVICE_ATTR(temp1_lowest, 0444, idt_sysfs_show_temp, NULL, + IDT_TEMP_LOW); +static SENSOR_DEVICE_ATTR(temp1_highest, 0444, idt_sysfs_show_temp, NULL, + IDT_TEMP_HIGH); +static SENSOR_DEVICE_ATTR(temp1_offset, 0644, idt_sysfs_show_temp, + idt_sysfs_set_temp, IDT_TEMP_OFFSET); +static DEVICE_ATTR(temp1_reset_history, 0200, NULL, idt_sysfs_reset_hist); - /* Print temperature value to log */ - dev_warn(&ndev->ntb.pdev->dev, "Temperature %hhu.%hhu", val, frac); +/* + * Hwmon IDT sysfs attributes group + */ +static struct attribute *idt_temp_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_lowest.dev_attr.attr, + &sensor_dev_attr_temp1_highest.dev_attr.attr, + &sensor_dev_attr_temp1_offset.dev_attr.attr, + &dev_attr_temp1_reset_history.attr, + 
NULL +}; +ATTRIBUTE_GROUPS(idt_temp); + +/* + * idt_init_temp() - initialize temperature sensor interface + * @ndev: IDT NTB hardware driver descriptor + * + * This simple sensor initialization method is responsible for switching + * the device on and registering the resource-managed hwmon interface. Note + * that since the device is shared we won't disable it on remove, but leave + * it working until the system is powered off. + */ +static void idt_init_temp(struct idt_ntb_dev *ndev) +{ + struct device *hwmon; + + /* Enable sensor if it hasn't been already */ + idt_sw_write(ndev, IDT_SW_TMPCTL, 0x0); + + /* Initialize hwmon interface fields */ + mutex_init(&ndev->hwmon_mtx); + + hwmon = devm_hwmon_device_register_with_groups(&ndev->ntb.pdev->dev, + ndev->swcfg->name, ndev, idt_temp_groups); + if (IS_ERR(hwmon)) { + dev_err(&ndev->ntb.pdev->dev, "Couldn't create hwmon device"); + return; + } + + dev_dbg(&ndev->ntb.pdev->dev, "Temperature HWmon interface registered"); } /*============================================================================= @@ -1931,7 +2157,7 @@ static int idt_init_isr(struct idt_ntb_dev *ndev) goto err_free_vectors; } - /* Unmask Message/Doorbell/SE/Temperature interrupts */ + /* Unmask Message/Doorbell/SE interrupts */ ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) & ~IDT_NTINTMSK_ALL; idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask); @@ -1946,7 +2172,6 @@ err_free_vectors: return ret; } - /* * idt_deinit_isr() - deinitialize PCIe interrupt handler * @ndev: IDT NTB hardware driver descriptor @@ -2007,12 +2232,6 @@ static irqreturn_t idt_thread_isr(int irq, void *devid) handled = true; } - /* Handle temperature sensor interrupt */ - if (ntint_sts & IDT_NTINTSTS_TMPSENSOR) { - idt_temp_isr(ndev, ntint_sts); - handled = true; - } - dev_dbg(&ndev->ntb.pdev->dev, "IDT IRQs 0x%08x handled", ntint_sts); return handled ?
IRQ_HANDLED : IRQ_NONE; @@ -2123,9 +2342,9 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) { struct idt_ntb_dev *ndev = filp->private_data; - unsigned char temp, frac, idx, pidx, cnt; + unsigned char idx, pidx, cnt; + unsigned long irqflags, mdeg; ssize_t ret = 0, off = 0; - unsigned long irqflags; enum ntb_speed speed; enum ntb_width width; char *strbuf; @@ -2274,9 +2493,10 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf, off += scnprintf(strbuf + off, size - off, "\n"); /* Current temperature */ - idt_read_temp(ndev, &temp, &frac); + idt_read_temp(ndev, IDT_TEMP_CUR, &mdeg); off += scnprintf(strbuf + off, size - off, - "Switch temperature\t\t- %hhu.%hhuC\n", temp, frac); + "Switch temperature\t\t- %hhd.%hhuC\n", + idt_get_deg(mdeg), idt_get_deg_frac(mdeg)); /* Copy the buffer to the User Space */ ret = simple_read_from_buffer(ubuf, count, offp, strbuf, off); @@ -2390,7 +2610,7 @@ static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev, /* Allocate memory for the IDT PCIe-device descriptor */ ndev = devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL); - if (IS_ERR_OR_NULL(ndev)) { + if (!ndev) { dev_err(&pdev->dev, "Memory allocation failed for descriptor"); return ERR_PTR(-ENOMEM); } @@ -2571,6 +2791,9 @@ static int idt_pci_probe(struct pci_dev *pdev, /* Initialize Messaging subsystem */ idt_init_msg(ndev); + /* Initialize hwmon interface */ + idt_init_temp(ndev); + /* Initialize IDT interrupts handler */ ret = idt_init_isr(ndev); if (ret != 0) diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.h b/drivers/ntb/hw/idt/ntb_hw_idt.h index 856fd182f6f4..2f1aa121b0cf 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.h +++ b/drivers/ntb/hw/idt/ntb_hw_idt.h @@ -4,7 +4,7 @@ * * GPL LICENSE SUMMARY * - * Copyright (C) 2016 T-Platforms All Rights Reserved. + * Copyright (C) 2016-2018 T-Platforms JSC All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -47,9 +47,9 @@ #include <linux/pci_ids.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/mutex.h> #include <linux/ntb.h> - /* * Macro is used to create the struct pci_device_id that matches * the supported IDT PCIe-switches @@ -688,15 +688,14 @@ * @IDT_NTINTMSK_DBELL: Doorbell interrupt mask bit * @IDT_NTINTMSK_SEVENT: Switch Event interrupt mask bit * @IDT_NTINTMSK_TMPSENSOR: Temperature sensor interrupt mask bit - * @IDT_NTINTMSK_ALL: All the useful interrupts mask + * @IDT_NTINTMSK_ALL: NTB-related interrupts mask */ #define IDT_NTINTMSK_MSG 0x00000001U #define IDT_NTINTMSK_DBELL 0x00000002U #define IDT_NTINTMSK_SEVENT 0x00000008U #define IDT_NTINTMSK_TMPSENSOR 0x00000080U #define IDT_NTINTMSK_ALL \ - (IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL | \ - IDT_NTINTMSK_SEVENT | IDT_NTINTMSK_TMPSENSOR) + (IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL | IDT_NTINTMSK_SEVENT) /* * NTGSIGNAL register fields related constants @@ -886,12 +885,60 @@ #define IDT_SWPxMSGCTL_PART_FLD 4 /* + * TMPCTL register fields related constants + * @IDT_TMPCTL_LTH_MASK: Low temperature threshold field mask + * @IDT_TMPCTL_LTH_FLD: Low temperature threshold field offset + * @IDT_TMPCTL_MTH_MASK: Middle temperature threshold field mask + * @IDT_TMPCTL_MTH_FLD: Middle temperature threshold field offset + * @IDT_TMPCTL_HTH_MASK: High temperature threshold field mask + * @IDT_TMPCTL_HTH_FLD: High temperature threshold field offset + * @IDT_TMPCTL_PDOWN: Temperature sensor power down + */ +#define IDT_TMPCTL_LTH_MASK 0x000000FFU +#define IDT_TMPCTL_LTH_FLD 0 +#define IDT_TMPCTL_MTH_MASK 0x0000FF00U +#define IDT_TMPCTL_MTH_FLD 8 +#define IDT_TMPCTL_HTH_MASK 0x00FF0000U +#define IDT_TMPCTL_HTH_FLD 16 +#define IDT_TMPCTL_PDOWN 0x80000000U + +/* * TMPSTS register fields related constants * @IDT_TMPSTS_TEMP_MASK: Current temperature field mask * @IDT_TMPSTS_TEMP_FLD: Current temperature field offset + * @IDT_TMPSTS_LTEMP_MASK: Lowest temperature field mask + * @IDT_TMPSTS_LTEMP_FLD: Lowest temperature field offset + * @IDT_TMPSTS_HTEMP_MASK: Highest temperature field mask + * @IDT_TMPSTS_HTEMP_FLD: Highest temperature field offset */ #define IDT_TMPSTS_TEMP_MASK 0x000000FFU #define IDT_TMPSTS_TEMP_FLD 0 +#define IDT_TMPSTS_LTEMP_MASK 0x0000FF00U +#define IDT_TMPSTS_LTEMP_FLD 8 +#define IDT_TMPSTS_HTEMP_MASK 0x00FF0000U +#define IDT_TMPSTS_HTEMP_FLD 16 + +/* + * TMPALARM register fields related constants + * @IDT_TMPALARM_LTEMP_MASK: Lowest temperature field mask + * @IDT_TMPALARM_LTEMP_FLD: Lowest temperature field offset + * @IDT_TMPALARM_HTEMP_MASK: Highest temperature field mask + * @IDT_TMPALARM_HTEMP_FLD: Highest temperature field offset + * @IDT_TMPALARM_IRQ_MASK: Alarm IRQ status mask + */ +#define IDT_TMPALARM_LTEMP_MASK 0x0000FF00U +#define IDT_TMPALARM_LTEMP_FLD 8 +#define IDT_TMPALARM_HTEMP_MASK 0x00FF0000U +#define IDT_TMPALARM_HTEMP_FLD 16 +#define IDT_TMPALARM_IRQ_MASK 0x3F000000U + +/* + * TMPADJ register fields related constants + * @IDT_TMPADJ_OFFSET_MASK: Temperature value offset field mask + * @IDT_TMPADJ_OFFSET_FLD: Temperature value offset field offset + */ +#define IDT_TMPADJ_OFFSET_MASK 0x000000FFU +#define IDT_TMPADJ_OFFSET_FLD 0 /* * Helper macro to get/set the corresponding field value @@ -951,6 +998,32 @@ #define IDT_DIR_SIZE_ALIGN 1 /* + * IDT PCIe-switch temperature sensor value limits + * @IDT_TEMP_MIN_MDEG: Minimal integer value of 
temperature + * @IDT_TEMP_MAX_MDEG: Maximal integer value of temperature + * @IDT_TEMP_MIN_OFFSET:Minimal integer value of temperature offset + * @IDT_TEMP_MAX_OFFSET:Maximal integer value of temperature offset + */ +#define IDT_TEMP_MIN_MDEG 0 +#define IDT_TEMP_MAX_MDEG 127500 +#define IDT_TEMP_MIN_OFFSET -64000 +#define IDT_TEMP_MAX_OFFSET 63500 + +/* + * Temperature sensor values enumeration + * @IDT_TEMP_CUR: Current temperature + * @IDT_TEMP_LOW: Lowest historical temperature + * @IDT_TEMP_HIGH: Highest historical temperature + * @IDT_TEMP_OFFSET: Current temperature offset + */ +enum idt_temp_val { + IDT_TEMP_CUR, + IDT_TEMP_LOW, + IDT_TEMP_HIGH, + IDT_TEMP_OFFSET +}; + +/* * IDT Memory Windows type. Depending on the device settings, IDT supports * Direct Address Translation MW registers and Lookup Table registers * @IDT_MW_DIR: Direct address translation @@ -1044,6 +1117,8 @@ struct idt_ntb_peer { * @msg_mask_lock: Message mask register lock * @gasa_lock: GASA registers access lock * + * @hwmon_mtx: Temperature sensor interface update mutex + * * @dbgfs_info: DebugFS info node */ struct idt_ntb_dev { @@ -1071,6 +1146,8 @@ struct idt_ntb_dev { spinlock_t msg_mask_lock; spinlock_t gasa_lock; + struct mutex hwmon_mtx; + struct dentry *dbgfs_info; }; #define to_ndev_ntb(__ntb) container_of(__ntb, struct idt_ntb_dev, ntb) diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c index 6aa573227279..2ad263f708da 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c @@ -265,7 +265,7 @@ static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits, return 0; } -static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) +static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector) { u64 shift, mask; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 939895966476..3bfdb4562408 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -194,6 +194,8 @@ struct ntb_transport_mw { void __iomem *vbase; size_t xlat_size; size_t buff_size; + size_t alloc_size; + void *alloc_addr; void *virt_addr; dma_addr_t dma_addr; }; @@ -672,13 +674,59 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) return; ntb_mw_clear_trans(nt->ndev, PIDX, num_mw); - dma_free_coherent(&pdev->dev, mw->buff_size, - mw->virt_addr, mw->dma_addr); + dma_free_coherent(&pdev->dev, mw->alloc_size, + mw->alloc_addr, mw->dma_addr); mw->xlat_size = 0; mw->buff_size = 0; + mw->alloc_size = 0; + mw->alloc_addr = NULL; mw->virt_addr = NULL; } +static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, + struct device *dma_dev, size_t align) +{ + dma_addr_t dma_addr; + void *alloc_addr, *virt_addr; + int rc; + + alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size, + &dma_addr, GFP_KERNEL); + if (!alloc_addr) { + dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n", + mw->alloc_size); + return -ENOMEM; + } + virt_addr = alloc_addr; + + /* + * we must ensure that the memory address allocated is BAR size + * aligned in order for the XLAT register to take the value. This + * is a requirement of the hardware. It is recommended to setup CMA + * for BAR sizes equal or greater than 4MB. 
+ */ + if (!IS_ALIGNED(dma_addr, align)) { + if (mw->alloc_size > mw->buff_size) { + virt_addr = PTR_ALIGN(alloc_addr, align); + dma_addr = ALIGN(dma_addr, align); + } else { + rc = -ENOMEM; + goto err; + } + } + + mw->alloc_addr = alloc_addr; + mw->virt_addr = virt_addr; + mw->dma_addr = dma_addr; + + return 0; + +err: + dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr); + + return rc; +} + static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, resource_size_t size) { @@ -710,28 +758,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, /* Alloc memory for receiving data. Must be aligned */ mw->xlat_size = xlat_size; mw->buff_size = buff_size; + mw->alloc_size = buff_size; - mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size, - &mw->dma_addr, GFP_KERNEL); - if (!mw->virt_addr) { - mw->xlat_size = 0; - mw->buff_size = 0; - dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n", - buff_size); - return -ENOMEM; - } - - /* - * we must ensure that the memory address allocated is BAR size - * aligned in order for the XLAT register to take the value. This - * is a requirement of the hardware. It is recommended to setup CMA - * for BAR sizes equal or greater than 4MB. - */ - if (!IS_ALIGNED(mw->dma_addr, xlat_align)) { - dev_err(&pdev->dev, "DMA memory %pad is not aligned\n", - &mw->dma_addr); - ntb_free_mw(nt, num_mw); - return -ENOMEM; + rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align); + if (rc) { + mw->alloc_size *= 2; + rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align); + if (rc) { + dev_err(&pdev->dev, + "Unable to alloc aligned MW buff\n"); + mw->xlat_size = 0; + mw->buff_size = 0; + mw->alloc_size = 0; + return rc; + } } /* Notify HW the memory location of the receive buffer */ @@ -1278,6 +1318,7 @@ static void ntb_rx_copy_callback(void *data, case DMA_TRANS_READ_FAILED: case DMA_TRANS_WRITE_FAILED: entry->errors++; + /* fall through */ case DMA_TRANS_ABORTED: { struct ntb_transport_qp *qp = entry->qp; @@ -1533,6 +1574,7 @@ static void ntb_tx_copy_callback(void *data, case DMA_TRANS_READ_FAILED: case DMA_TRANS_WRITE_FAILED: entry->errors++; + /* fall through */ case DMA_TRANS_ABORTED: { void __iomem *offset = diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index e52b9d3c0bd6..0b70c8bab045 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1704,7 +1704,6 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, op->fcp_req.rspaddr = &op->rsp_iu; op->fcp_req.rsplen = sizeof(op->rsp_iu); op->fcp_req.done = nvme_fc_fcpio_done; - op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE]; op->ctrl = ctrl; op->queue = queue; op->rq = rq; @@ -1752,6 +1751,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, if (res) return res; op->op.fcp_req.first_sgl = &op->sgl[0]; + op->op.fcp_req.private = &op->priv[0]; return res; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index f30031945ee4..c33bb201b884 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1663,6 +1663,9 @@ static void nvme_map_cmb(struct nvme_dev *dev) struct pci_dev *pdev = to_pci_dev(dev->dev); int bar; + if (dev->cmb_size) + return; + dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); if (!dev->cmbsz) return; @@ -2147,7 +2150,6 @@ static void nvme_pci_disable(struct nvme_dev *dev) { struct pci_dev *pdev = to_pci_dev(dev->dev); - nvme_release_cmb(dev); pci_free_irq_vectors(pdev); if (pci_is_enabled(pdev)) { @@ -2595,6 +2597,7 @@ static void nvme_remove(struct pci_dev *pdev) 
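Aside, returning to the ntb_transport hunks above: dma_alloc_coherent() makes no BAR-size alignment promise, so when the address comes back unaligned the buffer is re-allocated at twice the size and an aligned window is carved out with PTR_ALIGN()/ALIGN(), keeping alloc_addr separately so the free uses the original pointer. A userspace analogue with malloc standing in for the DMA allocator:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t buff_size = 4096, align = 4096;
	size_t alloc_size = 2 * buff_size;	/* the retry size */

	/* Over-allocating guarantees an aligned buff_size window exists
	 * somewhere inside the allocation. */
	unsigned char *raw = malloc(alloc_size);
	if (!raw)
		return 1;

	uintptr_t aligned = ((uintptr_t)raw + align - 1) &
			    ~(uintptr_t)(align - 1);

	printf("raw=%p aligned=%p fits=%d\n", (void *)raw, (void *)aligned,
	       aligned + buff_size <= (uintptr_t)raw + alloc_size);
	free(raw);	/* always free the original pointer */
	return 0;
}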
nvme_stop_ctrl(&dev->ctrl); nvme_remove_namespaces(&dev->ctrl); nvme_dev_disable(dev, true); + nvme_release_cmb(dev); nvme_free_host_mem(dev); nvme_dev_remove_admin(dev); nvme_free_queues(dev, 0); diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 39d972e2595f..01feebec29ea 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -101,7 +101,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, rw = READ; } - iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count); + iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count); iocb->ki_pos = pos; iocb->ki_filp = req->ns->file; diff --git a/drivers/of/base.c b/drivers/of/base.c index d023cf303d56..09692c9b32a7 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -777,8 +777,6 @@ struct device_node *of_get_next_cpu_node(struct device_node *prev) if (!(of_node_name_eq(next, "cpu") || (next->type && !of_node_cmp(next->type, "cpu")))) continue; - if (!__of_device_is_available(next)) - continue; if (of_node_get(next)) break; } diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 504d252716f2..27e5dd47a01f 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -447,10 +447,9 @@ config PWM_TEGRA config PWM_TIECAP tristate "ECAP PWM support" - depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE + depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 help - PWM driver support for the ECAP APWM controller found on AM33XX - TI SOC + PWM driver support for the ECAP APWM controller found on TI SOCs To compile this driver as a module, choose M here: the module will be called pwm-tiecap. diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c index 5561b9e190f8..757230e1f575 100644 --- a/drivers/pwm/pwm-lpss-platform.c +++ b/drivers/pwm/pwm-lpss-platform.c @@ -30,6 +30,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = { .clk_rate = 19200000, .npwm = 1, .base_unit_bits = 16, + .other_devices_aml_touches_pwm_regs = true, }; /* Broxton */ @@ -60,6 +61,7 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev) platform_set_drvdata(pdev, lpwm); + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); @@ -74,13 +76,29 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev) return pwm_lpss_remove(lpwm); } -static SIMPLE_DEV_PM_OPS(pwm_lpss_platform_pm_ops, - pwm_lpss_suspend, - pwm_lpss_resume); +static int pwm_lpss_prepare(struct device *dev) +{ + struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev); + + /* + * If other device's AML code touches the PWM regs on suspend/resume + * force runtime-resume the PWM controller to allow this. 
+ */ + if (lpwm->info->other_devices_aml_touches_pwm_regs) + return 0; /* Force runtime-resume */ + + return 1; /* If runtime-suspended leave as is */ +} + +static const struct dev_pm_ops pwm_lpss_platform_pm_ops = { + .prepare = pwm_lpss_prepare, + SET_SYSTEM_SLEEP_PM_OPS(pwm_lpss_suspend, pwm_lpss_resume) +}; static const struct acpi_device_id pwm_lpss_acpi_match[] = { { "80860F09", (unsigned long)&pwm_lpss_byt_info }, { "80862288", (unsigned long)&pwm_lpss_bsw_info }, + { "80862289", (unsigned long)&pwm_lpss_bsw_info }, { "80865AC8", (unsigned long)&pwm_lpss_bxt_info }, { }, }; diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 4721a264bac2..2ac3a2aa9e53 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -32,15 +32,6 @@ /* Size of each PWM register space if multiple */ #define PWM_SIZE 0x400 -#define MAX_PWMS 4 - -struct pwm_lpss_chip { - struct pwm_chip chip; - void __iomem *regs; - const struct pwm_lpss_boardinfo *info; - u32 saved_ctrl[MAX_PWMS]; -}; - static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip) { return container_of(chip, struct pwm_lpss_chip, chip); @@ -97,7 +88,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, unsigned long long on_time_div; unsigned long c = lpwm->info->clk_rate, base_unit_range; unsigned long long base_unit, freq = NSEC_PER_SEC; - u32 ctrl; + u32 orig_ctrl, ctrl; do_div(freq, period_ns); @@ -114,13 +105,17 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, do_div(on_time_div, period_ns); on_time_div = 255ULL - on_time_div; - ctrl = pwm_lpss_read(pwm); + orig_ctrl = ctrl = pwm_lpss_read(pwm); ctrl &= ~PWM_ON_TIME_DIV_MASK; ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); base_unit &= base_unit_range; ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; ctrl |= on_time_div; - pwm_lpss_write(pwm, ctrl); + + if (orig_ctrl != ctrl) { + pwm_lpss_write(pwm, ctrl); + pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE); + } } static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond) @@ -144,7 +139,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, return ret; } pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false); ret = pwm_lpss_wait_for_update(pwm); if (ret) { @@ -157,7 +151,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, if (ret) return ret; pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); return pwm_lpss_wait_for_update(pwm); } } else if (pwm_is_enabled(pwm)) { @@ -168,8 +161,42 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } +/* This function gets called once from pwmchip_add to get the initial state */ +static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct pwm_lpss_chip *lpwm = to_lpwm(chip); + unsigned long base_unit_range; + unsigned long long base_unit, freq, on_time_div; + u32 ctrl; + + base_unit_range = BIT(lpwm->info->base_unit_bits); + + ctrl = pwm_lpss_read(pwm); + on_time_div = 255 - (ctrl & PWM_ON_TIME_DIV_MASK); + base_unit = (ctrl >> PWM_BASE_UNIT_SHIFT) & (base_unit_range - 1); + + freq = base_unit * lpwm->info->clk_rate; + do_div(freq, base_unit_range); + if (freq == 0) + state->period = NSEC_PER_SEC; + else + state->period = NSEC_PER_SEC / (unsigned long)freq; + + 
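Aside: the 0/1 returns in pwm_lpss_prepare() above follow the PM core's ->prepare() contract: a positive return says the device may stay runtime-suspended across the system transition (the "direct complete" path), 0 forces a resume first, and DPM_FLAG_SMART_PREPARE is what makes the ACPI PM domain consult the driver callback at all. A condensed, hedged restatement using the flag from the hunk:

/* Sketch only: opting a driver into the smart-prepare decision. */
static int example_prepare(struct device *dev)
{
	struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);

	if (lpwm->info->other_devices_aml_touches_pwm_regs)
		return 0;	/* must be awake while others suspend */

	return 1;	/* may remain runtime-suspended throughout */
}

/* paired with, at probe time:
 *	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
 */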
on_time_div *= state->period; + do_div(on_time_div, 255); + state->duty_cycle = on_time_div; + + state->polarity = PWM_POLARITY_NORMAL; + state->enabled = !!(ctrl & PWM_ENABLE); + + if (state->enabled) + pm_runtime_get(chip->dev); +} + static const struct pwm_ops pwm_lpss_ops = { .apply = pwm_lpss_apply, + .get_state = pwm_lpss_get_state, .owner = THIS_MODULE, }; @@ -214,6 +241,12 @@ EXPORT_SYMBOL_GPL(pwm_lpss_probe); int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) { + int i; + + for (i = 0; i < lpwm->info->npwm; i++) { + if (pwm_is_enabled(&lpwm->chip.pwms[i])) + pm_runtime_put(lpwm->chip.dev); + } return pwmchip_remove(&lpwm->chip); } EXPORT_SYMBOL_GPL(pwm_lpss_remove); diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h index 7a4238ad1fcb..3236be835bd9 100644 --- a/drivers/pwm/pwm-lpss.h +++ b/drivers/pwm/pwm-lpss.h @@ -16,13 +16,25 @@ #include <linux/device.h> #include <linux/pwm.h> -struct pwm_lpss_chip; +#define MAX_PWMS 4 + +struct pwm_lpss_chip { + struct pwm_chip chip; + void __iomem *regs; + const struct pwm_lpss_boardinfo *info; + u32 saved_ctrl[MAX_PWMS]; +}; struct pwm_lpss_boardinfo { unsigned long clk_rate; unsigned int npwm; unsigned long base_unit_bits; bool bypass; + /* + * On some devices the _PS0/_PS3 AML code of the GPU (GFX0) device + * messes with the PWM0 controller's state. + */ + bool other_devices_aml_touches_pwm_regs; }; struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 748f614d5375..a41812fc6f95 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * R-Car PWM Timer driver * * Copyright (C) 2015 Renesas Electronics Corporation - * - * This is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. */ #include <linux/clk.h> diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c index 29267d12fb4c..4a855a21b782 100644 --- a/drivers/pwm/pwm-renesas-tpu.c +++ b/drivers/pwm/pwm-renesas-tpu.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * R-Mobile TPU PWM driver * * Copyright (C) 2012 Renesas Solutions Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details.
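Aside: pwm_lpss_get_state() above inverts the prepare() math, recovering period and duty cycle from the base_unit and on-time-divisor fields of the control register. Worked numbers using the 19.2 MHz clock and 16 base-unit bits visible in the pwm-lpss-platform hunk (the register values are invented):

#include <stdio.h>

int main(void)
{
	unsigned long long clk_rate = 19200000ULL;	/* 19.2 MHz */
	unsigned long long range = 1ULL << 16;		/* base_unit_bits */
	unsigned long long base_unit = 1024;		/* example field */
	unsigned long long on_time_div = 255 - 128;	/* example field */

	unsigned long long freq = base_unit * clk_rate / range; /* 300000 Hz */
	unsigned long long period = 1000000000ULL / freq;	/* 3333 ns */
	unsigned long long duty = on_time_div * period / 255;	/* ~1660 ns */

	printf("period=%lluns duty=%lluns\n", period, duty);
	return 0;
}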
*/ #include <linux/clk.h> diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index f8ebbece57b7..48c4595a0ffc 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -300,7 +300,6 @@ static const struct of_device_id tegra_pwm_of_match[] = { { .compatible = "nvidia,tegra186-pwm", .data = &tegra186_pwm_soc }, { } }; - MODULE_DEVICE_TABLE(of, tegra_pwm_of_match); static const struct dev_pm_ops tegra_pwm_pm_ops = { diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 7c71cdb8a9d8..ceb233dd6048 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -249,6 +249,7 @@ static void pwm_export_release(struct device *child) static int pwm_export_child(struct device *parent, struct pwm_device *pwm) { struct pwm_export *export; + char *pwm_prop[2]; int ret; if (test_and_set_bit(PWMF_EXPORTED, &pwm->flags)) @@ -263,7 +264,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm) export->pwm = pwm; mutex_init(&export->lock); - export->child.class = parent->class; export->child.release = pwm_export_release; export->child.parent = parent; export->child.devt = MKDEV(0, 0); @@ -277,6 +277,10 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm) export = NULL; return ret; } + pwm_prop[0] = kasprintf(GFP_KERNEL, "EXPORT=pwm%u", pwm->hwpwm); + pwm_prop[1] = NULL; + kobject_uevent_env(&parent->kobj, KOBJ_CHANGE, pwm_prop); + kfree(pwm_prop[0]); return 0; } @@ -289,6 +293,7 @@ static int pwm_unexport_match(struct device *child, void *data) static int pwm_unexport_child(struct device *parent, struct pwm_device *pwm) { struct device *child; + char *pwm_prop[2]; if (!test_and_clear_bit(PWMF_EXPORTED, &pwm->flags)) return -ENODEV; @@ -297,6 +302,11 @@ static int pwm_unexport_child(struct device *parent, struct pwm_device *pwm) if (!child) return -ENODEV; + pwm_prop[0] = kasprintf(GFP_KERNEL, "UNEXPORT=pwm%u", pwm->hwpwm); + pwm_prop[1] = NULL; + kobject_uevent_env(&parent->kobj, KOBJ_CHANGE, pwm_prop); + kfree(pwm_prop[0]); + /* for device_find_child() */ put_device(child); device_unregister(child); diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 05293babb031..2d655a97b959 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -143,7 +143,9 @@ static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int secon static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal); static int twa_reset_device_extension(TW_Device_Extension *tw_dev); static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); -static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); +static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, + unsigned char *cdb, int use_sg, + TW_SG_Entry *sglistarg); static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); @@ -278,7 +280,7 @@ out: static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) { int request_id = 0; - char cdb[TW_MAX_CDB_LEN]; + unsigned char cdb[TW_MAX_CDB_LEN]; TW_SG_Entry sglist[1]; int finished = 0, count = 0; TW_Command_Full *full_command_packet; @@ -423,7 +425,7 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H /* This function will read the aen queue from the isr */ static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) { - 
char cdb[TW_MAX_CDB_LEN]; + unsigned char cdb[TW_MAX_CDB_LEN]; TW_SG_Entry sglist[1]; TW_Command_Full *full_command_packet; int retval = 1; @@ -1798,7 +1800,9 @@ out: static DEF_SCSI_QCMD(twa_scsi_queue) /* This function hands scsi cdb's to the firmware */ -static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg) +static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, + unsigned char *cdb, int use_sg, + TW_SG_Entry *sglistarg) { TW_Command_Full *full_command_packet; TW_Command_Apache *command_packet; diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 266bdac75304..480cf82700e9 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -287,7 +287,9 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id) } /* End twl_post_command_packet() */ /* This function hands scsi cdb's to the firmware */ -static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) +static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, + unsigned char *cdb, int use_sg, + TW_SG_Entry_ISO *sglistarg) { TW_Command_Full *full_command_packet; TW_Command_Apache *command_packet; @@ -372,7 +374,7 @@ out: /* This function will read the aen queue from the isr */ static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) { - char cdb[TW_MAX_CDB_LEN]; + unsigned char cdb[TW_MAX_CDB_LEN]; TW_SG_Entry_ISO sglist[1]; TW_Command_Full *full_command_packet; int retval = 1; @@ -554,7 +556,7 @@ out: static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) { int request_id = 0; - char cdb[TW_MAX_CDB_LEN]; + unsigned char cdb[TW_MAX_CDB_LEN]; TW_SG_Entry_ISO sglist[1]; int finished = 0, count = 0; TW_Command_Full *full_command_packet; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 70988c381268..f07444d30b21 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -538,7 +538,7 @@ config SCSI_HPTIOP config SCSI_BUSLOGIC tristate "BusLogic SCSI support" - depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API && VIRT_TO_BUS + depends on (PCI || ISA) && SCSI && ISA_DMA_API && VIRT_TO_BUS ---help--- This is support for BusLogic MultiMaster and FlashPoint SCSI Host Adapters. Consult the SCSI-HOWTO, available from @@ -1175,12 +1175,12 @@ config SCSI_LPFC_DEBUG_FS config SCSI_SIM710 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" - depends on (EISA || MCA) && SCSI + depends on EISA && SCSI select SCSI_SPI_ATTRS ---help--- This driver is for NCR53c710 based SCSI host adapters. - It currently supports Compaq EISA cards and NCR MCA cards + It currently supports Compaq EISA cards. 
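[Editor's note on the char to unsigned char CDB conversions in 3w-9xxx.c and 3w-sas.c above: SCSI CDB opcodes span the full 0x00-0xff range, so storing them in plain char (signed on most kernel targets) makes high opcodes compare negative. A minimal, self-contained sketch of the hazard these hunks avoid; the function names are hypothetical, not from the driver.]

#include <stdio.h>

static int is_write_16_signed(const char *cdb)
{
	/* (char)0x8a is -118 where char is signed, so this is never true */
	return cdb[0] == 0x8a;
}

static int is_write_16_unsigned(const unsigned char *cdb)
{
	/* 0x8a compares as 138, as intended */
	return cdb[0] == 0x8a;
}

int main(void)
{
	char bad[16] = { (char)0x8a };
	unsigned char good[16] = { 0x8a };

	printf("signed: %d, unsigned: %d\n",
	       is_write_16_signed(bad), is_write_16_unsigned(good));
	return 0;
}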
config SCSI_DC395x tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support" diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 4d7b0e0adbf7..301b3cad15f8 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -269,7 +269,7 @@ static LIST_HEAD(aha152x_host_list); /* DEFINES */ /* For PCMCIA cards, always use AUTOCONF */ -#if defined(PCMCIA) || defined(MODULE) +#if defined(AHA152X_PCMCIA) || defined(MODULE) #if !defined(AUTOCONF) #define AUTOCONF #endif @@ -297,7 +297,7 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc) #define DELAY_DEFAULT 1000 -#if defined(PCMCIA) +#if defined(AHA152X_PCMCIA) #define IRQ_MIN 0 #define IRQ_MAX 16 #else @@ -328,7 +328,7 @@ MODULE_AUTHOR("Jürgen Fischer"); MODULE_DESCRIPTION(AHA152X_REVID); MODULE_LICENSE("GPL"); -#if !defined(PCMCIA) +#if !defined(AHA152X_PCMCIA) #if defined(MODULE) static int io[] = {0, 0}; module_param_hw_array(io, int, ioport, NULL, 0); @@ -391,7 +391,7 @@ static struct isapnp_device_id id_table[] = { MODULE_DEVICE_TABLE(isapnp, id_table); #endif /* ISAPNP */ -#endif /* !PCMCIA */ +#endif /* !AHA152X_PCMCIA */ static struct scsi_host_template aha152x_driver_template; @@ -863,7 +863,7 @@ void aha152x_release(struct Scsi_Host *shpnt) if (shpnt->irq) free_irq(shpnt->irq, shpnt); -#if !defined(PCMCIA) +#if !defined(AHA152X_PCMCIA) if (shpnt->io_port) release_region(shpnt->io_port, IO_RANGE); #endif @@ -2924,7 +2924,7 @@ static struct scsi_host_template aha152x_driver_template = { .slave_alloc = aha152x_adjust_queue, }; -#if !defined(PCMCIA) +#if !defined(AHA152X_PCMCIA) static int setup_count; static struct aha152x_setup setup[2]; @@ -3392,4 +3392,4 @@ static int __init aha152x_setup(char *str) __setup("aha152x=", aha152x_setup); #endif -#endif /* !PCMCIA */ +#endif /* !AHA152X_PCMCIA */ diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 3df1428df317..311d23c727ce 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -790,12 +790,11 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf slot->n_elem = n_elem; slot->slot_tag = tag; - slot->buf = dma_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); + slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); if (!slot->buf) { rc = -ENOMEM; goto err_out_tag; } - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); tei.task = task; tei.hdr = &mvi->slot[tag]; @@ -1906,8 +1905,7 @@ static void mvs_work_queue(struct work_struct *work) if (phy->phy_event & PHY_PLUG_OUT) { u32 tmp; - struct sas_identify_frame *id; - id = (struct sas_identify_frame *)phy->frame_rcvd; + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); phy->phy_event &= ~PHY_PLUG_OUT; if (!(tmp & PHY_READY_MASK)) { diff --git a/drivers/scsi/pcmcia/aha152x_core.c b/drivers/scsi/pcmcia/aha152x_core.c index dba3716511c5..24b89228b241 100644 --- a/drivers/scsi/pcmcia/aha152x_core.c +++ b/drivers/scsi/pcmcia/aha152x_core.c @@ -1,3 +1,3 @@ -#define PCMCIA 1 +#define AHA152X_PCMCIA 1 #define AHA152X_STAT 1 #include "aha152x.c" diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index b28f159fdaee..0bb9ac6ece92 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -218,7 +218,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, mutex_lock(&ha->optrom_mutex); if (qla2x00_chip_is_down(vha)) { - mutex_unlock(&vha->hw->optrom_mutex); + mutex_unlock(&ha->optrom_mutex); return -EAGAIN; } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c 
index c72d8012fe2a..6fe20c27acc1 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -425,7 +425,7 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) __qla24xx_handle_gpdb_event(vha, ea); } -int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) +static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; @@ -680,7 +680,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, fcport); break; } - /* drop through */ + /* fall through */ default: if (fcport_is_smaller(fcport)) { /* local adapter is bigger */ @@ -1551,7 +1551,8 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, } -void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea) +static void qla_handle_els_plogi_done(scsi_qla_host_t *vha, + struct event_arg *ea) { ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post PRLI\n", diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 86fb8b21aa71..032635321ad6 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1195,8 +1195,8 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, * @sp: SRB command to process * @cmd_pkt: Command type 3 IOCB * @tot_dsds: Total number of segments to transfer - * @tot_prot_dsds: - * @fw_prot_opts: + * @tot_prot_dsds: Total number of segments with protection information + * @fw_prot_opts: Protection options to be passed to firmware */ inline int qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index d73b04e40590..30d3090842f8 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -25,7 +25,7 @@ static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. @@ -144,7 +144,7 @@ qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) /** * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. @@ -3109,7 +3109,7 @@ done: /** * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. 
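[Editor's note on the "@irq:" kernel-doc fixes in qla_isr.c above and the similar qla2xxx hunks that follow: these fill in parameter descriptions that were left empty, which scripts/kernel-doc flags as warnings. For reference, a complete kernel-doc block describes every parameter and the return value; the handler below is a generic illustration, not code from the driver.]

#include <linux/interrupt.h>

/**
 * example_intr_handler() - Process interrupts for a hypothetical HBA.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by the system whenever the host adapter generates an interrupt.
 *
 * Return: IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise.
 */
static irqreturn_t example_intr_handler(int irq, void *dev_id);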
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2f3e5075ae76..191b6b7c8747 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3478,9 +3478,9 @@ qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) /** * qla2x00_set_serdes_params() - * @vha: HA context - * @sw_em_1g: - * @sw_em_2g: - * @sw_em_4g: + * @sw_em_1g: serial link options + * @sw_em_2g: serial link options + * @sw_em_4g: serial link options * * Returns */ diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 521a51370554..60f964c53c01 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -2212,7 +2212,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, struct bsg_job *bsg_job; struct fc_bsg_reply *bsg_reply; struct srb_iocb *iocb_job; - int res; + int res = 0; struct qla_mt_iocb_rsp_fx00 fstatus; uint8_t *fw_sts_ptr; @@ -2624,7 +2624,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) * qlafx00_multistatus_entry() - Process Multi response queue entries. * @vha: SCSI driver HA context * @rsp: response queue - * @pkt: + * @pkt: received packet */ static void qlafx00_multistatus_entry(struct scsi_qla_host *vha, @@ -2681,12 +2681,10 @@ qlafx00_multistatus_entry(struct scsi_qla_host *vha, * @vha: SCSI driver HA context * @rsp: response queue * @pkt: Entry pointer - * @estatus: - * @etype: */ static void qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, - struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype) + struct sts_entry_fx00 *pkt) { srb_t *sp; struct qla_hw_data *ha = vha->hw; @@ -2695,9 +2693,6 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, struct req_que *req = NULL; int res = DID_ERROR << 16; - ql_dbg(ql_dbg_async, vha, 0x507f, - "type of error status in response: 0x%x\n", estatus); - req = ha->req_q_map[que]; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); @@ -2745,9 +2740,11 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha, if (pkt->entry_status != 0 && pkt->entry_type != IOCTL_IOSB_TYPE_FX00) { + ql_dbg(ql_dbg_async, vha, 0x507f, + "type of error status in response: 0x%x\n", + pkt->entry_status); qlafx00_error_entry(vha, rsp, - (struct sts_entry_fx00 *)pkt, pkt->entry_status, - pkt->entry_type); + (struct sts_entry_fx00 *)pkt); continue; } @@ -2867,7 +2864,7 @@ qlafx00_async_event(scsi_qla_host_t *vha) /** * qlafx00x_mbx_completion() - Process mailbox command completions. * @vha: SCSI driver HA context - * @mb0: + * @mb0: value to be written into mailbox register 0 */ static void qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) @@ -2893,7 +2890,7 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) /** * qlafx00_intr_handler() - Process interrupts for the ISPFX00. - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 121e18b3b9f8..f2f54806f4da 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2010,7 +2010,7 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) /** * qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. 
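[Editor's note on the "int res = 0;" change in qlafx00_ioctl_iosb_entry() above: the initializer closes a path where the switch on the completion status assigns nothing and the uninitialized value is then used. A minimal sketch of the pattern, with hypothetical status values.]

#include <stdio.h>

static int status_to_result(int status)
{
	int res = 0;	/* without this, unmatched statuses would return garbage */

	switch (status) {
	case 1:
		res = -5;	/* stands in for an I/O error code */
		break;
	case 2:
		res = -22;	/* stands in for an invalid-parameter code */
		break;
	}
	return res;
}

int main(void)
{
	printf("%d %d %d\n", status_to_result(1), status_to_result(2),
	       status_to_result(99));
	return 0;
}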
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index 3a2b0282df14..fe856b602e03 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -3878,7 +3878,7 @@ out: #define PF_BITS_MASK (0xF << 16) /** * qla8044_intr_handler() - Process interrupts for the ISP8044 - * @irq: + * @irq: interrupt number * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8794e54f43a9..518f15141170 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1749,7 +1749,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) static void __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) { - int cnt, status; + int cnt; unsigned long flags; srb_t *sp; scsi_qla_host_t *vha = qp->vha; @@ -1799,8 +1799,8 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) if (!sp_get(sp)) { spin_unlock_irqrestore (qp->qp_lock_ptr, flags); - status = qla2xxx_eh_abort( - GET_CMD_SP(sp)); + qla2xxx_eh_abort( + GET_CMD_SP(sp)); spin_lock_irqsave (qp->qp_lock_ptr, flags); } diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 4499c787165f..2a3055c799fb 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -2229,7 +2229,7 @@ qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr, /** * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip. - * @ha: + * @ha: host adapter * @man_id: Flash manufacturer ID * @flash_id: Flash ID */ diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 39828207bc1d..c4504740f0e2 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -4540,7 +4540,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, case QLA_TGT_CLEAR_TS: case QLA_TGT_ABORT_TS: abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); - /* drop through */ + /* fall through */ case QLA_TGT_CLEAR_ACA: h = qlt_find_qphint(vha, mcmd->unpacked_lun); mcmd->qpair = h->qpair; @@ -6598,9 +6598,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, * qla_tgt_lport_register - register lport with external module * * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data - * @phys_wwpn: - * @npiv_wwpn: - * @npiv_wwnn: + * @phys_wwpn: physical port WWPN + * @npiv_wwpn: NPIV WWPN + * @npiv_wwnn: NPIV WWNN * @callback: lport initialization callback for tcm_qla2xxx code */ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h index 7c128132799e..4c28fa938ac7 100644 --- a/drivers/soc/ti/knav_qmss.h +++ b/drivers/soc/ti/knav_qmss.h @@ -329,8 +329,8 @@ struct knav_range_ops { }; struct knav_irq_info { - int irq; - u32 cpu_map; + int irq; + struct cpumask *cpu_mask; }; struct knav_range_info { diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c index 316e82e46f6c..2f7fb2dcc1d6 100644 --- a/drivers/soc/ti/knav_qmss_acc.c +++ b/drivers/soc/ti/knav_qmss_acc.c @@ -205,18 +205,18 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range, { struct knav_device *kdev = range->kdev; struct knav_acc_channel *acc; - unsigned long cpu_map; + struct cpumask *cpu_mask; int ret = 0, irq; u32 old, new; if (range->flags & RANGE_MULTI_QUEUE) { acc = range->acc; irq = range->irqs[0].irq; - cpu_map = range->irqs[0].cpu_map; + cpu_mask = range->irqs[0].cpu_mask; } else { acc = 
range->acc + queue; irq = range->irqs[queue].irq; - cpu_map = range->irqs[queue].cpu_map; + cpu_mask = range->irqs[queue].cpu_mask; } old = acc->open_mask; @@ -239,8 +239,8 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range, acc->name, acc->name); ret = request_irq(irq, knav_acc_int_handler, 0, acc->name, range); - if (!ret && cpu_map) { - ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); + if (!ret && cpu_mask) { + ret = irq_set_affinity_hint(irq, cpu_mask); if (ret) { dev_warn(range->kdev->dev, "Failed to set IRQ affinity\n"); diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index b5d5673c255c..8b418379272d 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -118,19 +118,17 @@ static int knav_queue_setup_irq(struct knav_range_info *range, struct knav_queue_inst *inst) { unsigned queue = inst->id - range->queue_base; - unsigned long cpu_map; int ret = 0, irq; if (range->flags & RANGE_HAS_IRQ) { irq = range->irqs[queue].irq; - cpu_map = range->irqs[queue].cpu_map; ret = request_irq(irq, knav_queue_int_handler, 0, inst->irq_name, inst); if (ret) return ret; disable_irq(irq); - if (cpu_map) { - ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); + if (range->irqs[queue].cpu_mask) { + ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask); if (ret) { dev_warn(range->kdev->dev, "Failed to set IRQ affinity\n"); @@ -1262,9 +1260,19 @@ static int knav_setup_queue_range(struct knav_device *kdev, range->num_irqs++; - if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) - range->irqs[i].cpu_map = - (oirq.args[2] & 0x0000ff00) >> 8; + if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) { + unsigned long mask; + int bit; + + range->irqs[i].cpu_mask = devm_kzalloc(dev, + cpumask_size(), GFP_KERNEL); + if (!range->irqs[i].cpu_mask) + return -ENOMEM; + + mask = (oirq.args[2] & 0x0000ff00) >> 8; + for_each_set_bit(bit, &mask, BITS_PER_LONG) + cpumask_set_cpu(bit, range->irqs[i].cpu_mask); + } } range->num_irqs = min(range->num_irqs, range->num_queues); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 1227872227dc..36b742932c72 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1245,8 +1245,7 @@ static int iscsit_do_rx_data( return -1; memset(&msg, 0, sizeof(struct msghdr)); - iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, - count->iov, count->iov_count, data); + iov_iter_kvec(&msg.msg_iter, READ, count->iov, count->iov_count, data); while (msg_data_left(&msg)) { rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL); @@ -1302,8 +1301,7 @@ int tx_data( memset(&msg, 0, sizeof(struct msghdr)); - iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, - iov, iov_count, data); + iov_iter_kvec(&msg.msg_iter, WRITE, iov, iov_count, data); while (msg_data_left(&msg)) { int tx_loop = sock_sendmsg(conn->sock, &msg); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index e46ca968009c..4f134b0c3e29 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -268,7 +268,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) } transport_kunmap_data_sg(cmd); - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, rd_len + 4); return 0; } diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 16751ae55d7b..49b110d1b972 100644 --- a/drivers/target/target_core_file.c +++ 
b/drivers/target/target_core_file.c @@ -303,7 +303,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, len += sg->length; } - iov_iter_bvec(&iter, ITER_BVEC | is_write, bvec, sgl_nents, len); + iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len); aio_cmd->cmd = cmd; aio_cmd->len = len; @@ -353,7 +353,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd, len += sg->length; } - iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len); + iov_iter_bvec(&iter, READ, bvec, sgl_nents, len); if (is_write) ret = vfs_iter_write(fd, &iter, &pos, 0); else @@ -490,7 +490,7 @@ fd_execute_write_same(struct se_cmd *cmd) len += se_dev->dev_attrib.block_size; } - iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len); + iov_iter_bvec(&iter, READ, bvec, nolb, len); ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0); kfree(bvec); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4cf33e2cc705..e31e4fc31aa1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -205,19 +205,19 @@ void transport_subsystem_check_init(void) if (sub_api_initialized) return; - ret = request_module("target_core_iblock"); + ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock"); if (ret != 0) pr_err("Unable to load target_core_iblock\n"); - ret = request_module("target_core_file"); + ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file"); if (ret != 0) pr_err("Unable to load target_core_file\n"); - ret = request_module("target_core_pscsi"); + ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi"); if (ret != 0) pr_err("Unable to load target_core_pscsi\n"); - ret = request_module("target_core_user"); + ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user"); if (ret != 0) pr_err("Unable to load target_core_user\n"); diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 9756752c0681..45da3e01c7b0 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -309,7 +309,7 @@ int usbip_recv(struct socket *sock, void *buf, int size) if (!sock || !buf || !size) return -EINVAL; - iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size); + iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size); usbip_dbg_xmit("enter\n"); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index c24bb690680b..50dffe83714c 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -203,6 +203,19 @@ struct vhost_scsi { int vs_events_nr; /* num of pending events, protected by vq->mutex */ }; +/* + * Context for processing request and control queue operations. + */ +struct vhost_scsi_ctx { + int head; + unsigned int out, in; + size_t req_size, rsp_size; + size_t out_size, in_size; + u8 *target, *lunp; + void *req; + struct iov_iter out_iter; +}; + static struct workqueue_struct *vhost_scsi_workqueue; /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ @@ -800,24 +813,120 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs, pr_err("Faulted on virtio_scsi_cmd_resp\n"); } +static int +vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq, + struct vhost_scsi_ctx *vc) +{ + int ret = -ENXIO; + + vc->head = vhost_get_vq_desc(vq, vq->iov, + ARRAY_SIZE(vq->iov), &vc->out, &vc->in, + NULL, NULL); + + pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", + vc->head, vc->out, vc->in); + + /* On error, stop handling until the next kick. 
*/ + if (unlikely(vc->head < 0)) + goto done; + + /* Nothing new? Wait for eventfd to tell us they refilled. */ + if (vc->head == vq->num) { + if (unlikely(vhost_enable_notify(&vs->dev, vq))) { + vhost_disable_notify(&vs->dev, vq); + ret = -EAGAIN; + } + goto done; + } + + /* + * Get the size of request and response buffers. + * FIXME: Not correct for BIDI operation + */ + vc->out_size = iov_length(vq->iov, vc->out); + vc->in_size = iov_length(&vq->iov[vc->out], vc->in); + + /* + * Copy over the virtio-scsi request header, which for a + * ANY_LAYOUT enabled guest may span multiple iovecs, or a + * single iovec may contain both the header + outgoing + * WRITE payloads. + * + * copy_from_iter() will advance out_iter, so that it will + * point at the start of the outgoing WRITE payload, if + * DMA_TO_DEVICE is set. + */ + iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size); + ret = 0; + +done: + return ret; +} + +static int +vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc) +{ + if (unlikely(vc->in_size < vc->rsp_size)) { + vq_err(vq, + "Response buf too small, need min %zu bytes got %zu", + vc->rsp_size, vc->in_size); + return -EINVAL; + } else if (unlikely(vc->out_size < vc->req_size)) { + vq_err(vq, + "Request buf too small, need min %zu bytes got %zu", + vc->req_size, vc->out_size); + return -EIO; + } + + return 0; +} + +static int +vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc, + struct vhost_scsi_tpg **tpgp) +{ + int ret = -EIO; + + if (unlikely(!copy_from_iter_full(vc->req, vc->req_size, + &vc->out_iter))) { + vq_err(vq, "Faulted on copy_from_iter\n"); + } else if (unlikely(*vc->lunp != 1)) { + /* virtio-scsi spec requires byte 0 of the lun to be 1 */ + vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp); + } else { + struct vhost_scsi_tpg **vs_tpg, *tpg; + + vs_tpg = vq->private_data; /* validated at handler entry */ + + tpg = READ_ONCE(vs_tpg[*vc->target]); + if (unlikely(!tpg)) { + vq_err(vq, "Target 0x%x does not exist\n", *vc->target); + } else { + if (tpgp) + *tpgp = tpg; + ret = 0; + } + } + + return ret; +} + static void vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) { struct vhost_scsi_tpg **vs_tpg, *tpg; struct virtio_scsi_cmd_req v_req; struct virtio_scsi_cmd_req_pi v_req_pi; + struct vhost_scsi_ctx vc; struct vhost_scsi_cmd *cmd; - struct iov_iter out_iter, in_iter, prot_iter, data_iter; + struct iov_iter in_iter, prot_iter, data_iter; u64 tag; u32 exp_data_len, data_direction; - unsigned int out = 0, in = 0; - int head, ret, prot_bytes; - size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); - size_t out_size, in_size; + int ret, prot_bytes; u16 lun; - u8 *target, *lunp, task_attr; + u8 task_attr; bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); - void *req, *cdb; + void *cdb; mutex_lock(&vq->mutex); /* @@ -828,85 +937,47 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (!vs_tpg) goto out; + memset(&vc, 0, sizeof(vc)); + vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp); + vhost_disable_notify(&vs->dev, vq); for (;;) { - head = vhost_get_vq_desc(vq, vq->iov, - ARRAY_SIZE(vq->iov), &out, &in, - NULL, NULL); - pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", - head, out, in); - /* On error, stop handling until the next kick. */ - if (unlikely(head < 0)) - break; - /* Nothing new? Wait for eventfd to tell us they refilled. 
*/ - if (head == vq->num) { - if (unlikely(vhost_enable_notify(&vs->dev, vq))) { - vhost_disable_notify(&vs->dev, vq); - continue; - } - break; - } - /* - * Check for a sane response buffer so we can report early - * errors back to the guest. - */ - if (unlikely(vq->iov[out].iov_len < rsp_size)) { - vq_err(vq, "Expecting at least virtio_scsi_cmd_resp" - " size, got %zu bytes\n", vq->iov[out].iov_len); - break; - } + ret = vhost_scsi_get_desc(vs, vq, &vc); + if (ret) + goto err; + /* * Setup pointers and values based upon different virtio-scsi * request header if T10_PI is enabled in KVM guest. */ if (t10_pi) { - req = &v_req_pi; - req_size = sizeof(v_req_pi); - lunp = &v_req_pi.lun[0]; - target = &v_req_pi.lun[1]; + vc.req = &v_req_pi; + vc.req_size = sizeof(v_req_pi); + vc.lunp = &v_req_pi.lun[0]; + vc.target = &v_req_pi.lun[1]; } else { - req = &v_req; - req_size = sizeof(v_req); - lunp = &v_req.lun[0]; - target = &v_req.lun[1]; + vc.req = &v_req; + vc.req_size = sizeof(v_req); + vc.lunp = &v_req.lun[0]; + vc.target = &v_req.lun[1]; } - /* - * FIXME: Not correct for BIDI operation - */ - out_size = iov_length(vq->iov, out); - in_size = iov_length(&vq->iov[out], in); /* - * Copy over the virtio-scsi request header, which for a - * ANY_LAYOUT enabled guest may span multiple iovecs, or a - * single iovec may contain both the header + outgoing - * WRITE payloads. - * - * copy_from_iter() will advance out_iter, so that it will - * point at the start of the outgoing WRITE payload, if - * DMA_TO_DEVICE is set. + * Validate the size of request and response buffers. + * Check for a sane response buffer so we can report + * early errors back to the guest. */ - iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size); + ret = vhost_scsi_chk_size(vq, &vc); + if (ret) + goto err; - if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) { - vq_err(vq, "Faulted on copy_from_iter\n"); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; - } - /* virtio-scsi spec requires byte 0 of the lun to be 1 */ - if (unlikely(*lunp != 1)) { - vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; - } + ret = vhost_scsi_get_req(vq, &vc, &tpg); + if (ret) + goto err; + + ret = -EIO; /* bad target on any error from here on */ - tpg = READ_ONCE(vs_tpg[*target]); - if (unlikely(!tpg)) { - /* Target does not exist, fail the request */ - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; - } /* * Determine data_direction by calculating the total outgoing * iovec sizes + incoming iovec sizes vs. 
virtio-scsi request + @@ -924,17 +995,17 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) */ prot_bytes = 0; - if (out_size > req_size) { + if (vc.out_size > vc.req_size) { data_direction = DMA_TO_DEVICE; - exp_data_len = out_size - req_size; - data_iter = out_iter; - } else if (in_size > rsp_size) { + exp_data_len = vc.out_size - vc.req_size; + data_iter = vc.out_iter; + } else if (vc.in_size > vc.rsp_size) { data_direction = DMA_FROM_DEVICE; - exp_data_len = in_size - rsp_size; + exp_data_len = vc.in_size - vc.rsp_size; - iov_iter_init(&in_iter, READ, &vq->iov[out], in, - rsp_size + exp_data_len); - iov_iter_advance(&in_iter, rsp_size); + iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in, + vc.rsp_size + exp_data_len); + iov_iter_advance(&in_iter, vc.rsp_size); data_iter = in_iter; } else { data_direction = DMA_NONE; @@ -950,21 +1021,20 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (data_direction != DMA_TO_DEVICE) { vq_err(vq, "Received non zero pi_bytesout," " but wrong data_direction\n"); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; + goto err; } prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); } else if (v_req_pi.pi_bytesin) { if (data_direction != DMA_FROM_DEVICE) { vq_err(vq, "Received non zero pi_bytesin," " but wrong data_direction\n"); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; + goto err; } prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); } /* - * Set prot_iter to data_iter, and advance past any + * Set prot_iter to data_iter and truncate it to + * prot_bytes, and advance data_iter past any * preceeding prot_bytes that may be present. * * Also fix up the exp_data_len to reflect only the @@ -973,6 +1043,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (prot_bytes) { exp_data_len -= prot_bytes; prot_iter = data_iter; + iov_iter_truncate(&prot_iter, prot_bytes); iov_iter_advance(&data_iter, prot_bytes); } tag = vhost64_to_cpu(vq, v_req_pi.tag); @@ -996,8 +1067,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) vq_err(vq, "Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; + goto err; } cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, exp_data_len + prot_bytes, @@ -1005,13 +1075,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (IS_ERR(cmd)) { vq_err(vq, "vhost_scsi_get_tag failed %ld\n", PTR_ERR(cmd)); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; + goto err; } cmd->tvc_vhost = vs; cmd->tvc_vq = vq; - cmd->tvc_resp_iov = vq->iov[out]; - cmd->tvc_in_iovs = in; + cmd->tvc_resp_iov = vq->iov[vc.out]; + cmd->tvc_in_iovs = vc.in; pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", cmd->tvc_cdb[0], cmd->tvc_lun); @@ -1019,14 +1088,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) " %d\n", cmd, exp_data_len, prot_bytes, data_direction); if (data_direction != DMA_NONE) { - ret = vhost_scsi_mapal(cmd, - prot_bytes, &prot_iter, - exp_data_len, &data_iter); - if (unlikely(ret)) { + if (unlikely(vhost_scsi_mapal(cmd, prot_bytes, + &prot_iter, exp_data_len, + &data_iter))) { vq_err(vq, "Failed to map iov to sgl\n"); vhost_scsi_release_cmd(&cmd->tvc_se_cmd); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; + goto err; } } /* @@ -1034,7 +1101,7 @@ vhost_scsi_handle_vq(struct vhost_scsi 
*vs, struct vhost_virtqueue *vq) * complete the virtio-scsi request in TCM callback context via * vhost_scsi_queue_data_in() and vhost_scsi_queue_status() */ - cmd->tvc_vq_desc = head; + cmd->tvc_vq_desc = vc.head; /* * Dispatch cmd descriptor for cmwq execution in process * context provided by vhost_scsi_workqueue. This also ensures @@ -1043,6 +1110,166 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) */ INIT_WORK(&cmd->work, vhost_scsi_submission_work); queue_work(vhost_scsi_workqueue, &cmd->work); + ret = 0; +err: + /* + * ENXIO: No more requests, or read error, wait for next kick + * EINVAL: Invalid response buffer, drop the request + * EIO: Respond with bad target + * EAGAIN: Pending request + */ + if (ret == -ENXIO) + break; + else if (ret == -EIO) + vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); + } +out: + mutex_unlock(&vq->mutex); +} + +static void +vhost_scsi_send_tmf_reject(struct vhost_scsi *vs, + struct vhost_virtqueue *vq, + struct vhost_scsi_ctx *vc) +{ + struct virtio_scsi_ctrl_tmf_resp __user *resp; + struct virtio_scsi_ctrl_tmf_resp rsp; + int ret; + + pr_debug("%s\n", __func__); + memset(&rsp, 0, sizeof(rsp)); + rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; + resp = vq->iov[vc->out].iov_base; + ret = __copy_to_user(resp, &rsp, sizeof(rsp)); + if (!ret) + vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); + else + pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); +} + +static void +vhost_scsi_send_an_resp(struct vhost_scsi *vs, + struct vhost_virtqueue *vq, + struct vhost_scsi_ctx *vc) +{ + struct virtio_scsi_ctrl_an_resp __user *resp; + struct virtio_scsi_ctrl_an_resp rsp; + int ret; + + pr_debug("%s\n", __func__); + memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ + rsp.response = VIRTIO_SCSI_S_OK; + resp = vq->iov[vc->out].iov_base; + ret = __copy_to_user(resp, &rsp, sizeof(rsp)); + if (!ret) + vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); + else + pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); +} + +static void +vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) +{ + union { + __virtio32 type; + struct virtio_scsi_ctrl_an_req an; + struct virtio_scsi_ctrl_tmf_req tmf; + } v_req; + struct vhost_scsi_ctx vc; + size_t typ_size; + int ret; + + mutex_lock(&vq->mutex); + /* + * We can handle the vq only after the endpoint is setup by calling the + * VHOST_SCSI_SET_ENDPOINT ioctl. + */ + if (!vq->private_data) + goto out; + + memset(&vc, 0, sizeof(vc)); + + vhost_disable_notify(&vs->dev, vq); + + for (;;) { + ret = vhost_scsi_get_desc(vs, vq, &vc); + if (ret) + goto err; + + /* + * Get the request type first in order to setup + * other parameters dependent on the type. + */ + vc.req = &v_req.type; + typ_size = sizeof(v_req.type); + + if (unlikely(!copy_from_iter_full(vc.req, typ_size, + &vc.out_iter))) { + vq_err(vq, "Faulted on copy_from_iter tmf type\n"); + /* + * The size of the response buffer depends on the + * request type and must be validated against it. + * Since the request type is not known, don't send + * a response. 
+ */ + continue; + } + + switch (v_req.type) { + case VIRTIO_SCSI_T_TMF: + vc.req = &v_req.tmf; + vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req); + vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp); + vc.lunp = &v_req.tmf.lun[0]; + vc.target = &v_req.tmf.lun[1]; + break; + case VIRTIO_SCSI_T_AN_QUERY: + case VIRTIO_SCSI_T_AN_SUBSCRIBE: + vc.req = &v_req.an; + vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req); + vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp); + vc.lunp = &v_req.an.lun[0]; + vc.target = NULL; + break; + default: + vq_err(vq, "Unknown control request %d", v_req.type); + continue; + } + + /* + * Validate the size of request and response buffers. + * Check for a sane response buffer so we can report + * early errors back to the guest. + */ + ret = vhost_scsi_chk_size(vq, &vc); + if (ret) + goto err; + + /* + * Get the rest of the request now that its size is known. + */ + vc.req += typ_size; + vc.req_size -= typ_size; + + ret = vhost_scsi_get_req(vq, &vc, NULL); + if (ret) + goto err; + + if (v_req.type == VIRTIO_SCSI_T_TMF) + vhost_scsi_send_tmf_reject(vs, vq, &vc); + else + vhost_scsi_send_an_resp(vs, vq, &vc); +err: + /* + * ENXIO: No more requests, or read error, wait for next kick + * EINVAL: Invalid response buffer, drop the request + * EIO: Respond with bad target + * EAGAIN: Pending request + */ + if (ret == -ENXIO) + break; + else if (ret == -EIO) + vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out); } out: mutex_unlock(&vq->mutex); @@ -1050,7 +1277,12 @@ out: static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) { + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, + poll.work); + struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); + pr_debug("%s: The handling func for control queue.\n", __func__); + vhost_scsi_ctl_handle_vq(vs, vq); } static void diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index d1c1f6283729..728ecd1eea30 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -41,13 +41,34 @@ #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256 #define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80 +#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \ + __GFP_NOMEMALLOC) +/* The order of free page blocks to report to host */ +#define VIRTIO_BALLOON_FREE_PAGE_ORDER (MAX_ORDER - 1) +/* The size of a free page block in bytes */ +#define VIRTIO_BALLOON_FREE_PAGE_SIZE \ + (1 << (VIRTIO_BALLOON_FREE_PAGE_ORDER + PAGE_SHIFT)) + #ifdef CONFIG_BALLOON_COMPACTION static struct vfsmount *balloon_mnt; #endif +enum virtio_balloon_vq { + VIRTIO_BALLOON_VQ_INFLATE, + VIRTIO_BALLOON_VQ_DEFLATE, + VIRTIO_BALLOON_VQ_STATS, + VIRTIO_BALLOON_VQ_FREE_PAGE, + VIRTIO_BALLOON_VQ_MAX +}; + struct virtio_balloon { struct virtio_device *vdev; - struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; + struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; + + /* Balloon's own wq for cpu-intensive work items */ + struct workqueue_struct *balloon_wq; + /* The free page reporting work item submitted to the balloon wq */ + struct work_struct report_free_page_work; /* The balloon servicing is delegated to a freezable workqueue. 
*/ struct work_struct update_balloon_stats_work; @@ -57,6 +78,18 @@ struct virtio_balloon { spinlock_t stop_update_lock; bool stop_update; + /* The list of allocated free pages, waiting to be given back to mm */ + struct list_head free_page_list; + spinlock_t free_page_list_lock; + /* The number of free page blocks on the above list */ + unsigned long num_free_page_blocks; + /* The cmd id received from host */ + u32 cmd_id_received; + /* The cmd id that is actively in use */ + __virtio32 cmd_id_active; + /* Buffer to store the stop sign */ + __virtio32 cmd_id_stop; + /* Waiting for host to ack the pages we released. */ wait_queue_head_t acked; @@ -320,17 +353,6 @@ static void stats_handle_request(struct virtio_balloon *vb) virtqueue_kick(vq); } -static void virtballoon_changed(struct virtio_device *vdev) -{ - struct virtio_balloon *vb = vdev->priv; - unsigned long flags; - - spin_lock_irqsave(&vb->stop_update_lock, flags); - if (!vb->stop_update) - queue_work(system_freezable_wq, &vb->update_balloon_size_work); - spin_unlock_irqrestore(&vb->stop_update_lock, flags); -} - static inline s64 towards_target(struct virtio_balloon *vb) { s64 target; @@ -347,6 +369,60 @@ static inline s64 towards_target(struct virtio_balloon *vb) return target - vb->num_pages; } +/* Gives back @num_to_return blocks of free pages to mm. */ +static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb, + unsigned long num_to_return) +{ + struct page *page; + unsigned long num_returned; + + spin_lock_irq(&vb->free_page_list_lock); + for (num_returned = 0; num_returned < num_to_return; num_returned++) { + page = balloon_page_pop(&vb->free_page_list); + if (!page) + break; + free_pages((unsigned long)page_address(page), + VIRTIO_BALLOON_FREE_PAGE_ORDER); + } + vb->num_free_page_blocks -= num_returned; + spin_unlock_irq(&vb->free_page_list_lock); + + return num_returned; +} + +static void virtballoon_changed(struct virtio_device *vdev) +{ + struct virtio_balloon *vb = vdev->priv; + unsigned long flags; + s64 diff = towards_target(vb); + + if (diff) { + spin_lock_irqsave(&vb->stop_update_lock, flags); + if (!vb->stop_update) + queue_work(system_freezable_wq, + &vb->update_balloon_size_work); + spin_unlock_irqrestore(&vb->stop_update_lock, flags); + } + + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + virtio_cread(vdev, struct virtio_balloon_config, + free_page_report_cmd_id, &vb->cmd_id_received); + if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { + /* Pass ULONG_MAX to give back all the free pages */ + return_free_pages_to_mm(vb, ULONG_MAX); + } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && + vb->cmd_id_received != + virtio32_to_cpu(vdev, vb->cmd_id_active)) { + spin_lock_irqsave(&vb->stop_update_lock, flags); + if (!vb->stop_update) { + queue_work(vb->balloon_wq, + &vb->report_free_page_work); + } + spin_unlock_irqrestore(&vb->stop_update_lock, flags); + } + } +} + static void update_balloon_size(struct virtio_balloon *vb) { u32 actual = vb->num_pages; @@ -389,26 +465,44 @@ static void update_balloon_size_func(struct work_struct *work) static int init_vqs(struct virtio_balloon *vb) { - struct virtqueue *vqs[3]; - vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request }; - static const char * const names[] = { "inflate", "deflate", "stats" }; - int err, nvqs; + struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX]; + vq_callback_t *callbacks[VIRTIO_BALLOON_VQ_MAX]; + const char *names[VIRTIO_BALLOON_VQ_MAX]; + int err; /* - * We expect two virtqueues: inflate 
and deflate, and - * optionally stat. + * Inflateq and deflateq are used unconditionally. The names[] + * will be NULL if the related feature is not enabled, which will + * cause no allocation for the corresponding virtqueue in find_vqs. */ - nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2; - err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL); + callbacks[VIRTIO_BALLOON_VQ_INFLATE] = balloon_ack; + names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate"; + callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack; + names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate"; + names[VIRTIO_BALLOON_VQ_STATS] = NULL; + names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; + + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { + names[VIRTIO_BALLOON_VQ_STATS] = "stats"; + callbacks[VIRTIO_BALLOON_VQ_STATS] = stats_request; + } + + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + names[VIRTIO_BALLOON_VQ_FREE_PAGE] = "free_page_vq"; + callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; + } + + err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, + vqs, callbacks, names, NULL, NULL); if (err) return err; - vb->inflate_vq = vqs[0]; - vb->deflate_vq = vqs[1]; + vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE]; + vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE]; if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { struct scatterlist sg; unsigned int num_stats; - vb->stats_vq = vqs[2]; + vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS]; /* * Prime this virtqueue with one buffer so the hypervisor can @@ -426,9 +520,145 @@ static int init_vqs(struct virtio_balloon *vb) } virtqueue_kick(vb->stats_vq); } + + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE]; + + return 0; +} + +static int send_cmd_id_start(struct virtio_balloon *vb) +{ + struct scatterlist sg; + struct virtqueue *vq = vb->free_page_vq; + int err, unused; + + /* Detach all the used buffers from the vq */ + while (virtqueue_get_buf(vq, &unused)) + ; + + vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); + sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); + err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); + if (!err) + virtqueue_kick(vq); + return err; +} + +static int send_cmd_id_stop(struct virtio_balloon *vb) +{ + struct scatterlist sg; + struct virtqueue *vq = vb->free_page_vq; + int err, unused; + + /* Detach all the used buffers from the vq */ + while (virtqueue_get_buf(vq, &unused)) + ; + + sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop)); + err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL); + if (!err) + virtqueue_kick(vq); + return err; +} + +static int get_free_page_and_send(struct virtio_balloon *vb) +{ + struct virtqueue *vq = vb->free_page_vq; + struct page *page; + struct scatterlist sg; + int err, unused; + void *p; + + /* Detach all the used buffers from the vq */ + while (virtqueue_get_buf(vq, &unused)) + ; + + page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG, + VIRTIO_BALLOON_FREE_PAGE_ORDER); + /* + * When the allocation returns NULL, it indicates that we have got all + * the possible free pages, so return -EINTR to stop. + */ + if (!page) + return -EINTR; + + p = page_address(page); + sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE); + /* There is always 1 entry reserved for the cmd id to use. 
*/ + if (vq->num_free > 1) { + err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL); + if (unlikely(err)) { + free_pages((unsigned long)p, + VIRTIO_BALLOON_FREE_PAGE_ORDER); + return err; + } + virtqueue_kick(vq); + spin_lock_irq(&vb->free_page_list_lock); + balloon_page_push(&vb->free_page_list, page); + vb->num_free_page_blocks++; + spin_unlock_irq(&vb->free_page_list_lock); + } else { + /* + * The vq has no available entry to add this page block, so + * just free it. + */ + free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER); + } + + return 0; +} + +static int send_free_pages(struct virtio_balloon *vb) +{ + int err; + u32 cmd_id_active; + + while (1) { + /* + * If a stop id or a new cmd id was just received from host, + * stop the reporting. + */ + cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); + if (cmd_id_active != vb->cmd_id_received) + break; + + /* + * The free page blocks are allocated and sent to host one by + * one. + */ + err = get_free_page_and_send(vb); + if (err == -EINTR) + break; + else if (unlikely(err)) + return err; + } + return 0; } +static void report_free_page_func(struct work_struct *work) +{ + int err; + struct virtio_balloon *vb = container_of(work, struct virtio_balloon, + report_free_page_work); + struct device *dev = &vb->vdev->dev; + + /* Start by sending the received cmd id to host with an outbuf. */ + err = send_cmd_id_start(vb); + if (unlikely(err)) + dev_err(dev, "Failed to send a start id, err = %d\n", err); + + err = send_free_pages(vb); + if (unlikely(err)) + dev_err(dev, "Failed to send a free page, err = %d\n", err); + + /* End by sending a stop id to host with an outbuf. */ + err = send_cmd_id_stop(vb); + if (unlikely(err)) + dev_err(dev, "Failed to send a stop id, err = %d\n", err); +} + #ifdef CONFIG_BALLOON_COMPACTION /* * virtballoon_migratepage - perform the balloon page migration on behalf of @@ -512,14 +742,23 @@ static struct file_system_type balloon_fs = { #endif /* CONFIG_BALLOON_COMPACTION */ -static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker, - struct shrink_control *sc) +static unsigned long shrink_free_pages(struct virtio_balloon *vb, + unsigned long pages_to_free) { - unsigned long pages_to_free, pages_freed = 0; - struct virtio_balloon *vb = container_of(shrinker, - struct virtio_balloon, shrinker); + unsigned long blocks_to_free, blocks_freed; - pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE; + pages_to_free = round_up(pages_to_free, + 1 << VIRTIO_BALLOON_FREE_PAGE_ORDER); + blocks_to_free = pages_to_free >> VIRTIO_BALLOON_FREE_PAGE_ORDER; + blocks_freed = return_free_pages_to_mm(vb, blocks_to_free); + + return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER; +} + +static unsigned long shrink_balloon_pages(struct virtio_balloon *vb, + unsigned long pages_to_free) +{ + unsigned long pages_freed = 0; /* * One invocation of leak_balloon can deflate at most @@ -527,12 +766,33 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker, * multiple times to deflate pages till reaching pages_to_free. 
*/ while (vb->num_pages && pages_to_free) { + pages_freed += leak_balloon(vb, pages_to_free) / + VIRTIO_BALLOON_PAGES_PER_PAGE; pages_to_free -= pages_freed; - pages_freed += leak_balloon(vb, pages_to_free); } update_balloon_size(vb); - return pages_freed / VIRTIO_BALLOON_PAGES_PER_PAGE; + return pages_freed; +} + +static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker, + struct shrink_control *sc) +{ + unsigned long pages_to_free, pages_freed = 0; + struct virtio_balloon *vb = container_of(shrinker, + struct virtio_balloon, shrinker); + + pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE; + + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + pages_freed = shrink_free_pages(vb, pages_to_free); + + if (pages_freed >= pages_to_free) + return pages_freed; + + pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed); + + return pages_freed; } static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, @@ -540,8 +800,12 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, { struct virtio_balloon *vb = container_of(shrinker, struct virtio_balloon, shrinker); + unsigned long count; - return vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE; + count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE; + count += vb->num_free_page_blocks >> VIRTIO_BALLOON_FREE_PAGE_ORDER; + + return count; } static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb) @@ -561,6 +825,7 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb) static int virtballoon_probe(struct virtio_device *vdev) { struct virtio_balloon *vb; + __u32 poison_val; int err; if (!vdev->config->get) { @@ -604,6 +869,36 @@ static int virtballoon_probe(struct virtio_device *vdev) } vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops; #endif + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + /* + * There is always one entry reserved for cmd id, so the ring + * size needs to be at least two to report free page hints. + */ + if (virtqueue_get_vring_size(vb->free_page_vq) < 2) { + err = -ENOSPC; + goto out_del_vqs; + } + vb->balloon_wq = alloc_workqueue("balloon-wq", + WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0); + if (!vb->balloon_wq) { + err = -ENOMEM; + goto out_del_vqs; + } + INIT_WORK(&vb->report_free_page_work, report_free_page_func); + vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; + vb->cmd_id_active = cpu_to_virtio32(vb->vdev, + VIRTIO_BALLOON_CMD_ID_STOP); + vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, + VIRTIO_BALLOON_CMD_ID_STOP); + vb->num_free_page_blocks = 0; + spin_lock_init(&vb->free_page_list_lock); + INIT_LIST_HEAD(&vb->free_page_list); + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) { + memset(&poison_val, PAGE_POISON, sizeof(poison_val)); + virtio_cwrite(vb->vdev, struct virtio_balloon_config, + poison_val, &poison_val); + } + } /* * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a * shrinker needs to be registered to relieve memory pressure. 
@@ -611,7 +906,7 @@ static int virtballoon_probe(struct virtio_device *vdev) if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) { err = virtio_balloon_register_shrinker(vb); if (err) - goto out_del_vqs; + goto out_del_balloon_wq; } virtio_device_ready(vdev); @@ -619,6 +914,9 @@ static int virtballoon_probe(struct virtio_device *vdev) virtballoon_changed(vdev); return 0; +out_del_balloon_wq: + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + destroy_workqueue(vb->balloon_wq); out_del_vqs: vdev->config->del_vqs(vdev); out_free_vb: @@ -652,6 +950,11 @@ static void virtballoon_remove(struct virtio_device *vdev) cancel_work_sync(&vb->update_balloon_size_work); cancel_work_sync(&vb->update_balloon_stats_work); + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { + cancel_work_sync(&vb->report_free_page_work); + destroy_workqueue(vb->balloon_wq); + } + remove_common(vb); #ifdef CONFIG_BALLOON_COMPACTION if (vb->vb_dev_info.inode) @@ -695,6 +998,9 @@ static int virtballoon_restore(struct virtio_device *vdev) static int virtballoon_validate(struct virtio_device *vdev) { + if (!page_poisoning_enabled()) + __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON); + __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); return 0; } @@ -703,6 +1009,8 @@ static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST, VIRTIO_BALLOON_F_STATS_VQ, VIRTIO_BALLOON_F_DEFLATE_ON_OOM, + VIRTIO_BALLOON_F_FREE_PAGE_HINT, + VIRTIO_BALLOON_F_PAGE_POISON, }; static struct virtio_driver virtio_balloon_driver = { diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index b1092fbefa63..2e5d845b5091 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -137,13 +137,13 @@ static void pvcalls_conn_back_read(void *opaque) if (masked_prod < masked_cons) { vec[0].iov_base = data->in + masked_prod; vec[0].iov_len = wanted; - iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 1, wanted); + iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted); } else { vec[0].iov_base = data->in + masked_prod; vec[0].iov_len = array_size - masked_prod; vec[1].iov_base = data->in; vec[1].iov_len = wanted - vec[0].iov_len; - iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 2, wanted); + iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted); } atomic_set(&map->read, 0); @@ -195,13 +195,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) { vec[0].iov_base = data->out + pvcalls_mask(cons, array_size); vec[0].iov_len = size; - iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 1, size); + iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size); } else { vec[0].iov_base = data->out + pvcalls_mask(cons, array_size); vec[0].iov_len = array_size - pvcalls_mask(cons, array_size); vec[1].iov_base = data->out; vec[1].iov_len = size - vec[0].iov_len; - iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 2, size); + iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size); } atomic_set(&map->write, 0); |
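[Editor's note on the vhost_scsi_ctx refactor above: vhost_scsi_handle_vq() and the new vhost_scsi_ctl_handle_vq() route every failure through a single err: label and encode the recovery action in the errno, as the comment block at that label spells out. A condensed, self-contained model of that convention; this is not the driver code itself.]

#include <errno.h>
#include <stdio.h>

enum action { ACT_STOP, ACT_DROP, ACT_BAD_TARGET, ACT_CONTINUE };

static enum action classify(int ret)
{
	switch (ret) {
	case -ENXIO:	/* no more requests, or read error */
		return ACT_STOP;	/* wait for the next kick */
	case -EINVAL:	/* invalid response buffer */
		return ACT_DROP;	/* drop the request */
	case -EIO:	/* malformed request */
		return ACT_BAD_TARGET;	/* respond with bad target */
	default:	/* 0 or -EAGAIN: keep processing the queue */
		return ACT_CONTINUE;
	}
}

int main(void)
{
	printf("%d %d %d %d\n", classify(-ENXIO), classify(-EINVAL),
	       classify(-EIO), classify(0));
	return 0;
}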
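[Editor's note on the VIRTIO_BALLOON_F_FREE_PAGE_HINT additions above: report_free_page_func() performs a three-step handshake, echoing the host's command id on the free-page vq, streaming MAX_ORDER-1 blocks as inbufs until allocation fails or the host posts a new id, then sending a stop id. A userspace model of that control flow; the functions are stand-ins, not the kernel APIs.]

#include <stdio.h>

static unsigned int cmd_id_received;	/* last id read from config space */
static unsigned int cmd_id_active;	/* id currently echoed on the vq */

/* Stand-in for get_free_page_and_send(): pretend we run out after 3 blocks. */
static int get_free_page_and_send(void)
{
	static int blocks;
	return (++blocks > 3) ? -1 : 0;	/* -1 plays the role of -EINTR */
}

static void report_free_page_func(void)
{
	cmd_id_active = cmd_id_received;	/* send_cmd_id_start() */
	/* send_free_pages(): a new id from the host would also end the loop */
	while (cmd_id_active == cmd_id_received) {
		if (get_free_page_and_send())
			break;			/* no more free pages */
	}
	printf("reported with id %u, sending stop id\n", cmd_id_active);
}

int main(void)
{
	cmd_id_received = 7;	/* host requests a report with id 7 */
	report_free_page_func();
	return 0;
}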
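[Editor's note on the recurring iov_iter_kvec()/iov_iter_bvec() hunks in this series (drbd, iscsi-target, target_core_file, usbip, pvcalls-back): the iterator type is now implied by the constructor, so the second argument carries only the data direction, READ or WRITE, no longer ORed with ITER_KVEC or ITER_BVEC. A sketch of the new calling convention in kernel context, assuming these headers; the wrapper function is illustrative, while the iov_iter and socket calls are the real APIs.]

#include <linux/fs.h>
#include <linux/net.h>
#include <linux/uio.h>

static int example_send(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };

	/* before: iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, len); */
	iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, len);
	return sock_sendmsg(sock, &msg);
}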