Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/android/binder.c | 106
-rw-r--r--  drivers/char/Kconfig | 19
-rw-r--r--  drivers/char/Makefile | 6
-rw-r--r--  drivers/char/applicom.c | 35
-rw-r--r--  drivers/char/efirtc.c | 23
-rw-r--r--  drivers/char/generic_nvram.c | 159
-rw-r--r--  drivers/char/hpet.c | 2
-rw-r--r--  drivers/char/mbcs.c | 1
-rw-r--r--  drivers/char/nvram.c | 673
-rw-r--r--  drivers/fpga/Kconfig | 2
-rw-r--r--  drivers/fpga/altera-ps-spi.c | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-cpu-debug.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c | 112
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.h | 6
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-stm.c | 12
-rw-r--r--  drivers/hwtracing/coresight/coresight.c | 60
-rw-r--r--  drivers/hwtracing/coresight/of_coresight.c | 4
-rw-r--r--  drivers/interconnect/Kconfig | 15
-rw-r--r--  drivers/interconnect/Makefile | 6
-rw-r--r--  drivers/interconnect/core.c | 799
-rw-r--r--  drivers/interconnect/qcom/Kconfig | 13
-rw-r--r--  drivers/interconnect/qcom/Makefile | 5
-rw-r--r--  drivers/interconnect/qcom/sdm845.c | 838
-rw-r--r--  drivers/macintosh/via-cuda.c | 8
-rw-r--r--  drivers/misc/ad525x_dpot.c | 24
-rw-r--r--  drivers/misc/enclosure.c | 4
-rw-r--r--  drivers/misc/ics932s401.c | 2
-rw-r--r--  drivers/misc/lkdtm/core.c | 15
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h | 2
-rw-r--r--  drivers/misc/lkdtm/perms.c | 36
-rw-r--r--  drivers/misc/mei/hw.h | 3
-rw-r--r--  drivers/misc/mic/vop/vop_main.c | 9
-rw-r--r--  drivers/misc/mic/vop/vop_vringh.c | 36
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 4
-rw-r--r--  drivers/misc/vmw_balloon.c | 19
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_intel.h | 30
-rw-r--r--  drivers/ntb/hw/mscc/ntb_hw_switchtec.c | 36
-rw-r--r--  drivers/nvmem/Kconfig | 2
-rw-r--r--  drivers/nvmem/bcm-ocotp.c | 37
-rw-r--r--  drivers/nvmem/core.c | 38
-rw-r--r--  drivers/nvmem/imx-ocotp.c | 13
-rw-r--r--  drivers/nvmem/sc27xx-efuse.c | 12
-rw-r--r--  drivers/perf/arm_spe_pmu.c | 6
-rw-r--r--  drivers/platform/goldfish/Kconfig | 4
-rw-r--r--  drivers/scsi/Kconfig | 6
-rw-r--r--  drivers/scsi/atari_scsi.c | 10
-rw-r--r--  drivers/slimbus/core.c | 45
-rw-r--r--  drivers/uio/uio.c | 16
-rw-r--r--  drivers/uio/uio_pci_generic.c | 17
-rw-r--r--  drivers/video/fbdev/Kconfig | 2
-rw-r--r--  drivers/video/fbdev/controlfb.c | 42
-rw-r--r--  drivers/video/fbdev/imsttfb.c | 23
-rw-r--r--  drivers/video/fbdev/matrox/matroxfb_base.c | 7
-rw-r--r--  drivers/video/fbdev/platinumfb.c | 21
-rw-r--r--  drivers/video/fbdev/valkyriefb.c | 30
58 files changed, 2515 insertions(+), 950 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4f9f99057ff8..45f9decb9848 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -228,4 +228,6 @@ source "drivers/siox/Kconfig"
source "drivers/slimbus/Kconfig"
+source "drivers/interconnect/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index e1ce029d28fd..bb15b9d0e793 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -186,3 +186,4 @@ obj-$(CONFIG_MULTIPLEXER) += mux/
obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
obj-$(CONFIG_GNSS) += gnss/
+obj-$(CONFIG_INTERCONNECT) += interconnect/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 4d2b2ad1ee0e..01f80cbd2741 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -329,6 +329,8 @@ struct binder_error {
* (invariant after initialized)
* @min_priority: minimum scheduling priority
* (invariant after initialized)
+ * @txn_security_ctx: require sender's security context
+ * (invariant after initialized)
* @async_todo: list of async work items
* (protected by @proc->inner_lock)
*
@@ -365,6 +367,7 @@ struct binder_node {
* invariant after initialization
*/
u8 accept_fds:1;
+ u8 txn_security_ctx:1;
u8 min_priority;
};
bool has_async_transaction;
@@ -615,6 +618,7 @@ struct binder_transaction {
long saved_priority;
kuid_t sender_euid;
struct list_head fd_fixups;
+ binder_uintptr_t security_ctx;
/**
* @lock: protects @from, @to_proc, and @to_thread
*
@@ -1152,6 +1156,7 @@ static struct binder_node *binder_init_node_ilocked(
node->work.type = BINDER_WORK_NODE;
node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
@@ -2778,6 +2783,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
+ char *secctx = NULL;
+ u32 secctx_sz = 0;
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -3020,6 +3027,20 @@ static void binder_transaction(struct binder_proc *proc,
t->flags = tr->flags;
t->priority = task_nice(current);
+ if (target_node && target_node->txn_security_ctx) {
+ u32 secid;
+
+ security_task_getsecid(proc->tsk, &secid);
+ ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
+ if (ret) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_get_secctx_failed;
+ }
+ extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+ }
+
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@@ -3036,6 +3057,19 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
+ if (secctx) {
+ size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
+ ALIGN(tr->offsets_size, sizeof(void *)) +
+ ALIGN(extra_buffers_size, sizeof(void *)) -
+ ALIGN(secctx_sz, sizeof(u64));
+ char *kptr = t->buffer->data + buf_offset;
+
+ t->security_ctx = (uintptr_t)kptr +
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
+ memcpy(kptr, secctx, secctx_sz);
+ security_release_secctx(secctx, secctx_sz);
+ secctx = NULL;
+ }
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
@@ -3305,6 +3339,9 @@ err_copy_data_failed:
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
+ if (secctx)
+ security_release_secctx(secctx, secctx_sz);
+err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
@@ -4036,11 +4073,13 @@ retry:
while (1) {
uint32_t cmd;
- struct binder_transaction_data tr;
+ struct binder_transaction_data_secctx tr;
+ struct binder_transaction_data *trd = &tr.transaction_data;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
+ size_t trsize = sizeof(*trd);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&thread->todo))
@@ -4240,8 +4279,8 @@ retry:
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
- tr.target.ptr = target_node->ptr;
- tr.cookie = target_node->cookie;
+ trd->target.ptr = target_node->ptr;
+ trd->cookie = target_node->cookie;
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
@@ -4251,22 +4290,23 @@ retry:
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
- tr.target.ptr = 0;
- tr.cookie = 0;
+ trd->target.ptr = 0;
+ trd->cookie = 0;
cmd = BR_REPLY;
}
- tr.code = t->code;
- tr.flags = t->flags;
- tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+ trd->code = t->code;
+ trd->flags = t->flags;
+ trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
t_from = binder_get_txn_from(t);
if (t_from) {
struct task_struct *sender = t_from->proc->tsk;
- tr.sender_pid = task_tgid_nr_ns(sender,
- task_active_pid_ns(current));
+ trd->sender_pid =
+ task_tgid_nr_ns(sender,
+ task_active_pid_ns(current));
} else {
- tr.sender_pid = 0;
+ trd->sender_pid = 0;
}
ret = binder_apply_fd_fixups(t);
@@ -4297,15 +4337,20 @@ retry:
}
continue;
}
- tr.data_size = t->buffer->data_size;
- tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)
+ trd->data_size = t->buffer->data_size;
+ trd->offsets_size = t->buffer->offsets_size;
+ trd->data.ptr.buffer = (binder_uintptr_t)
((uintptr_t)t->buffer->data +
binder_alloc_get_user_buffer_offset(&proc->alloc));
- tr.data.ptr.offsets = tr.data.ptr.buffer +
+ trd->data.ptr.offsets = trd->data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
+ tr.secctx = t->security_ctx;
+ if (t->security_ctx) {
+ cmd = BR_TRANSACTION_SEC_CTX;
+ trsize = sizeof(tr);
+ }
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4316,7 +4361,7 @@ retry:
return -EFAULT;
}
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (copy_to_user(ptr, &tr, trsize)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4325,7 +4370,7 @@ retry:
return -EFAULT;
}
- ptr += sizeof(tr);
+ ptr += trsize;
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
@@ -4333,16 +4378,18 @@ retry:
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
- "BR_REPLY",
+ (cmd == BR_TRANSACTION_SEC_CTX) ?
+ "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
- (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
+ (u64)trd->data.ptr.buffer,
+ (u64)trd->data.ptr.offsets);
if (t_from)
binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
- if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
@@ -4690,7 +4737,8 @@ out:
return ret;
}
-static int binder_ioctl_set_ctx_mgr(struct file *filp)
+static int binder_ioctl_set_ctx_mgr(struct file *filp,
+ struct flat_binder_object *fbo)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
@@ -4719,7 +4767,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- new_node = binder_new_node(proc, NULL);
+ new_node = binder_new_node(proc, fbo);
if (!new_node) {
ret = -ENOMEM;
goto out;
@@ -4842,8 +4890,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
binder_inner_proc_unlock(proc);
break;
}
+ case BINDER_SET_CONTEXT_MGR_EXT: {
+ struct flat_binder_object fbo;
+
+ if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
+ if (ret)
+ goto err;
+ break;
+ }
case BINDER_SET_CONTEXT_MGR:
- ret = binder_ioctl_set_ctx_mgr(filp);
+ ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;
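
For orientation, a minimal user-space sketch (not part of this patch) of how the new interface could be exercised. It relies on the UAPI counterparts of this change (BINDER_SET_CONTEXT_MGR_EXT, FLAT_BINDER_FLAG_TXN_SECURITY_CTX, BR_TRANSACTION_SEC_CTX and struct binder_transaction_data_secctx), which presumably come from the matching <linux/android/binder.h> update that sits outside the drivers/ diffstat shown here.

/* Illustrative only, not from this patch: a process registering as
 * context manager and asking binder to attach the sender's security
 * context to incoming transactions.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int become_secure_ctx_mgr(int binder_fd)
{
	struct flat_binder_object fbo;

	memset(&fbo, 0, sizeof(fbo));
	fbo.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;

	/* The driver copies the whole object and hands it to
	 * binder_ioctl_set_ctx_mgr(), as seen in the hunk above. */
	return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo);
}

/* On the read side, a BR_TRANSACTION_SEC_CTX command is followed by
 * struct binder_transaction_data_secctx; its secctx field points into
 * the transaction buffer at the sender's security context. */
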
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2e2ffe7010aa..72866a004f07 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -244,26 +244,23 @@ source "drivers/char/hw_random/Kconfig"
config NVRAM
tristate "/dev/nvram support"
- depends on ATARI || X86 || GENERIC_NVRAM
+ depends on X86 || HAVE_ARCH_NVRAM_OPS
+ default M68K || PPC
---help---
If you say Y here and create a character special file /dev/nvram
with major number 10 and minor number 144 using mknod ("man mknod"),
- you get read and write access to the extra bytes of non-volatile
- memory in the real time clock (RTC), which is contained in every PC
- and most Ataris. The actual number of bytes varies, depending on the
- nvram in the system, but is usually 114 (128-14 for the RTC).
-
- This memory is conventionally called "CMOS RAM" on PCs and "NVRAM"
- on Ataris. /dev/nvram may be used to view settings there, or to
- change them (with some utility). It could also be used to frequently
+ you get read and write access to the non-volatile memory.
+
+ /dev/nvram may be used to view settings in NVRAM or to change them
+ (with some utility). It could also be used to frequently
save a few bits of very important data that may not be lost over
power-off and for which writing to disk is too insecure. Note
however that most NVRAM space in a PC belongs to the BIOS and you
should NEVER idly tamper with it. See Ralf Brown's interrupt list
for a guide to the use of CMOS bytes by your BIOS.
- On Atari machines, /dev/nvram is always configured and does not need
- to be selected.
+ This memory is conventionally called "NVRAM" on PowerPC machines,
+ "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes.
To compile this driver as a module, choose M here: the
module will be called nvram.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index b8d42b4e979b..fbea7dd12932 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -26,11 +26,7 @@ obj-$(CONFIG_RTC) += rtc.o
obj-$(CONFIG_HPET) += hpet.o
obj-$(CONFIG_EFI_RTC) += efirtc.o
obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
-ifeq ($(CONFIG_GENERIC_NVRAM),y)
- obj-$(CONFIG_NVRAM) += generic_nvram.o
-else
- obj-$(CONFIG_NVRAM) += nvram.o
-endif
+obj-$(CONFIG_NVRAM) += nvram.o
obj-$(CONFIG_TOSHIBA) += toshiba.o
obj-$(CONFIG_DS1620) += ds1620.o
obj-$(CONFIG_HW_RANDOM) += hw_random/
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index c0a5b1f3a986..4ccc39e00ced 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -32,6 +32,7 @@
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/nospec.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
TicCard = st_loc.tic_des_from_pc; /* tic number to send */
IndexCard = NumCard - 1;
- if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
+ if (IndexCard >= MAX_BOARD)
+ return -EINVAL;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (!apbs[IndexCard].RamIO)
return -EINVAL;
#ifdef DEBUG
@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
unsigned char IndexCard;
void __iomem *pmem;
int ret = 0;
+ static int warncount = 10;
volatile unsigned char byte_reset_it;
struct st_ram_io *adgl;
void __user *argp = (void __user *)arg;
@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&ac_mutex);
IndexCard = adgl->num_card-1;
- if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
- static int warncount = 10;
- if (warncount) {
- printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
- warncount--;
- }
- kfree(adgl);
- mutex_unlock(&ac_mutex);
- return -EINVAL;
- }
+ if (cmd != 6 && IndexCard >= MAX_BOARD)
+ goto err;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (cmd != 6 && !apbs[IndexCard].RamIO)
+ goto err;
switch (cmd) {
@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
kfree(adgl);
mutex_unlock(&ac_mutex);
return 0;
+
+err:
+ if (warncount) {
+ pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
+ (int)IndexCard + 1);
+ warncount--;
+ }
+ kfree(adgl);
+ mutex_unlock(&ac_mutex);
+ return -EINVAL;
+
}
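
Both applicom hunks follow the usual Spectre-v1 hardening idiom: validate the untrusted index first, then clamp it with array_index_nospec() before it is used to index the array, so a mispredicted bounds check cannot be used to read out of range speculatively. A generic sketch of the pattern (the names are made up, not from this driver):

#include <linux/nospec.h>

struct foo { int data; };
static struct foo foo_table[16];

/* 'idx' originates from user space and must not be trusted. */
static struct foo *foo_lookup(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(foo_table))
		return NULL;
	/* Clamp the index so speculation past the check above cannot
	 * leak out-of-bounds data. */
	idx = array_index_nospec(idx, ARRAY_SIZE(foo_table));

	return &foo_table[idx];
}
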
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index d9aab643997e..11781ebffbf7 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -255,35 +255,12 @@ static long efi_rtc_ioctl(struct file *file, unsigned int cmd,
}
/*
- * We enforce only one user at a time here with the open/close.
- * Also clear the previous interrupt data on an open, and clean
- * up things on a close.
- */
-
-static int efi_rtc_open(struct inode *inode, struct file *file)
-{
- /*
- * nothing special to do here
- * We do accept multiple open files at the same time as we
- * synchronize on the per call operation.
- */
- return 0;
-}
-
-static int efi_rtc_close(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-/*
* The various file operations we support.
*/
static const struct file_operations efi_rtc_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = efi_rtc_ioctl,
- .open = efi_rtc_open,
- .release = efi_rtc_close,
.llseek = no_llseek,
};
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
deleted file mode 100644
index ff5394f47587..000000000000
--- a/drivers/char/generic_nvram.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Generic /dev/nvram driver for architectures providing some
- * "generic" hooks, that is :
- *
- * nvram_read_byte, nvram_write_byte, nvram_sync, nvram_get_size
- *
- * Note that an additional hook is supported for PowerMac only
- * for getting the nvram "partition" informations
- *
- */
-
-#define NVRAM_VERSION "1.1"
-
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/pagemap.h>
-#include <linux/uaccess.h>
-#include <asm/nvram.h>
-#ifdef CONFIG_PPC_PMAC
-#include <asm/machdep.h>
-#endif
-
-#define NVRAM_SIZE 8192
-
-static DEFINE_MUTEX(nvram_mutex);
-static ssize_t nvram_len;
-
-static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
-{
- return generic_file_llseek_size(file, offset, origin,
- MAX_LFS_FILESIZE, nvram_len);
-}
-
-static ssize_t read_nvram(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned int i;
- char __user *p = buf;
-
- if (!access_ok(buf, count))
- return -EFAULT;
- if (*ppos >= nvram_len)
- return 0;
- for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count)
- if (__put_user(nvram_read_byte(i), p))
- return -EFAULT;
- *ppos = i;
- return p - buf;
-}
-
-static ssize_t write_nvram(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned int i;
- const char __user *p = buf;
- char c;
-
- if (!access_ok(buf, count))
- return -EFAULT;
- if (*ppos >= nvram_len)
- return 0;
- for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) {
- if (__get_user(c, p))
- return -EFAULT;
- nvram_write_byte(c, i);
- }
- *ppos = i;
- return p - buf;
-}
-
-static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- switch(cmd) {
-#ifdef CONFIG_PPC_PMAC
- case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
- printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
- case IOC_NVRAM_GET_OFFSET: {
- int part, offset;
-
- if (!machine_is(powermac))
- return -EINVAL;
- if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
- return -EFAULT;
- if (part < pmac_nvram_OF || part > pmac_nvram_NR)
- return -EINVAL;
- offset = pmac_get_partition(part);
- if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
- return -EFAULT;
- break;
- }
-#endif /* CONFIG_PPC_PMAC */
- case IOC_NVRAM_SYNC:
- nvram_sync();
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&nvram_mutex);
- ret = nvram_ioctl(file, cmd, arg);
- mutex_unlock(&nvram_mutex);
-
- return ret;
-}
-
-const struct file_operations nvram_fops = {
- .owner = THIS_MODULE,
- .llseek = nvram_llseek,
- .read = read_nvram,
- .write = write_nvram,
- .unlocked_ioctl = nvram_unlocked_ioctl,
-};
-
-static struct miscdevice nvram_dev = {
- NVRAM_MINOR,
- "nvram",
- &nvram_fops
-};
-
-int __init nvram_init(void)
-{
- int ret = 0;
-
- printk(KERN_INFO "Generic non-volatile memory driver v%s\n",
- NVRAM_VERSION);
- ret = misc_register(&nvram_dev);
- if (ret != 0)
- goto out;
-
- nvram_len = nvram_get_size();
- if (nvram_len < 0)
- nvram_len = NVRAM_SIZE;
-
-out:
- return ret;
-}
-
-void __exit nvram_cleanup(void)
-{
- misc_deregister( &nvram_dev );
-}
-
-module_init(nvram_init);
-module_exit(nvram_cleanup);
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4a22b4b41aef..9bffcd37cc7b 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -377,7 +377,7 @@ static __init int hpet_mmap_enable(char *str)
pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
return 1;
}
-__setup("hpet_mmap", hpet_mmap_enable);
+__setup("hpet_mmap=", hpet_mmap_enable);
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 8c9216a0f62e..0a31b60bee7b 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -50,6 +50,7 @@ static LIST_HEAD(soft_list);
* file operations
*/
static const struct file_operations mbcs_ops = {
+ .owner = THIS_MODULE,
.open = mbcs_open,
.llseek = mbcs_sram_llseek,
.read = mbcs_sram_read,
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 25264d65e716..eff1e3f1b3a2 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -21,13 +21,6 @@
* ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid
* again; use with care!)
*
- * This file also provides some functions for other parts of the kernel that
- * want to access the NVRAM: nvram_{read,write,check_checksum,set_checksum}.
- * Obviously this can be used only if this driver is always configured into
- * the kernel and is not a module. Since the functions are used by some Atari
- * drivers, this is the case on the Atari.
- *
- *
* 1.1 Cesar Barros: SMP locking fixes
* added changelog
* 1.2 Erik Gilling: Cobalt Networks support
@@ -39,64 +32,6 @@
#include <linux/module.h>
#include <linux/nvram.h>
-
-#define PC 1
-#define ATARI 2
-
-/* select machine configuration */
-#if defined(CONFIG_ATARI)
-# define MACH ATARI
-#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) /* and ?? */
-# define MACH PC
-#else
-# error Cannot build nvram driver for this machine configuration.
-#endif
-
-#if MACH == PC
-
-/* RTC in a PC */
-#define CHECK_DRIVER_INIT() 1
-
-/* On PCs, the checksum is built only over bytes 2..31 */
-#define PC_CKS_RANGE_START 2
-#define PC_CKS_RANGE_END 31
-#define PC_CKS_LOC 32
-#define NVRAM_BYTES (128-NVRAM_FIRST_BYTE)
-
-#define mach_check_checksum pc_check_checksum
-#define mach_set_checksum pc_set_checksum
-#define mach_proc_infos pc_proc_infos
-
-#endif
-
-#if MACH == ATARI
-
-/* Special parameters for RTC in Atari machines */
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#define RTC_PORT(x) (TT_RTC_BAS + 2*(x))
-#define CHECK_DRIVER_INIT() (MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK))
-
-#define NVRAM_BYTES 50
-
-/* On Ataris, the checksum is over all bytes except the checksum bytes
- * themselves; these are at the very end */
-#define ATARI_CKS_RANGE_START 0
-#define ATARI_CKS_RANGE_END 47
-#define ATARI_CKS_LOC 48
-
-#define mach_check_checksum atari_check_checksum
-#define mach_set_checksum atari_set_checksum
-#define mach_proc_infos atari_proc_infos
-
-#endif
-
-/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
- * rtc_lock held. Due to the index-port/data-port design of the RTC, we
- * don't want two different things trying to get to it at once. (e.g. the
- * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
- */
-
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
@@ -106,28 +41,26 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#ifdef CONFIG_PPC
+#include <asm/nvram.h>
+#endif
static DEFINE_MUTEX(nvram_mutex);
static DEFINE_SPINLOCK(nvram_state_lock);
static int nvram_open_cnt; /* #times opened */
static int nvram_open_mode; /* special open modes */
+static ssize_t nvram_size;
#define NVRAM_WRITE 1 /* opened for writing (exclusive) */
#define NVRAM_EXCL 2 /* opened with O_EXCL */
-static int mach_check_checksum(void);
-static void mach_set_checksum(void);
-
-#ifdef CONFIG_PROC_FS
-static void mach_proc_infos(unsigned char *contents, struct seq_file *seq,
- void *offset);
-#endif
-
+#ifdef CONFIG_X86
/*
* These functions are provided to be called internally or by other parts of
* the kernel. It's up to the caller to ensure correct checksum before reading
@@ -139,13 +72,20 @@ static void mach_proc_infos(unsigned char *contents, struct seq_file *seq,
* know about the RTC cruft.
*/
-unsigned char __nvram_read_byte(int i)
+#define NVRAM_BYTES (128 - NVRAM_FIRST_BYTE)
+
+/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with
+ * rtc_lock held. Due to the index-port/data-port design of the RTC, we
+ * don't want two different things trying to get to it at once. (e.g. the
+ * periodic 11 min sync from kernel/time/ntp.c vs. this driver.)
+ */
+
+static unsigned char __nvram_read_byte(int i)
{
return CMOS_READ(NVRAM_FIRST_BYTE + i);
}
-EXPORT_SYMBOL(__nvram_read_byte);
-unsigned char nvram_read_byte(int i)
+static unsigned char pc_nvram_read_byte(int i)
{
unsigned long flags;
unsigned char c;
@@ -155,16 +95,14 @@ unsigned char nvram_read_byte(int i)
spin_unlock_irqrestore(&rtc_lock, flags);
return c;
}
-EXPORT_SYMBOL(nvram_read_byte);
/* This races nicely with trying to read with checksum checking (nvram_read) */
-void __nvram_write_byte(unsigned char c, int i)
+static void __nvram_write_byte(unsigned char c, int i)
{
CMOS_WRITE(c, NVRAM_FIRST_BYTE + i);
}
-EXPORT_SYMBOL(__nvram_write_byte);
-void nvram_write_byte(unsigned char c, int i)
+static void pc_nvram_write_byte(unsigned char c, int i)
{
unsigned long flags;
@@ -172,172 +110,266 @@ void nvram_write_byte(unsigned char c, int i)
__nvram_write_byte(c, i);
spin_unlock_irqrestore(&rtc_lock, flags);
}
-EXPORT_SYMBOL(nvram_write_byte);
-int __nvram_check_checksum(void)
+/* On PCs, the checksum is built only over bytes 2..31 */
+#define PC_CKS_RANGE_START 2
+#define PC_CKS_RANGE_END 31
+#define PC_CKS_LOC 32
+
+static int __nvram_check_checksum(void)
{
- return mach_check_checksum();
+ int i;
+ unsigned short sum = 0;
+ unsigned short expect;
+
+ for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
+ __nvram_read_byte(PC_CKS_LOC+1);
+ return (sum & 0xffff) == expect;
}
-EXPORT_SYMBOL(__nvram_check_checksum);
-int nvram_check_checksum(void)
+static void __nvram_set_checksum(void)
{
- unsigned long flags;
- int rv;
+ int i;
+ unsigned short sum = 0;
- spin_lock_irqsave(&rtc_lock, flags);
- rv = __nvram_check_checksum();
- spin_unlock_irqrestore(&rtc_lock, flags);
- return rv;
+ for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
+ sum += __nvram_read_byte(i);
+ __nvram_write_byte(sum >> 8, PC_CKS_LOC);
+ __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
}
-EXPORT_SYMBOL(nvram_check_checksum);
-static void __nvram_set_checksum(void)
+static long pc_nvram_set_checksum(void)
{
- mach_set_checksum();
+ spin_lock_irq(&rtc_lock);
+ __nvram_set_checksum();
+ spin_unlock_irq(&rtc_lock);
+ return 0;
}
-#if 0
-void nvram_set_checksum(void)
+static long pc_nvram_initialize(void)
{
- unsigned long flags;
+ ssize_t i;
- spin_lock_irqsave(&rtc_lock, flags);
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ __nvram_write_byte(0, i);
__nvram_set_checksum();
- spin_unlock_irqrestore(&rtc_lock, flags);
+ spin_unlock_irq(&rtc_lock);
+ return 0;
}
-#endif /* 0 */
-
-/*
- * The are the file operation function for user access to /dev/nvram
- */
-static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
+static ssize_t pc_nvram_get_size(void)
{
- return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
- NVRAM_BYTES);
+ return NVRAM_BYTES;
}
-static ssize_t nvram_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t pc_nvram_read(char *buf, size_t count, loff_t *ppos)
{
- unsigned char contents[NVRAM_BYTES];
- unsigned i = *ppos;
- unsigned char *tmp;
+ char *p = buf;
+ loff_t i;
spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ *p = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
- if (!__nvram_check_checksum())
- goto checksum_err;
+ *ppos = i;
+ return p - buf;
+}
- for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
- *tmp = __nvram_read_byte(i);
+static ssize_t pc_nvram_write(char *buf, size_t count, loff_t *ppos)
+{
+ char *p = buf;
+ loff_t i;
+ spin_lock_irq(&rtc_lock);
+ if (!__nvram_check_checksum()) {
+ spin_unlock_irq(&rtc_lock);
+ return -EIO;
+ }
+ for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p)
+ __nvram_write_byte(*p, i);
+ __nvram_set_checksum();
spin_unlock_irq(&rtc_lock);
- if (copy_to_user(buf, contents, tmp - contents))
- return -EFAULT;
-
*ppos = i;
+ return p - buf;
+}
- return tmp - contents;
+const struct nvram_ops arch_nvram_ops = {
+ .read = pc_nvram_read,
+ .write = pc_nvram_write,
+ .read_byte = pc_nvram_read_byte,
+ .write_byte = pc_nvram_write_byte,
+ .get_size = pc_nvram_get_size,
+ .set_checksum = pc_nvram_set_checksum,
+ .initialize = pc_nvram_initialize,
+};
+EXPORT_SYMBOL(arch_nvram_ops);
+#endif /* CONFIG_X86 */
-checksum_err:
- spin_unlock_irq(&rtc_lock);
- return -EIO;
-}
+/*
+ * The are the file operation function for user access to /dev/nvram
+ */
-static ssize_t nvram_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static loff_t nvram_misc_llseek(struct file *file, loff_t offset, int origin)
{
- unsigned char contents[NVRAM_BYTES];
- unsigned i = *ppos;
- unsigned char *tmp;
+ return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
+ nvram_size);
+}
- if (i >= NVRAM_BYTES)
- return 0; /* Past EOF */
+static ssize_t nvram_misc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp;
+ ssize_t ret;
- if (count > NVRAM_BYTES - i)
- count = NVRAM_BYTES - i;
- if (count > NVRAM_BYTES)
- return -EFAULT; /* Can't happen, but prove it to gcc */
- if (copy_from_user(contents, buf, count))
+ if (!access_ok(buf, count))
return -EFAULT;
+ if (*ppos >= nvram_size)
+ return 0;
- spin_lock_irq(&rtc_lock);
+ count = min_t(size_t, count, nvram_size - *ppos);
+ count = min_t(size_t, count, PAGE_SIZE);
- if (!__nvram_check_checksum())
- goto checksum_err;
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
- for (tmp = contents; count--; ++i, ++tmp)
- __nvram_write_byte(*tmp, i);
+ ret = nvram_read(tmp, count, ppos);
+ if (ret <= 0)
+ goto out;
- __nvram_set_checksum();
+ if (copy_to_user(buf, tmp, ret)) {
+ *ppos -= ret;
+ ret = -EFAULT;
+ }
- spin_unlock_irq(&rtc_lock);
+out:
+ kfree(tmp);
+ return ret;
+}
- *ppos = i;
+static ssize_t nvram_misc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *tmp;
+ ssize_t ret;
- return tmp - contents;
+ if (!access_ok(buf, count))
+ return -EFAULT;
+ if (*ppos >= nvram_size)
+ return 0;
-checksum_err:
- spin_unlock_irq(&rtc_lock);
- return -EIO;
+ count = min_t(size_t, count, nvram_size - *ppos);
+ count = min_t(size_t, count, PAGE_SIZE);
+
+ tmp = memdup_user(buf, count);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ ret = nvram_write(tmp, count, ppos);
+ kfree(tmp);
+ return ret;
}
-static long nvram_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long nvram_misc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
- int i;
+ long ret = -ENOTTY;
switch (cmd) {
-
+#ifdef CONFIG_PPC
+ case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
+ pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
+ /* fall through */
+ case IOC_NVRAM_GET_OFFSET:
+ ret = -EINVAL;
+#ifdef CONFIG_PPC_PMAC
+ if (machine_is(powermac)) {
+ int part, offset;
+
+ if (copy_from_user(&part, (void __user *)arg,
+ sizeof(part)) != 0)
+ return -EFAULT;
+ if (part < pmac_nvram_OF || part > pmac_nvram_NR)
+ return -EINVAL;
+ offset = pmac_get_partition(part);
+ if (offset < 0)
+ return -EINVAL;
+ if (copy_to_user((void __user *)arg,
+ &offset, sizeof(offset)) != 0)
+ return -EFAULT;
+ ret = 0;
+ }
+#endif
+ break;
+#ifdef CONFIG_PPC32
+ case IOC_NVRAM_SYNC:
+ if (ppc_md.nvram_sync != NULL) {
+ mutex_lock(&nvram_mutex);
+ ppc_md.nvram_sync();
+ mutex_unlock(&nvram_mutex);
+ }
+ ret = 0;
+ break;
+#endif
+#elif defined(CONFIG_X86) || defined(CONFIG_M68K)
case NVRAM_INIT:
/* initialize NVRAM contents and checksum */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- mutex_lock(&nvram_mutex);
- spin_lock_irq(&rtc_lock);
-
- for (i = 0; i < NVRAM_BYTES; ++i)
- __nvram_write_byte(0, i);
- __nvram_set_checksum();
-
- spin_unlock_irq(&rtc_lock);
- mutex_unlock(&nvram_mutex);
- return 0;
-
+ if (arch_nvram_ops.initialize != NULL) {
+ mutex_lock(&nvram_mutex);
+ ret = arch_nvram_ops.initialize();
+ mutex_unlock(&nvram_mutex);
+ }
+ break;
case NVRAM_SETCKS:
/* just set checksum, contents unchanged (maybe useful after
* checksum garbaged somehow...) */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- mutex_lock(&nvram_mutex);
- spin_lock_irq(&rtc_lock);
- __nvram_set_checksum();
- spin_unlock_irq(&rtc_lock);
- mutex_unlock(&nvram_mutex);
- return 0;
-
- default:
- return -ENOTTY;
+ if (arch_nvram_ops.set_checksum != NULL) {
+ mutex_lock(&nvram_mutex);
+ ret = arch_nvram_ops.set_checksum();
+ mutex_unlock(&nvram_mutex);
+ }
+ break;
+#endif /* CONFIG_X86 || CONFIG_M68K */
}
+ return ret;
}
-static int nvram_open(struct inode *inode, struct file *file)
+static int nvram_misc_open(struct inode *inode, struct file *file)
{
spin_lock(&nvram_state_lock);
+ /* Prevent multiple readers/writers if desired. */
if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
- (nvram_open_mode & NVRAM_EXCL) ||
- ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) {
+ (nvram_open_mode & NVRAM_EXCL)) {
spin_unlock(&nvram_state_lock);
return -EBUSY;
}
+#if defined(CONFIG_X86) || defined(CONFIG_M68K)
+ /* Prevent multiple writers if the set_checksum ioctl is implemented. */
+ if ((arch_nvram_ops.set_checksum != NULL) &&
+ (file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE)) {
+ spin_unlock(&nvram_state_lock);
+ return -EBUSY;
+ }
+#endif
+
if (file->f_flags & O_EXCL)
nvram_open_mode |= NVRAM_EXCL;
if (file->f_mode & FMODE_WRITE)
@@ -349,7 +381,7 @@ static int nvram_open(struct inode *inode, struct file *file)
return 0;
}
-static int nvram_release(struct inode *inode, struct file *file)
+static int nvram_misc_release(struct inode *inode, struct file *file)
{
spin_lock(&nvram_state_lock);
@@ -366,123 +398,7 @@ static int nvram_release(struct inode *inode, struct file *file)
return 0;
}
-#ifndef CONFIG_PROC_FS
-static int nvram_add_proc_fs(void)
-{
- return 0;
-}
-
-#else
-
-static int nvram_proc_read(struct seq_file *seq, void *offset)
-{
- unsigned char contents[NVRAM_BYTES];
- int i = 0;
-
- spin_lock_irq(&rtc_lock);
- for (i = 0; i < NVRAM_BYTES; ++i)
- contents[i] = __nvram_read_byte(i);
- spin_unlock_irq(&rtc_lock);
-
- mach_proc_infos(contents, seq, offset);
-
- return 0;
-}
-
-static int nvram_add_proc_fs(void)
-{
- if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read))
- return -ENOMEM;
- return 0;
-}
-
-#endif /* CONFIG_PROC_FS */
-
-static const struct file_operations nvram_fops = {
- .owner = THIS_MODULE,
- .llseek = nvram_llseek,
- .read = nvram_read,
- .write = nvram_write,
- .unlocked_ioctl = nvram_ioctl,
- .open = nvram_open,
- .release = nvram_release,
-};
-
-static struct miscdevice nvram_dev = {
- NVRAM_MINOR,
- "nvram",
- &nvram_fops
-};
-
-static int __init nvram_init(void)
-{
- int ret;
-
- /* First test whether the driver should init at all */
- if (!CHECK_DRIVER_INIT())
- return -ENODEV;
-
- ret = misc_register(&nvram_dev);
- if (ret) {
- printk(KERN_ERR "nvram: can't misc_register on minor=%d\n",
- NVRAM_MINOR);
- goto out;
- }
- ret = nvram_add_proc_fs();
- if (ret) {
- printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n");
- goto outmisc;
- }
- ret = 0;
- printk(KERN_INFO "Non-volatile memory driver v" NVRAM_VERSION "\n");
-out:
- return ret;
-outmisc:
- misc_deregister(&nvram_dev);
- goto out;
-}
-
-static void __exit nvram_cleanup_module(void)
-{
- remove_proc_entry("driver/nvram", NULL);
- misc_deregister(&nvram_dev);
-}
-
-module_init(nvram_init);
-module_exit(nvram_cleanup_module);
-
-/*
- * Machine specific functions
- */
-
-#if MACH == PC
-
-static int pc_check_checksum(void)
-{
- int i;
- unsigned short sum = 0;
- unsigned short expect;
-
- for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- expect = __nvram_read_byte(PC_CKS_LOC)<<8 |
- __nvram_read_byte(PC_CKS_LOC+1);
- return (sum & 0xffff) == expect;
-}
-
-static void pc_set_checksum(void)
-{
- int i;
- unsigned short sum = 0;
-
- for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- __nvram_write_byte(sum >> 8, PC_CKS_LOC);
- __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1);
-}
-
-#ifdef CONFIG_PROC_FS
-
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
static const char * const floppy_types[] = {
"none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
"3.5'' 2.88M", "3.5'' 2.88M"
@@ -495,8 +411,8 @@ static const char * const gfx_types[] = {
"monochrome",
};
-static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq,
- void *offset)
+static void pc_nvram_proc_read(unsigned char *nvram, struct seq_file *seq,
+ void *offset)
{
int checksum;
int type;
@@ -557,143 +473,76 @@ static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq,
return;
}
-#endif
-#endif /* MACH == PC */
-
-#if MACH == ATARI
-
-static int atari_check_checksum(void)
+static int nvram_proc_read(struct seq_file *seq, void *offset)
{
- int i;
- unsigned char sum = 0;
+ unsigned char contents[NVRAM_BYTES];
+ int i = 0;
- for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) &&
- (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff));
-}
+ spin_lock_irq(&rtc_lock);
+ for (i = 0; i < NVRAM_BYTES; ++i)
+ contents[i] = __nvram_read_byte(i);
+ spin_unlock_irq(&rtc_lock);
-static void atari_set_checksum(void)
-{
- int i;
- unsigned char sum = 0;
+ pc_nvram_proc_read(contents, seq, offset);
- for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i)
- sum += __nvram_read_byte(i);
- __nvram_write_byte(~sum, ATARI_CKS_LOC);
- __nvram_write_byte(sum, ATARI_CKS_LOC + 1);
+ return 0;
}
+#endif /* CONFIG_X86 && CONFIG_PROC_FS */
-#ifdef CONFIG_PROC_FS
-
-static struct {
- unsigned char val;
- const char *name;
-} boot_prefs[] = {
- { 0x80, "TOS" },
- { 0x40, "ASV" },
- { 0x20, "NetBSD (?)" },
- { 0x10, "Linux" },
- { 0x00, "unspecified" }
-};
-
-static const char * const languages[] = {
- "English (US)",
- "German",
- "French",
- "English (UK)",
- "Spanish",
- "Italian",
- "6 (undefined)",
- "Swiss (French)",
- "Swiss (German)"
-};
-
-static const char * const dateformat[] = {
- "MM%cDD%cYY",
- "DD%cMM%cYY",
- "YY%cMM%cDD",
- "YY%cDD%cMM",
- "4 (undefined)",
- "5 (undefined)",
- "6 (undefined)",
- "7 (undefined)"
+static const struct file_operations nvram_misc_fops = {
+ .owner = THIS_MODULE,
+ .llseek = nvram_misc_llseek,
+ .read = nvram_misc_read,
+ .write = nvram_misc_write,
+ .unlocked_ioctl = nvram_misc_ioctl,
+ .open = nvram_misc_open,
+ .release = nvram_misc_release,
};
-static const char * const colors[] = {
- "2", "4", "16", "256", "65536", "??", "??", "??"
+static struct miscdevice nvram_misc = {
+ NVRAM_MINOR,
+ "nvram",
+ &nvram_misc_fops,
};
-static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq,
- void *offset)
+static int __init nvram_module_init(void)
{
- int checksum = nvram_check_checksum();
- int i;
- unsigned vmode;
+ int ret;
- seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not ");
+ nvram_size = nvram_get_size();
+ if (nvram_size < 0)
+ return nvram_size;
- seq_printf(seq, "Boot preference : ");
- for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) {
- if (nvram[1] == boot_prefs[i].val) {
- seq_printf(seq, "%s\n", boot_prefs[i].name);
- break;
- }
+ ret = misc_register(&nvram_misc);
+ if (ret) {
+ pr_err("nvram: can't misc_register on minor=%d\n", NVRAM_MINOR);
+ return ret;
}
- if (i < 0)
- seq_printf(seq, "0x%02x (undefined)\n", nvram[1]);
-
- seq_printf(seq, "SCSI arbitration : %s\n",
- (nvram[16] & 0x80) ? "on" : "off");
- seq_printf(seq, "SCSI host ID : ");
- if (nvram[16] & 0x80)
- seq_printf(seq, "%d\n", nvram[16] & 7);
- else
- seq_printf(seq, "n/a\n");
-
- /* the following entries are defined only for the Falcon */
- if ((atari_mch_cookie >> 16) != ATARI_MCH_FALCON)
- return;
- seq_printf(seq, "OS language : ");
- if (nvram[6] < ARRAY_SIZE(languages))
- seq_printf(seq, "%s\n", languages[nvram[6]]);
- else
- seq_printf(seq, "%u (undefined)\n", nvram[6]);
- seq_printf(seq, "Keyboard language: ");
- if (nvram[7] < ARRAY_SIZE(languages))
- seq_printf(seq, "%s\n", languages[nvram[7]]);
- else
- seq_printf(seq, "%u (undefined)\n", nvram[7]);
- seq_printf(seq, "Date format : ");
- seq_printf(seq, dateformat[nvram[8] & 7],
- nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/');
- seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12);
- seq_printf(seq, "Boot delay : ");
- if (nvram[10] == 0)
- seq_printf(seq, "default");
- else
- seq_printf(seq, "%ds%s\n", nvram[10],
- nvram[10] < 8 ? ", no memory test" : "");
-
- vmode = (nvram[14] << 8) | nvram[15];
- seq_printf(seq,
- "Video mode : %s colors, %d columns, %s %s monitor\n",
- colors[vmode & 7],
- vmode & 8 ? 80 : 40,
- vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC");
- seq_printf(seq, " %soverscan, compat. mode %s%s\n",
- vmode & 64 ? "" : "no ",
- vmode & 128 ? "on" : "off",
- vmode & 256 ?
- (vmode & 16 ? ", line doubling" : ", half screen") : "");
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
+ if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) {
+ pr_err("nvram: can't create /proc/driver/nvram\n");
+ misc_deregister(&nvram_misc);
+ return -ENOMEM;
+ }
+#endif
- return;
+ pr_info("Non-volatile memory driver v" NVRAM_VERSION "\n");
+ return 0;
}
+
+static void __exit nvram_module_exit(void)
+{
+#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS)
+ remove_proc_entry("driver/nvram", NULL);
#endif
+ misc_deregister(&nvram_misc);
+}
-#endif /* MACH == ATARI */
+module_init(nvram_module_init);
+module_exit(nvram_module_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(NVRAM_MINOR);
+MODULE_ALIAS("devname:nvram");
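
Since the rework keeps the existing /dev/nvram semantics (raw byte read/write plus the NVRAM_INIT and NVRAM_SETCKS ioctls on x86/m68k), a small user-space sketch may help. It is illustrative only and assumes the ioctl constants come from <linux/nvram.h> and that the caller holds CAP_SYS_ADMIN for the checksum ioctl.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvram.h>	/* NVRAM_INIT, NVRAM_SETCKS (assumed location) */

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/nvram", O_RDWR);

	if (fd < 0) {
		perror("open /dev/nvram");
		return 1;
	}

	/* Offset 0 of the device maps to the first usable NVRAM byte;
	 * on x86 a bad checksum makes this read fail with EIO. */
	if (read(fd, buf, sizeof(buf)) < 0)
		perror("read");

	/* Recompute the checksum without changing the contents. */
	if (ioctl(fd, NVRAM_SETCKS) < 0)
		perror("NVRAM_SETCKS");

	close(fd);
	return 0;
}
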
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 0bb7b5cd6cdc..c20445b867ae 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -104,7 +104,7 @@ config SOCFPGA_FPGA_BRIDGE
config ALTERA_FREEZE_BRIDGE
tristate "Altera FPGA Freeze Bridge"
- depends on ARCH_SOCFPGA && FPGA_BRIDGE
+ depends on FPGA_BRIDGE && HAS_IOMEM
help
Say Y to enable drivers for Altera FPGA Freeze bridges. A
freeze bridge is a bridge that exists in the FPGA fabric to
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 8c18beec6b57..678d0115f840 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -205,7 +205,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
struct fpga_image_info *info)
{
struct altera_ps_conf *conf = mgr->priv;
- const char dummy[] = {0};
+ static const char dummy[] = {0};
int ret;
if (gpiod_get_value_cansleep(conf->status)) {
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 45b2460f3166..e8819d750938 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -668,6 +668,10 @@ static const struct amba_id debug_ids[] = {
.id = 0x000bbd08,
.mask = 0x000fffff,
},
+ { /* Debug for Cortex-A73 */
+ .id = 0x000bbd09,
+ .mask = 0x000fffff,
+ },
{ 0, 0 },
};
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index abe8249b893b..8c88bf0a1e5f 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -14,6 +14,7 @@
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
+#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -30,11 +31,14 @@ static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
+/* Sink ID - same for all ETMs */
+PMU_FORMAT_ATTR(sinkid, "config2:0-31");
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
&format_attr_timestamp.attr,
&format_attr_retstack.attr,
+ &format_attr_sinkid.attr,
NULL,
};
@@ -43,8 +47,18 @@ static const struct attribute_group etm_pmu_format_group = {
.attrs = etm_config_formats_attr,
};
+static struct attribute *etm_config_sinks_attr[] = {
+ NULL,
+};
+
+static const struct attribute_group etm_pmu_sinks_group = {
+ .name = "sinks",
+ .attrs = etm_config_sinks_attr,
+};
+
static const struct attribute_group *etm_pmu_attr_groups[] = {
&etm_pmu_format_group,
+ &etm_pmu_sinks_group,
NULL,
};
@@ -177,31 +191,28 @@ static void etm_free_aux(void *data)
schedule_work(&event_data->work);
}
-static void *etm_setup_aux(int event_cpu, void **pages,
+static void *etm_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
- int cpu;
+ u32 id;
+ int cpu = event->cpu;
cpumask_t *mask;
struct coresight_device *sink;
struct etm_event_data *event_data = NULL;
- event_data = alloc_event_data(event_cpu);
+ event_data = alloc_event_data(cpu);
if (!event_data)
return NULL;
INIT_WORK(&event_data->work, free_event_data);
- /*
- * In theory nothing prevent tracers in a trace session from being
- * associated with different sinks, nor having a sink per tracer. But
- * until we have HW with this kind of topology we need to assume tracers
- * in a trace session are using the same sink. Therefore go through
- * the coresight bus and pick the first enabled sink.
- *
- * When operated from sysFS users are responsible to enable the sink
- * while from perf, the perf tools will do it based on the choice made
- * on the cmd line. As such the "enable_sink" flag in sysFS is reset.
- */
- sink = coresight_get_enabled_sink(true);
+ /* First get the selected sink from user space. */
+ if (event->attr.config2) {
+ id = (u32)event->attr.config2;
+ sink = coresight_get_sink_by_id(id);
+ } else {
+ sink = coresight_get_enabled_sink(true);
+ }
+
if (!sink || !sink_ops(sink)->alloc_buffer)
goto err;
@@ -479,6 +490,77 @@ int etm_perf_symlink(struct coresight_device *csdev, bool link)
return 0;
}
+static ssize_t etm_perf_sink_name_show(struct device *dev,
+ struct device_attribute *dattr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea;
+
+ ea = container_of(dattr, struct dev_ext_attribute, attr);
+ return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
+}
+
+int etm_perf_add_symlink_sink(struct coresight_device *csdev)
+{
+ int ret;
+ unsigned long hash;
+ const char *name;
+ struct device *pmu_dev = etm_pmu.dev;
+ struct device *pdev = csdev->dev.parent;
+ struct dev_ext_attribute *ea;
+
+ if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+ csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+ return -EINVAL;
+
+ if (csdev->ea != NULL)
+ return -EINVAL;
+
+ if (!etm_perf_up)
+ return -EPROBE_DEFER;
+
+ ea = devm_kzalloc(pdev, sizeof(*ea), GFP_KERNEL);
+ if (!ea)
+ return -ENOMEM;
+
+ name = dev_name(pdev);
+ /* See function coresight_get_sink_by_id() to know where this is used */
+ hash = hashlen_hash(hashlen_string(NULL, name));
+
+ ea->attr.attr.name = devm_kstrdup(pdev, name, GFP_KERNEL);
+ if (!ea->attr.attr.name)
+ return -ENOMEM;
+
+ ea->attr.attr.mode = 0444;
+ ea->attr.show = etm_perf_sink_name_show;
+ ea->var = (unsigned long *)hash;
+
+ ret = sysfs_add_file_to_group(&pmu_dev->kobj,
+ &ea->attr.attr, "sinks");
+
+ if (!ret)
+ csdev->ea = ea;
+
+ return ret;
+}
+
+void etm_perf_del_symlink_sink(struct coresight_device *csdev)
+{
+ struct device *pmu_dev = etm_pmu.dev;
+ struct dev_ext_attribute *ea = csdev->ea;
+
+ if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+ csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+ return;
+
+ if (!ea)
+ return;
+
+ sysfs_remove_file_from_group(&pmu_dev->kobj,
+ &ea->attr.attr, "sinks");
+ csdev->ea = NULL;
+}
+
static int __init etm_perf_init(void)
{
int ret;
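
The net effect of the two new functions is that every sink shows up as a read-only attribute under the PMU's "sinks" group, whose value is a hash of the device name, and that hash is passed back to the kernel in config2 (the "sinkid" format above), where coresight_get_sink_by_id() matches it. A sketch of how a profiling tool might consume this; the sysfs path and the "cs_etm" PMU name are assumptions about the wider perf integration rather than something this hunk defines.

/* Illustrative sketch, not from the patch: select a CoreSight sink by
 * programming its hash into perf_event_attr.config2. */
#include <stdio.h>
#include <linux/perf_event.h>

static int set_sink(struct perf_event_attr *attr, const char *sink)
{
	char path[256];
	unsigned long hash;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/cs_etm/sinks/%s", sink);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "0x%lx", &hash) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	attr->config2 = hash;	/* matched by coresight_get_sink_by_id() */
	return 0;
}
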
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index da7d9336a15c..015213abe00a 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -59,6 +59,8 @@ struct etm_event_data {
#ifdef CONFIG_CORESIGHT
int etm_perf_symlink(struct coresight_device *csdev, bool link);
+int etm_perf_add_symlink_sink(struct coresight_device *csdev);
+void etm_perf_del_symlink_sink(struct coresight_device *csdev);
static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
{
struct etm_event_data *data = perf_get_aux(handle);
@@ -70,7 +72,9 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
#else
static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
{ return -EINVAL; }
-
+int etm_perf_add_symlink_sink(struct coresight_device *csdev)
+{ return -EINVAL; }
+void etm_perf_del_symlink_sink(struct coresight_device *csdev) {}
static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
{
return NULL;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 579f34943bf1..b936c6d7e13f 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -147,6 +147,7 @@ void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *coresight_get_enabled_sink(bool reset);
+struct coresight_device *coresight_get_sink_by_id(u32 id);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index ef339ff22090..f07825df5c7a 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -793,7 +793,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
struct stm_drvdata *drvdata;
struct resource *res = &adev->res;
struct resource ch_res;
- size_t res_size, bitmap_size;
+ size_t bitmap_size;
struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
@@ -833,15 +833,11 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->write_bytes = stm_fundamental_data_size(drvdata);
- if (boot_nr_channel) {
+ if (boot_nr_channel)
drvdata->numsp = boot_nr_channel;
- res_size = min((resource_size_t)(boot_nr_channel *
- BYTES_PER_CHANNEL), resource_size(res));
- } else {
+ else
drvdata->numsp = stm_num_stimulus_port(drvdata);
- res_size = min((resource_size_t)(drvdata->numsp *
- BYTES_PER_CHANNEL), resource_size(res));
- }
+
bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 2b0df1a0a8df..29cef898afba 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/stringhash.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/coresight.h>
@@ -18,6 +19,7 @@
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include "coresight-etm-perf.h"
#include "coresight-priv.h"
static DEFINE_MUTEX(coresight_mutex);
@@ -540,6 +542,47 @@ struct coresight_device *coresight_get_enabled_sink(bool deactivate)
return dev ? to_coresight_device(dev) : NULL;
}
+static int coresight_sink_by_id(struct device *dev, void *data)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+ unsigned long hash;
+
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+
+ if (!csdev->ea)
+ return 0;
+ /*
+ * See function etm_perf_add_symlink_sink() to know where
+ * this comes from.
+ */
+ hash = (unsigned long)csdev->ea->var;
+
+ if ((u32)hash == *(u32 *)data)
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * coresight_get_sink_by_id - returns the sink that matches the id
+ * @id: Id of the sink to match
+ *
+ * The name of a sink is unique, whether it is found on the AMBA bus or
+ * otherwise. As such the hash of that name can easily be used to identify
+ * a sink.
+ */
+struct coresight_device *coresight_get_sink_by_id(u32 id)
+{
+ struct device *dev = NULL;
+
+ dev = bus_find_device(&coresight_bustype, NULL, &id,
+ coresight_sink_by_id);
+
+ return dev ? to_coresight_device(dev) : NULL;
+}
+
/*
* coresight_grab_device - Power up this device and any of the helper
* devices connected to it for trace operation. Since the helper devices
@@ -1167,6 +1210,22 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
goto err_out;
}
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ ret = etm_perf_add_symlink_sink(csdev);
+
+ if (ret) {
+ device_unregister(&csdev->dev);
+ /*
+ * As with the above, all resources are free'd
+ * explicitly via coresight_device_release() triggered
+ * from put_device(), which is in turn called from
+ * function device_unregister().
+ */
+ goto err_out;
+ }
+ }
+
mutex_lock(&coresight_mutex);
coresight_fixup_device_conns(csdev);
@@ -1185,6 +1244,7 @@ EXPORT_SYMBOL_GPL(coresight_register);
void coresight_unregister(struct coresight_device *csdev)
{
+ etm_perf_del_symlink_sink(csdev);
/* Remove references of that device in the topology */
coresight_remove_conns(csdev);
device_unregister(&csdev->dev);
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 89092f83567e..7045930fc958 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -80,8 +80,8 @@ static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
* Skip one-level up to the real device node, if we
* are using the new bindings.
*/
- if (!of_node_cmp(parent->name, "in-ports") ||
- !of_node_cmp(parent->name, "out-ports"))
+ if (of_node_name_eq(parent, "in-ports") ||
+ of_node_name_eq(parent, "out-ports"))
parent = of_get_next_parent(parent);
return parent;
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
new file mode 100644
index 000000000000..07a8276fa35a
--- /dev/null
+++ b/drivers/interconnect/Kconfig
@@ -0,0 +1,15 @@
+menuconfig INTERCONNECT
+ tristate "On-Chip Interconnect management support"
+ help
+ Support for management of the on-chip interconnects.
+
+ This framework is designed to provide a generic interface for
+ managing the interconnects in a SoC.
+
+ If unsure, say no.
+
+if INTERCONNECT
+
+source "drivers/interconnect/qcom/Kconfig"
+
+endif
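
To make the Kconfig text concrete: a consumer of this framework requests average and peak bandwidth (in kBps, per the icc_req documentation in core.c below) on a path between two endpoints. The sketch below is illustrative and assumes the of_icc_get()/icc_set_bw()/icc_put() consumer API from <linux/interconnect.h>; the "ddr" path name is invented for the example.

#include <linux/device.h>
#include <linux/interconnect.h>

/* Hypothetical consumer: request bandwidth between two SoC endpoints. */
static int example_request_bw(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "ddr");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* average 100 MB/s, peak 200 MB/s, both expressed in kBps */
	ret = icc_set_bw(path, 100000, 200000);
	if (ret)
		dev_err(dev, "failed to set interconnect bandwidth\n");

	icc_put(path);
	return ret;
}
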
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
new file mode 100644
index 000000000000..28f2ab0824d5
--- /dev/null
+++ b/drivers/interconnect/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+icc-core-objs := core.o
+
+obj-$(CONFIG_INTERCONNECT) += icc-core.o
+obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
new file mode 100644
index 000000000000..6005a1c189f6
--- /dev/null
+++ b/drivers/interconnect/core.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework core driver
+ *
+ * Copyright (c) 2017-2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/overflow.h>
+
+static DEFINE_IDR(icc_idr);
+static LIST_HEAD(icc_providers);
+static DEFINE_MUTEX(icc_lock);
+static struct dentry *icc_debugfs_dir;
+
+/**
+ * struct icc_req - constraints that are attached to each node
+ * @req_node: entry in list of requests for the particular @node
+ * @node: the interconnect node to which this constraint applies
+ * @dev: reference to the device that sets the constraints
+ * @avg_bw: an integer describing the average bandwidth in kBps
+ * @peak_bw: an integer describing the peak bandwidth in kBps
+ */
+struct icc_req {
+ struct hlist_node req_node;
+ struct icc_node *node;
+ struct device *dev;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+/**
+ * struct icc_path - interconnect path structure
+ * @num_nodes: number of hops (nodes)
+ * @reqs: array of the requests applicable to this path of nodes
+ */
+struct icc_path {
+ size_t num_nodes;
+ struct icc_req reqs[];
+};
+
+static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
+{
+ if (!n)
+ return;
+
+ seq_printf(s, "%-30s %12u %12u\n",
+ n->name, n->avg_bw, n->peak_bw);
+}
+
+static int icc_summary_show(struct seq_file *s, void *data)
+{
+ struct icc_provider *provider;
+
+ seq_puts(s, " node avg peak\n");
+ seq_puts(s, "--------------------------------------------------------\n");
+
+ mutex_lock(&icc_lock);
+
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ struct icc_req *r;
+
+ icc_summary_show_one(s, n);
+ hlist_for_each_entry(r, &n->req_list, req_node) {
+ if (!r->dev)
+ continue;
+
+ seq_printf(s, " %-26s %12u %12u\n",
+ dev_name(r->dev), r->avg_bw,
+ r->peak_bw);
+ }
+ }
+ }
+
+ mutex_unlock(&icc_lock);
+
+ return 0;
+}
+
+static int icc_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, icc_summary_show, inode->i_private);
+}
+
+static const struct file_operations icc_summary_fops = {
+ .open = icc_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct icc_node *node_find(const int id)
+{
+ return idr_find(&icc_idr, id);
+}
+
+static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
+ ssize_t num_nodes)
+{
+ struct icc_node *node = dst;
+ struct icc_path *path;
+ int i;
+
+ path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
+ if (!path)
+ return ERR_PTR(-ENOMEM);
+
+ path->num_nodes = num_nodes;
+
+ for (i = num_nodes - 1; i >= 0; i--) {
+ node->provider->users++;
+ hlist_add_head(&path->reqs[i].req_node, &node->req_list);
+ path->reqs[i].node = node;
+ path->reqs[i].dev = dev;
+ /* reference to previous node was saved during path traversal */
+ node = node->reverse;
+ }
+
+ return path;
+}
+
+static struct icc_path *path_find(struct device *dev, struct icc_node *src,
+ struct icc_node *dst)
+{
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_node *n, *node = NULL;
+ struct list_head traverse_list;
+ struct list_head edge_list;
+ struct list_head visited_list;
+ size_t i, depth = 1;
+ bool found = false;
+
+ INIT_LIST_HEAD(&traverse_list);
+ INIT_LIST_HEAD(&edge_list);
+ INIT_LIST_HEAD(&visited_list);
+
+ list_add(&src->search_list, &traverse_list);
+ src->reverse = NULL;
+
+ do {
+ list_for_each_entry_safe(node, n, &traverse_list, search_list) {
+ if (node == dst) {
+ found = true;
+ list_splice_init(&edge_list, &visited_list);
+ list_splice_init(&traverse_list, &visited_list);
+ break;
+ }
+ for (i = 0; i < node->num_links; i++) {
+ struct icc_node *tmp = node->links[i];
+
+ if (!tmp) {
+ path = ERR_PTR(-ENOENT);
+ goto out;
+ }
+
+ if (tmp->is_traversed)
+ continue;
+
+ tmp->is_traversed = true;
+ tmp->reverse = node;
+ list_add_tail(&tmp->search_list, &edge_list);
+ }
+ }
+
+ if (found)
+ break;
+
+ list_splice_init(&traverse_list, &visited_list);
+ list_splice_init(&edge_list, &traverse_list);
+
+ /* count the hops including the source */
+ depth++;
+
+ } while (!list_empty(&traverse_list));
+
+out:
+
+ /* reset the traversed state */
+ list_for_each_entry_reverse(n, &visited_list, search_list)
+ n->is_traversed = false;
+
+ if (found)
+ path = path_init(dev, dst, depth);
+
+ return path;
+}
+
+/*
+ * We want the path to honor all bandwidth requests, so the average and peak
+ * bandwidth requirements from each consumer are aggregated at each node.
+ * The aggregation is platform specific, so each platform can customize it by
+ * implementing its own aggregate() function.
+ */
+
+static int aggregate_requests(struct icc_node *node)
+{
+ struct icc_provider *p = node->provider;
+ struct icc_req *r;
+
+ node->avg_bw = 0;
+ node->peak_bw = 0;
+
+ hlist_for_each_entry(r, &node->req_list, req_node)
+ p->aggregate(node, r->avg_bw, r->peak_bw,
+ &node->avg_bw, &node->peak_bw);
+
+ return 0;
+}
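
For illustration, a provider's aggregate() callback plugged in above could be as simple as the following sketch (the function name is hypothetical; a real provider may apply hardware-specific weighting):

static int example_icc_aggregate(struct icc_node *node, u32 avg_bw,
				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	/* sum the average requests, keep the highest peak request */
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);

	return 0;
}
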
+
+static int apply_constraints(struct icc_path *path)
+{
+ struct icc_node *next, *prev = NULL;
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < path->num_nodes; i++) {
+ next = path->reqs[i].node;
+
+ /*
+ * Both endpoints should be valid master-slave pairs of the
+ * same interconnect provider that will be configured.
+ */
+ if (!prev || next->provider != prev->provider) {
+ prev = next;
+ continue;
+ }
+
+ /* set the constraints */
+ ret = next->provider->set(prev, next);
+ if (ret)
+ goto out;
+
+ prev = next;
+ }
+out:
+ return ret;
+}
+
+/* of_icc_xlate_onecell() - Translate function using a single index.
+ * @spec: OF phandle args to map into an interconnect node.
+ * @data: private data (pointer to struct icc_onecell_data)
+ *
+ * This is a generic translate function that can be used to model simple
+ * interconnect providers that have one device tree node and provide
+ * multiple interconnect nodes. A single cell is used as an index into
+ * an array of icc nodes specified in the icc_onecell_data struct when
+ * registering the provider.
+ */
+struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+ void *data)
+{
+ struct icc_onecell_data *icc_data = data;
+ unsigned int idx = spec->args[0];
+
+ if (idx >= icc_data->num_nodes) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return icc_data->nodes[idx];
+}
+EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
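
A minimal sketch of how a provider driver might register itself around of_icc_xlate_onecell() (names prefixed with example_ are hypothetical; error unwinding is omitted):

static int example_icc_set(struct icc_node *src, struct icc_node *dst)
{
	/* a real provider would program the hardware from the node votes */
	return 0;
}

static int example_register_provider(struct device *dev,
				     struct icc_onecell_data *data)
{
	struct icc_provider *provider;

	provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return -ENOMEM;

	provider->dev = dev;
	provider->set = example_icc_set;
	provider->aggregate = example_icc_aggregate;	/* see sketch above */
	provider->xlate = of_icc_xlate_onecell;
	provider->data = data;	/* nodes[] indexed by the single specifier cell */

	return icc_provider_add(provider);
}
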
+
+/**
+ * of_icc_get_from_provider() - Look-up interconnect node
+ * @spec: OF phandle args to use for look-up
+ *
+ * Looks for an interconnect provider under the node specified by @spec and,
+ * if found, uses the provider's xlate function to map phandle args to a node.
+ *
+ * Returns a valid pointer to struct icc_node on success or ERR_PTR()
+ * on failure.
+ */
+static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
+{
+ struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
+ struct icc_provider *provider;
+
+ if (!spec || spec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&icc_lock);
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ if (provider->dev->of_node == spec->np)
+ node = provider->xlate(spec, provider->data);
+ if (!IS_ERR(node))
+ break;
+ }
+ mutex_unlock(&icc_lock);
+
+ return node;
+}
+
+/**
+ * of_icc_get() - get a path handle from a DT node based on name
+ * @dev: device pointer for the consumer device
+ * @name: interconnect path name
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release constraints when they
+ * are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned and the consumer
+ * drivers will still build. Drivers are free to handle this specifically,
+ * but they don't have to.
+ *
+ * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
+ * when the API is disabled or the "interconnects" DT property is missing.
+ */
+struct icc_path *of_icc_get(struct device *dev, const char *name)
+{
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_node *src_node, *dst_node;
+ struct device_node *np = NULL;
+ struct of_phandle_args src_args, dst_args;
+ int idx = 0;
+ int ret;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ np = dev->of_node;
+
+ /*
+	 * When the consumer DT node does not have an "interconnects" property,
+	 * return a NULL path to skip setting constraints.
+ */
+ if (!of_find_property(np, "interconnects", NULL))
+ return NULL;
+
+ /*
+ * We use a combination of phandle and specifier for endpoint. For now
+	 * let's support only global IDs and extend this in the future if needed
+ * without breaking DT compatibility.
+ */
+ if (name) {
+ idx = of_property_match_string(np, "interconnect-names", name);
+ if (idx < 0)
+ return ERR_PTR(idx);
+ }
+
+ ret = of_parse_phandle_with_args(np, "interconnects",
+ "#interconnect-cells", idx * 2,
+ &src_args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ of_node_put(src_args.np);
+
+ ret = of_parse_phandle_with_args(np, "interconnects",
+ "#interconnect-cells", idx * 2 + 1,
+ &dst_args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ of_node_put(dst_args.np);
+
+ src_node = of_icc_get_from_provider(&src_args);
+
+ if (IS_ERR(src_node)) {
+ if (PTR_ERR(src_node) != -EPROBE_DEFER)
+ dev_err(dev, "error finding src node: %ld\n",
+ PTR_ERR(src_node));
+ return ERR_CAST(src_node);
+ }
+
+ dst_node = of_icc_get_from_provider(&dst_args);
+
+ if (IS_ERR(dst_node)) {
+ if (PTR_ERR(dst_node) != -EPROBE_DEFER)
+ dev_err(dev, "error finding dst node: %ld\n",
+ PTR_ERR(dst_node));
+ return ERR_CAST(dst_node);
+ }
+
+ mutex_lock(&icc_lock);
+ path = path_find(dev, src_node, dst_node);
+ if (IS_ERR(path))
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+ mutex_unlock(&icc_lock);
+
+ return path;
+}
+EXPORT_SYMBOL_GPL(of_icc_get);
+
+/**
+ * icc_set_bw() - set bandwidth constraints on an interconnect path
+ * @path: reference to the path returned by icc_get()
+ * @avg_bw: average bandwidth in kilobytes per second
+ * @peak_bw: peak bandwidth in kilobytes per second
+ *
+ * This function is used by an interconnect consumer to express its own needs
+ * in terms of bandwidth for a previously requested path between two endpoints.
+ * The requests are aggregated and each node is updated accordingly. The entire
+ * path is locked by a mutex to ensure that the set() is completed.
+ * The @path can be NULL when the "interconnects" DT property is missing,
+ * which will mean that no constraints will be set.
+ *
+ * Returns 0 on success, or an appropriate error code otherwise.
+ */
+int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+{
+ struct icc_node *node;
+ u32 old_avg, old_peak;
+ size_t i;
+ int ret;
+
+ if (!path || !path->num_nodes)
+ return 0;
+
+ mutex_lock(&icc_lock);
+
+ old_avg = path->reqs[0].avg_bw;
+ old_peak = path->reqs[0].peak_bw;
+
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+
+ /* update the consumer request for this path */
+ path->reqs[i].avg_bw = avg_bw;
+ path->reqs[i].peak_bw = peak_bw;
+
+ /* aggregate requests for this node */
+ aggregate_requests(node);
+ }
+
+ ret = apply_constraints(path);
+ if (ret) {
+ pr_debug("interconnect: error applying constraints (%d)\n",
+ ret);
+
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+ path->reqs[i].avg_bw = old_avg;
+ path->reqs[i].peak_bw = old_peak;
+ aggregate_requests(node);
+ }
+ apply_constraints(path);
+ }
+
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_set_bw);
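
Putting of_icc_get() and icc_set_bw() together, a consumer might use the API roughly as follows (the path name and the bandwidth numbers are made up for this sketch):

static int example_request_bandwidth(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "cpu-mem");	/* name from "interconnect-names" */
	if (IS_ERR(path))
		return PTR_ERR(path);
	/* a NULL path (API disabled) is handled gracefully by the calls below */

	/* request 100 MB/s average and 200 MB/s peak, expressed in kBps */
	ret = icc_set_bw(path, 100000, 200000);
	if (ret)
		dev_err(dev, "failed to set interconnect bandwidth: %d\n", ret);

	icc_put(path);	/* drop the constraints when no longer needed */

	return ret;
}
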
+
+/**
+ * icc_get() - return a handle for path between two endpoints
+ * @dev: the device requesting the path
+ * @src_id: source device port id
+ * @dst_id: destination device port id
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release
+ * constraints when they are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned and the consumer
+ * drivers will still build. Drivers are free to handle this specifically,
+ * but they don't have to.
+ *
+ * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
+ * interconnect API is disabled.
+ */
+struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
+{
+ struct icc_node *src, *dst;
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+
+ mutex_lock(&icc_lock);
+
+ src = node_find(src_id);
+ if (!src)
+ goto out;
+
+ dst = node_find(dst_id);
+ if (!dst)
+ goto out;
+
+ path = path_find(dev, src, dst);
+ if (IS_ERR(path))
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+
+out:
+ mutex_unlock(&icc_lock);
+ return path;
+}
+EXPORT_SYMBOL_GPL(icc_get);
+
+/**
+ * icc_put() - release the reference to the icc_path
+ * @path: interconnect path
+ *
+ * Use this function to release the constraints on a path when the path is
+ * no longer needed. The constraints will be re-aggregated.
+ */
+void icc_put(struct icc_path *path)
+{
+ struct icc_node *node;
+ size_t i;
+ int ret;
+
+ if (!path || WARN_ON(IS_ERR(path)))
+ return;
+
+ ret = icc_set_bw(path, 0, 0);
+ if (ret)
+ pr_err("%s: error (%d)\n", __func__, ret);
+
+ mutex_lock(&icc_lock);
+ for (i = 0; i < path->num_nodes; i++) {
+ node = path->reqs[i].node;
+ hlist_del(&path->reqs[i].req_node);
+ if (!WARN_ON(!node->provider->users))
+ node->provider->users--;
+ }
+ mutex_unlock(&icc_lock);
+
+ kfree(path);
+}
+EXPORT_SYMBOL_GPL(icc_put);
+
+static struct icc_node *icc_node_create_nolock(int id)
+{
+ struct icc_node *node;
+
+ /* check if node already exists */
+ node = node_find(id);
+ if (node)
+ return node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
+ if (id < 0) {
+ WARN(1, "%s: couldn't get idr\n", __func__);
+ kfree(node);
+ return ERR_PTR(id);
+ }
+
+ node->id = id;
+
+ return node;
+}
+
+/**
+ * icc_node_create() - create a node
+ * @id: node id
+ *
+ * Return: icc_node pointer on success, or ERR_PTR() on error
+ */
+struct icc_node *icc_node_create(int id)
+{
+ struct icc_node *node;
+
+ mutex_lock(&icc_lock);
+
+ node = icc_node_create_nolock(id);
+
+ mutex_unlock(&icc_lock);
+
+ return node;
+}
+EXPORT_SYMBOL_GPL(icc_node_create);
+
+/**
+ * icc_node_destroy() - destroy a node
+ * @id: node id
+ */
+void icc_node_destroy(int id)
+{
+ struct icc_node *node;
+
+ mutex_lock(&icc_lock);
+
+ node = node_find(id);
+ if (node) {
+ idr_remove(&icc_idr, node->id);
+ WARN_ON(!hlist_empty(&node->req_list));
+ }
+
+ mutex_unlock(&icc_lock);
+
+ kfree(node);
+}
+EXPORT_SYMBOL_GPL(icc_node_destroy);
+
+/**
+ * icc_link_create() - create a link between two nodes
+ * @node: source node id
+ * @dst_id: destination node id
+ *
+ * Create a link between two nodes. The nodes might belong to different
+ * interconnect providers and the @dst_id node might not exist yet (if its
+ * provider driver has not probed). In that case the @dst_id node is created
+ * here and the rest of its data is filled in when the actual provider
+ * driver probes.
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_link_create(struct icc_node *node, const int dst_id)
+{
+ struct icc_node *dst;
+ struct icc_node **new;
+ int ret = 0;
+
+ if (!node->provider)
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ dst = node_find(dst_id);
+ if (!dst) {
+ dst = icc_node_create_nolock(dst_id);
+
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto out;
+ }
+ }
+
+ new = krealloc(node->links,
+ (node->num_links + 1) * sizeof(*node->links),
+ GFP_KERNEL);
+ if (!new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ node->links = new;
+ node->links[node->num_links++] = dst;
+
+out:
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_link_create);
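
As a hedged illustration of how a provider builds its topology with icc_node_create(), icc_node_add() and icc_link_create() (the IDs and names below are hypothetical; error unwinding is omitted):

static int example_build_topology(struct icc_provider *provider)
{
	struct icc_node *master, *slave;

	master = icc_node_create(1);
	if (IS_ERR(master))
		return PTR_ERR(master);
	master->name = "example_master";
	icc_node_add(master, provider);

	slave = icc_node_create(2);
	if (IS_ERR(slave))
		return PTR_ERR(slave);
	slave->name = "example_slave";
	icc_node_add(slave, provider);

	/* the link can be created even before node 2 is fully populated */
	return icc_link_create(master, 2);
}
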
+
+/**
+ * icc_link_destroy() - destroy a link between two nodes
+ * @src: pointer to source node
+ * @dst: pointer to destination node
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+{
+ struct icc_node **new;
+ size_t slot;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(src))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(dst))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ for (slot = 0; slot < src->num_links; slot++)
+ if (src->links[slot] == dst)
+ break;
+
+ if (WARN_ON(slot == src->num_links)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ src->links[slot] = src->links[--src->num_links];
+
+ new = krealloc(src->links, src->num_links * sizeof(*src->links),
+ GFP_KERNEL);
+ if (new)
+ src->links = new;
+
+out:
+ mutex_unlock(&icc_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_link_destroy);
+
+/**
+ * icc_node_add() - add interconnect node to interconnect provider
+ * @node: pointer to the interconnect node
+ * @provider: pointer to the interconnect provider
+ */
+void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+{
+ mutex_lock(&icc_lock);
+
+ node->provider = provider;
+ list_add_tail(&node->node_list, &provider->nodes);
+
+ mutex_unlock(&icc_lock);
+}
+EXPORT_SYMBOL_GPL(icc_node_add);
+
+/**
+ * icc_node_del() - delete interconnect node from interconnect provider
+ * @node: pointer to the interconnect node
+ */
+void icc_node_del(struct icc_node *node)
+{
+ mutex_lock(&icc_lock);
+
+ list_del(&node->node_list);
+
+ mutex_unlock(&icc_lock);
+}
+EXPORT_SYMBOL_GPL(icc_node_del);
+
+/**
+ * icc_provider_add() - add a new interconnect provider
+ * @provider: the interconnect provider that will be added into topology
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_provider_add(struct icc_provider *provider)
+{
+ if (WARN_ON(!provider->set))
+ return -EINVAL;
+ if (WARN_ON(!provider->xlate))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ INIT_LIST_HEAD(&provider->nodes);
+ list_add_tail(&provider->provider_list, &icc_providers);
+
+ mutex_unlock(&icc_lock);
+
+ dev_dbg(provider->dev, "interconnect provider added to topology\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_provider_add);
+
+/**
+ * icc_provider_del() - delete previously added interconnect provider
+ * @provider: the interconnect provider that will be removed from topology
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int icc_provider_del(struct icc_provider *provider)
+{
+ mutex_lock(&icc_lock);
+ if (provider->users) {
+ pr_warn("interconnect provider still has %d users\n",
+ provider->users);
+ mutex_unlock(&icc_lock);
+ return -EBUSY;
+ }
+
+ if (!list_empty(&provider->nodes)) {
+ pr_warn("interconnect provider still has nodes\n");
+ mutex_unlock(&icc_lock);
+ return -EBUSY;
+ }
+
+ list_del(&provider->provider_list);
+ mutex_unlock(&icc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_provider_del);
+
+static int __init icc_init(void)
+{
+ icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
+ debugfs_create_file("interconnect_summary", 0444,
+ icc_debugfs_dir, NULL, &icc_summary_fops);
+ return 0;
+}
+
+static void __exit icc_exit(void)
+{
+ debugfs_remove_recursive(icc_debugfs_dir);
+}
+module_init(icc_init);
+module_exit(icc_exit);
+
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_DESCRIPTION("Interconnect Driver Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
new file mode 100644
index 000000000000..290d330abe5a
--- /dev/null
+++ b/drivers/interconnect/qcom/Kconfig
@@ -0,0 +1,13 @@
+config INTERCONNECT_QCOM
+ bool "Qualcomm Network-on-Chip interconnect drivers"
+ depends on ARCH_QCOM
+ help
+ Support for Qualcomm's Network-on-Chip interconnect hardware.
+
+config INTERCONNECT_QCOM_SDM845
+ tristate "Qualcomm SDM845 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdm845-based
+ platforms.
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
new file mode 100644
index 000000000000..1c1cea690f92
--- /dev/null
+++ b/drivers/interconnect/qcom/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+qnoc-sdm845-objs := sdm845.o
+
+obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
new file mode 100644
index 000000000000..4915b78da673
--- /dev/null
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <asm/div64.h>
+#include <dt-bindings/interconnect/qcom,sdm845.h>
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+
+#define BCM_TCS_CMD_COMMIT_SHFT 30
+#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
+#define BCM_TCS_CMD_VALID_SHFT 29
+#define BCM_TCS_CMD_VALID_MASK 0x20000000
+#define BCM_TCS_CMD_VOTE_X_SHFT 14
+#define BCM_TCS_CMD_VOTE_MASK 0x3fff
+#define BCM_TCS_CMD_VOTE_Y_SHFT 0
+#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
+
+#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
+ (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
+ ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
+ ((cpu_to_le32(vote_x) & \
+ BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
+ ((cpu_to_le32(vote_y) & \
+ BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
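
To make the bit layout concrete, a worked encoding with arbitrary votes (assuming a little-endian host, where cpu_to_le32() is a no-op):

/*
 *   BCM_TCS_CMD(1, 1, 0x100, 0x200)
 *     = (1 << 30) | (1 << 29) | (0x100 << 14) | (0x200 << 0)
 *     = 0x40000000 | 0x20000000 | 0x00400000 | 0x00000200
 *     = 0x60400200
 */
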
+
+#define to_qcom_provider(_provider) \
+ container_of(_provider, struct qcom_icc_provider, provider)
+
+struct qcom_icc_provider {
+ struct icc_provider provider;
+ struct device *dev;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+};
+
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
+ * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved field
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
+#define SDM845_MAX_LINKS 43
+#define SDM845_MAX_BCMS 30
+#define SDM845_MAX_BCM_PER_NODE 2
+#define SDM845_MAX_VCD 10
+
+/**
+ * struct qcom_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @links: an array of nodes where we can go next while traversing
+ * @id: a unique node identifier
+ * @num_links: the total number of @links
+ * @channels: num of channels at this node
+ * @buswidth: width of the interconnect between a node and the bus
+ * @sum_avg: current sum aggregate value of all avg bw requests
+ * @max_peak: current max aggregate value of all peak bw requests
+ * @bcms: list of bcms associated with this logical node
+ * @num_bcms: num of @bcms
+ */
+struct qcom_icc_node {
+ const char *name;
+ u16 links[SDM845_MAX_LINKS];
+ u16 id;
+ u16 num_links;
+ u16 channels;
+ u16 buswidth;
+ u64 sum_avg;
+ u64 max_peak;
+ struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE];
+ size_t num_bcms;
+};
+
+/**
+ * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes
+ * known as Bus Clock Manager (BCM)
+ * @name: the bcm node name used to fetch BCM data from command db
+ * @type: latency or bandwidth bcm
+ * @addr: address offsets used when voting to RPMH
+ * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
+ * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
+ * @dirty: flag used to indicate whether the bcm needs to be committed
+ * @keepalive: flag used to indicate whether a keepalive is required
+ * @aux_data: auxiliary data used when calculating threshold values and
+ * communicating with RPMh
+ * @list: used to link to other bcms when compiling lists for commit
+ * @num_nodes: total number of @nodes
+ * @nodes: list of qcom_icc_nodes that this BCM encapsulates
+ */
+struct qcom_icc_bcm {
+ const char *name;
+ u32 type;
+ u32 addr;
+ u64 vote_x;
+ u64 vote_y;
+ bool dirty;
+ bool keepalive;
+ struct bcm_db aux_data;
+ struct list_head list;
+ size_t num_nodes;
+ struct qcom_icc_node *nodes[];
+};
+
+struct qcom_icc_fabric {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+};
+
+struct qcom_icc_desc {
+ struct qcom_icc_node **nodes;
+ size_t num_nodes;
+ struct qcom_icc_bcm **bcms;
+ size_t num_bcms;
+};
+
+#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \
+ _numlinks, ...) \
+ static struct qcom_icc_node _name = { \
+ .id = _id, \
+ .name = #_name, \
+ .channels = _channels, \
+ .buswidth = _buswidth, \
+ .num_links = _numlinks, \
+ .links = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC);
+DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC);
+DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC);
+DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG);
+DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1);
+DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC);
+DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC);
+DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
+DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU);
+DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM);
+DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
+DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM);
+DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0);
+DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC);
+DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC);
+DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0);
+DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0);
+DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0);
+DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0);
+DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0);
+DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0);
+DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0);
+DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0);
+DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0);
+DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0);
+DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0);
+DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0);
+DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0);
+DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0);
+DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0);
+DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0);
+DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0);
+DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0);
+DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0);
+DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0);
+DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0);
+DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0);
+DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC);
+DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0);
+DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0);
+DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG);
+DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC);
+DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC);
+DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0);
+DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0);
+DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0);
+DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC);
+DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0);
+DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0);
+DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0);
+DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC);
+DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0);
+DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0);
+DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0);
+DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0);
+DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0);
+DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0);
+DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0);
+
+#define DEFINE_QBCM(_name, _bcmname, _keepalive, _numnodes, ...) \
+ static struct qcom_icc_bcm _name = { \
+ .name = _bcmname, \
+ .keepalive = _keepalive, \
+ .num_nodes = _numnodes, \
+ .nodes = { __VA_ARGS__ }, \
+ }
+
+DEFINE_QBCM(bcm_acv, "ACV", false, 1, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, 1, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, 1, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", false, 1, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_sh1, "SH1", false, 1, &qns_apps_io);
+DEFINE_QBCM(bcm_mm1, "MM1", false, 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_sh2, "SH2", false, 1, &qns_memnoc_snoc);
+DEFINE_QBCM(bcm_mm2, "MM2", false, 1, &qns2_mem_noc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, 1, &acm_tcu);
+DEFINE_QBCM(bcm_mm3, "MM3", false, 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+DEFINE_QBCM(bcm_sh5, "SH5", false, 1, &qnm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, 1, &qns_memnoc_sf);
+DEFINE_QBCM(bcm_ce0, "CE0", false, 1, &qxm_crypto);
+DEFINE_QBCM(bcm_cn0, "CN0", false, 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, 2, &qhm_qup1, &qhm_qup2);
+DEFINE_QBCM(bcm_sn1, "SN1", false, 1, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, 1, &qns_memnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, 1, &qns_cnoc);
+DEFINE_QBCM(bcm_sn4, "SN4", false, 1, &qxm_pimem);
+DEFINE_QBCM(bcm_sn5, "SN5", false, 1, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn6, "SN6", false, 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
+DEFINE_QBCM(bcm_sn7, "SN7", false, 1, &qxs_pcie);
+DEFINE_QBCM(bcm_sn8, "SN8", false, 1, &qxs_pcie_gen3);
+DEFINE_QBCM(bcm_sn9, "SN9", false, 2, &srvc_aggre1_noc, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn11, "SN11", false, 2, &srvc_aggre2_noc, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn12, "SN12", false, 2, &qnm_gladiator_sodv, &xm_gic);
+DEFINE_QBCM(bcm_sn14, "SN14", false, 1, &qnm_pcie_anoc);
+DEFINE_QBCM(bcm_sn15, "SN15", false, 1, &qnm_memnoc);
+
+static struct qcom_icc_node *rsc_hlos_nodes[] = {
+ [MASTER_APPSS_PROC] = &acm_l3,
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_LLCC] = &llcc_mc,
+ [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
+ [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_BLSP_1] = &qhm_qup1,
+ [MASTER_BLSP_2] = &qhm_qup2,
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_SPDM] = &qhm_spdm,
+ [MASTER_TIC] = &qhm_tic,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GNOC_MEM_NOC] = &qnm_apps,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc,
+ [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
+ [MASTER_SNOC_CNOC] = &qnm_snoc,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
+ [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_GFX3D] = &qxm_gpu,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_MDP0] = &qxm_mdp0,
+ [MASTER_MDP1] = &qxm_mdp1,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_P1] = &qxm_venus1,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [MASTER_GIC] = &xm_gic,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_PCIE_0] = &xm_pcie_0,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [SLAVE_EBI1] = &ebi,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie_gen3_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_SOUTH_PHY_CFG] = &qhs_phy_refgen_south,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_BLSP_2] = &qhs_qupv3_north,
+ [SLAVE_BLSP_1] = &qhs_qupv3_south,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPDM_WRAPPER] = &qhs_spdm,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
+ [SLAVE_TSIF] = &qhs_tsif,
+ [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_USB3_1] = &qhs_usb3_1,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+ [SLAVE_SNOC_CNOC] = &qns_cnoc,
+ [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
+ [SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
+ [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
+ [SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PCIE_0] = &qxs_pcie,
+ [SLAVE_PCIE_1] = &qxs_pcie_gen3,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+ [SLAVE_SERVICE_GNOC] = &srvc_gnoc,
+ [SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_sh0,
+ &bcm_mm0,
+ &bcm_sh1,
+ &bcm_mm1,
+ &bcm_sh2,
+ &bcm_mm2,
+ &bcm_sh3,
+ &bcm_mm3,
+ &bcm_sh5,
+ &bcm_sn0,
+ &bcm_ce0,
+ &bcm_cn0,
+ &bcm_qup0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn5,
+ &bcm_sn6,
+ &bcm_sn7,
+ &bcm_sn8,
+ &bcm_sn9,
+ &bcm_sn11,
+ &bcm_sn12,
+ &bcm_sn14,
+ &bcm_sn15,
+};
+
+static struct qcom_icc_desc sdm845_rsc_hlos = {
+ .nodes = rsc_hlos_nodes,
+ .num_nodes = ARRAY_SIZE(rsc_hlos_nodes),
+ .bcms = rsc_hlos_bcms,
+ .num_bcms = ARRAY_SIZE(rsc_hlos_bcms),
+};
+
+static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
+{
+ struct qcom_icc_node *qn;
+ const struct bcm_db *data;
+ size_t data_count;
+ int i;
+
+ bcm->addr = cmd_db_read_addr(bcm->name);
+ if (!bcm->addr) {
+ dev_err(dev, "%s could not find RPMh address\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ data = cmd_db_read_aux_data(bcm->name, &data_count);
+ if (IS_ERR(data)) {
+ dev_err(dev, "%s command db read error (%ld)\n",
+ bcm->name, PTR_ERR(data));
+ return PTR_ERR(data);
+ }
+ if (!data_count) {
+ dev_err(dev, "%s command db missing or partial aux data\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ bcm->aux_data.unit = le32_to_cpu(data->unit);
+ bcm->aux_data.width = le16_to_cpu(data->width);
+ bcm->aux_data.vcd = data->vcd;
+ bcm->aux_data.reserved = data->reserved;
+
+ /*
+ * Link Qnodes to their respective BCMs
+ */
+ for (i = 0; i < bcm->num_nodes; i++) {
+ qn = bcm->nodes[i];
+ qn->bcms[qn->num_bcms] = bcm;
+ qn->num_bcms++;
+ }
+
+ return 0;
+}
+
+static void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
+ u32 addr, bool commit)
+{
+ bool valid = true;
+
+ if (!cmd)
+ return;
+
+ if (vote_x == 0 && vote_y == 0)
+ valid = false;
+
+ if (vote_x > BCM_TCS_CMD_VOTE_MASK)
+ vote_x = BCM_TCS_CMD_VOTE_MASK;
+
+ if (vote_y > BCM_TCS_CMD_VOTE_MASK)
+ vote_y = BCM_TCS_CMD_VOTE_MASK;
+
+ cmd->addr = addr;
+ cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
+
+ /*
+	 * Set the wait-for-completion flag on commands that need to complete
+	 * before the next command is sent.
+ */
+ if (commit)
+ cmd->wait = true;
+}
+
+static void tcs_list_gen(struct list_head *bcm_list,
+ struct tcs_cmd tcs_list[SDM845_MAX_VCD],
+ int n[SDM845_MAX_VCD])
+{
+ struct qcom_icc_bcm *bcm;
+ bool commit;
+ size_t idx = 0, batch = 0, cur_vcd_size = 0;
+
+ memset(n, 0, sizeof(int) * SDM845_MAX_VCD);
+
+ list_for_each_entry(bcm, bcm_list, list) {
+ commit = false;
+ cur_vcd_size++;
+ if ((list_is_last(&bcm->list, bcm_list)) ||
+ bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
+ commit = true;
+ cur_vcd_size = 0;
+ }
+ tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y,
+ bcm->addr, commit);
+ idx++;
+ n[batch]++;
+ /*
+		 * Batch the BCMs in such a way that we do not split them across
+ * multiple payloads when they are under the same VCD. This is
+ * to ensure that every BCM is committed since we only set the
+ * commit bit on the last BCM request of every VCD.
+ */
+ if (n[batch] >= MAX_RPMH_PAYLOAD) {
+ if (!commit) {
+ n[batch] -= cur_vcd_size;
+ n[batch + 1] = cur_vcd_size;
+ }
+ batch++;
+ }
+ }
+}
+
+static void bcm_aggregate(struct qcom_icc_bcm *bcm)
+{
+ size_t i;
+ u64 agg_avg = 0;
+ u64 agg_peak = 0;
+ u64 temp;
+
+ for (i = 0; i < bcm->num_nodes; i++) {
+ temp = bcm->nodes[i]->sum_avg * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
+ agg_avg = max(agg_avg, temp);
+
+ temp = bcm->nodes[i]->max_peak * bcm->aux_data.width;
+ do_div(temp, bcm->nodes[i]->buswidth);
+ agg_peak = max(agg_peak, temp);
+ }
+
+ temp = agg_avg * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_x = temp;
+
+ temp = agg_peak * 1000ULL;
+ do_div(temp, bcm->aux_data.unit);
+ bcm->vote_y = temp;
+
+ if (bcm->keepalive && bcm->vote_x == 0 && bcm->vote_y == 0) {
+ bcm->vote_x = 1;
+ bcm->vote_y = 1;
+ }
+
+ bcm->dirty = false;
+}
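
A quick sanity check on the arithmetic above, using hypothetical numbers:

/*
 * One node with sum_avg = 800000 kBps, buswidth = 4, channels = 2,
 * and BCM aux data width = 8, unit = 1000:
 *   agg_avg = 800000 * 8 / (4 * 2)  = 800000
 *   vote_x  = 800000 * 1000 / 1000  = 800000
 * The peak vote uses the same formula but divides by buswidth only.
 */
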
+
+static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ size_t i;
+ struct qcom_icc_node *qn;
+
+ qn = node->data;
+
+ *agg_avg += avg_bw;
+ *agg_peak = max_t(u32, *agg_peak, peak_bw);
+
+ qn->sum_avg = *agg_avg;
+ qn->max_peak = *agg_peak;
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qn->bcms[i]->dirty = true;
+
+ return 0;
+}
+
+static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ struct tcs_cmd cmds[SDM845_MAX_BCMS];
+ struct list_head commit_list;
+ int commit_idx[SDM845_MAX_VCD];
+ int ret = 0, i;
+
+ if (!src)
+ node = dst;
+ else
+ node = src;
+
+ qp = to_qcom_provider(node->provider);
+
+ INIT_LIST_HEAD(&commit_list);
+
+ for (i = 0; i < qp->num_bcms; i++) {
+ if (qp->bcms[i]->dirty) {
+ bcm_aggregate(qp->bcms[i]);
+ list_add_tail(&qp->bcms[i]->list, &commit_list);
+ }
+ }
+
+ /*
+	 * Construct the command list from a list of BCMs pre-ordered
+	 * by VCD.
+ */
+ tcs_list_gen(&commit_list, cmds, commit_idx);
+
+ if (!commit_idx[0])
+ return ret;
+
+ ret = rpmh_invalidate(qp->dev);
+ if (ret) {
+ pr_err("Error invalidating RPMH client (%d)\n", ret);
+ return ret;
+ }
+
+ ret = rpmh_write_batch(qp->dev, RPMH_ACTIVE_ONLY_STATE,
+ cmds, commit_idx);
+ if (ret) {
+ pr_err("Error sending AMC RPMH requests (%d)\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int cmp_vcd(const void *_l, const void *_r)
+{
+ const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm **)_l;
+ const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm **)_r;
+
+ if (l[0]->aux_data.vcd < r[0]->aux_data.vcd)
+ return -1;
+ else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd)
+ return 0;
+ else
+ return 1;
+}
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ dev_dbg(&pdev->dev, "registered node %p %s %d\n", node,
+ qnodes[i]->name, node->id);
+
+ /* populate links */
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ /*
+	 * Pre-sort the BCMs by VCD so it is easy to generate a command list
+	 * that groups BCMs with the same VCD together. VCDs are numbered
+	 * with the lowest being the most expensive time-wise, ensuring that
+	 * those commands are sent earliest in the queue.
+ */
+ sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL);
+
+ platform_set_drvdata(pdev, qp);
+
+ dev_dbg(&pdev->dev, "Registered SDM845 ICC\n");
+
+ return ret;
+err:
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ struct icc_provider *provider = &qp->provider;
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+
+ return icc_provider_del(provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sdm845-rsc-hlos", .data = &sdm845_rsc_hlos },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sdm845",
+ .of_match_table = qnoc_of_match,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index bbec6ac0a966..3581abfb0c6a 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -569,6 +569,7 @@ cuda_interrupt(int irq, void *arg)
unsigned char ibuf[16];
int ibuf_len = 0;
int complete = 0;
+ bool full;
spin_lock_irqsave(&cuda_lock, flags);
@@ -656,12 +657,13 @@ idle_state:
break;
case reading:
- if (reading_reply ? ARRAY_FULL(current_req->reply, reply_ptr)
- : ARRAY_FULL(cuda_rbuf, reply_ptr))
+ full = reading_reply ? ARRAY_FULL(current_req->reply, reply_ptr)
+ : ARRAY_FULL(cuda_rbuf, reply_ptr);
+ if (full)
(void)in_8(&via[SR]);
else
*reply_ptr++ = in_8(&via[SR]);
- if (!TREQ_asserted(status)) {
+ if (!TREQ_asserted(status) || full) {
if (mcu_is_egret)
assert_TACK();
/* that's all folks */
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index a0afadefcc49..1f6d008e0036 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -202,22 +202,20 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
return dpot_read_r8d8(dpot, ctrl);
case DPOT_UID(AD5272_ID):
case DPOT_UID(AD5274_ID):
- dpot_write_r8d8(dpot,
+ dpot_write_r8d8(dpot,
(DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
- value = dpot_read_r8d16(dpot,
- DPOT_AD5270_1_2_4_RDAC << 2);
-
- if (value < 0)
- return value;
- /*
- * AD5272/AD5274 returns high byte first, however
- * underling smbus expects low byte first.
- */
- value = swab16(value);
+ value = dpot_read_r8d16(dpot, DPOT_AD5270_1_2_4_RDAC << 2);
+ if (value < 0)
+ return value;
+ /*
+ * AD5272/AD5274 returns high byte first, however
+			 * the underlying smbus expects low byte first.
+ */
+ value = swab16(value);
- if (dpot->uid == DPOT_UID(AD5274_ID))
- value = value >> 2;
+ if (dpot->uid == DPOT_UID(AD5274_ID))
+ value = value >> 2;
return value;
default:
if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 5a17bfeb80d3..74d4fda6c4a7 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -125,9 +125,7 @@ enclosure_register(struct device *dev, const char *name, int components,
struct enclosure_component_callbacks *cb)
{
struct enclosure_device *edev =
- kzalloc(sizeof(struct enclosure_device) +
- sizeof(struct enclosure_component)*components,
- GFP_KERNEL);
+ kzalloc(struct_size(edev, component, components), GFP_KERNEL);
int err, i;
BUG_ON(!cb);
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 81a0541ef3ac..294fb2f66bfe 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -146,6 +146,8 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
*/
for (i = 0; i < NUM_MIRRORED_REGS; i++) {
temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
+ if (temp < 0)
+			temp = 0;
data->regs[regs_to_copy[i]] = temp >> 8;
}
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 2837dc77478e..b51cf182b031 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -37,16 +37,9 @@
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
#include <linux/slab.h>
-#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
-#ifdef CONFIG_IDE
-#include <linux/ide.h>
-#endif
-
#define DEFAULT_COUNT 10
static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
@@ -102,9 +95,7 @@ static struct crashpoint crashpoints[] = {
CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
CRASHPOINT("TIMERADD", "hrtimer_start"),
CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
-# ifdef CONFIG_IDE
CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
-# endif
#endif
};
@@ -152,7 +143,9 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(EXEC_VMALLOC),
CRASHTYPE(EXEC_RODATA),
CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(EXEC_NULL),
CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(ACCESS_NULL),
CRASHTYPE(WRITE_RO),
CRASHTYPE(WRITE_RO_AFTER_INIT),
CRASHTYPE(WRITE_KERN),
@@ -347,9 +340,9 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
+ n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- n += snprintf(buf + n, PAGE_SIZE - n, "%s\n",
+ n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
crashtypes[i].name);
}
buf[n] = '\0';
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 3c6fd327e166..b69ee004a3f7 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void);
void lkdtm_EXEC_VMALLOC(void);
void lkdtm_EXEC_RODATA(void);
void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_EXEC_NULL(void);
void lkdtm_ACCESS_USERSPACE(void);
+void lkdtm_ACCESS_NULL(void);
/* lkdtm_refcount.c */
void lkdtm_REFCOUNT_INC_OVERFLOW(void);
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 53b85c9d16b8..62f76d506f04 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write)
{
void (*func)(void) = dst;
- pr_info("attempting ok execution at %p\n", do_nothing);
+ pr_info("attempting ok execution at %px\n", do_nothing);
do_nothing();
if (write == CODE_WRITE) {
@@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write)
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
- pr_info("attempting bad execution at %p\n", func);
+ pr_info("attempting bad execution at %px\n", func);
func();
}
@@ -66,14 +66,14 @@ static void execute_user_location(void *dst)
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
- pr_info("attempting ok execution at %p\n", do_nothing);
+ pr_info("attempting ok execution at %px\n", do_nothing);
do_nothing();
copied = access_process_vm(current, (unsigned long)dst, do_nothing,
EXEC_SIZE, FOLL_WRITE);
if (copied < EXEC_SIZE)
return;
- pr_info("attempting bad execution at %p\n", func);
+ pr_info("attempting bad execution at %px\n", func);
func();
}
@@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void)
/* Explicitly cast away "const" for the test. */
unsigned long *ptr = (unsigned long *)&rodata;
- pr_info("attempting bad rodata write at %p\n", ptr);
+ pr_info("attempting bad rodata write at %px\n", ptr);
*ptr ^= 0xabcd1234;
}
@@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
return;
}
- pr_info("attempting bad ro_after_init write at %p\n", ptr);
+ pr_info("attempting bad ro_after_init write at %px\n", ptr);
*ptr ^= 0xabcd1234;
}
@@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void)
size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
ptr = (unsigned char *)do_overwritten;
- pr_info("attempting bad %zu byte write at %p\n", size, ptr);
+ pr_info("attempting bad %zu byte write at %px\n", size, ptr);
memcpy(ptr, (unsigned char *)do_nothing, size);
flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
@@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
+void lkdtm_EXEC_NULL(void)
+{
+ execute_location(NULL, CODE_AS_IS);
+}
+
void lkdtm_ACCESS_USERSPACE(void)
{
unsigned long user_addr, tmp = 0;
@@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void)
ptr = (unsigned long *)user_addr;
- pr_info("attempting bad read at %p\n", ptr);
+ pr_info("attempting bad read at %px\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
- pr_info("attempting bad write at %p\n", ptr);
+ pr_info("attempting bad write at %px\n", ptr);
*ptr = tmp;
vm_munmap(user_addr, PAGE_SIZE);
}
+void lkdtm_ACCESS_NULL(void)
+{
+ unsigned long tmp;
+ unsigned long *ptr = (unsigned long *)NULL;
+
+ pr_info("attempting bad read at %px\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+
+ pr_info("attempting bad write at %px\n", ptr);
+ *ptr = tmp;
+}
+
void __init lkdtm_perms_init(void)
{
/* Make sure we can write to __ro_after_init values during __init */
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 2b7f7677f8cc..b7d2487b8409 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -311,7 +311,8 @@ struct mei_client_properties {
u8 protocol_version;
u8 max_number_of_connections;
u8 fixed_address;
- u8 single_recv_buf;
+ u8 single_recv_buf:1;
+ u8 reserved:7;
u32 max_msg_length;
} __packed;
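The mei change turns single_recv_buf into a one-bit field and names the remaining seven bits, keeping the packed layout and overall size identical while documenting which bits are actually defined. A trimmed stand-alone illustration (the surrounding fields are reduced to two; field names kept from the hunk):

#include <stdint.h>
#include <stdio.h>

/* Trimmed version of the client-properties layout from the hunk above. */
struct client_props {
	uint8_t fixed_address;
	uint8_t single_recv_buf:1;   /* only bit 0 is meaningful */
	uint8_t reserved:7;          /* the other seven bits, named explicitly */
	uint32_t max_msg_length;
} __attribute__((packed));

int main(void)
{
	struct client_props p = { .single_recv_buf = 1 };

	/* Both layouts occupy a single byte for the flags; sizeof is unchanged. */
	printf("sizeof(struct client_props) = %zu\n", sizeof(p));
	printf("single_recv_buf = %d, reserved = %d\n",
	       p.single_recv_buf, p.reserved);
	return 0;
}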
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 744757f541be..3f96e19e57eb 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "vop_main.h"
@@ -118,7 +119,7 @@ _vop_total_desc_size(struct mic_device_desc __iomem *desc)
static u64 vop_get_features(struct virtio_device *vdev)
{
unsigned int i, bits;
- u32 features = 0;
+ u64 features = 0;
struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
u8 __iomem *in_features = _vop_vq_features(desc);
int feature_len = ioread8(&desc->feature_len);
@@ -126,7 +127,7 @@ static u64 vop_get_features(struct virtio_device *vdev)
bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
for (i = 0; i < bits; i++)
if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
- features |= BIT(i);
+ features |= BIT_ULL(i);
return features;
}
@@ -228,7 +229,7 @@ static void vop_reset_inform_host(struct virtio_device *dev)
if (ioread8(&dc->host_ack))
break;
msleep(100);
- };
+ }
dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
@@ -437,7 +438,7 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
if (!ioread8(&dc->used_address_updated))
break;
msleep(100);
- };
+ }
dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
if (!retry) {
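vop_get_features() collects up to 64 virtio feature bits, so a u32 accumulator filled with BIT(i) drops everything above bit 31 (and a 32-bit-wide shift by 32 or more is undefined); widening to u64 and using BIT_ULL(i) keeps the high features. A quick userspace illustration, with bit 32 chosen only as an example:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)      (1U << (nr))        /* 32-bit shift: cannot reach bits >= 32 */
#define BIT_ULL(nr)  (1ULL << (nr))      /* 64-bit shift: safe up to bit 63 */

int main(void)
{
	uint64_t narrow = 0, wide = 0;
	int feature_bit = 32;                /* e.g. a feature above the low 32 bits */

	/* narrow |= BIT(feature_bit); -- undefined behaviour: shift >= type width */
	wide |= BIT_ULL(feature_bit);

	printf("BIT_ULL(32) = 0x%llx, 32-bit accumulator stays 0x%llx\n",
	       (unsigned long long)wide, (unsigned long long)narrow);
	return 0;
}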
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index cbc8ebcff5cf..0bac8bce933c 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -531,12 +531,12 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
struct vop_vringh *vvr = &vdev->vvr[vr_idx];
struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
- size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
- bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+ size_t dma_alignment;
+ bool x200;
size_t dma_offset, partlen;
int err;
- if (!VOP_USE_DMA) {
+ if (!VOP_USE_DMA || !vi->dma_ch) {
if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
err = -EFAULT;
dev_err(vop_dev(vdev), "%s %d err %d\n",
@@ -548,6 +548,9 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
goto err;
}
+ dma_alignment = 1 << vi->dma_ch->device->copy_align;
+ x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+
dma_offset = daddr - round_down(daddr, dma_alignment);
daddr -= dma_offset;
len += dma_offset;
@@ -587,7 +590,7 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
err:
vpdev->hw_ops->iounmap(vpdev, dbuf);
dev_dbg(vop_dev(vdev),
- "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+ "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
__func__, ubuf, dbuf, len, vr_idx);
return err;
}
@@ -606,18 +609,23 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
struct vop_vringh *vvr = &vdev->vvr[vr_idx];
struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
- size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
- bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+ size_t dma_alignment;
+ bool x200;
size_t partlen;
- bool dma = VOP_USE_DMA;
+ bool dma = VOP_USE_DMA && vi->dma_ch;
int err = 0;
- if (daddr & (dma_alignment - 1)) {
- vdev->tx_dst_unaligned += len;
- dma = false;
- } else if (ALIGN(len, dma_alignment) > dlen) {
- vdev->tx_len_unaligned += len;
- dma = false;
+ if (dma) {
+ dma_alignment = 1 << vi->dma_ch->device->copy_align;
+ x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+
+ if (daddr & (dma_alignment - 1)) {
+ vdev->tx_dst_unaligned += len;
+ dma = false;
+ } else if (ALIGN(len, dma_alignment) > dlen) {
+ vdev->tx_len_unaligned += len;
+ dma = false;
+ }
}
if (!dma)
@@ -670,7 +678,7 @@ memcpy:
err:
vpdev->hw_ops->iounmap(vpdev, dbuf);
dev_dbg(vop_dev(vdev),
- "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
+ "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
__func__, ubuf, dbuf, len, vr_idx);
return err;
}
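Both vop copy helpers now compute the DMA alignment and the x200 quirk only after confirming a DMA channel exists, falling back to memcpy otherwise; the alignment fix-up itself is the usual round-down, pulling the device address back to an alignment boundary and growing the length by the bytes that were skipped. The arithmetic in isolation, with made-up address and alignment values:

#include <stdint.h>
#include <stdio.h>

/* Round v down to the nearest multiple of align (align is a power of two). */
static uint64_t round_down_p2(uint64_t v, uint64_t align)
{
	return v & ~(align - 1);
}

int main(void)
{
	uint64_t daddr = 0x10000000 + 0x2b;   /* hypothetical device address */
	size_t len = 4096;
	uint64_t dma_alignment = 64;          /* i.e. 1 << copy_align */

	uint64_t dma_offset = daddr - round_down_p2(daddr, dma_alignment);
	daddr -= dma_offset;                  /* now 64-byte aligned */
	len += dma_offset;                    /* cover the bytes we backed over */

	printf("daddr=0x%llx offset=%llu len=%zu\n",
	       (unsigned long long)daddr, (unsigned long long)dma_offset, len);
	return 0;
}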
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 93be82fc338a..2ec5808ba464 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -616,8 +616,8 @@ irqreturn_t gru_intr_mblade(int irq, void *dev_id)
for_each_possible_blade(blade) {
if (uv_blade_nr_possible_cpus(blade))
continue;
- gru_intr(0, blade);
- gru_intr(1, blade);
+ gru_intr(0, blade);
+ gru_intr(1, blade);
}
return IRQ_HANDLED;
}
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index f8240b87df22..f96dc3690ade 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -34,7 +34,6 @@
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
@@ -73,15 +72,26 @@ enum vmwballoon_capabilities {
VMW_BALLOON_BATCHED_CMDS = (1 << 2),
VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3),
VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4),
+ VMW_BALLOON_64_BIT_TARGET = (1 << 5)
};
-#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_BASIC_CMDS \
+#define VMW_BALLOON_CAPABILITIES_COMMON (VMW_BALLOON_BASIC_CMDS \
| VMW_BALLOON_BATCHED_CMDS \
| VMW_BALLOON_BATCHED_2M_CMDS \
| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
+/*
+ * 64-bit balloon targets are only supported on 64-bit builds
+ */
+#ifdef CONFIG_64BIT
+#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_CAPABILITIES_COMMON \
+ | VMW_BALLOON_64_BIT_TARGET)
+#else
+#define VMW_BALLOON_CAPABILITIES VMW_BALLOON_CAPABILITIES_COMMON
+#endif
+
enum vmballoon_page_size_type {
VMW_BALLOON_4K_PAGE,
VMW_BALLOON_2M_PAGE,
@@ -572,8 +582,9 @@ static int vmballoon_send_get_target(struct vmballoon *b)
limit = totalram_pages();
- /* Ensure limit fits in 32-bits */
- if (limit != (u32)limit)
+ /* Ensure limit fits in 32-bits if 64-bit targets are not supported */
+ if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
+ limit != (u32)limit)
return -EINVAL;
status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
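With VMW_BALLOON_64_BIT_TARGET advertised on 64-bit builds, the guest only has to reject a page count that does not fit in 32 bits when the host lacks that capability; limit != (u32)limit is the usual round-trip test for that. A sketch with an invented capability flag and page count:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if limit survives a round-trip through u32, i.e. fits in 32 bits. */
static bool fits_in_u32(uint64_t limit)
{
	return limit == (uint32_t)limit;
}

int main(void)
{
	bool has_64bit_target = sizeof(long) == 8;        /* stand-in for the capability bit */
	uint64_t totalram_pages = (20ULL << 40) / 4096;   /* a 20 TiB guest: > 2^32 pages */

	if (!has_64bit_target && !fits_in_u32(totalram_pages)) {
		fprintf(stderr, "target does not fit in 32 bits\n");
		return 1;
	}
	printf("sending GET_TARGET with limit=%llu pages\n",
	       (unsigned long long)totalram_pages);
	return 0;
}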
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index c49ff8970ce3..e071e28bca3f 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -53,6 +53,7 @@
#include <linux/ntb.h>
#include <linux/pci.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
/* PCI device IDs */
#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725
@@ -218,33 +219,4 @@ static inline int pdev_is_gen3(struct pci_dev *pdev)
return 0;
}
-#ifndef ioread64
-#ifdef readq
-#define ioread64 readq
-#else
-#define ioread64 _ioread64
-static inline u64 _ioread64(void __iomem *mmio)
-{
- u64 low, high;
-
- low = ioread32(mmio);
- high = ioread32(mmio + sizeof(u32));
- return low | (high << 32);
-}
-#endif
-#endif
-
-#ifndef iowrite64
-#ifdef writeq
-#define iowrite64 writeq
-#else
-#define iowrite64 _iowrite64
-static inline void _iowrite64(u64 val, void __iomem *mmio)
-{
- iowrite32(val, mmio);
- iowrite32(val >> 32, mmio + sizeof(u32));
-}
-#endif
-#endif
-
#endif
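The hand-rolled ioread64/iowrite64 fallbacks disappear because <linux/io-64-nonatomic-lo-hi.h> already provides the same low-word-first composition of two 32-bit accesses. A userspace sketch of that composition over a plain buffer (no real MMIO or ordering guarantees here), including the usual pitfall that the high half must be widened before shifting:

#include <stdint.h>
#include <stdio.h>

/* Compose a 64-bit value from two 32-bit halves, low word first,
 * the way the removed _ioread64() fallback did. */
static uint64_t read64_lo_hi(const volatile uint32_t *mmio)
{
	uint64_t low  = mmio[0];
	uint64_t high = mmio[1];          /* widened to 64 bits before the shift */

	return low | (high << 32);
}

static void write64_lo_hi(volatile uint32_t *mmio, uint64_t val)
{
	mmio[0] = (uint32_t)val;
	mmio[1] = (uint32_t)(val >> 32);
}

int main(void)
{
	uint32_t fake_regs[2];
	uint64_t v = 0x1122334455667788ULL;

	write64_lo_hi(fake_regs, v);
	printf("read back: 0x%llx\n",
	       (unsigned long long)read64_lo_hi(fake_regs));
	return 0;
}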
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index f1eaa3c4d46a..f2df2d39c65b 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -13,13 +13,14 @@
*
*/
-#include <linux/switchtec.h>
-#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/delay.h>
#include <linux/kthread.h>
-#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/pci.h>
+#include <linux/switchtec.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
@@ -36,35 +37,6 @@ module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
"Enable the use of the LUT based memory windows");
-#ifndef ioread64
-#ifdef readq
-#define ioread64 readq
-#else
-#define ioread64 _ioread64
-static inline u64 _ioread64(void __iomem *mmio)
-{
- u64 low, high;
-
- low = ioread32(mmio);
- high = ioread32(mmio + sizeof(u32));
- return low | (high << 32);
-}
-#endif
-#endif
-
-#ifndef iowrite64
-#ifdef writeq
-#define iowrite64 writeq
-#else
-#define iowrite64 _iowrite64
-static inline void _iowrite64(u64 val, void __iomem *mmio)
-{
- iowrite32(val, mmio);
- iowrite32(val >> 32, mmio + sizeof(u32));
-}
-#endif
-#endif
-
#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS 128
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 0a7a470ee859..9bbe2816400a 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -26,7 +26,7 @@ config NVMEM_IMX_IIM
config NVMEM_IMX_OCOTP
tristate "i.MX6 On-Chip OTP Controller support"
- depends on SOC_IMX6 || COMPILE_TEST
+ depends on SOC_IMX6 || SOC_IMX7D || COMPILE_TEST
depends on HAS_IOMEM
help
This is a driver for the On-Chip OTP Controller (OCOTP) available on
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index 4159b3f41d79..a8097511582a 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -11,13 +11,14 @@
* GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
/*
@@ -78,9 +79,9 @@ static struct otpc_map otp_map_v2 = {
};
struct otpc_priv {
- struct device *dev;
- void __iomem *base;
- struct otpc_map *map;
+ struct device *dev;
+ void __iomem *base;
+ const struct otpc_map *map;
struct nvmem_config *config;
};
@@ -237,16 +238,22 @@ static struct nvmem_config bcm_otpc_nvmem_config = {
};
static const struct of_device_id bcm_otpc_dt_ids[] = {
- { .compatible = "brcm,ocotp" },
- { .compatible = "brcm,ocotp-v2" },
+ { .compatible = "brcm,ocotp", .data = &otp_map },
+ { .compatible = "brcm,ocotp-v2", .data = &otp_map_v2 },
{ },
};
MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
+static const struct acpi_device_id bcm_otpc_acpi_ids[] = {
+ { .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map },
+ { .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_ids);
+
static int bcm_otpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *dn = dev->of_node;
struct resource *res;
struct otpc_priv *priv;
struct nvmem_device *nvmem;
@@ -257,14 +264,9 @@ static int bcm_otpc_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- if (of_device_is_compatible(dev->of_node, "brcm,ocotp"))
- priv->map = &otp_map;
- else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2"))
- priv->map = &otp_map_v2;
- else {
- dev_err(dev, "%s otpc config map not defined\n", __func__);
- return -EINVAL;
- }
+ priv->map = device_get_match_data(dev);
+ if (!priv->map)
+ return -ENODEV;
/* Get OTP base address register. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -281,7 +283,7 @@ static int bcm_otpc_probe(struct platform_device *pdev)
reset_start_bit(priv->base);
/* Read size of memory in words. */
- err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words);
+ err = device_property_read_u32(dev, "brcm,ocotp-size", &num_words);
if (err) {
dev_err(dev, "size parameter not specified\n");
return -EINVAL;
@@ -294,7 +296,7 @@ static int bcm_otpc_probe(struct platform_device *pdev)
bcm_otpc_nvmem_config.dev = dev;
bcm_otpc_nvmem_config.priv = priv;
- if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) {
+ if (priv->map == &otp_map_v2) {
bcm_otpc_nvmem_config.word_size = 8;
bcm_otpc_nvmem_config.stride = 8;
}
@@ -315,6 +317,7 @@ static struct platform_driver bcm_otpc_driver = {
.driver = {
.name = "brcm-otpc",
.of_match_table = bcm_otpc_dt_ids,
+ .acpi_match_table = ACPI_PTR(bcm_otpc_acpi_ids),
},
};
module_platform_driver(bcm_otpc_driver);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index f7301bb4ef3b..9dd07eae1f3e 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -525,12 +525,14 @@ out:
static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
- struct nvmem_cell *cell = NULL;
+ struct nvmem_cell *iter, *cell = NULL;
mutex_lock(&nvmem_mutex);
- list_for_each_entry(cell, &nvmem->cells, node) {
- if (strcmp(cell_id, cell->name) == 0)
+ list_for_each_entry(iter, &nvmem->cells, node) {
+ if (strcmp(cell_id, iter->name) == 0) {
+ cell = iter;
break;
+ }
}
mutex_unlock(&nvmem_mutex);
@@ -646,8 +648,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
config->name ? config->id : nvmem->id);
}
- nvmem->read_only = device_property_present(config->dev, "read-only") |
- config->read_only;
+ nvmem->read_only = device_property_present(config->dev, "read-only") ||
+ config->read_only || !nvmem->reg_write;
if (config->root_only)
nvmem->dev.groups = nvmem->read_only ?
@@ -809,6 +811,7 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np,
"could not increase module refcount for cell %s\n",
nvmem_dev_name(nvmem));
+ put_device(&nvmem->dev);
return ERR_PTR(-EINVAL);
}
@@ -819,6 +822,7 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np,
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
+ put_device(&nvmem->dev);
module_put(nvmem->owner);
kref_put(&nvmem->refcnt, nvmem_device_release);
}
@@ -837,13 +841,14 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
struct device_node *nvmem_np;
- int index;
+ int index = 0;
- index = of_property_match_string(np, "nvmem-names", id);
+ if (id)
+ index = of_property_match_string(np, "nvmem-names", id);
nvmem_np = of_parse_phandle(np, "nvmem", index);
if (!nvmem_np)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOENT);
return __nvmem_device_get(nvmem_np, NULL);
}
@@ -871,7 +876,7 @@ struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
}
- return nvmem_find(dev_name);
+ return __nvmem_device_get(NULL, dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
@@ -972,7 +977,7 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
if (IS_ERR(nvmem)) {
/* Provider may not be registered yet. */
cell = ERR_CAST(nvmem);
- goto out;
+ break;
}
cell = nvmem_find_cell_by_name(nvmem,
@@ -980,12 +985,11 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
if (!cell) {
__nvmem_device_put(nvmem);
cell = ERR_PTR(-ENOENT);
- goto out;
}
+ break;
}
}
-out:
mutex_unlock(&nvmem_lookup_mutex);
return cell;
}
@@ -994,12 +998,14 @@ out:
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
- struct nvmem_cell *cell = NULL;
+ struct nvmem_cell *iter, *cell = NULL;
mutex_lock(&nvmem_mutex);
- list_for_each_entry(cell, &nvmem->cells, node) {
- if (np == cell->np)
+ list_for_each_entry(iter, &nvmem->cells, node) {
+ if (np == iter->np) {
+ cell = iter;
break;
+ }
}
mutex_unlock(&nvmem_mutex);
@@ -1031,7 +1037,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
cell_np = of_parse_phandle(np, "nvmem-cells", index);
if (!cell_np)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOENT);
nvmem_np = of_get_next_parent(cell_np);
if (!nvmem_np)
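The two nvmem_find_cell_by_* changes fix the same pattern: list_for_each_entry() leaves its cursor pointing at a fake element computed from the list head when nothing matches, so the search must walk with a separate iterator and assign the result only on a hit. A minimal illustration with an ordinary singly linked list (names and types invented for the example):

#include <stdio.h>
#include <string.h>

struct cell {
	const char *name;
	struct cell *next;
};

/* Return the matching cell or NULL -- never the loop cursor itself, which
 * after an unsuccessful kernel-style list_for_each_entry() walk would be a
 * bogus pointer derived from the list head. */
static struct cell *find_cell_by_name(struct cell *head, const char *name)
{
	struct cell *iter, *cell = NULL;

	for (iter = head; iter; iter = iter->next) {
		if (strcmp(name, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	return cell;
}

int main(void)
{
	struct cell c2 = { "mac-address", NULL };
	struct cell c1 = { "calibration", &c2 };

	printf("found: %s\n", find_cell_by_name(&c1, "mac-address")->name);
	printf("missing: %p\n", (void *)find_cell_by_name(&c1, "serial"));
	return 0;
}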
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index afb429a417fe..08a9b1ef8ae4 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -427,19 +427,32 @@ static const struct ocotp_params imx6ul_params = {
.set_timing = imx_ocotp_set_imx6_timing,
};
+static const struct ocotp_params imx6ull_params = {
+ .nregs = 64,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
static const struct ocotp_params imx7d_params = {
.nregs = 64,
.bank_address_words = 4,
.set_timing = imx_ocotp_set_imx7_timing,
};
+static const struct ocotp_params imx7ulp_params = {
+ .nregs = 256,
+ .bank_address_words = 0,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
{ .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
{ .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
+ { .compatible = "fsl,imx6ull-ocotp", .data = &imx6ull_params },
{ .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
{ .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
+ { .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index 33185d8d82cf..c6ee21018d80 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -106,10 +106,12 @@ static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits)
static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
{
struct sc27xx_efuse *efuse = context;
- u32 buf;
+ u32 buf, blk_index = offset / SC27XX_EFUSE_BLOCK_WIDTH;
+ u32 blk_offset = (offset % SC27XX_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
int ret;
- if (offset > SC27XX_EFUSE_BLOCK_MAX || bytes > SC27XX_EFUSE_BLOCK_WIDTH)
+ if (blk_index > SC27XX_EFUSE_BLOCK_MAX ||
+ bytes > SC27XX_EFUSE_BLOCK_WIDTH)
return -EINVAL;
ret = sc27xx_efuse_lock(efuse);
@@ -133,7 +135,7 @@ static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes)
/* Set the block address to be read. */
ret = regmap_write(efuse->regmap,
efuse->base + SC27XX_EFUSE_BLOCK_INDEX,
- offset & SC27XX_EFUSE_BLOCK_MASK);
+ blk_index & SC27XX_EFUSE_BLOCK_MASK);
if (ret)
goto disable_efuse;
@@ -171,8 +173,10 @@ disable_efuse:
unlock_efuse:
sc27xx_efuse_unlock(efuse);
- if (!ret)
+ if (!ret) {
+ buf >>= blk_offset;
memcpy(val, &buf, bytes);
+ }
return ret;
}
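The sc27xx read path now treats offset as a byte offset into fixed-width efuse blocks: the block index is the offset divided by the block width, the shift is the byte remainder times eight, and the read-back word is shifted down before being copied out. The arithmetic in isolation, with a placeholder block width and made-up data (the driver's own SC27XX_EFUSE_BLOCK_WIDTH may differ):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_WIDTH   2   /* bytes per efuse block -- placeholder value */
#define BITS_PER_BYTE 8

int main(void)
{
	uint32_t offset = 5;                       /* byte offset requested by a cell */
	size_t bytes = 1;
	uint32_t blk_index = offset / BLOCK_WIDTH; /* which block to address */
	uint32_t blk_offset = (offset % BLOCK_WIDTH) * BITS_PER_BYTE;

	uint32_t buf = 0xBEEF;                     /* pretend read-back of that block */
	uint8_t val;

	buf >>= blk_offset;                        /* discard the bytes before 'offset' */
	memcpy(&val, &buf, bytes);

	printf("block %u, shift %u bits, value 0x%02x\n", blk_index, blk_offset, val);
	return 0;
}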
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 8e46a9dad2fa..7cb766dafe85 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -824,10 +824,10 @@ static void arm_spe_pmu_read(struct perf_event *event)
{
}
-static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages,
- bool snapshot)
+static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
+ int nr_pages, bool snapshot)
{
- int i;
+ int i, cpu = event->cpu;
struct page **pglist;
struct arm_spe_pmu_buf *buf;
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 479031aa4f88..74fdfa68d1f2 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -2,7 +2,7 @@ menuconfig GOLDFISH
bool "Platform support for Goldfish virtual devices"
depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
depends on HAS_IOMEM
- ---help---
+ help
Say Y here to get to see options for the Goldfish virtual platform.
This option alone does not add any kernel code.
@@ -12,7 +12,7 @@ if GOLDFISH
config GOLDFISH_PIPE
tristate "Goldfish virtual device for QEMU pipes"
- ---help---
+ help
This is a virtual device to drive the QEMU pipe interface used by
the Goldfish Android Virtual Device.
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index f38882f6f37d..8f9d9e9fa695 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1369,14 +1369,14 @@ config ATARI_SCSI
tristate "Atari native SCSI support"
depends on ATARI && SCSI
select SCSI_SPI_ATTRS
- select NVRAM
---help---
If you have an Atari with built-in NCR5380 SCSI controller (TT,
Falcon, ...) say Y to get it supported. Of course also, if you have
a compatible SCSI controller (e.g. for Medusa).
- To compile this driver as a module, choose M here: the
- module will be called atari_scsi.
+ To compile this driver as a module, choose M here: the module will
+ be called atari_scsi. If you also enable NVRAM support, the SCSI
+ host's ID is taken from the setting in TT RTC NVRAM.
This driver supports both styles of NCR integration into the
system: the TT style (separate DMA), and the Falcon style (via
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a503dc50c4f8..e809493d0d06 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -757,15 +757,17 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_hostid >= 0) {
atari_scsi_template.this_id = setup_hostid & 7;
- } else {
+ } else if (IS_REACHABLE(CONFIG_NVRAM)) {
/* Test if a host id is set in the NVRam */
- if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) {
- unsigned char b = nvram_read_byte(16);
+ if (ATARIHW_PRESENT(TT_CLK)) {
+ unsigned char b;
+ loff_t offset = 16;
+ ssize_t count = nvram_read(&b, 1, &offset);
/* Arbitration enabled? (for TOS)
* If yes, use configured host ID
*/
- if (b & 0x80)
+ if ((count == 1) && (b & 0x80))
atari_scsi_template.this_id = b & 7;
}
}
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 55eda5863a6b..b2f07d2043eb 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -21,7 +21,9 @@ static const struct slim_device_id *slim_match(const struct slim_device_id *id,
{
while (id->manf_id != 0 || id->prod_code != 0) {
if (id->manf_id == sbdev->e_addr.manf_id &&
- id->prod_code == sbdev->e_addr.prod_code)
+ id->prod_code == sbdev->e_addr.prod_code &&
+ id->dev_index == sbdev->e_addr.dev_index &&
+ id->instance == sbdev->e_addr.instance)
return id;
id++;
}
@@ -40,6 +42,23 @@ static int slim_device_match(struct device *dev, struct device_driver *drv)
return !!slim_match(sbdrv->id_table, sbdev);
}
+static void slim_device_update_status(struct slim_device *sbdev,
+ enum slim_device_status status)
+{
+ struct slim_driver *sbdrv;
+
+ if (sbdev->status == status)
+ return;
+
+ sbdev->status = status;
+ if (!sbdev->dev.driver)
+ return;
+
+ sbdrv = to_slim_driver(sbdev->dev.driver);
+ if (sbdrv->device_status)
+ sbdrv->device_status(sbdev, sbdev->status);
+}
+
static int slim_device_probe(struct device *dev)
{
struct slim_device *sbdev = to_slim_device(dev);
@@ -53,8 +72,7 @@ static int slim_device_probe(struct device *dev)
/* try getting the logical address after probe */
ret = slim_get_logical_addr(sbdev);
if (!ret) {
- if (sbdrv->device_status)
- sbdrv->device_status(sbdev, sbdev->status);
+ slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
} else {
dev_err(&sbdev->dev, "Failed to get logical address\n");
ret = -EPROBE_DEFER;
@@ -256,6 +274,7 @@ int slim_register_controller(struct slim_controller *ctrl)
mutex_init(&ctrl->lock);
mutex_init(&ctrl->sched.m_reconf);
init_completion(&ctrl->sched.pause_comp);
+ spin_lock_init(&ctrl->txn_lock);
dev_dbg(ctrl->dev, "Bus [%s] registered:dev:%p\n",
ctrl->name, ctrl->dev);
@@ -295,23 +314,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
}
EXPORT_SYMBOL_GPL(slim_unregister_controller);
-static void slim_device_update_status(struct slim_device *sbdev,
- enum slim_device_status status)
-{
- struct slim_driver *sbdrv;
-
- if (sbdev->status == status)
- return;
-
- sbdev->status = status;
- if (!sbdev->dev.driver)
- return;
-
- sbdrv = to_slim_driver(sbdev->dev.driver);
- if (sbdrv->device_status)
- sbdrv->device_status(sbdev, sbdev->status);
-}
-
/**
* slim_report_absent() - Controller calls this function when a device
* reports absent, OR when the device cannot be communicated with
@@ -464,6 +466,7 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
sbdev->laddr = laddr;
sbdev->is_laddr_valid = true;
+ mutex_unlock(&ctrl->lock);
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
@@ -471,6 +474,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
laddr, sbdev->e_addr.manf_id, sbdev->e_addr.prod_code,
sbdev->e_addr.dev_index, sbdev->e_addr.instance);
+ return 0;
+
err:
mutex_unlock(&ctrl->lock);
return ret;
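slim_match() now compares all four parts of the SLIMbus enumeration address, manufacturer id, product code, device index and instance, against a table terminated by an all-zero entry, so two instances of the same part can be told apart. Reduced to a standalone sketch (struct layout and table contents invented):

#include <stdint.h>
#include <stdio.h>

struct slim_id {
	uint16_t manf_id;
	uint16_t prod_code;
	uint8_t dev_index;
	uint8_t instance;
};

/* Walk a table terminated by an all-zero manf_id/prod_code entry and
 * return the first entry whose four address fields all match. */
static const struct slim_id *match_id(const struct slim_id *id,
				      const struct slim_id *addr)
{
	while (id->manf_id != 0 || id->prod_code != 0) {
		if (id->manf_id == addr->manf_id &&
		    id->prod_code == addr->prod_code &&
		    id->dev_index == addr->dev_index &&
		    id->instance == addr->instance)
			return id;
		id++;
	}
	return NULL;
}

int main(void)
{
	static const struct slim_id table[] = {
		{ 0x217, 0x201, 0, 0 },
		{ 0x217, 0x201, 1, 0 },   /* same part, second device index */
		{ 0, 0, 0, 0 },
	};
	struct slim_id dev = { 0x217, 0x201, 1, 0 };
	const struct slim_id *hit = match_id(table, &dev);

	printf("matched entry with dev_index %d\n", hit ? hit->dev_index : -1);
	return 0;
}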
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 131342280b46..a57698985f9c 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -491,10 +491,10 @@ static int uio_open(struct inode *inode, struct file *filep)
if (!idev->info) {
mutex_unlock(&idev->info_lock);
ret = -EINVAL;
- goto err_alloc_listener;
+ goto err_infoopen;
}
- if (idev->info && idev->info->open)
+ if (idev->info->open)
ret = idev->info->open(idev->info, inode);
mutex_unlock(&idev->info_lock);
if (ret)
@@ -635,7 +635,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
goto out;
}
- if (!idev->info || !idev->info->irq) {
+ if (!idev->info->irq) {
retval = -EIO;
goto out;
}
@@ -940,9 +940,12 @@ int __uio_register_device(struct module *owner,
atomic_set(&idev->event, 0);
ret = uio_get_minor(idev);
- if (ret)
+ if (ret) {
+ kfree(idev);
return ret;
+ }
+ device_initialize(&idev->dev);
idev->dev.devt = MKDEV(uio_major, idev->minor);
idev->dev.class = &uio_class;
idev->dev.parent = parent;
@@ -953,7 +956,7 @@ int __uio_register_device(struct module *owner,
if (ret)
goto err_device_create;
- ret = device_register(&idev->dev);
+ ret = device_add(&idev->dev);
if (ret)
goto err_device_create;
@@ -985,9 +988,10 @@ int __uio_register_device(struct module *owner,
err_request_irq:
uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
- device_unregister(&idev->dev);
+ device_del(&idev->dev);
err_device_create:
uio_free_minor(idev);
+ put_device(&idev->dev);
return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index 8773e373ffe5..dde5cbb27178 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -39,6 +39,22 @@ to_uio_pci_generic_dev(struct uio_info *info)
return container_of(info, struct uio_pci_generic_dev, info);
}
+static int release(struct uio_info *info, struct inode *inode)
+{
+ struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info);
+
+ /*
+ * This driver is insecure when used with devices doing DMA, but some
+ * people (mis)use it with such devices.
+ * Let's at least make sure DMA isn't left enabled after the userspace
+ * driver closes the fd.
+ * Note that there's a non-zero chance doing this will wedge the device
+ * at least until reset.
+ */
+ pci_clear_master(gdev->pdev);
+ return 0;
+}
+
/* Interrupt handler. Read/modify/write the command register to disable
* the interrupt. */
static irqreturn_t irqhandler(int irq, struct uio_info *info)
@@ -78,6 +94,7 @@ static int probe(struct pci_dev *pdev,
gdev->info.name = "uio_pci_generic";
gdev->info.version = DRIVER_VERSION;
+ gdev->info.release = release;
gdev->pdev = pdev;
if (pdev->irq) {
gdev->info.irq = pdev->irq;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index ae7712c9687a..58a9590c9db6 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -536,7 +536,7 @@ config FB_IMSTT
bool "IMS Twin Turbo display support"
depends on (FB = y) && PCI
select FB_CFB_IMAGEBLIT
- select FB_MACMODES if PPC
+ select FB_MACMODES if PPC_PMAC
help
The IMS Twin Turbo is a PCI-based frame buffer card bundled with
many Macintosh and compatible computers.
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 9cb0ef7ac29e..7af8db28bb80 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -411,35 +411,23 @@ static int __init init_control(struct fb_info_control *p)
full = p->total_vram == 0x400000;
/* Try to pick a video mode out of NVRAM if we have one. */
-#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM) {
+ cmode = default_cmode;
+ if (IS_REACHABLE(CONFIG_NVRAM) && cmode == CMODE_NVRAM)
cmode = nvram_read_byte(NV_CMODE);
- if(cmode < CMODE_8 || cmode > CMODE_32)
- cmode = CMODE_8;
- } else
-#endif
- cmode=default_cmode;
-#ifdef CONFIG_NVRAM
- if (default_vmode == VMODE_NVRAM) {
+ if (cmode < CMODE_8 || cmode > CMODE_32)
+ cmode = CMODE_8;
+
+ vmode = default_vmode;
+ if (IS_REACHABLE(CONFIG_NVRAM) && vmode == VMODE_NVRAM)
vmode = nvram_read_byte(NV_VMODE);
- if (vmode < 1 || vmode > VMODE_MAX ||
- control_mac_modes[vmode - 1].m[full] < cmode) {
- sense = read_control_sense(p);
- printk("Monitor sense value = 0x%x, ", sense);
- vmode = mac_map_monitor_sense(sense);
- if (control_mac_modes[vmode - 1].m[full] < cmode)
- vmode = VMODE_640_480_60;
- }
- } else
-#endif
- {
- vmode=default_vmode;
- if (control_mac_modes[vmode - 1].m[full] < cmode) {
- if (cmode > CMODE_8)
- cmode--;
- else
- vmode = VMODE_640_480_60;
- }
+ if (vmode < 1 || vmode > VMODE_MAX ||
+ control_mac_modes[vmode - 1].m[full] < cmode) {
+ sense = read_control_sense(p);
+ printk(KERN_CONT "Monitor sense value = 0x%x, ", sense);
+ vmode = mac_map_monitor_sense(sense);
+ if (control_mac_modes[vmode - 1].m[full] < 0)
+ vmode = VMODE_640_480_60;
+ cmode = min(cmode, control_mac_modes[vmode - 1].m[full]);
}
/* Initialize info structure */
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 901ca4ed10e9..5d9670daf60e 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -30,9 +30,8 @@
#include <asm/io.h>
#include <linux/uaccess.h>
-#if defined(CONFIG_PPC)
+#if defined(CONFIG_PPC_PMAC)
#include <linux/nvram.h>
-#include <asm/prom.h>
#include "macmodes.h"
#endif
@@ -327,14 +326,13 @@ enum {
TVP = 1
};
-#define USE_NV_MODES 1
#define INIT_BPP 8
#define INIT_XRES 640
#define INIT_YRES 480
static int inverse = 0;
static char fontname[40] __initdata = { 0 };
-#if defined(CONFIG_PPC)
+#if defined(CONFIG_PPC_PMAC)
static signed char init_vmode = -1, init_cmode = -1;
#endif
@@ -1390,8 +1388,8 @@ static void init_imstt(struct fb_info *info)
}
}
-#if USE_NV_MODES && defined(CONFIG_PPC32)
- {
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+ if (IS_REACHABLE(CONFIG_NVRAM) && machine_is(powermac)) {
int vmode = init_vmode, cmode = init_cmode;
if (vmode == -1) {
@@ -1409,12 +1407,13 @@ static void init_imstt(struct fb_info *info)
info->var.yres = info->var.yres_virtual = INIT_YRES;
info->var.bits_per_pixel = INIT_BPP;
}
- }
-#else
- info->var.xres = info->var.xres_virtual = INIT_XRES;
- info->var.yres = info->var.yres_virtual = INIT_YRES;
- info->var.bits_per_pixel = INIT_BPP;
+ } else
#endif
+ {
+ info->var.xres = info->var.xres_virtual = INIT_XRES;
+ info->var.yres = info->var.yres_virtual = INIT_YRES;
+ info->var.bits_per_pixel = INIT_BPP;
+ }
if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
|| !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
@@ -1565,7 +1564,7 @@ imsttfb_setup(char *options)
inverse = 1;
fb_invert_cmaps();
}
-#if defined(CONFIG_PPC)
+#if defined(CONFIG_PPC_PMAC)
else if (!strncmp(this_opt, "vmode:", 6)) {
int vmode = simple_strtoul(this_opt+6, NULL, 0);
if (vmode > 0 && vmode <= VMODE_MAX)
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 838869c6490c..d11b5e6210ed 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -111,12 +111,12 @@
#include "matroxfb_g450.h"
#include <linux/matroxfb.h>
#include <linux/interrupt.h>
+#include <linux/nvram.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
-unsigned char nvram_read_byte(int);
static int default_vmode = VMODE_NVRAM;
static int default_cmode = CMODE_NVRAM;
#endif
@@ -1872,10 +1872,11 @@ static int initMatrox2(struct matrox_fb_info *minfo, struct board *b)
#ifndef MODULE
if (machine_is(powermac)) {
struct fb_var_screeninfo var;
+
if (default_vmode <= 0 || default_vmode > VMODE_MAX)
default_vmode = VMODE_640_480_60;
-#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM)
+#if defined(CONFIG_PPC32)
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
#endif
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index bf6b7fb83cf4..76f299375a00 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -345,23 +345,18 @@ static int platinum_init_fb(struct fb_info *info)
sense = read_platinum_sense(pinfo);
printk(KERN_INFO "platinumfb: Monitor sense value = 0x%x, ", sense);
- if (default_vmode == VMODE_NVRAM) {
-#ifdef CONFIG_NVRAM
+
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_vmode == VMODE_NVRAM)
default_vmode = nvram_read_byte(NV_VMODE);
- if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
- !platinum_reg_init[default_vmode-1])
-#endif
- default_vmode = VMODE_CHOOSE;
- }
- if (default_vmode == VMODE_CHOOSE) {
+ if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
+ !platinum_reg_init[default_vmode - 1]) {
default_vmode = mac_map_monitor_sense(sense);
+ if (!platinum_reg_init[default_vmode - 1])
+ default_vmode = VMODE_640_480_60;
}
- if (default_vmode <= 0 || default_vmode > VMODE_MAX)
- default_vmode = VMODE_640_480_60;
-#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM)
+
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
-#endif
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
default_cmode = CMODE_8;
/*
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index d51c3a8009cb..e04fde9c1fcd 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -63,15 +63,8 @@
#include "macmodes.h"
#include "valkyriefb.h"
-#ifdef CONFIG_MAC
-/* We don't yet have functions to read the PRAM... perhaps we can
- adapt them from the PPC code? */
-static int default_vmode = VMODE_CHOOSE;
-static int default_cmode = CMODE_8;
-#else
static int default_vmode = VMODE_NVRAM;
static int default_cmode = CMODE_NVRAM;
-#endif
struct fb_par_valkyrie {
int vmode, cmode;
@@ -283,24 +276,21 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
printk(KERN_INFO "Monitor sense value = 0x%x\n", p->sense);
/* Try to pick a video mode out of NVRAM if we have one. */
-#if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM)
- if (default_vmode == VMODE_NVRAM) {
+#ifdef CONFIG_PPC_PMAC
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_vmode == VMODE_NVRAM)
default_vmode = nvram_read_byte(NV_VMODE);
- if (default_vmode <= 0
- || default_vmode > VMODE_MAX
- || !valkyrie_reg_init[default_vmode - 1])
- default_vmode = VMODE_CHOOSE;
- }
#endif
- if (default_vmode == VMODE_CHOOSE)
+ if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
+ !valkyrie_reg_init[default_vmode - 1]) {
default_vmode = mac_map_monitor_sense(p->sense);
- if (!valkyrie_reg_init[default_vmode - 1])
- default_vmode = VMODE_640_480_67;
-#if !defined(CONFIG_MAC) && defined(CONFIG_NVRAM)
- if (default_cmode == CMODE_NVRAM)
+ if (!valkyrie_reg_init[default_vmode - 1])
+ default_vmode = VMODE_640_480_67;
+ }
+
+#ifdef CONFIG_PPC_PMAC
+ if (IS_REACHABLE(CONFIG_NVRAM) && default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
#endif
-
/*
* Reduce the pixel size if we don't have enough VRAM or bandwidth.
*/