Diffstat (limited to 'drivers/misc')
31 files changed, 3036 insertions, 1465 deletions
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index b2fb6dbffcef..4387ccb79e64 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -57,3 +57,17 @@ obj-$(CONFIG_ECHO) += echo/ obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o obj-$(CONFIG_CXL_BASE) += cxl/ obj-$(CONFIG_PANEL) += panel.o + +lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o +lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o +lkdtm-$(CONFIG_LKDTM) += lkdtm_heap.o +lkdtm-$(CONFIG_LKDTM) += lkdtm_perms.o +lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o +lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o + +OBJCOPYFLAGS := +OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \ + --set-section-flags .text=alloc,readonly \ + --rename-section .text=.rodata +$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o + $(call if_changed,objcopy) diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig index 8756d06e2bb8..b75cf830d08a 100644 --- a/drivers/misc/cxl/Kconfig +++ b/drivers/misc/cxl/Kconfig @@ -7,11 +7,7 @@ config CXL_BASE default n select PPC_COPRO_BASE -config CXL_KERNEL_API - bool - default n - -config CXL_EEH +config CXL_AFU_DRIVER_OPS bool default n @@ -19,8 +15,7 @@ config CXL tristate "Support for IBM Coherent Accelerators (CXL)" depends on PPC_POWERNV && PCI_MSI && EEH select CXL_BASE - select CXL_KERNEL_API - select CXL_EEH + select CXL_AFU_DRIVER_OPS default m help Select this option to enable driver support for IBM Coherent @@ -33,3 +28,11 @@ config CXL CAPI adapters are found in POWER8 based systems. If unsure, say N. + +config CXL_BIMODAL + bool "Support for bi-modal CAPI cards" + depends on HOTPLUG_PCI_POWERNV = y && CXL || HOTPLUG_PCI_POWERNV = m && CXL = m + default y + help + Select this option to enable support for bi-modal CAPI cards, such as + the Mellanox CX-4. diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile index 8a55c1aa11aa..56e9a4732ef0 100644 --- a/drivers/misc/cxl/Makefile +++ b/drivers/misc/cxl/Makefile @@ -3,7 +3,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror cxl-y += main.o file.o irq.o fault.o native.o cxl-y += context.o sysfs.o debugfs.o pci.o trace.o -cxl-y += vphb.o api.o +cxl-y += vphb.o phb.o api.o cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o obj-$(CONFIG_CXL) += cxl.o obj-$(CONFIG_CXL_BASE) += base.o diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 6d228ccd884d..f3d34b941f85 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -13,6 +13,8 @@ #include <linux/file.h> #include <misc/cxl.h> #include <linux/fs.h> +#include <asm/pnv-pci.h> +#include <linux/msi.h> #include "cxl.h" @@ -24,6 +26,8 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) int rc; afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return ERR_CAST(afu); ctx = cxl_context_alloc(); if (IS_ERR(ctx)) { @@ -94,6 +98,42 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num) return 0; } +int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq) +{ + if (*ctx == NULL || *afu_irq == 0) { + *afu_irq = 1; + *ctx = cxl_get_context(pdev); + } else { + (*afu_irq)++; + if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) { + *ctx = list_next_entry(*ctx, extra_irq_contexts); + *afu_irq = 1; + } + } + return cxl_find_afu_irq(*ctx, *afu_irq); +} +/* Exported via cxl_base */ + +int cxl_set_priv(struct cxl_context *ctx, void *priv) +{ + if (!ctx) + return -EINVAL; + + ctx->priv = priv; + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_set_priv); + +void *cxl_get_priv(struct cxl_context *ctx) +{ + if (!ctx) + return ERR_PTR(-EINVAL); + + return 
ctx->priv; +} +EXPORT_SYMBOL_GPL(cxl_get_priv); + int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num) { int res; @@ -102,7 +142,10 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num) if (num == 0) num = ctx->afu->pp_irqs; res = afu_allocate_irqs(ctx, num); - if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) { + if (res) + return res; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) { /* In a guest, the PSL interrupt is not multiplexed. It was * allocated above, and we need to set its handler */ @@ -110,6 +153,13 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num) if (hwirq) cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl"); } + + if (ctx->status == STARTED) { + if (cxl_ops->update_ivtes) + cxl_ops->update_ivtes(ctx); + else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n"); + } + return res; } EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs); @@ -323,6 +373,23 @@ struct cxl_context *cxl_fops_get_context(struct file *file) } EXPORT_SYMBOL_GPL(cxl_fops_get_context); +void cxl_set_driver_ops(struct cxl_context *ctx, + struct cxl_afu_driver_ops *ops) +{ + WARN_ON(!ops->fetch_event || !ops->event_delivered); + atomic_set(&ctx->afu_driver_events, 0); + ctx->afu_driver_ops = ops; +} +EXPORT_SYMBOL_GPL(cxl_set_driver_ops); + +void cxl_context_events_pending(struct cxl_context *ctx, + unsigned int new_events) +{ + atomic_add(new_events, &ctx->afu_driver_events); + wake_up_all(&ctx->wq); +} +EXPORT_SYMBOL_GPL(cxl_context_events_pending); + int cxl_start_work(struct cxl_context *ctx, struct cxl_ioctl_start_work *work) { @@ -390,7 +457,106 @@ EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image); ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count) { struct cxl_afu *afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return -ENODEV; return cxl_ops->read_adapter_vpd(afu->adapter, buf, count); } EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd); + +int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs) +{ + struct cxl_afu *afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return -ENODEV; + + if (irqs > afu->adapter->user_irqs) + return -EINVAL; + + /* Limit user_irqs to prevent the user increasing this via sysfs */ + afu->adapter->user_irqs = irqs; + afu->irqs_max = irqs; + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process); + +int cxl_get_max_irqs_per_process(struct pci_dev *dev) +{ + struct cxl_afu *afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return -ENODEV; + + return afu->irqs_max; +} +EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process); + +/* + * This is a special interrupt allocation routine called from the PHB's MSI + * setup function. When capi interrupts are allocated in this manner they must + * still be associated with a running context, but since the MSI APIs have no + * way to specify this we use the default context associated with the device. + * + * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU + * interrupt number, so in order to overcome this their driver informs us of + * the restriction by setting the maximum interrupts per context, and we + * allocate additional contexts as necessary so that we can keep the AFU + * interrupt number within the supported range. 
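The chained-context scheme described here means PHB-side MSI code cannot assume that a single context holds all of the allocated interrupts. As a minimal sketch only (not part of this patch: nvec, pdev and the virq-mapping step are assumed to come from the calling PHB code), the allocated hardware interrupt numbers could be walked with the cxl_next_msi_hwirq() wrapper that base.c exports later in this diff:

	struct cxl_context *ctx = NULL;
	int i, afu_irq = 0, hwirq;

	for (i = 0; i < nvec; i++) {
		hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
		if (hwirq <= 0)
			break;	/* no context available, or no further AFU interrupts */
		/* map hwirq to a Linux virq and compose the MSI message here */
	}

The iteration state lives entirely in (ctx, afu_irq), both starting at NULL/0, which is exactly what _cxl_next_msi_hwirq() below expects.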
+ */ +int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + struct cxl_context *ctx, *new_ctx, *default_ctx; + int remaining; + int rc; + + ctx = default_ctx = cxl_get_context(pdev); + if (WARN_ON(!default_ctx)) + return -ENODEV; + + remaining = nvec; + while (remaining > 0) { + rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max)); + if (rc) { + pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev)); + return rc; + } + remaining -= ctx->afu->irqs_max; + + if (ctx != default_ctx && default_ctx->status == STARTED) { + WARN_ON(cxl_start_context(ctx, + be64_to_cpu(default_ctx->elem->common.wed), + NULL)); + } + + if (remaining > 0) { + new_ctx = cxl_dev_context_init(pdev); + if (!new_ctx) { + pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev)); + return -ENOSPC; + } + list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts); + ctx = new_ctx; + } + } + + return 0; +} +/* Exported via cxl_base */ + +void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev) +{ + struct cxl_context *ctx, *pos, *tmp; + + ctx = cxl_get_context(pdev); + if (WARN_ON(!ctx)) + return; + + cxl_free_afu_irqs(ctx); + list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) { + cxl_stop_context(pos); + cxl_free_afu_irqs(pos); + list_del(&pos->extra_irq_contexts); + cxl_release_context(pos); + } +} +/* Exported via cxl_base */ diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c index 9b90ec6c07cd..cd54ce6f6230 100644 --- a/drivers/misc/cxl/base.c +++ b/drivers/misc/cxl/base.c @@ -54,6 +54,19 @@ static inline void cxl_calls_put(struct cxl_calls *calls) { } #endif /* CONFIG_CXL_MODULE */ +/* AFU refcount management */ +struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) +{ + return (get_device(&afu->dev) == NULL) ? NULL : afu; +} +EXPORT_SYMBOL_GPL(cxl_afu_get); + +void cxl_afu_put(struct cxl_afu *afu) +{ + put_device(&afu->dev); +} +EXPORT_SYMBOL_GPL(cxl_afu_put); + void cxl_slbia(struct mm_struct *mm) { struct cxl_calls *calls; @@ -93,9 +106,92 @@ int cxl_update_properties(struct device_node *dn, } EXPORT_SYMBOL_GPL(cxl_update_properties); +/* + * API calls into the driver that may be called from the PHB code and must be + * built in. 
+ */ +bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu) +{ + bool ret; + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return false; + + ret = calls->cxl_pci_associate_default_context(dev, afu); + + cxl_calls_put(calls); + + return ret; +} +EXPORT_SYMBOL_GPL(cxl_pci_associate_default_context); + +void cxl_pci_disable_device(struct pci_dev *dev) +{ + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return; + + calls->cxl_pci_disable_device(dev); + + cxl_calls_put(calls); +} +EXPORT_SYMBOL_GPL(cxl_pci_disable_device); + +int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq) +{ + int ret; + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return -EBUSY; + + ret = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq); + + cxl_calls_put(calls); + + return ret; +} +EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq); + +int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int ret; + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return false; + + ret = calls->cxl_cx4_setup_msi_irqs(pdev, nvec, type); + + cxl_calls_put(calls); + + return ret; +} +EXPORT_SYMBOL_GPL(cxl_cx4_setup_msi_irqs); + +void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev) +{ + struct cxl_calls *calls; + + calls = cxl_calls_get(); + if (!calls) + return; + + calls->cxl_cx4_teardown_msi_irqs(pdev); + + cxl_calls_put(calls); +} +EXPORT_SYMBOL_GPL(cxl_cx4_teardown_msi_irqs); + static int __init cxl_base_init(void) { - struct device_node *np = NULL; + struct device_node *np; struct platform_device *dev; int count = 0; @@ -105,8 +201,7 @@ static int __init cxl_base_init(void) if (cpu_has_feature(CPU_FTR_HVMODE)) return 0; - while ((np = of_find_compatible_node(np, NULL, - "ibm,coherent-platform-facility"))) { + for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") { dev = of_platform_device_create(np, NULL, NULL); if (dev) count++; @@ -114,5 +209,4 @@ static int __init cxl_base_init(void) pr_devel("Found %d cxl device(s)\n", count); return 0; } - -module_init(cxl_base_init); +device_initcall(cxl_base_init); diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 26d206b1d08c..bdee9a01ef35 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -67,6 +67,9 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, ctx->pending_fault = false; ctx->pending_afu_err = false; + INIT_LIST_HEAD(&ctx->irq_names); + INIT_LIST_HEAD(&ctx->extra_irq_contexts); + /* * When we have to destroy all contexts in cxl_context_detach_all() we * end up with afu_release_irqs() called from inside a @@ -87,7 +90,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, */ mutex_lock(&afu->contexts_lock); idr_preload(GFP_KERNEL); - i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0, + i = idr_alloc(&ctx->afu->contexts_idr, ctx, + ctx->afu->adapter->native->sl_ops->min_pe, ctx->afu->num_procs, GFP_NOWAIT); idr_preload_end(); mutex_unlock(&afu->contexts_lock); diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 4fe50788ff45..de090533f18c 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -24,6 +24,7 @@ #include <asm/reg.h> #include <misc/cxl-base.h> +#include <misc/cxl.h> #include <uapi/misc/cxl.h> extern uint cxl_verbose; @@ -34,7 +35,7 @@ extern uint cxl_verbose; * Bump version each time a user API change is made, whether it is * backwards compatible ot not. 
*/ -#define CXL_API_VERSION 2 +#define CXL_API_VERSION 3 #define CXL_API_VERSION_COMPATIBLE 1 /* @@ -81,6 +82,7 @@ static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8}; static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0}; /* 0x00C0:7EFF Implementation dependent area */ +/* PSL registers */ static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100}; static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108}; static const cxl_p1_reg_t CXL_PSL_Timebase = {0x0110}; @@ -91,6 +93,11 @@ static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148}; static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150}; static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158}; static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170}; +/* XSL registers (Mellanox CX4) */ +static const cxl_p1_reg_t CXL_XSL_Timebase = {0x0100}; +static const cxl_p1_reg_t CXL_XSL_TB_CTLSTAT = {0x0108}; +static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158}; +static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168}; /* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */ /* 0x8000:FFFF Reserved PCIe MSI-X Table Area */ @@ -182,6 +189,18 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; #define CXL_PSL_ID_An_F (1ull << (63-31)) #define CXL_PSL_ID_An_L (1ull << (63-30)) +/****** CXL_PSL_SERR_An ****************************************************/ +#define CXL_PSL_SERR_An_afuto (1ull << (63-0)) +#define CXL_PSL_SERR_An_afudis (1ull << (63-1)) +#define CXL_PSL_SERR_An_afuov (1ull << (63-2)) +#define CXL_PSL_SERR_An_badsrc (1ull << (63-3)) +#define CXL_PSL_SERR_An_badctx (1ull << (63-4)) +#define CXL_PSL_SERR_An_llcmdis (1ull << (63-5)) +#define CXL_PSL_SERR_An_llcmdto (1ull << (63-6)) +#define CXL_PSL_SERR_An_afupar (1ull << (63-7)) +#define CXL_PSL_SERR_An_afudup (1ull << (63-8)) +#define CXL_PSL_SERR_An_AE (1ull << (63-30)) + /****** CXL_PSL_SCNTL_An ****************************************************/ #define CXL_PSL_SCNTL_An_CR (0x1ull << (63-15)) /* Programming Modes: */ @@ -421,18 +440,6 @@ struct cxl_afu { bool enabled; }; -/* AFU refcount management */ -static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) -{ - - return (get_device(&afu->dev) == NULL) ? NULL : afu; -} - -static inline void cxl_afu_put(struct cxl_afu *afu) -{ - put_device(&afu->dev); -} - struct cxl_irq_name { struct list_head list; @@ -477,6 +484,9 @@ struct cxl_context { /* Only used in PR mode */ u64 process_token; + /* driver private data */ + void *priv; + unsigned long *irq_bitmap; /* Accessed from IRQ context */ struct cxl_irq_ranges irqs; struct list_head irq_names; @@ -522,7 +532,36 @@ struct cxl_context { bool pending_fault; bool pending_afu_err; + /* Used by AFU drivers for driver specific event delivery */ + struct cxl_afu_driver_ops *afu_driver_ops; + atomic_t afu_driver_events; + struct rcu_head rcu; + + /* + * Only used when more interrupts are allocated via + * pci_enable_msix_range than are supported in the default context, to + * use additional contexts to overcome the limitation. i.e. 
Mellanox + * CX4 only: + */ + struct list_head extra_irq_contexts; +}; + +struct cxl_service_layer_ops { + int (*adapter_regs_init)(struct cxl *adapter, struct pci_dev *dev); + int (*afu_regs_init)(struct cxl_afu *afu); + int (*register_serr_irq)(struct cxl_afu *afu); + void (*release_serr_irq)(struct cxl_afu *afu); + void (*debugfs_add_adapter_sl_regs)(struct cxl *adapter, struct dentry *dir); + void (*debugfs_add_afu_sl_regs)(struct cxl_afu *afu, struct dentry *dir); + void (*psl_irq_dump_registers)(struct cxl_context *ctx); + void (*err_irq_dump_registers)(struct cxl *adapter); + void (*debugfs_stop_trace)(struct cxl *adapter); + void (*write_timebase_ctrl)(struct cxl *adapter); + u64 (*timebase_read)(struct cxl *adapter); + int capi_mode; + bool needs_reset_before_disable; + int min_pe; }; struct cxl_native { @@ -533,6 +572,7 @@ struct cxl_native { irq_hw_number_t err_hwirq; unsigned int err_virq; u64 ps_off; + const struct cxl_service_layer_ops *sl_ops; }; struct cxl_guest { @@ -688,9 +728,21 @@ static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg) ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, loff_t off, size_t count); +/* Internal functions wrapped in cxl_base to allow PHB to call them */ +bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu); +void _cxl_pci_disable_device(struct pci_dev *dev); +int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq); +int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type); +void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev); struct cxl_calls { void (*cxl_slbia)(struct mm_struct *mm); + bool (*cxl_pci_associate_default_context)(struct pci_dev *dev, struct cxl_afu *afu); + void (*cxl_pci_disable_device)(struct pci_dev *dev); + int (*cxl_next_msi_hwirq)(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq); + int (*cxl_cx4_setup_msi_irqs)(struct pci_dev *pdev, int nvec, int type); + void (*cxl_cx4_teardown_msi_irqs)(struct pci_dev *pdev); + struct module *owner; }; int register_cxl_calls(struct cxl_calls *calls); @@ -805,6 +857,11 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter); int cxl_afu_disable(struct cxl_afu *afu); int cxl_psl_purge(struct cxl_afu *afu); +void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir); +void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter, struct dentry *dir); +void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir); +void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx); +void cxl_native_err_irq_dump_regs(struct cxl *adapter); void cxl_stop_trace(struct cxl *cxl); int cxl_pci_vphb_add(struct cxl_afu *afu); void cxl_pci_vphb_remove(struct cxl_afu *afu); @@ -855,6 +912,7 @@ struct cxl_backend_ops { int (*attach_process)(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr); int (*detach_process)(struct cxl_context *ctx); + void (*update_ivtes)(struct cxl_context *ctx); bool (*support_attributes)(const char *attr_name, enum cxl_attrs type); bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu); void (*release_afu)(struct device *dev); @@ -879,4 +937,7 @@ extern const struct cxl_backend_ops *cxl_ops; /* check if the given pci_dev is on the the cxl vphb bus */ bool cxl_pci_is_vphb_device(struct pci_dev *dev); + +/* decode AFU error bits in the PSL register PSL_SERR_An */ +void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr); #endif diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c index 5751899e0c17..ec7b8a017439 100644 
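The new cxl_service_layer_ops structure introduced above in cxl.h is the seam that lets the PSL and the Mellanox CX4's XSL share the native code path. In this patch the hooks that callers test for NULL (afu_regs_init, register_serr_irq/release_serr_irq, the debugfs and register-dump hooks, debugfs_stop_trace) are effectively optional, while adapter_regs_init, write_timebase_ctrl, timebase_read and capi_mode are used unconditionally. A hypothetical minimal service layer description might therefore look like the following sketch; the example_* helpers are named purely for illustration and do not exist in this patch:

static const struct cxl_service_layer_ops example_sl_ops = {
	.adapter_regs_init   = example_adapter_regs_init,
	.write_timebase_ctrl = example_write_timebase_ctrl,
	.timebase_read       = example_timebase_read,
	.capi_mode           = OPAL_PHB_CAPI_MODE_CAPI,
	/* NULL-checked hooks (debugfs, SERR IRQ, dump helpers) left unset */
};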
--- a/drivers/misc/cxl/debugfs.c +++ b/drivers/misc/cxl/debugfs.c @@ -51,6 +51,19 @@ static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode, return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64); } +void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir) +{ + debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1)); + debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2)); + debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL)); + debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE)); +} + +void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter, struct dentry *dir) +{ + debugfs_create_io_x64("fec", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_XSL_FEC)); +} + int cxl_debugfs_adapter_add(struct cxl *adapter) { struct dentry *dir; @@ -65,13 +78,10 @@ int cxl_debugfs_adapter_add(struct cxl *adapter) return PTR_ERR(dir); adapter->debugfs = dir; - debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1)); - debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2)); - debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL)); debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE)); - debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE)); - + if (adapter->native->sl_ops->debugfs_add_adapter_sl_regs) + adapter->native->sl_ops->debugfs_add_adapter_sl_regs(adapter, dir); return 0; } @@ -80,6 +90,14 @@ void cxl_debugfs_adapter_remove(struct cxl *adapter) debugfs_remove_recursive(adapter->debugfs); } +void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir) +{ + debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An)); + debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); + debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An)); + debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE)); +} + int cxl_debugfs_afu_add(struct cxl_afu *afu) { struct dentry *dir; @@ -94,18 +112,15 @@ int cxl_debugfs_afu_add(struct cxl_afu *afu) return PTR_ERR(dir); afu->debugfs = dir; - debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An)); - debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); - debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An)); debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An)); - debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An)); debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An)); debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An)); debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An)); debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An)); - debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE)); + if (afu->adapter->native->sl_ops->debugfs_add_afu_sl_regs) + afu->adapter->native->sl_ops->debugfs_add_afu_sl_regs(afu, dir); return 0; } diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index eec468f1612f..5fb9894b157f 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -293,6 +293,17 @@ 
int afu_mmap(struct file *file, struct vm_area_struct *vm) return cxl_context_iomap(ctx, vm); } +static inline bool ctx_event_pending(struct cxl_context *ctx) +{ + if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err) + return true; + + if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) + return true; + + return false; +} + unsigned int afu_poll(struct file *file, struct poll_table_struct *poll) { struct cxl_context *ctx = file->private_data; @@ -305,8 +316,7 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll) pr_devel("afu_poll wait done pe: %i\n", ctx->pe); spin_lock_irqsave(&ctx->lock, flags); - if (ctx->pending_irq || ctx->pending_fault || - ctx->pending_afu_err) + if (ctx_event_pending(ctx)) mask |= POLLIN | POLLRDNORM; else if (ctx->status == CLOSED) /* Only error on closed when there are no futher events pending @@ -319,16 +329,46 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll) return mask; } -static inline int ctx_event_pending(struct cxl_context *ctx) +static ssize_t afu_driver_event_copy(struct cxl_context *ctx, + char __user *buf, + struct cxl_event *event, + struct cxl_event_afu_driver_reserved *pl) { - return (ctx->pending_irq || ctx->pending_fault || - ctx->pending_afu_err || (ctx->status == CLOSED)); + /* Check event */ + if (!pl) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL); + return -EFAULT; + } + + /* Check event size */ + event->header.size += pl->data_size; + if (event->header.size > CXL_READ_MIN_SIZE) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL); + return -EFAULT; + } + + /* Copy event header */ + if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT); + return -EFAULT; + } + + /* Copy event data */ + buf += sizeof(struct cxl_event_header); + if (copy_to_user(buf, &pl->data, pl->data_size)) { + ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT); + return -EFAULT; + } + + ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */ + return event->header.size; } ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off) { struct cxl_context *ctx = file->private_data; + struct cxl_event_afu_driver_reserved *pl = NULL; struct cxl_event event; unsigned long flags; int rc; @@ -344,7 +384,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count, for (;;) { prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE); - if (ctx_event_pending(ctx)) + if (ctx_event_pending(ctx) || (ctx->status == CLOSED)) break; if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { @@ -374,7 +414,12 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count, memset(&event, 0, sizeof(event)); event.header.process_element = ctx->pe; event.header.size = sizeof(struct cxl_event_header); - if (ctx->pending_irq) { + if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) { + pr_devel("afu_read delivering AFU driver specific event\n"); + pl = ctx->afu_driver_ops->fetch_event(ctx); + atomic_dec(&ctx->afu_driver_events); + event.header.type = CXL_EVENT_AFU_DRIVER; + } else if (ctx->pending_irq) { pr_devel("afu_read delivering AFU interrupt\n"); event.header.size += sizeof(struct cxl_event_afu_interrupt); event.header.type = CXL_EVENT_AFU_INTERRUPT; @@ -404,6 +449,9 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count, spin_unlock_irqrestore(&ctx->lock, flags); + if (event.header.type == CXL_EVENT_AFU_DRIVER) + return afu_driver_event_copy(ctx, buf, &event, 
pl); + if (copy_to_user(buf, &event, event.header.size)) return -EFAULT; return event.header.size; @@ -558,7 +606,7 @@ int __init cxl_file_init(void) * If these change we really need to update API. Either change some * flags or update API version number CXL_API_VERSION. */ - BUILD_BUG_ON(CXL_API_VERSION != 2); + BUILD_BUG_ON(CXL_API_VERSION != 3); BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64); BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8); BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8); diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c index 68dd0b7da471..c63d61e17d56 100644 --- a/drivers/misc/cxl/flash.c +++ b/drivers/misc/cxl/flash.c @@ -24,8 +24,8 @@ struct ai_header { }; static struct semaphore sem; -unsigned long *buffer[CXL_AI_MAX_ENTRIES]; -struct sg_list *le; +static unsigned long *buffer[CXL_AI_MAX_ENTRIES]; +static struct sg_list *le; static u64 continue_token; static unsigned int transfer; diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index bc8d0b9870eb..9aa58a77a24d 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -196,15 +196,18 @@ static irqreturn_t guest_slice_irq_err(int irq, void *data) { struct cxl_afu *afu = data; int rc; - u64 serr; + u64 serr, afu_error, dsisr; - WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); if (rc) { dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc); return IRQ_HANDLED; } - dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr); + afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); + dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + cxl_afu_decode_psl_serr(afu, serr); + dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); + dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); if (rc) @@ -1052,16 +1055,18 @@ static void free_adapter(struct cxl *adapter) struct irq_avail *cur; int i; - if (adapter->guest->irq_avail) { - for (i = 0; i < adapter->guest->irq_nranges; i++) { - cur = &adapter->guest->irq_avail[i]; - kfree(cur->bitmap); + if (adapter->guest) { + if (adapter->guest->irq_avail) { + for (i = 0; i < adapter->guest->irq_nranges; i++) { + cur = &adapter->guest->irq_avail[i]; + kfree(cur->bitmap); + } + kfree(adapter->guest->irq_avail); } - kfree(adapter->guest->irq_avail); + kfree(adapter->guest->status); + kfree(adapter->guest); } - kfree(adapter->guest->status); cxl_remove_adapter_nr(adapter); - kfree(adapter->guest); kfree(adapter); } @@ -1182,6 +1187,7 @@ const struct cxl_backend_ops cxl_guest_ops = { .ack_irq = guest_ack_irq, .attach_process = guest_attach_process, .detach_process = guest_detach_process, + .update_ivtes = NULL, .support_attributes = guest_support_attributes, .link_ok = guest_link_ok, .release_afu = guest_release_afu, diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index 8def4553acba..dec60f58a767 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c @@ -260,9 +260,6 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count) else alloc_count = count + 1; - /* Initialize the list head to hold irq names */ - INIT_LIST_HEAD(&ctx->irq_names); - if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, alloc_count))) return rc; @@ -374,3 +371,32 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie) ctx->irq_count = 0; } + +void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr) +{ + dev_crit(&afu->dev, + "PSL Slice error received. 
Check AFU for root cause.\n"); + dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); + if (serr & CXL_PSL_SERR_An_afuto) + dev_crit(&afu->dev, "AFU MMIO Timeout\n"); + if (serr & CXL_PSL_SERR_An_afudis) + dev_crit(&afu->dev, + "MMIO targeted Accelerator that was not enabled\n"); + if (serr & CXL_PSL_SERR_An_afuov) + dev_crit(&afu->dev, "AFU CTAG Overflow\n"); + if (serr & CXL_PSL_SERR_An_badsrc) + dev_crit(&afu->dev, "Bad Interrupt Source\n"); + if (serr & CXL_PSL_SERR_An_badctx) + dev_crit(&afu->dev, "Bad Context Handle\n"); + if (serr & CXL_PSL_SERR_An_llcmdis) + dev_crit(&afu->dev, "LLCMD to Disabled AFU\n"); + if (serr & CXL_PSL_SERR_An_llcmdto) + dev_crit(&afu->dev, "LLCMD Timeout to AFU\n"); + if (serr & CXL_PSL_SERR_An_afupar) + dev_crit(&afu->dev, "AFU MMIO Parity Error\n"); + if (serr & CXL_PSL_SERR_An_afudup) + dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n"); + if (serr & CXL_PSL_SERR_An_AE) + dev_crit(&afu->dev, + "AFU asserted JDONE with JERROR in AFU Directed Mode\n"); +} diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index ae68c3201156..d9be23b24aa3 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -110,6 +110,11 @@ static inline void cxl_slbia_core(struct mm_struct *mm) static struct cxl_calls cxl_calls = { .cxl_slbia = cxl_slbia_core, + .cxl_pci_associate_default_context = _cxl_pci_associate_default_context, + .cxl_pci_disable_device = _cxl_pci_disable_device, + .cxl_next_msi_hwirq = _cxl_next_msi_hwirq, + .cxl_cx4_setup_msi_irqs = _cxl_cx4_setup_msi_irqs, + .cxl_cx4_teardown_msi_irqs = _cxl_cx4_teardown_msi_irqs, .owner = THIS_MODULE, }; diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 55d8a1459f28..3bcdaee11ba1 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -21,10 +21,10 @@ #include "cxl.h" #include "trace.h" -static int afu_control(struct cxl_afu *afu, u64 command, +static int afu_control(struct cxl_afu *afu, u64 command, u64 clear, u64 result, u64 mask, bool enabled) { - u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + u64 AFU_Cntl; unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); int rc = 0; @@ -33,7 +33,8 @@ static int afu_control(struct cxl_afu *afu, u64 command, trace_cxl_afu_ctrl(afu, command); - cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command); + AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command); AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); while ((AFU_Cntl & mask) != result) { @@ -54,6 +55,16 @@ static int afu_control(struct cxl_afu *afu, u64 command, cpu_relax(); AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); }; + + if (AFU_Cntl & CXL_AFU_Cntl_An_RA) { + /* + * Workaround for a bug in the XSL used in the Mellanox CX4 + * that fails to clear the RA bit after an AFU reset, + * preventing subsequent AFU resets from working. 
+ */ + cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA); + } + pr_devel("AFU command complete: %llx\n", command); afu->enabled = enabled; out: @@ -67,7 +78,7 @@ static int afu_enable(struct cxl_afu *afu) { pr_devel("AFU enable request\n"); - return afu_control(afu, CXL_AFU_Cntl_An_E, + return afu_control(afu, CXL_AFU_Cntl_An_E, 0, CXL_AFU_Cntl_An_ES_Enabled, CXL_AFU_Cntl_An_ES_MASK, true); } @@ -76,7 +87,8 @@ int cxl_afu_disable(struct cxl_afu *afu) { pr_devel("AFU disable request\n"); - return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled, + return afu_control(afu, 0, CXL_AFU_Cntl_An_E, + CXL_AFU_Cntl_An_ES_Disabled, CXL_AFU_Cntl_An_ES_MASK, false); } @@ -85,7 +97,7 @@ static int native_afu_reset(struct cxl_afu *afu) { pr_devel("AFU reset request\n"); - return afu_control(afu, CXL_AFU_Cntl_An_RA, + return afu_control(afu, CXL_AFU_Cntl_An_RA, 0, CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled, CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, false); @@ -189,7 +201,7 @@ int cxl_alloc_spa(struct cxl_afu *afu) unsigned spa_size; /* Work out how many pages to allocate */ - afu->native->spa_order = 0; + afu->native->spa_order = -1; do { afu->native->spa_order++; spa_size = (1 << afu->native->spa_order) * PAGE_SIZE; @@ -430,7 +442,6 @@ static int remove_process_element(struct cxl_context *ctx) return rc; } - void cxl_assign_psn_space(struct cxl_context *ctx) { if (!ctx->afu->pp_size || ctx->master) { @@ -507,10 +518,39 @@ static u64 calculate_sr(struct cxl_context *ctx) return sr; } +static void update_ivtes_directed(struct cxl_context *ctx) +{ + bool need_update = (ctx->status == STARTED); + int r; + + if (need_update) { + WARN_ON(terminate_process_element(ctx)); + WARN_ON(remove_process_element(ctx)); + } + + for (r = 0; r < CXL_IRQ_RANGES; r++) { + ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); + ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); + } + + /* + * Theoretically we could use the update llcmd, instead of a + * terminate/remove/add (or if an atomic update was required we could + * do a suspend/update/resume), however it seems there might be issues + * with the update llcmd on some cards (including those using an XSL on + * an ASIC) so for now it's safest to go with the commands that are + * known to work. In the future if we come across a situation where the + * card may be performing transactions using the same PE while we are + * doing this update we might need to revisit this. + */ + if (need_update) + WARN_ON(add_process_element(ctx)); +} + static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) { u32 pid; - int r, result; + int result; cxl_assign_psn_space(ctx); @@ -545,10 +585,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) ctx->irqs.range[0] = 1; } - for (r = 0; r < CXL_IRQ_RANGES; r++) { - ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); - ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); - } + update_ivtes_directed(ctx); ctx->elem->common.amr = cpu_to_be64(amr); ctx->elem->common.wed = cpu_to_be64(wed); @@ -570,7 +607,33 @@ static int deactivate_afu_directed(struct cxl_afu *afu) cxl_sysfs_afu_m_remove(afu); cxl_chardev_afu_remove(afu); - cxl_ops->afu_reset(afu); + /* + * The CAIA section 2.2.1 indicates that the procedure for starting and + * stopping an AFU in AFU directed mode is AFU specific, which is not + * ideal since this code is generic and with one exception has no + * knowledge of the AFU. 
This is in contrast to the procedure for + * disabling a dedicated process AFU, which is documented to just + * require a reset. The architecture does indicate that both an AFU + * reset and an AFU disable should result in the AFU being disabled and + * we do both followed by a PSL purge for safety. + * + * Notably we used to have some issues with the disable sequence on PSL + * cards, which is why we ended up using this heavy weight procedure in + * the first place, however a bug was discovered that had rendered the + * disable operation ineffective, so it is conceivable that was the + * sole explanation for those difficulties. Careful regression testing + * is recommended if anyone attempts to remove or reorder these + * operations. + * + * The XSL on the Mellanox CX4 behaves a little differently from the + * PSL based cards and will time out an AFU reset if the AFU is still + * enabled. That card is special in that we do have a means to identify + * it from this code, so in that case we skip the reset and just use a + * disable/purge to avoid the timeout and corresponding noise in the + * kernel log. + */ + if (afu->adapter->native->sl_ops->needs_reset_before_disable) + cxl_ops->afu_reset(afu); cxl_afu_disable(afu); cxl_psl_purge(afu); @@ -600,6 +663,22 @@ static int activate_dedicated_process(struct cxl_afu *afu) return cxl_chardev_d_afu_add(afu); } +static void update_ivtes_dedicated(struct cxl_context *ctx) +{ + struct cxl_afu *afu = ctx->afu; + + cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, + (((u64)ctx->irqs.offset[0] & 0xffff) << 48) | + (((u64)ctx->irqs.offset[1] & 0xffff) << 32) | + (((u64)ctx->irqs.offset[2] & 0xffff) << 16) | + ((u64)ctx->irqs.offset[3] & 0xffff)); + cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64) + (((u64)ctx->irqs.range[0] & 0xffff) << 48) | + (((u64)ctx->irqs.range[1] & 0xffff) << 32) | + (((u64)ctx->irqs.range[2] & 0xffff) << 16) | + ((u64)ctx->irqs.range[3] & 0xffff)); +} + static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) { struct cxl_afu *afu = ctx->afu; @@ -618,16 +697,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) cxl_prefault(ctx, wed); - cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, - (((u64)ctx->irqs.offset[0] & 0xffff) << 48) | - (((u64)ctx->irqs.offset[1] & 0xffff) << 32) | - (((u64)ctx->irqs.offset[2] & 0xffff) << 16) | - ((u64)ctx->irqs.offset[3] & 0xffff)); - cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64) - (((u64)ctx->irqs.range[0] & 0xffff) << 48) | - (((u64)ctx->irqs.range[1] & 0xffff) << 32) | - (((u64)ctx->irqs.range[2] & 0xffff) << 16) | - ((u64)ctx->irqs.range[3] & 0xffff)); + update_ivtes_dedicated(ctx); cxl_p2n_write(afu, CXL_PSL_AMR_An, amr); @@ -703,12 +773,37 @@ static int native_attach_process(struct cxl_context *ctx, bool kernel, static inline int detach_process_native_dedicated(struct cxl_context *ctx) { + /* + * The CAIA section 2.1.1 indicates that we need to do an AFU reset to + * stop the AFU in dedicated mode (we therefore do not make that + * optional like we do in the afu directed path). It does not indicate + * that we need to do an explicit disable (which should occur + * implicitly as part of the reset) or purge, but we do these as well + * to be on the safe side. 
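For reference, the dedicated-mode IVTE registers written by update_ivtes_dedicated() above pack the four interrupt ranges into 16-bit fields starting from the most significant end of each register. A worked example, with values chosen only for illustration:

	/* With ctx->irqs.offset = {0x100, 0, 0, 0} and ctx->irqs.range = {4, 0, 0, 0},
	 * update_ivtes_dedicated() writes:
	 *   CXL_PSL_IVTE_Offset_An = 0x0100000000000000
	 *   CXL_PSL_IVTE_Limit_An  = 0x0004000000000000
	 * i.e. range 0 lands in bits 63:48 and ranges 1-3 follow in the
	 * successive 16-bit fields.
	 */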
+ * + * Notably we used to have some issues with the disable sequence + * (before the sequence was spelled out in the architecture) which is + * why we were so heavy weight in the first place, however a bug was + * discovered that had rendered the disable operation ineffective, so + * it is conceivable that was the sole explanation for those + * difficulties. Point is, we should be careful and do some regression + * testing if we ever attempt to remove any part of this procedure. + */ cxl_ops->afu_reset(ctx->afu); cxl_afu_disable(ctx->afu); cxl_psl_purge(ctx->afu); return 0; } +static void native_update_ivtes(struct cxl_context *ctx) +{ + if (ctx->afu->current_mode == CXL_MODE_DIRECTED) + return update_ivtes_directed(ctx); + if (ctx->afu->current_mode == CXL_MODE_DEDICATED) + return update_ivtes_dedicated(ctx); + WARN(1, "native_update_ivtes: Bad mode\n"); +} + static inline int detach_process_native_afu_directed(struct cxl_context *ctx) { if (!ctx->pe_inserted) @@ -754,26 +849,38 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info) return 0; } -static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, - u64 dsisr, u64 errstat) +void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx) { u64 fir1, fir2, fir_slice, serr, afu_debug; fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); - serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); - dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); - dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); + if (ctx->afu->adapter->native->sl_ops->register_serr_irq) { + serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); + cxl_afu_decode_psl_serr(ctx->afu, serr); + } dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); +} + +static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, + u64 dsisr, u64 errstat) +{ + + dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); - dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); - cxl_stop_trace(ctx->afu->adapter); + if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers) + ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx); + + if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) { + dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); + ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter); + } return cxl_ops->ack_irq(ctx, 0, errstat); } @@ -849,41 +956,56 @@ void native_irq_wait(struct cxl_context *ctx) static irqreturn_t native_slice_irq_err(int irq, void *data) { struct cxl_afu *afu = data; - u64 fir_slice, errstat, serr, afu_debug; - - WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); + u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr; + /* + * slice err interrupt is only used with full PSL (no XSL) + */ serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An); errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An); - dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); + afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); + dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); + cxl_afu_decode_psl_serr(afu, serr); dev_crit(&afu->dev, 
"PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat); dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); + dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); + dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); return IRQ_HANDLED; } +void cxl_native_err_irq_dump_regs(struct cxl *adapter) +{ + u64 fir1, fir2; + + fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); + fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); + + dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2); +} + static irqreturn_t native_irq_err(int irq, void *data) { struct cxl *adapter = data; - u64 fir1, fir2, err_ivte; + u64 err_ivte; WARN(1, "CXL ERROR interrupt %i\n", irq); err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE); dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte); - dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); - cxl_stop_trace(adapter); - - fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); - fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); + if (adapter->native->sl_ops->debugfs_stop_trace) { + dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); + adapter->native->sl_ops->debugfs_stop_trace(adapter); + } - dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2); + if (adapter->native->sl_ops->err_irq_dump_registers) + adapter->native->sl_ops->err_irq_dump_registers(adapter); return IRQ_HANDLED; } @@ -1128,6 +1250,7 @@ const struct cxl_backend_ops cxl_native_ops = { .irq_wait = native_irq_wait, .attach_process = native_attach_process, .detach_process = native_detach_process, + .update_ivtes = native_update_ivtes, .support_attributes = native_support_attributes, .link_ok = cxl_adapter_link_ok, .release_afu = cxl_pci_release_afu, diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index a08fcc888a71..d152e2de8c93 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -55,6 +55,8 @@ pci_read_config_byte(dev, vsec + 0xa, dest) #define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \ pci_write_config_byte(dev, vsec + 0xa, val) +#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \ + pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val) #define CXL_VSEC_PROTOCOL_MASK 0xe0 #define CXL_VSEC_PROTOCOL_1024TB 0x80 #define CXL_VSEC_PROTOCOL_512TB 0x40 @@ -352,13 +354,10 @@ static u64 get_capp_unit_id(struct device_node *np) return 0; } -static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev) +static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id) { struct device_node *np; const __be32 *prop; - u64 psl_dsnctl; - u64 chipid; - u64 capp_unit_id; if (!(np = pnv_pci_get_phb_node(dev))) return -ENODEV; @@ -367,14 +366,28 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev np = of_get_next_parent(np); if (!np) return -ENODEV; - chipid = be32_to_cpup(prop); - capp_unit_id = get_capp_unit_id(np); + *chipid = be32_to_cpup(prop); + *capp_unit_id = get_capp_unit_id(np); of_node_put(np); - if (!capp_unit_id) { + if (!*capp_unit_id) { pr_err("cxl: invalid capp unit id\n"); return -ENODEV; } + return 0; +} + +static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev) +{ + u64 psl_dsnctl; + u64 chipid; + u64 capp_unit_id; + int rc; + + rc = calc_capp_routing(dev, &chipid, &capp_unit_id); + if (rc) + return rc; + psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */ psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang 
pulse: 256 us */ /* Tell PSL where to route data to */ @@ -393,8 +406,61 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev return 0; } +static int init_implementation_adapter_xsl_regs(struct cxl *adapter, struct pci_dev *dev) +{ + u64 xsl_dsnctl; + u64 chipid; + u64 capp_unit_id; + int rc; + + rc = calc_capp_routing(dev, &chipid, &capp_unit_id); + if (rc) + return rc; + + /* Tell XSL where to route data to */ + xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5)); + xsl_dsnctl |= (capp_unit_id << (63-13)); + cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl); + + return 0; +} + +/* PSL & XSL */ +#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3)) #define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6)) -#define _2048_250MHZ_CYCLES 1 +/* For the PSL this is a multiple for 0 < n <= 7: */ +#define PSL_2048_250MHZ_CYCLES 1 + +static void write_timebase_ctrl_psl(struct cxl *adapter) +{ + cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT, + TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES)); +} + +/* XSL */ +#define TBSYNC_ENA (1ULL << 63) +/* For the XSL this is 2**n * 2000 clocks for 0 < n <= 6: */ +#define XSL_2000_CLOCKS 1 +#define XSL_4000_CLOCKS 2 +#define XSL_8000_CLOCKS 3 + +static void write_timebase_ctrl_xsl(struct cxl *adapter) +{ + cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT, + TBSYNC_ENA | + TBSYNC_CAL(3) | + TBSYNC_CNT(XSL_4000_CLOCKS)); +} + +static u64 timebase_read_psl(struct cxl *adapter) +{ + return cxl_p1_read(adapter, CXL_PSL_Timebase); +} + +static u64 timebase_read_xsl(struct cxl *adapter) +{ + return cxl_p1_read(adapter, CXL_XSL_Timebase); +} static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) { @@ -421,8 +487,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) * Setup PSL Timebase Control and Status register * with the recommended Timebase Sync Count value */ - cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT, - TBSYNC_CNT(2 * _2048_250MHZ_CYCLES)); + adapter->native->sl_ops->write_timebase_ctrl(adapter); /* Enable PSL Timebase */ cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000); @@ -435,7 +500,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) dev_info(&dev->dev, "PSL timebase can't synchronize\n"); return; } - psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase); + psl_tb = adapter->native->sl_ops->timebase_read(adapter); delta = mftb() - psl_tb; if (delta < 0) delta = -delta; @@ -445,7 +510,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) return; } -static int init_implementation_afu_regs(struct cxl_afu *afu) +static int init_implementation_afu_psl_regs(struct cxl_afu *afu) { /* read/write masks for this slice */ cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL); @@ -551,36 +616,234 @@ static int setup_cxl_bars(struct pci_dev *dev) return 0; } -/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */ -static int switch_card_to_cxl(struct pci_dev *dev) -{ +#ifdef CONFIG_CXL_BIMODAL + +struct cxl_switch_work { + struct pci_dev *dev; + struct work_struct work; int vsec; + int mode; +}; + +static void switch_card_to_cxl(struct work_struct *work) +{ + struct cxl_switch_work *switch_work = + container_of(work, struct cxl_switch_work, work); + struct pci_dev *dev = switch_work->dev; + struct pci_bus *bus = dev->bus; + struct pci_controller *hose = pci_bus_to_host(bus); + struct pci_dev *bridge; + struct pnv_php_slot *php_slot; + unsigned int devfn; u8 val; int rc; - dev_info(&dev->dev, "switch card to CXL\n"); + 
dev_info(&bus->dev, "cxl: Preparing for mode switch...\n"); + bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev, + bus_list); + if (!bridge) { + dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n"); + goto err_dev_put; + } - if (!(vsec = find_cxl_vsec(dev))) { - dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); + php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge)); + if (!php_slot) { + dev_err(&bus->dev, "cxl: Failed to find slot hotplug " + "information. You may need to upgrade " + "skiboot. Aborting.\n"); + goto err_dev_put; + } + + rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val); + if (rc) { + dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc); + goto err_dev_put; + } + devfn = dev->devfn; + + /* Release the reference obtained in cxl_check_and_switch_mode() */ + pci_dev_put(dev); + + dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n"); + pci_lock_rescan_remove(); + pci_hp_remove_devices(bridge->subordinate); + pci_unlock_rescan_remove(); + + /* Switch the CXL protocol on the card */ + if (switch_work->mode == CXL_BIMODE_CXL) { + dev_info(&bus->dev, "cxl: Switching card to CXL mode\n"); + val &= ~CXL_VSEC_PROTOCOL_MASK; + val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; + rc = pnv_cxl_enable_phb_kernel_api(hose, true); + if (rc) { + dev_err(&bus->dev, "cxl: Failed to enable kernel API" + " on real PHB, aborting\n"); + goto err_free_work; + } + } else { + dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n"); + goto err_free_work; + } + + rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val); + if (rc) { + dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc); + goto err_free_work; + } + + /* + * The CAIA spec (v1.1, Section 10.6 Bi-modal Device Support) states + * we must wait 100ms after this mode switch before touching PCIe config + * space. + */ + msleep(100); + + /* + * Hot reset to cause the card to come back in cxl mode. A + * OPAL_RESET_PCI_LINK would be sufficient, but currently lacks support + * in skiboot, so we use a hot reset instead. + * + * We call pci_set_pcie_reset_state() on the bridge, as a CAPI card is + * guaranteed to sit directly under the root port, and setting the reset + * state on a device directly under the root port is equivalent to doing + * it on the root port iself. 
+ */ + dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n"); + pci_set_pcie_reset_state(bridge, pcie_hot_reset); + pci_set_pcie_reset_state(bridge, pcie_deassert_reset); + + dev_dbg(&bus->dev, "cxl: Offlining slot\n"); + rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE); + if (rc) { + dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc); + goto err_free_work; + } + + dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n"); + rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE); + if (rc) { + dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc); + goto err_free_work; + } + + pci_lock_rescan_remove(); + pci_hp_add_devices(bridge->subordinate); + pci_unlock_rescan_remove(); + + dev_info(&bus->dev, "cxl: CAPI mode switch completed\n"); + kfree(switch_work); + return; + +err_dev_put: + /* Release the reference obtained in cxl_check_and_switch_mode() */ + pci_dev_put(dev); +err_free_work: + kfree(switch_work); +} + +int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec) +{ + struct cxl_switch_work *work; + u8 val; + int rc; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) return -ENODEV; + + if (!vsec) { + vsec = find_cxl_vsec(dev); + if (!vsec) { + dev_info(&dev->dev, "CXL VSEC not found\n"); + return -ENODEV; + } } - if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) { - dev_err(&dev->dev, "failed to read current mode control: %i", rc); + rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val); + if (rc) { + dev_err(&dev->dev, "Failed to read current mode control: %i", rc); return rc; } - val &= ~CXL_VSEC_PROTOCOL_MASK; - val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; - if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) { - dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc); - return rc; + + if (mode == CXL_BIMODE_PCI) { + if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) { + dev_info(&dev->dev, "Card is already in PCI mode\n"); + return 0; + } + /* + * TODO: Before it's safe to switch the card back to PCI mode + * we need to disable the CAPP and make sure any cachelines the + * card holds have been flushed out. Needs skiboot support. + */ + dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n"); + return -EIO; } + + if (val & CXL_VSEC_PROTOCOL_ENABLE) { + dev_info(&dev->dev, "Card is already in CXL mode\n"); + return 0; + } + + dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread " + "to switch to CXL mode\n"); + + work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL); + if (!work) + return -ENOMEM; + + pci_dev_get(dev); + work->dev = dev; + work->vsec = vsec; + work->mode = mode; + INIT_WORK(&work->work, switch_card_to_cxl); + + schedule_work(&work->work); + /* - * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states - * we must wait 100ms after this mode switch before touching - * PCIe config space. + * We return a failure now to abort the driver init. Once the + * link has been cycled and the card is in cxl mode we will + * come back (possibly using the generic cxl driver), but + * return success as the card should then be in cxl mode. + * + * TODO: What if the card comes back in PCI mode even after + * the switch? Don't want to spin endlessly. 
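cxl_check_and_switch_mode() is exported so that a peer device driver (such as the one driving the CX4's network function) can request CXL mode from its own probe path. A rough sketch of that usage follows; example_probe() is a hypothetical consumer and the choice to require a DMA-capable slot is only illustrative. The -EBUSY handling mirrors the intent described in the surrounding comment:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	if (cxl_slot_is_supported(pdev, CXL_SLOT_FLAG_DMA)) {
		/* Returns -EBUSY after scheduling the switch; abort probe and
		 * expect to be probed again once the card is back in cxl mode. */
		rc = cxl_check_and_switch_mode(pdev, CXL_BIMODE_CXL, 0);
		if (rc)
			return rc;
	}
	/* normal PCI/driver initialisation continues here */
	return 0;
}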
*/ - msleep(100); + return -EBUSY; +} +EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode); + +#endif /* CONFIG_CXL_BIMODAL */ + +static int setup_cxl_protocol_area(struct pci_dev *dev) +{ + u8 val; + int rc; + int vsec = find_cxl_vsec(dev); + + if (!vsec) { + dev_info(&dev->dev, "CXL VSEC not found\n"); + return -ENODEV; + } + + rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val); + if (rc) { + dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc); + return rc; + } + + if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) { + dev_err(&dev->dev, "Card not in CAPI mode!\n"); + return -EIO; + } + + if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) { + val &= ~CXL_VSEC_PROTOCOL_MASK; + val |= CXL_VSEC_PROTOCOL_256TB; + rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val); + if (rc) { + dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc); + return rc; + } + } return 0; } @@ -712,6 +975,21 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) } } + if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) { + /* + * We could also check this for the dedicated process model + * since the architecture indicates it should be set to 1, but + * in that case we ignore the value and I'd rather not risk + * breaking any existing dedicated process AFUs that left it as + * 0 (not that I'm aware of any). It is clearly an error for an + * AFU directed AFU to set this to 0, and would have previously + * triggered a bug resulting in the maximum not being enforced + * at all since idr_alloc treats 0 as no maximum. + */ + dev_err(&afu->dev, "AFU does not support any processes\n"); + return -EINVAL; + } + return 0; } @@ -753,11 +1031,13 @@ static int sanitise_afu_regs(struct cxl_afu *afu) else cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); } - reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); - if (reg) { - if (reg & ~0xffff) - dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); - cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); + if (afu->adapter->native->sl_ops->register_serr_irq) { + reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); + if (reg) { + if (reg & ~0xffff) + dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); + cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); + } } reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); if (reg) { @@ -835,11 +1115,13 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc if ((rc = cxl_afu_descriptor_looks_ok(afu))) goto err1; - if ((rc = init_implementation_afu_regs(afu))) - goto err1; + if (adapter->native->sl_ops->afu_regs_init) + if ((rc = adapter->native->sl_ops->afu_regs_init(afu))) + goto err1; - if ((rc = cxl_native_register_serr_irq(afu))) - goto err1; + if (adapter->native->sl_ops->register_serr_irq) + if ((rc = adapter->native->sl_ops->register_serr_irq(afu))) + goto err1; if ((rc = cxl_native_register_psl_irq(afu))) goto err2; @@ -847,7 +1129,8 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc return 0; err2: - cxl_native_release_serr_irq(afu); + if (adapter->native->sl_ops->release_serr_irq) + adapter->native->sl_ops->release_serr_irq(afu); err1: pci_unmap_slice_regs(afu); return rc; @@ -856,7 +1139,8 @@ err1: static void pci_deconfigure_afu(struct cxl_afu *afu) { cxl_native_release_psl_irq(afu); - cxl_native_release_serr_irq(afu); + if (afu->adapter->native->sl_ops->release_serr_irq) + afu->adapter->native->sl_ops->release_serr_irq(afu); pci_unmap_slice_regs(afu); } @@ -1165,7 +1449,7 @@ static int cxl_configure_adapter(struct cxl *adapter, 
struct pci_dev *dev) if ((rc = setup_cxl_bars(dev))) return rc; - if ((rc = switch_card_to_cxl(dev))) + if ((rc = setup_cxl_protocol_area(dev))) return rc; if ((rc = cxl_update_image_control(adapter))) @@ -1177,10 +1461,13 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) if ((rc = sanitise_adapter_regs(adapter))) goto err; - if ((rc = init_implementation_adapter_regs(adapter, dev))) + if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev))) goto err; - if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI))) + /* Required for devices using CAPP DMA mode, harmless for others */ + pci_set_master(dev); + + if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) goto err; /* If recovery happened, the last step is to turn on snooping. @@ -1212,6 +1499,43 @@ static void cxl_deconfigure_adapter(struct cxl *adapter) pci_disable_device(pdev); } +static const struct cxl_service_layer_ops psl_ops = { + .adapter_regs_init = init_implementation_adapter_psl_regs, + .afu_regs_init = init_implementation_afu_psl_regs, + .register_serr_irq = cxl_native_register_serr_irq, + .release_serr_irq = cxl_native_release_serr_irq, + .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_psl_regs, + .debugfs_add_afu_sl_regs = cxl_debugfs_add_afu_psl_regs, + .psl_irq_dump_registers = cxl_native_psl_irq_dump_regs, + .err_irq_dump_registers = cxl_native_err_irq_dump_regs, + .debugfs_stop_trace = cxl_stop_trace, + .write_timebase_ctrl = write_timebase_ctrl_psl, + .timebase_read = timebase_read_psl, + .capi_mode = OPAL_PHB_CAPI_MODE_CAPI, + .needs_reset_before_disable = true, +}; + +static const struct cxl_service_layer_ops xsl_ops = { + .adapter_regs_init = init_implementation_adapter_xsl_regs, + .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_xsl_regs, + .write_timebase_ctrl = write_timebase_ctrl_xsl, + .timebase_read = timebase_read_xsl, + .capi_mode = OPAL_PHB_CAPI_MODE_DMA, + .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */ +}; + +static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) +{ + if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { + dev_info(&adapter->dev, "Device uses an XSL\n"); + adapter->native->sl_ops = &xsl_ops; + } else { + dev_info(&adapter->dev, "Device uses a PSL\n"); + adapter->native->sl_ops = &psl_ops; + } +} + + static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) { struct cxl *adapter; @@ -1227,6 +1551,8 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) goto err_release; } + set_sl_ops(adapter, dev); + /* Set defaults for parameters which need to persist over * configure/reconfigure */ @@ -1280,6 +1606,67 @@ static void cxl_pci_remove_adapter(struct cxl *adapter) device_unregister(&adapter->dev); } +#define CXL_MAX_PCIEX_PARENT 2 + +static int cxl_slot_is_switched(struct pci_dev *dev) +{ + struct device_node *np; + int depth = 0; + const __be32 *prop; + + if (!(np = pci_device_to_OF_node(dev))) { + pr_err("cxl: np = NULL\n"); + return -ENODEV; + } + of_node_get(np); + while (np) { + np = of_get_next_parent(np); + prop = of_get_property(np, "device_type", NULL); + if (!prop || strcmp((char *)prop, "pciex")) + break; + depth++; + } + of_node_put(np); + return (depth > CXL_MAX_PCIEX_PARENT); +} + +bool cxl_slot_is_supported(struct pci_dev *dev, int flags) +{ + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return false; + + if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) { + /* + * CAPP DMA mode is technically supported on regular P8, but + * will EEH if the 
card attempts to access memory < 4GB, which + * we cannot realistically avoid. We might be able to work + * around the issue, but until then return unsupported: + */ + return false; + } + + if (cxl_slot_is_switched(dev)) + return false; + + /* + * XXX: This gets a little tricky on regular P8 (not POWER8NVL) since + * the CAPP can be connected to PHB 0, 1 or 2 on a first come first + * served basis, which is racy to check from here. If we need to + * support this in future we might need to consider having this + * function effectively reserve it ahead of time. + * + * Currently, the only user of this API is the Mellanox CX4, which is + * only supported on P8NVL due to the above mentioned limitation of + * CAPP DMA mode and therefore does not need to worry about this. If the + * issue with CAPP DMA mode is later worked around on P8 we might need + * to revisit this. + */ + + return true; +} +EXPORT_SYMBOL_GPL(cxl_slot_is_supported); + + static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct cxl *adapter; @@ -1291,6 +1678,11 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; } + if (cxl_slot_is_switched(dev)) { + dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n"); + return -ENODEV; + } + if (cxl_verbose) dump_cxl_config_space(dev); @@ -1311,6 +1703,9 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc); } + if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1) + pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]); + return 0; } @@ -1381,6 +1776,9 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, */ for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; + /* Only participate in EEH if we are on a virtual PHB */ + if (afu->phb == NULL) + return PCI_ERS_RESULT_NONE; cxl_vphb_error_detected(afu, state); } return PCI_ERS_RESULT_DISCONNECT; diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c new file mode 100644 index 000000000000..0935d44c1770 --- /dev/null +++ b/drivers/misc/cxl/phb.c @@ -0,0 +1,44 @@ +/* + * Copyright 2014-2016 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/pci.h> +#include "cxl.h" + +bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu) +{ + struct cxl_context *ctx; + + /* + * Allocate a context to do cxl things to. This is used for interrupts + * in the peer model using a real phb, and if we eventually do DMA ops + * in the virtual phb, we'll need a default context to attach them to. 
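
As an aside, a minimal sketch of how a driver for a device sitting behind a cxl-enabled PHB could pick up the default context that _cxl_pci_associate_default_context() below stashes in archdata; example_afu_driver_probe() is a hypothetical caller, only cxl_get_context() is part of the cxl API touched by this patch:

/* Illustrative only, not part of the patch. */
static int example_afu_driver_probe(struct pci_dev *pdev)
{
	struct cxl_context *ctx = cxl_get_context(pdev);

	/*
	 * The default context is created when the device is enabled on the
	 * virtual or peer PHB; without it there is nothing to attach
	 * interrupts or DMA ops to.
	 */
	if (!ctx)
		return -ENODEV;

	return 0;
}
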
+ */ + ctx = cxl_dev_context_init(dev); + if (!ctx) + return false; + dev->dev.archdata.cxl_ctx = ctx; + + return (cxl_ops->afu_check_and_enable(afu) == 0); +} +/* exported via cxl_base */ + +void _cxl_pci_disable_device(struct pci_dev *dev) +{ + struct cxl_context *ctx = cxl_get_context(dev); + + if (ctx) { + if (ctx->status == STARTED) { + dev_err(&dev->dev, "Default context started\n"); + return; + } + dev->dev.archdata.cxl_ctx = NULL; + cxl_release_context(ctx); + } +} +/* exported via cxl_base */ diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index cdc7723b845d..dee8def1c193 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -9,6 +9,7 @@ #include <linux/pci.h> #include <misc/cxl.h> +#include <asm/pnv-pci.h> #include "cxl.h" static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) @@ -44,7 +45,6 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) { struct pci_controller *phb; struct cxl_afu *afu; - struct cxl_context *ctx; phb = pci_bus_to_host(dev->bus); afu = (struct cxl_afu *)phb->private_data; @@ -57,30 +57,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) set_dma_ops(&dev->dev, &dma_direct_ops); set_dma_offset(&dev->dev, PAGE_OFFSET); - /* - * Allocate a context to do cxl things too. If we eventually do real - * DMA ops, we'll need a default context to attach them to - */ - ctx = cxl_dev_context_init(dev); - if (!ctx) - return false; - dev->dev.archdata.cxl_ctx = ctx; - - return (cxl_ops->afu_check_and_enable(afu) == 0); -} - -static void cxl_pci_disable_device(struct pci_dev *dev) -{ - struct cxl_context *ctx = cxl_get_context(dev); - - if (ctx) { - if (ctx->status == STARTED) { - dev_err(&dev->dev, "Default context started\n"); - return; - } - dev->dev.archdata.cxl_ctx = NULL; - cxl_release_context(ctx); - } + return _cxl_pci_associate_default_context(dev, afu); } static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus, @@ -197,8 +174,8 @@ static struct pci_controller_ops cxl_pci_controller_ops = { .probe_mode = cxl_pci_probe_mode, .enable_device_hook = cxl_pci_enable_device_hook, - .disable_device = cxl_pci_disable_device, - .release_device = cxl_pci_disable_device, + .disable_device = _cxl_pci_disable_device, + .release_device = _cxl_pci_disable_device, .window_alignment = cxl_pci_window_alignment, .reset_secondary_bus = cxl_pci_reset_secondary_bus, .setup_msi_irqs = cxl_setup_msi_irqs, @@ -208,20 +185,30 @@ static struct pci_controller_ops cxl_pci_controller_ops = int cxl_pci_vphb_add(struct cxl_afu *afu) { - struct pci_dev *phys_dev; - struct pci_controller *phb, *phys_phb; + struct pci_controller *phb; struct device_node *vphb_dn; struct device *parent; - if (cpu_has_feature(CPU_FTR_HVMODE)) { - phys_dev = to_pci_dev(afu->adapter->dev.parent); - phys_phb = pci_bus_to_host(phys_dev->bus); - vphb_dn = phys_phb->dn; - parent = &phys_dev->dev; - } else { - vphb_dn = afu->adapter->dev.parent->of_node; - parent = afu->adapter->dev.parent; - } + /* + * If there are no AFU configuration records we won't have anything to + * expose under the vPHB, so skip creating one, returning success since + * this is still a valid case. This will also opt us out of EEH + * handling since we won't have anything special to do if there are no + * kernel drivers attached to the vPHB, and EEH handling is not yet + * supported in the peer model. + */ + if (!afu->crs_num) + return 0; + + /* The parent device is the adapter. Reuse the device node of + * the adapter. 
+ * We don't seem to care what device node is used for the vPHB, + * but tools such as lsvpd walk up the device parents looking + * for a valid location code, so we might as well show devices + * attached to the adapter as being located on that adapter. + */ + parent = afu->adapter->dev.parent; + vphb_dn = parent->of_node; /* Alloc and setup PHB data structure */ phb = pcibios_alloc_controller(vphb_dn); @@ -272,13 +259,18 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu) pcibios_free_controller(phb); } +static bool _cxl_pci_is_vphb_device(struct pci_controller *phb) +{ + return (phb->ops == &cxl_pcie_pci_ops); +} + bool cxl_pci_is_vphb_device(struct pci_dev *dev) { struct pci_controller *phb; phb = pci_bus_to_host(dev->bus); - return (phb->ops == &cxl_pcie_pci_ops); + return _cxl_pci_is_vphb_device(phb); } struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) @@ -287,7 +279,13 @@ struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) phb = pci_bus_to_host(dev->bus); - return (struct cxl_afu *)phb->private_data; + if (_cxl_pci_is_vphb_device(phb)) + return (struct cxl_afu *)phb->private_data; + + if (pnv_pci_on_cxl_phb(dev)) + return pnv_cxl_phb_to_afu(phb); + + return ERR_PTR(-ENODEV); } EXPORT_SYMBOL_GPL(cxl_pci_to_afu); diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 9ceb63b62be5..3cdf8e1ca0ad 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -58,6 +58,10 @@ struct at24_data { int use_smbus; int use_smbus_write; + ssize_t (*read_func)(struct at24_data *, char *, unsigned int, size_t); + ssize_t (*write_func)(struct at24_data *, + const char *, unsigned int, size_t); + /* * Lock protects against activities from other Linux tasks, * but not from changes by other I2C masters. @@ -109,25 +113,63 @@ MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)"); ((1 << AT24_SIZE_FLAGS | (_flags)) \ << AT24_SIZE_BYTELEN | ilog2(_len)) +/* + * Both reads and writes fail if the previous write didn't complete yet. This + * macro loops a few times waiting at least long enough for one entire page + * write to work while making sure that at least one iteration is run before + * checking the break condition. + * + * It takes two parameters: a variable in which the future timeout in jiffies + * will be stored and a temporary variable holding the time of the last + * iteration of processing the request. Both should be unsigned integers + * holding at least 32 bits. + */ +#define loop_until_timeout(tout, op_time) \ + for (tout = jiffies + msecs_to_jiffies(write_timeout), op_time = 0; \ + op_time ? time_before(op_time, tout) : true; \ + usleep_range(1000, 1500), op_time = jiffies) + static const struct i2c_device_id at24_ids[] = { /* needs 8 addresses as A0-A2 are ignored */ - { "24c00", AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR) }, + { "24c00", AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR) }, /* old variants can't be handled with this generic entry! 
*/ - { "24c01", AT24_DEVICE_MAGIC(1024 / 8, 0) }, - { "24c02", AT24_DEVICE_MAGIC(2048 / 8, 0) }, + { "24c01", AT24_DEVICE_MAGIC(1024 / 8, 0) }, + { "24cs01", AT24_DEVICE_MAGIC(16, + AT24_FLAG_SERIAL | AT24_FLAG_READONLY) }, + { "24c02", AT24_DEVICE_MAGIC(2048 / 8, 0) }, + { "24cs02", AT24_DEVICE_MAGIC(16, + AT24_FLAG_SERIAL | AT24_FLAG_READONLY) }, + { "24mac402", AT24_DEVICE_MAGIC(48 / 8, + AT24_FLAG_MAC | AT24_FLAG_READONLY) }, + { "24mac602", AT24_DEVICE_MAGIC(64 / 8, + AT24_FLAG_MAC | AT24_FLAG_READONLY) }, /* spd is a 24c02 in memory DIMMs */ - { "spd", AT24_DEVICE_MAGIC(2048 / 8, - AT24_FLAG_READONLY | AT24_FLAG_IRUGO) }, - { "24c04", AT24_DEVICE_MAGIC(4096 / 8, 0) }, + { "spd", AT24_DEVICE_MAGIC(2048 / 8, + AT24_FLAG_READONLY | AT24_FLAG_IRUGO) }, + { "24c04", AT24_DEVICE_MAGIC(4096 / 8, 0) }, + { "24cs04", AT24_DEVICE_MAGIC(16, + AT24_FLAG_SERIAL | AT24_FLAG_READONLY) }, /* 24rf08 quirk is handled at i2c-core */ - { "24c08", AT24_DEVICE_MAGIC(8192 / 8, 0) }, - { "24c16", AT24_DEVICE_MAGIC(16384 / 8, 0) }, - { "24c32", AT24_DEVICE_MAGIC(32768 / 8, AT24_FLAG_ADDR16) }, - { "24c64", AT24_DEVICE_MAGIC(65536 / 8, AT24_FLAG_ADDR16) }, - { "24c128", AT24_DEVICE_MAGIC(131072 / 8, AT24_FLAG_ADDR16) }, - { "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) }, - { "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) }, - { "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) }, + { "24c08", AT24_DEVICE_MAGIC(8192 / 8, 0) }, + { "24cs08", AT24_DEVICE_MAGIC(16, + AT24_FLAG_SERIAL | AT24_FLAG_READONLY) }, + { "24c16", AT24_DEVICE_MAGIC(16384 / 8, 0) }, + { "24cs16", AT24_DEVICE_MAGIC(16, + AT24_FLAG_SERIAL | AT24_FLAG_READONLY) }, + { "24c32", AT24_DEVICE_MAGIC(32768 / 8, AT24_FLAG_ADDR16) }, + { "24cs32", AT24_DEVICE_MAGIC(16, + AT24_FLAG_ADDR16 | + AT24_FLAG_SERIAL | + AT24_FLAG_READONLY) }, + { "24c64", AT24_DEVICE_MAGIC(65536 / 8, AT24_FLAG_ADDR16) }, + { "24cs64", AT24_DEVICE_MAGIC(16, + AT24_FLAG_ADDR16 | + AT24_FLAG_SERIAL | + AT24_FLAG_READONLY) }, + { "24c128", AT24_DEVICE_MAGIC(131072 / 8, AT24_FLAG_ADDR16) }, + { "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) }, + { "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) }, + { "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) }, { "at24", 0 }, { /* END OF LIST */ } }; @@ -145,9 +187,22 @@ MODULE_DEVICE_TABLE(acpi, at24_acpi_ids); * This routine supports chips which consume multiple I2C addresses. It * computes the addressing information to be used for a given r/w request. * Assumes that sanity checks for offset happened at sysfs-layer. + * + * Slave address and byte offset derive from the offset. Always + * set the byte address; on a multi-master board, another master + * may have changed the chip's "current" address pointer. + * + * REVISIT some multi-address chips don't rollover page reads to + * the next slave address, so we may need to truncate the count. + * Those chips might need another quirk flag. + * + * If the real hardware used four adjacent 24c02 chips and that + * were misconfigured as one 24c08, that would be a similar effect: + * one "eeprom" file not four, but larger reads would fail when + * they crossed certain pages. 
*/ static struct i2c_client *at24_translate_offset(struct at24_data *at24, - unsigned *offset) + unsigned int *offset) { unsigned i; @@ -162,123 +217,168 @@ static struct i2c_client *at24_translate_offset(struct at24_data *at24, return at24->client[i]; } -static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf, - unsigned offset, size_t count) +static ssize_t at24_eeprom_read_smbus(struct at24_data *at24, char *buf, + unsigned int offset, size_t count) { - struct i2c_msg msg[2]; - u8 msgbuf[2]; + unsigned long timeout, read_time; struct i2c_client *client; + int status; + + client = at24_translate_offset(at24, &offset); + + if (count > io_limit) + count = io_limit; + + /* Smaller eeproms can work given some SMBus extension calls */ + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; + + loop_until_timeout(timeout, read_time) { + status = i2c_smbus_read_i2c_block_data_or_emulated(client, + offset, + count, buf); + + dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n", + count, offset, status, jiffies); + + if (status == count) + return count; + } + + return -ETIMEDOUT; +} + +static ssize_t at24_eeprom_read_i2c(struct at24_data *at24, char *buf, + unsigned int offset, size_t count) +{ unsigned long timeout, read_time; + struct i2c_client *client; + struct i2c_msg msg[2]; int status, i; + u8 msgbuf[2]; memset(msg, 0, sizeof(msg)); - - /* - * REVISIT some multi-address chips don't rollover page reads to - * the next slave address, so we may need to truncate the count. - * Those chips might need another quirk flag. - * - * If the real hardware used four adjacent 24c02 chips and that - * were misconfigured as one 24c08, that would be a similar effect: - * one "eeprom" file not four, but larger reads would fail when - * they crossed certain pages. - */ - - /* - * Slave address and byte offset derive from the offset. Always - * set the byte address; on a multi-master board, another master - * may have changed the chip's "current" address pointer. - */ client = at24_translate_offset(at24, &offset); if (count > io_limit) count = io_limit; - if (at24->use_smbus) { - /* Smaller eeproms can work given some SMBus extension calls */ - if (count > I2C_SMBUS_BLOCK_MAX) - count = I2C_SMBUS_BLOCK_MAX; - } else { - /* - * When we have a better choice than SMBus calls, use a - * combined I2C message. Write address; then read up to - * io_limit data bytes. Note that read page rollover helps us - * here (unlike writes). msgbuf is u8 and will cast to our - * needs. - */ - i = 0; - if (at24->chip.flags & AT24_FLAG_ADDR16) - msgbuf[i++] = offset >> 8; - msgbuf[i++] = offset; - - msg[0].addr = client->addr; - msg[0].buf = msgbuf; - msg[0].len = i; - - msg[1].addr = client->addr; - msg[1].flags = I2C_M_RD; - msg[1].buf = buf; - msg[1].len = count; - } - /* - * Reads fail if the previous write didn't complete yet. We may - * loop a few times until this one succeeds, waiting at least - * long enough for one entire page write to work. + * When we have a better choice than SMBus calls, use a combined I2C + * message. Write address; then read up to io_limit data bytes. Note + * that read page rollover helps us here (unlike writes). msgbuf is + * u8 and will cast to our needs. 
*/ - timeout = jiffies + msecs_to_jiffies(write_timeout); - do { - read_time = jiffies; - if (at24->use_smbus) { - status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset, - count, buf); - } else { - status = i2c_transfer(client->adapter, msg, 2); - if (status == 2) - status = count; - } + i = 0; + if (at24->chip.flags & AT24_FLAG_ADDR16) + msgbuf[i++] = offset >> 8; + msgbuf[i++] = offset; + + msg[0].addr = client->addr; + msg[0].buf = msgbuf; + msg[0].len = i; + + msg[1].addr = client->addr; + msg[1].flags = I2C_M_RD; + msg[1].buf = buf; + msg[1].len = count; + + loop_until_timeout(timeout, read_time) { + status = i2c_transfer(client->adapter, msg, 2); + if (status == 2) + status = count; + dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n", count, offset, status, jiffies); if (status == count) return count; - - usleep_range(1000, 1500); - } while (time_before(read_time, timeout)); + } return -ETIMEDOUT; } -static int at24_read(void *priv, unsigned int off, void *val, size_t count) +static ssize_t at24_eeprom_read_serial(struct at24_data *at24, char *buf, + unsigned int offset, size_t count) { - struct at24_data *at24 = priv; - char *buf = val; + unsigned long timeout, read_time; + struct i2c_client *client; + struct i2c_msg msg[2]; + u8 addrbuf[2]; + int status; - if (unlikely(!count)) - return count; + client = at24_translate_offset(at24, &offset); + + memset(msg, 0, sizeof(msg)); + msg[0].addr = client->addr; + msg[0].buf = addrbuf; /* - * Read data from chip, protecting against concurrent updates - * from this host, but not from other I2C masters. + * The address pointer of the device is shared between the regular + * EEPROM array and the serial number block. The dummy write (part of + * the sequential read protocol) ensures the address pointer is reset + * to the desired position. */ - mutex_lock(&at24->lock); + if (at24->chip.flags & AT24_FLAG_ADDR16) { + /* + * For 16 bit address pointers, the word address must contain + * a '10' sequence in bits 11 and 10 regardless of the + * intended position of the address pointer. + */ + addrbuf[0] = 0x08; + addrbuf[1] = offset; + msg[0].len = 2; + } else { + /* + * Otherwise the word address must begin with a '10' sequence, + * regardless of the intended address. 
+ */ + addrbuf[0] = 0x80 + offset; + msg[0].len = 1; + } - while (count) { - int status; + msg[1].addr = client->addr; + msg[1].flags = I2C_M_RD; + msg[1].buf = buf; + msg[1].len = count; - status = at24_eeprom_read(at24, buf, off, count); - if (status < 0) { - mutex_unlock(&at24->lock); - return status; - } - buf += status; - off += status; - count -= status; + loop_until_timeout(timeout, read_time) { + status = i2c_transfer(client->adapter, msg, 2); + if (status == 2) + return count; } - mutex_unlock(&at24->lock); + return -ETIMEDOUT; +} - return 0; +static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf, + unsigned int offset, size_t count) +{ + unsigned long timeout, read_time; + struct i2c_client *client; + struct i2c_msg msg[2]; + u8 addrbuf[2]; + int status; + + client = at24_translate_offset(at24, &offset); + + memset(msg, 0, sizeof(msg)); + msg[0].addr = client->addr; + msg[0].buf = addrbuf; + addrbuf[0] = 0x90 + offset; + msg[0].len = 1; + msg[1].addr = client->addr; + msg[1].flags = I2C_M_RD; + msg[1].buf = buf; + msg[1].len = count; + + loop_until_timeout(timeout, read_time) { + status = i2c_transfer(client->adapter, msg, 2); + if (status == 2) + return count; + } + + return -ETIMEDOUT; } /* @@ -286,21 +386,15 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count) * chip is normally write protected. But there are plenty of product * variants here, including OTP fuses and partial chip protect. * - * We only use page mode writes; the alternative is sloooow. This routine - * writes at most one page. + * We only use page mode writes; the alternative is sloooow. These routines + * write at most one page. */ -static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf, - unsigned offset, size_t count) + +static size_t at24_adjust_write_count(struct at24_data *at24, + unsigned int offset, size_t count) { - struct i2c_client *client; - struct i2c_msg msg; - ssize_t status = 0; - unsigned long timeout, write_time; unsigned next_page; - /* Get corresponding I2C address and adjust offset */ - client = at24_translate_offset(at24, &offset); - /* write_max is at most a page */ if (count > at24->write_max) count = at24->write_max; @@ -310,62 +404,132 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf, if (offset + count > next_page) count = next_page - offset; - /* If we'll use I2C calls for I/O, set up the message */ - if (!at24->use_smbus) { - int i = 0; + return count; +} - msg.addr = client->addr; - msg.flags = 0; +static ssize_t at24_eeprom_write_smbus_block(struct at24_data *at24, + const char *buf, + unsigned int offset, size_t count) +{ + unsigned long timeout, write_time; + struct i2c_client *client; + ssize_t status = 0; - /* msg.buf is u8 and casts will mask the values */ - msg.buf = at24->writebuf; - if (at24->chip.flags & AT24_FLAG_ADDR16) - msg.buf[i++] = offset >> 8; + client = at24_translate_offset(at24, &offset); + count = at24_adjust_write_count(at24, offset, count); + + loop_until_timeout(timeout, write_time) { + status = i2c_smbus_write_i2c_block_data(client, + offset, count, buf); + if (status == 0) + status = count; + + dev_dbg(&client->dev, "write %zu@%d --> %zd (%ld)\n", + count, offset, status, jiffies); - msg.buf[i++] = offset; - memcpy(&msg.buf[i], buf, count); - msg.len = i + count; + if (status == count) + return count; } - /* - * Writes fail if the previous one didn't complete yet. 
We may - * loop a few times until this one succeeds, waiting at least - * long enough for one entire page write to work. - */ - timeout = jiffies + msecs_to_jiffies(write_timeout); - do { - write_time = jiffies; - if (at24->use_smbus_write) { - switch (at24->use_smbus_write) { - case I2C_SMBUS_I2C_BLOCK_DATA: - status = i2c_smbus_write_i2c_block_data(client, - offset, count, buf); - break; - case I2C_SMBUS_BYTE_DATA: - status = i2c_smbus_write_byte_data(client, - offset, buf[0]); - break; - } - - if (status == 0) - status = count; - } else { - status = i2c_transfer(client->adapter, &msg, 1); - if (status == 1) - status = count; - } + return -ETIMEDOUT; +} + +static ssize_t at24_eeprom_write_smbus_byte(struct at24_data *at24, + const char *buf, + unsigned int offset, size_t count) +{ + unsigned long timeout, write_time; + struct i2c_client *client; + ssize_t status = 0; + + client = at24_translate_offset(at24, &offset); + + loop_until_timeout(timeout, write_time) { + status = i2c_smbus_write_byte_data(client, offset, buf[0]); + if (status == 0) + status = count; + dev_dbg(&client->dev, "write %zu@%d --> %zd (%ld)\n", count, offset, status, jiffies); if (status == count) return count; + } + + return -ETIMEDOUT; +} + +static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf, + unsigned int offset, size_t count) +{ + unsigned long timeout, write_time; + struct i2c_client *client; + struct i2c_msg msg; + ssize_t status = 0; + int i = 0; + + client = at24_translate_offset(at24, &offset); + count = at24_adjust_write_count(at24, offset, count); + + msg.addr = client->addr; + msg.flags = 0; + + /* msg.buf is u8 and casts will mask the values */ + msg.buf = at24->writebuf; + if (at24->chip.flags & AT24_FLAG_ADDR16) + msg.buf[i++] = offset >> 8; - usleep_range(1000, 1500); - } while (time_before(write_time, timeout)); + msg.buf[i++] = offset; + memcpy(&msg.buf[i], buf, count); + msg.len = i + count; + + loop_until_timeout(timeout, write_time) { + status = i2c_transfer(client->adapter, &msg, 1); + if (status == 1) + status = count; + + dev_dbg(&client->dev, "write %zu@%d --> %zd (%ld)\n", + count, offset, status, jiffies); + + if (status == count) + return count; + } return -ETIMEDOUT; } +static int at24_read(void *priv, unsigned int off, void *val, size_t count) +{ + struct at24_data *at24 = priv; + char *buf = val; + + if (unlikely(!count)) + return count; + + /* + * Read data from chip, protecting against concurrent updates + * from this host, but not from other I2C masters. 
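
For reference, a minimal sketch of the loop_until_timeout() retry pattern shared by the at24 read/write helpers in this patch; example_read_byte() is a hypothetical helper assumed to live in at24.c, where the macro and the write_timeout module parameter are visible:

/* Illustrative only, not part of the patch. */
static int example_read_byte(struct i2c_client *client, unsigned int offset)
{
	unsigned long timeout, read_time;
	int status;

	loop_until_timeout(timeout, read_time) {
		/*
		 * Retries until the EEPROM's internal write cycle finishes
		 * or write_timeout elapses.
		 */
		status = i2c_smbus_read_byte_data(client, offset);
		if (status >= 0)
			return status;
	}

	return -ETIMEDOUT;
}
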
+ */ + mutex_lock(&at24->lock); + + while (count) { + int status; + + status = at24->read_func(at24, buf, off, count); + if (status < 0) { + mutex_unlock(&at24->lock); + return status; + } + buf += status; + off += status; + count -= status; + } + + mutex_unlock(&at24->lock); + + return 0; +} + static int at24_write(void *priv, unsigned int off, void *val, size_t count) { struct at24_data *at24 = priv; @@ -383,7 +547,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) while (count) { int status; - status = at24_eeprom_write(at24, buf, off, count); + status = at24->write_func(at24, buf, off, count); if (status < 0) { mutex_unlock(&at24->lock); return status; @@ -400,7 +564,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) #ifdef CONFIG_OF static void at24_get_ofdata(struct i2c_client *client, - struct at24_platform_data *chip) + struct at24_platform_data *chip) { const __be32 *val; struct device_node *node = client->dev.of_node; @@ -415,7 +579,7 @@ static void at24_get_ofdata(struct i2c_client *client, } #else static void at24_get_ofdata(struct i2c_client *client, - struct at24_platform_data *chip) + struct at24_platform_data *chip) { } #endif /* CONFIG_OF */ @@ -518,6 +682,30 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) at24->chip = chip; at24->num_addresses = num_addresses; + if ((chip.flags & AT24_FLAG_SERIAL) && (chip.flags & AT24_FLAG_MAC)) { + dev_err(&client->dev, + "invalid device data - cannot have both AT24_FLAG_SERIAL & AT24_FLAG_MAC."); + return -EINVAL; + } + + if (chip.flags & AT24_FLAG_SERIAL) { + at24->read_func = at24_eeprom_read_serial; + } else if (chip.flags & AT24_FLAG_MAC) { + at24->read_func = at24_eeprom_read_mac; + } else { + at24->read_func = at24->use_smbus ? at24_eeprom_read_smbus + : at24_eeprom_read_i2c; + } + + if (at24->use_smbus) { + if (at24->use_smbus_write == I2C_SMBUS_I2C_BLOCK_DATA) + at24->write_func = at24_eeprom_write_smbus_block; + else + at24->write_func = at24_eeprom_write_smbus_byte; + } else { + at24->write_func = at24_eeprom_write_i2c; + } + writable = !(chip.flags & AT24_FLAG_READONLY); if (writable) { if (!use_smbus || use_smbus_write) { diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c index 4cf8f82cfca2..a70b853fa2c9 100644 --- a/drivers/misc/genwqe/card_base.c +++ b/drivers/misc/genwqe/card_base.c @@ -182,7 +182,7 @@ static void genwqe_dev_free(struct genwqe_dev *cd) */ static int genwqe_bus_reset(struct genwqe_dev *cd) { - int bars, rc = 0; + int rc = 0; struct pci_dev *pci_dev = cd->pci_dev; void __iomem *mmio; @@ -193,8 +193,7 @@ static int genwqe_bus_reset(struct genwqe_dev *cd) cd->mmio = NULL; pci_iounmap(pci_dev, mmio); - bars = pci_select_bars(pci_dev, IORESOURCE_MEM); - pci_release_selected_regions(pci_dev, bars); + pci_release_mem_regions(pci_dev); /* * Firmware/BIOS might change memory mapping during bus reset. 
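
The genwqe hunks above and below replace the pci_select_bars()/pci_request_selected_regions() pairs with the pci_request_mem_regions()/pci_release_mem_regions() helpers. A minimal sketch of that probe-time pattern, assuming a made-up driver name and function for illustration:

/* Illustrative only, not part of the patch. */
static int example_pci_setup(struct pci_dev *pci_dev)
{
	int err;

	err = pci_enable_device_mem(pci_dev);
	if (err)
		return err;

	/*
	 * Reserve all memory BARs in one call instead of first building a
	 * BAR mask with pci_select_bars(pci_dev, IORESOURCE_MEM).
	 */
	err = pci_request_mem_regions(pci_dev, "example_driver");
	if (err) {
		pci_disable_device(pci_dev);
		return err;
	}

	return 0;
}

Teardown mirrors this with pci_release_mem_regions() before pci_disable_device(), as in genwqe_pci_remove().
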
@@ -218,7 +217,7 @@ static int genwqe_bus_reset(struct genwqe_dev *cd) GENWQE_INJECT_GFIR_FATAL | GENWQE_INJECT_GFIR_INFO); - rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); + rc = pci_request_mem_regions(pci_dev, genwqe_driver_name); if (rc) { dev_err(&pci_dev->dev, "[%s] err: request bars failed (%d)\n", __func__, rc); @@ -1068,10 +1067,9 @@ static int genwqe_health_check_stop(struct genwqe_dev *cd) */ static int genwqe_pci_setup(struct genwqe_dev *cd) { - int err, bars; + int err; struct pci_dev *pci_dev = cd->pci_dev; - bars = pci_select_bars(pci_dev, IORESOURCE_MEM); err = pci_enable_device_mem(pci_dev); if (err) { dev_err(&pci_dev->dev, @@ -1080,7 +1078,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd) } /* Reserve PCI I/O and memory resources */ - err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); + err = pci_request_mem_regions(pci_dev, genwqe_driver_name); if (err) { dev_err(&pci_dev->dev, "[%s] err: request bars failed (%d)\n", __func__, err); @@ -1142,7 +1140,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd) out_iounmap: pci_iounmap(pci_dev, cd->mmio); out_release_resources: - pci_release_selected_regions(pci_dev, bars); + pci_release_mem_regions(pci_dev); err_disable_device: pci_disable_device(pci_dev); err_out: @@ -1154,14 +1152,12 @@ static int genwqe_pci_setup(struct genwqe_dev *cd) */ static void genwqe_pci_remove(struct genwqe_dev *cd) { - int bars; struct pci_dev *pci_dev = cd->pci_dev; if (cd->mmio) pci_iounmap(pci_dev, cd->mmio); - bars = pci_select_bars(pci_dev, IORESOURCE_MEM); - pci_release_selected_regions(pci_dev, bars); + pci_release_mem_regions(pci_dev); pci_disable_device(pci_dev); } diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c deleted file mode 100644 index 0a5cbbe12452..000000000000 --- a/drivers/misc/lkdtm.c +++ /dev/null @@ -1,1023 +0,0 @@ -/* - * Kprobe module for testing crash dumps - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2006 - * - * Author: Ankita Garg <ankita@in.ibm.com> - * - * This module induces system failures at predefined crashpoints to - * evaluate the reliability of crash dumps obtained using different dumping - * solutions. 
- * - * It is adapted from the Linux Kernel Dump Test Tool by - * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net> - * - * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net> - * - * See Documentation/fault-injection/provoke-crashes.txt for instructions - */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/fs.h> -#include <linux/module.h> -#include <linux/buffer_head.h> -#include <linux/kprobes.h> -#include <linux/list.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/hrtimer.h> -#include <linux/slab.h> -#include <scsi/scsi_cmnd.h> -#include <linux/debugfs.h> -#include <linux/vmalloc.h> -#include <linux/mman.h> -#include <asm/cacheflush.h> - -#ifdef CONFIG_IDE -#include <linux/ide.h> -#endif - -/* - * Make sure our attempts to over run the kernel stack doesn't trigger - * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we - * recurse past the end of THREAD_SIZE by default. - */ -#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) -#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2) -#else -#define REC_STACK_SIZE (THREAD_SIZE / 8) -#endif -#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2) - -#define DEFAULT_COUNT 10 -#define EXEC_SIZE 64 - -enum cname { - CN_INVALID, - CN_INT_HARDWARE_ENTRY, - CN_INT_HW_IRQ_EN, - CN_INT_TASKLET_ENTRY, - CN_FS_DEVRW, - CN_MEM_SWAPOUT, - CN_TIMERADD, - CN_SCSI_DISPATCH_CMD, - CN_IDE_CORE_CP, - CN_DIRECT, -}; - -enum ctype { - CT_NONE, - CT_PANIC, - CT_BUG, - CT_WARNING, - CT_EXCEPTION, - CT_LOOP, - CT_OVERFLOW, - CT_CORRUPT_STACK, - CT_UNALIGNED_LOAD_STORE_WRITE, - CT_OVERWRITE_ALLOCATION, - CT_WRITE_AFTER_FREE, - CT_READ_AFTER_FREE, - CT_WRITE_BUDDY_AFTER_FREE, - CT_READ_BUDDY_AFTER_FREE, - CT_SOFTLOCKUP, - CT_HARDLOCKUP, - CT_SPINLOCKUP, - CT_HUNG_TASK, - CT_EXEC_DATA, - CT_EXEC_STACK, - CT_EXEC_KMALLOC, - CT_EXEC_VMALLOC, - CT_EXEC_USERSPACE, - CT_ACCESS_USERSPACE, - CT_WRITE_RO, - CT_WRITE_RO_AFTER_INIT, - CT_WRITE_KERN, - CT_WRAP_ATOMIC -}; - -static char* cp_name[] = { - "INT_HARDWARE_ENTRY", - "INT_HW_IRQ_EN", - "INT_TASKLET_ENTRY", - "FS_DEVRW", - "MEM_SWAPOUT", - "TIMERADD", - "SCSI_DISPATCH_CMD", - "IDE_CORE_CP", - "DIRECT", -}; - -static char* cp_type[] = { - "PANIC", - "BUG", - "WARNING", - "EXCEPTION", - "LOOP", - "OVERFLOW", - "CORRUPT_STACK", - "UNALIGNED_LOAD_STORE_WRITE", - "OVERWRITE_ALLOCATION", - "WRITE_AFTER_FREE", - "READ_AFTER_FREE", - "WRITE_BUDDY_AFTER_FREE", - "READ_BUDDY_AFTER_FREE", - "SOFTLOCKUP", - "HARDLOCKUP", - "SPINLOCKUP", - "HUNG_TASK", - "EXEC_DATA", - "EXEC_STACK", - "EXEC_KMALLOC", - "EXEC_VMALLOC", - "EXEC_USERSPACE", - "ACCESS_USERSPACE", - "WRITE_RO", - "WRITE_RO_AFTER_INIT", - "WRITE_KERN", - "WRAP_ATOMIC" -}; - -static struct jprobe lkdtm; - -static int lkdtm_parse_commandline(void); -static void lkdtm_handler(void); - -static char* cpoint_name; -static char* cpoint_type; -static int cpoint_count = DEFAULT_COUNT; -static int recur_count = REC_NUM_DEFAULT; - -static enum cname cpoint = CN_INVALID; -static enum ctype cptype = CT_NONE; -static int count = DEFAULT_COUNT; -static DEFINE_SPINLOCK(count_lock); -static DEFINE_SPINLOCK(lock_me_up); - -static u8 data_area[EXEC_SIZE]; - -static const unsigned long rodata = 0xAA55AA55; -static unsigned long ro_after_init __ro_after_init = 0x55AA5500; - -module_param(recur_count, int, 0644); -MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test"); -module_param(cpoint_name, charp, 0444); -MODULE_PARM_DESC(cpoint_name, " Crash Point, 
where kernel is to be crashed"); -module_param(cpoint_type, charp, 0444); -MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\ - "hitting the crash point"); -module_param(cpoint_count, int, 0644); -MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\ - "crash point is to be hit to trigger action"); - -static unsigned int jp_do_irq(unsigned int irq) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} - -static irqreturn_t jp_handle_irq_event(unsigned int irq, - struct irqaction *action) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} - -static void jp_tasklet_action(struct softirq_action *a) -{ - lkdtm_handler(); - jprobe_return(); -} - -static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) -{ - lkdtm_handler(); - jprobe_return(); -} - -struct scan_control; - -static unsigned long jp_shrink_inactive_list(unsigned long max_scan, - struct zone *zone, - struct scan_control *sc) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} - -static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim, - const enum hrtimer_mode mode) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} - -static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} - -#ifdef CONFIG_IDE -static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, - struct block_device *bdev, unsigned int cmd, - unsigned long arg) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} -#endif - -/* Return the crashpoint number or NONE if the name is invalid */ -static enum ctype parse_cp_type(const char *what, size_t count) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(cp_type); i++) { - if (!strcmp(what, cp_type[i])) - return i + 1; - } - - return CT_NONE; -} - -static const char *cp_type_to_str(enum ctype type) -{ - if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type)) - return "None"; - - return cp_type[type - 1]; -} - -static const char *cp_name_to_str(enum cname name) -{ - if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name)) - return "INVALID"; - - return cp_name[name - 1]; -} - - -static int lkdtm_parse_commandline(void) -{ - int i; - unsigned long flags; - - if (cpoint_count < 1 || recur_count < 1) - return -EINVAL; - - spin_lock_irqsave(&count_lock, flags); - count = cpoint_count; - spin_unlock_irqrestore(&count_lock, flags); - - /* No special parameters */ - if (!cpoint_type && !cpoint_name) - return 0; - - /* Neither or both of these need to be set */ - if (!cpoint_type || !cpoint_name) - return -EINVAL; - - cptype = parse_cp_type(cpoint_type, strlen(cpoint_type)); - if (cptype == CT_NONE) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(cp_name); i++) { - if (!strcmp(cpoint_name, cp_name[i])) { - cpoint = i + 1; - return 0; - } - } - - /* Could not find a valid crash point */ - return -EINVAL; -} - -static int recursive_loop(int remaining) -{ - char buf[REC_STACK_SIZE]; - - /* Make sure compiler does not optimize this away. */ - memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE); - if (!remaining) - return 0; - else - return recursive_loop(remaining - 1); -} - -static void do_nothing(void) -{ - return; -} - -/* Must immediately follow do_nothing for size calculuations to work out. */ -static void do_overwritten(void) -{ - pr_info("do_overwritten wasn't overwritten!\n"); - return; -} - -static noinline void corrupt_stack(void) -{ - /* Use default char array length that triggers stack protection. 
*/ - char data[8]; - - memset((void *)data, 0, 64); -} - -static void noinline execute_location(void *dst) -{ - void (*func)(void) = dst; - - pr_info("attempting ok execution at %p\n", do_nothing); - do_nothing(); - - memcpy(dst, do_nothing, EXEC_SIZE); - flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); - pr_info("attempting bad execution at %p\n", func); - func(); -} - -static void execute_user_location(void *dst) -{ - /* Intentionally crossing kernel/user memory boundary. */ - void (*func)(void) = dst; - - pr_info("attempting ok execution at %p\n", do_nothing); - do_nothing(); - - if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE)) - return; - flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); - pr_info("attempting bad execution at %p\n", func); - func(); -} - -static void lkdtm_do_action(enum ctype which) -{ - switch (which) { - case CT_PANIC: - panic("dumptest"); - break; - case CT_BUG: - BUG(); - break; - case CT_WARNING: - WARN_ON(1); - break; - case CT_EXCEPTION: - *((int *) 0) = 0; - break; - case CT_LOOP: - for (;;) - ; - break; - case CT_OVERFLOW: - (void) recursive_loop(recur_count); - break; - case CT_CORRUPT_STACK: - corrupt_stack(); - break; - case CT_UNALIGNED_LOAD_STORE_WRITE: { - static u8 data[5] __attribute__((aligned(4))) = {1, 2, - 3, 4, 5}; - u32 *p; - u32 val = 0x12345678; - - p = (u32 *)(data + 1); - if (*p == 0) - val = 0x87654321; - *p = val; - break; - } - case CT_OVERWRITE_ALLOCATION: { - size_t len = 1020; - u32 *data = kmalloc(len, GFP_KERNEL); - - data[1024 / sizeof(u32)] = 0x12345678; - kfree(data); - break; - } - case CT_WRITE_AFTER_FREE: { - int *base, *again; - size_t len = 1024; - /* - * The slub allocator uses the first word to store the free - * pointer in some configurations. Use the middle of the - * allocation to avoid running into the freelist - */ - size_t offset = (len / sizeof(*base)) / 2; - - base = kmalloc(len, GFP_KERNEL); - pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]); - pr_info("Attempting bad write to freed memory at %p\n", - &base[offset]); - kfree(base); - base[offset] = 0x0abcdef0; - /* Attempt to notice the overwrite. */ - again = kmalloc(len, GFP_KERNEL); - kfree(again); - if (again != base) - pr_info("Hmm, didn't get the same memory range.\n"); - - break; - } - case CT_READ_AFTER_FREE: { - int *base, *val, saw; - size_t len = 1024; - /* - * The slub allocator uses the first word to store the free - * pointer in some configurations. Use the middle of the - * allocation to avoid running into the freelist - */ - size_t offset = (len / sizeof(*base)) / 2; - - base = kmalloc(len, GFP_KERNEL); - if (!base) - break; - - val = kmalloc(len, GFP_KERNEL); - if (!val) { - kfree(base); - break; - } - - *val = 0x12345678; - base[offset] = *val; - pr_info("Value in memory before free: %x\n", base[offset]); - - kfree(base); - - pr_info("Attempting bad read from freed memory\n"); - saw = base[offset]; - if (saw != *val) { - /* Good! Poisoning happened, so declare a win. */ - pr_info("Memory correctly poisoned (%x)\n", saw); - BUG(); - } - pr_info("Memory was not poisoned\n"); - - kfree(val); - break; - } - case CT_WRITE_BUDDY_AFTER_FREE: { - unsigned long p = __get_free_page(GFP_KERNEL); - if (!p) - break; - pr_info("Writing to the buddy page before free\n"); - memset((void *)p, 0x3, PAGE_SIZE); - free_page(p); - schedule(); - pr_info("Attempting bad write to the buddy page after free\n"); - memset((void *)p, 0x78, PAGE_SIZE); - /* Attempt to notice the overwrite. 
*/ - p = __get_free_page(GFP_KERNEL); - free_page(p); - schedule(); - - break; - } - case CT_READ_BUDDY_AFTER_FREE: { - unsigned long p = __get_free_page(GFP_KERNEL); - int saw, *val; - int *base; - - if (!p) - break; - - val = kmalloc(1024, GFP_KERNEL); - if (!val) { - free_page(p); - break; - } - - base = (int *)p; - - *val = 0x12345678; - base[0] = *val; - pr_info("Value in memory before free: %x\n", base[0]); - free_page(p); - pr_info("Attempting to read from freed memory\n"); - saw = base[0]; - if (saw != *val) { - /* Good! Poisoning happened, so declare a win. */ - pr_info("Memory correctly poisoned (%x)\n", saw); - BUG(); - } - pr_info("Buddy page was not poisoned\n"); - - kfree(val); - break; - } - case CT_SOFTLOCKUP: - preempt_disable(); - for (;;) - cpu_relax(); - break; - case CT_HARDLOCKUP: - local_irq_disable(); - for (;;) - cpu_relax(); - break; - case CT_SPINLOCKUP: - /* Must be called twice to trigger. */ - spin_lock(&lock_me_up); - /* Let sparse know we intended to exit holding the lock. */ - __release(&lock_me_up); - break; - case CT_HUNG_TASK: - set_current_state(TASK_UNINTERRUPTIBLE); - schedule(); - break; - case CT_EXEC_DATA: - execute_location(data_area); - break; - case CT_EXEC_STACK: { - u8 stack_area[EXEC_SIZE]; - execute_location(stack_area); - break; - } - case CT_EXEC_KMALLOC: { - u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL); - execute_location(kmalloc_area); - kfree(kmalloc_area); - break; - } - case CT_EXEC_VMALLOC: { - u32 *vmalloc_area = vmalloc(EXEC_SIZE); - execute_location(vmalloc_area); - vfree(vmalloc_area); - break; - } - case CT_EXEC_USERSPACE: { - unsigned long user_addr; - - user_addr = vm_mmap(NULL, 0, PAGE_SIZE, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_ANONYMOUS | MAP_PRIVATE, 0); - if (user_addr >= TASK_SIZE) { - pr_warn("Failed to allocate user memory\n"); - return; - } - execute_user_location((void *)user_addr); - vm_munmap(user_addr, PAGE_SIZE); - break; - } - case CT_ACCESS_USERSPACE: { - unsigned long user_addr, tmp = 0; - unsigned long *ptr; - - user_addr = vm_mmap(NULL, 0, PAGE_SIZE, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_ANONYMOUS | MAP_PRIVATE, 0); - if (user_addr >= TASK_SIZE) { - pr_warn("Failed to allocate user memory\n"); - return; - } - - if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) { - pr_warn("copy_to_user failed\n"); - vm_munmap(user_addr, PAGE_SIZE); - return; - } - - ptr = (unsigned long *)user_addr; - - pr_info("attempting bad read at %p\n", ptr); - tmp = *ptr; - tmp += 0xc0dec0de; - - pr_info("attempting bad write at %p\n", ptr); - *ptr = tmp; - - vm_munmap(user_addr, PAGE_SIZE); - - break; - } - case CT_WRITE_RO: { - /* Explicitly cast away "const" for the test. */ - unsigned long *ptr = (unsigned long *)&rodata; - - pr_info("attempting bad rodata write at %p\n", ptr); - *ptr ^= 0xabcd1234; - - break; - } - case CT_WRITE_RO_AFTER_INIT: { - unsigned long *ptr = &ro_after_init; - - /* - * Verify we were written to during init. Since an Oops - * is considered a "success", a failure is to just skip the - * real test. 
- */ - if ((*ptr & 0xAA) != 0xAA) { - pr_info("%p was NOT written during init!?\n", ptr); - break; - } - - pr_info("attempting bad ro_after_init write at %p\n", ptr); - *ptr ^= 0xabcd1234; - - break; - } - case CT_WRITE_KERN: { - size_t size; - unsigned char *ptr; - - size = (unsigned long)do_overwritten - - (unsigned long)do_nothing; - ptr = (unsigned char *)do_overwritten; - - pr_info("attempting bad %zu byte write at %p\n", size, ptr); - memcpy(ptr, (unsigned char *)do_nothing, size); - flush_icache_range((unsigned long)ptr, - (unsigned long)(ptr + size)); - - do_overwritten(); - break; - } - case CT_WRAP_ATOMIC: { - atomic_t under = ATOMIC_INIT(INT_MIN); - atomic_t over = ATOMIC_INIT(INT_MAX); - - pr_info("attempting atomic underflow\n"); - atomic_dec(&under); - pr_info("attempting atomic overflow\n"); - atomic_inc(&over); - - return; - } - case CT_NONE: - default: - break; - } - -} - -static void lkdtm_handler(void) -{ - unsigned long flags; - bool do_it = false; - - spin_lock_irqsave(&count_lock, flags); - count--; - pr_info("Crash point %s of type %s hit, trigger in %d rounds\n", - cp_name_to_str(cpoint), cp_type_to_str(cptype), count); - - if (count == 0) { - do_it = true; - count = cpoint_count; - } - spin_unlock_irqrestore(&count_lock, flags); - - if (do_it) - lkdtm_do_action(cptype); -} - -static int lkdtm_register_cpoint(enum cname which) -{ - int ret; - - cpoint = CN_INVALID; - if (lkdtm.entry != NULL) - unregister_jprobe(&lkdtm); - - switch (which) { - case CN_DIRECT: - lkdtm_do_action(cptype); - return 0; - case CN_INT_HARDWARE_ENTRY: - lkdtm.kp.symbol_name = "do_IRQ"; - lkdtm.entry = (kprobe_opcode_t*) jp_do_irq; - break; - case CN_INT_HW_IRQ_EN: - lkdtm.kp.symbol_name = "handle_IRQ_event"; - lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event; - break; - case CN_INT_TASKLET_ENTRY: - lkdtm.kp.symbol_name = "tasklet_action"; - lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action; - break; - case CN_FS_DEVRW: - lkdtm.kp.symbol_name = "ll_rw_block"; - lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block; - break; - case CN_MEM_SWAPOUT: - lkdtm.kp.symbol_name = "shrink_inactive_list"; - lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list; - break; - case CN_TIMERADD: - lkdtm.kp.symbol_name = "hrtimer_start"; - lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start; - break; - case CN_SCSI_DISPATCH_CMD: - lkdtm.kp.symbol_name = "scsi_dispatch_cmd"; - lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd; - break; - case CN_IDE_CORE_CP: -#ifdef CONFIG_IDE - lkdtm.kp.symbol_name = "generic_ide_ioctl"; - lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl; -#else - pr_info("Crash point not available\n"); - return -EINVAL; -#endif - break; - default: - pr_info("Invalid Crash Point\n"); - return -EINVAL; - } - - cpoint = which; - if ((ret = register_jprobe(&lkdtm)) < 0) { - pr_info("Couldn't register jprobe\n"); - cpoint = CN_INVALID; - } - - return ret; -} - -static ssize_t do_register_entry(enum cname which, struct file *f, - const char __user *user_buf, size_t count, loff_t *off) -{ - char *buf; - int err; - - if (count >= PAGE_SIZE) - return -EINVAL; - - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; - if (copy_from_user(buf, user_buf, count)) { - free_page((unsigned long) buf); - return -EFAULT; - } - /* NULL-terminate and remove enter */ - buf[count] = '\0'; - strim(buf); - - cptype = parse_cp_type(buf, count); - free_page((unsigned long) buf); - - if (cptype == CT_NONE) - return -EINVAL; - - err = lkdtm_register_cpoint(which); - if (err < 0) - return err; 
- - *off += count; - - return count; -} - -/* Generic read callback that just prints out the available crash types */ -static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, - size_t count, loff_t *off) -{ - char *buf; - int i, n, out; - - buf = (char *)__get_free_page(GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - - n = snprintf(buf, PAGE_SIZE, "Available crash types:\n"); - for (i = 0; i < ARRAY_SIZE(cp_type); i++) - n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]); - buf[n] = '\0'; - - out = simple_read_from_buffer(user_buf, count, off, - buf, n); - free_page((unsigned long) buf); - - return out; -} - -static int lkdtm_debugfs_open(struct inode *inode, struct file *file) -{ - return 0; -} - - -static ssize_t int_hardware_entry(struct file *f, const char __user *buf, - size_t count, loff_t *off) -{ - return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off); -} - -static ssize_t int_hw_irq_en(struct file *f, const char __user *buf, - size_t count, loff_t *off) -{ - return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off); -} - -static ssize_t int_tasklet_entry(struct file *f, const char __user *buf, - size_t count, loff_t *off) -{ - return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off); -} - -static ssize_t fs_devrw_entry(struct file *f, const char __user *buf, - size_t count, loff_t *off) -{ - return do_register_entry(CN_FS_DEVRW, f, buf, count, off); -} - -static ssize_t mem_swapout_entry(struct file *f, const char __user *buf, - size_t count, loff_t *off) -{ - return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off); -} - -static ssize_t timeradd_entry(struct file *f, const char __user *buf, - size_t count, loff_t *off) -{ - return do_register_entry(CN_TIMERADD, f, buf, count, off); -} - -static ssize_t scsi_dispatch_cmd_entry(struct file *f, - const char __user *buf, size_t count, loff_t *off) -{ - return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off); -} - -static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf, - size_t count, loff_t *off) -{ - return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off); -} - -/* Special entry to just crash directly. 
Available without KPROBEs */ -static ssize_t direct_entry(struct file *f, const char __user *user_buf, - size_t count, loff_t *off) -{ - enum ctype type; - char *buf; - - if (count >= PAGE_SIZE) - return -EINVAL; - if (count < 1) - return -EINVAL; - - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; - if (copy_from_user(buf, user_buf, count)) { - free_page((unsigned long) buf); - return -EFAULT; - } - /* NULL-terminate and remove enter */ - buf[count] = '\0'; - strim(buf); - - type = parse_cp_type(buf, count); - free_page((unsigned long) buf); - if (type == CT_NONE) - return -EINVAL; - - pr_info("Performing direct entry %s\n", cp_type_to_str(type)); - lkdtm_do_action(type); - *off += count; - - return count; -} - -struct crash_entry { - const char *name; - const struct file_operations fops; -}; - -static const struct crash_entry crash_entries[] = { - {"DIRECT", {.read = lkdtm_debugfs_read, - .llseek = generic_file_llseek, - .open = lkdtm_debugfs_open, - .write = direct_entry} }, - {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read, - .llseek = generic_file_llseek, - .open = lkdtm_debugfs_open, - .write = int_hardware_entry} }, - {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read, - .llseek = generic_file_llseek, - .open = lkdtm_debugfs_open, - .write = int_hw_irq_en} }, - {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read, - .llseek = generic_file_llseek, - .open = lkdtm_debugfs_open, - .write = int_tasklet_entry} }, - {"FS_DEVRW", {.read = lkdtm_debugfs_read, - .llseek = generic_file_llseek, - .open = lkdtm_debugfs_open, - .write = fs_devrw_entry} }, - {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read, - .llseek = generic_file_llseek, - .open = lkdtm_debugfs_open, - .write = mem_swapout_entry} }, - {"TIMERADD", {.read = lkdtm_debugfs_read, - .llseek = generic_file_llseek, - .open = lkdtm_debugfs_open, - .write = timeradd_entry} }, - {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read, - .llseek = generic_file_llseek, - .open = lkdtm_debugfs_open, - .write = scsi_dispatch_cmd_entry} }, - {"IDE_CORE_CP", {.read = lkdtm_debugfs_read, - .llseek = generic_file_llseek, - .open = lkdtm_debugfs_open, - .write = ide_core_cp_entry} }, -}; - -static struct dentry *lkdtm_debugfs_root; - -static int __init lkdtm_module_init(void) -{ - int ret = -EINVAL; - int n_debugfs_entries = 1; /* Assume only the direct entry */ - int i; - - /* Make sure we can write to __ro_after_init values during __init */ - ro_after_init |= 0xAA; - - /* Register debugfs interface */ - lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL); - if (!lkdtm_debugfs_root) { - pr_err("creating root dir failed\n"); - return -ENODEV; - } - -#ifdef CONFIG_KPROBES - n_debugfs_entries = ARRAY_SIZE(crash_entries); -#endif - - for (i = 0; i < n_debugfs_entries; i++) { - const struct crash_entry *cur = &crash_entries[i]; - struct dentry *de; - - de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, - NULL, &cur->fops); - if (de == NULL) { - pr_err("could not create %s\n", cur->name); - goto out_err; - } - } - - if (lkdtm_parse_commandline() == -EINVAL) { - pr_info("Invalid command\n"); - goto out_err; - } - - if (cpoint != CN_INVALID && cptype != CT_NONE) { - ret = lkdtm_register_cpoint(cpoint); - if (ret < 0) { - pr_info("Invalid crash point %d\n", cpoint); - goto out_err; - } - pr_info("Crash point %s of type %s registered\n", - cpoint_name, cpoint_type); - } else { - pr_info("No crash points registered, enable through debugfs\n"); - } - - return 0; - -out_err: - debugfs_remove_recursive(lkdtm_debugfs_root); - 
return ret; -} - -static void __exit lkdtm_module_exit(void) -{ - debugfs_remove_recursive(lkdtm_debugfs_root); - - unregister_jprobe(&lkdtm); - pr_info("Crash point unregistered\n"); -} - -module_init(lkdtm_module_init); -module_exit(lkdtm_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Kprobe module for testing crash dumps"); diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h new file mode 100644 index 000000000000..fdf954c2107f --- /dev/null +++ b/drivers/misc/lkdtm.h @@ -0,0 +1,60 @@ +#ifndef __LKDTM_H +#define __LKDTM_H + +#define pr_fmt(fmt) "lkdtm: " fmt + +#include <linux/kernel.h> + +/* lkdtm_bugs.c */ +void __init lkdtm_bugs_init(int *recur_param); +void lkdtm_PANIC(void); +void lkdtm_BUG(void); +void lkdtm_WARNING(void); +void lkdtm_EXCEPTION(void); +void lkdtm_LOOP(void); +void lkdtm_OVERFLOW(void); +void lkdtm_CORRUPT_STACK(void); +void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void); +void lkdtm_SOFTLOCKUP(void); +void lkdtm_HARDLOCKUP(void); +void lkdtm_SPINLOCKUP(void); +void lkdtm_HUNG_TASK(void); +void lkdtm_ATOMIC_UNDERFLOW(void); +void lkdtm_ATOMIC_OVERFLOW(void); + +/* lkdtm_heap.c */ +void lkdtm_OVERWRITE_ALLOCATION(void); +void lkdtm_WRITE_AFTER_FREE(void); +void lkdtm_READ_AFTER_FREE(void); +void lkdtm_WRITE_BUDDY_AFTER_FREE(void); +void lkdtm_READ_BUDDY_AFTER_FREE(void); + +/* lkdtm_perms.c */ +void __init lkdtm_perms_init(void); +void lkdtm_WRITE_RO(void); +void lkdtm_WRITE_RO_AFTER_INIT(void); +void lkdtm_WRITE_KERN(void); +void lkdtm_EXEC_DATA(void); +void lkdtm_EXEC_STACK(void); +void lkdtm_EXEC_KMALLOC(void); +void lkdtm_EXEC_VMALLOC(void); +void lkdtm_EXEC_RODATA(void); +void lkdtm_EXEC_USERSPACE(void); +void lkdtm_ACCESS_USERSPACE(void); + +/* lkdtm_rodata.c */ +void lkdtm_rodata_do_nothing(void); + +/* lkdtm_usercopy.c */ +void __init lkdtm_usercopy_init(void); +void __exit lkdtm_usercopy_exit(void); +void lkdtm_USERCOPY_HEAP_SIZE_TO(void); +void lkdtm_USERCOPY_HEAP_SIZE_FROM(void); +void lkdtm_USERCOPY_HEAP_FLAG_TO(void); +void lkdtm_USERCOPY_HEAP_FLAG_FROM(void); +void lkdtm_USERCOPY_STACK_FRAME_TO(void); +void lkdtm_USERCOPY_STACK_FRAME_FROM(void); +void lkdtm_USERCOPY_STACK_BEYOND(void); +void lkdtm_USERCOPY_KERNEL(void); + +#endif diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c new file mode 100644 index 000000000000..182ae1894b32 --- /dev/null +++ b/drivers/misc/lkdtm_bugs.c @@ -0,0 +1,148 @@ +/* + * This is for all the tests related to logic bugs (e.g. bad dereferences, + * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and + * lockups) along with other things that don't fit well into existing LKDTM + * test source files. + */ +#include "lkdtm.h" +#include <linux/sched.h> + +/* + * Make sure our attempts to over run the kernel stack doesn't trigger + * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we + * recurse past the end of THREAD_SIZE by default. + */ +#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) +#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2) +#else +#define REC_STACK_SIZE (THREAD_SIZE / 8) +#endif +#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2) + +static int recur_count = REC_NUM_DEFAULT; + +static DEFINE_SPINLOCK(lock_me_up); + +static int recursive_loop(int remaining) +{ + char buf[REC_STACK_SIZE]; + + /* Make sure compiler does not optimize this away. 
*/ + memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE); + if (!remaining) + return 0; + else + return recursive_loop(remaining - 1); +} + +/* If the depth is negative, use the default, otherwise keep parameter. */ +void __init lkdtm_bugs_init(int *recur_param) +{ + if (*recur_param < 0) + *recur_param = recur_count; + else + recur_count = *recur_param; +} + +void lkdtm_PANIC(void) +{ + panic("dumptest"); +} + +void lkdtm_BUG(void) +{ + BUG(); +} + +void lkdtm_WARNING(void) +{ + WARN_ON(1); +} + +void lkdtm_EXCEPTION(void) +{ + *((int *) 0) = 0; +} + +void lkdtm_LOOP(void) +{ + for (;;) + ; +} + +void lkdtm_OVERFLOW(void) +{ + (void) recursive_loop(recur_count); +} + +noinline void lkdtm_CORRUPT_STACK(void) +{ + /* Use default char array length that triggers stack protection. */ + char data[8]; + + memset((void *)data, 0, 64); +} + +void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void) +{ + static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5}; + u32 *p; + u32 val = 0x12345678; + + p = (u32 *)(data + 1); + if (*p == 0) + val = 0x87654321; + *p = val; +} + +void lkdtm_SOFTLOCKUP(void) +{ + preempt_disable(); + for (;;) + cpu_relax(); +} + +void lkdtm_HARDLOCKUP(void) +{ + local_irq_disable(); + for (;;) + cpu_relax(); +} + +void lkdtm_SPINLOCKUP(void) +{ + /* Must be called twice to trigger. */ + spin_lock(&lock_me_up); + /* Let sparse know we intended to exit holding the lock. */ + __release(&lock_me_up); +} + +void lkdtm_HUNG_TASK(void) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule(); +} + +void lkdtm_ATOMIC_UNDERFLOW(void) +{ + atomic_t under = ATOMIC_INIT(INT_MIN); + + pr_info("attempting good atomic increment\n"); + atomic_inc(&under); + atomic_dec(&under); + + pr_info("attempting bad atomic underflow\n"); + atomic_dec(&under); +} + +void lkdtm_ATOMIC_OVERFLOW(void) +{ + atomic_t over = ATOMIC_INIT(INT_MAX); + + pr_info("attempting good atomic decrement\n"); + atomic_dec(&over); + atomic_inc(&over); + + pr_info("attempting bad atomic overflow\n"); + atomic_inc(&over); +} diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c new file mode 100644 index 000000000000..f9154b8d67f6 --- /dev/null +++ b/drivers/misc/lkdtm_core.c @@ -0,0 +1,544 @@ +/* + * Linux Kernel Dump Test Module for testing kernel crashes conditions: + * induces system failures at predefined crashpoints and under predefined + * operational conditions in order to evaluate the reliability of kernel + * sanity checking and crash dumps obtained using different dumping + * solutions. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2006 + * + * Author: Ankita Garg <ankita@in.ibm.com> + * + * It is adapted from the Linux Kernel Dump Test Tool by + * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net> + * + * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net> + * + * See Documentation/fault-injection/provoke-crashes.txt for instructions + */ +#include "lkdtm.h" +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/buffer_head.h> +#include <linux/kprobes.h> +#include <linux/list.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/hrtimer.h> +#include <linux/slab.h> +#include <scsi/scsi_cmnd.h> +#include <linux/debugfs.h> + +#ifdef CONFIG_IDE +#include <linux/ide.h> +#endif + +#define DEFAULT_COUNT 10 + +static int lkdtm_debugfs_open(struct inode *inode, struct file *file); +static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, + size_t count, loff_t *off); +static ssize_t direct_entry(struct file *f, const char __user *user_buf, + size_t count, loff_t *off); + +#ifdef CONFIG_KPROBES +static void lkdtm_handler(void); +static ssize_t lkdtm_debugfs_entry(struct file *f, + const char __user *user_buf, + size_t count, loff_t *off); + + +/* jprobe entry point handlers. */ +static unsigned int jp_do_irq(unsigned int irq) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} + +static irqreturn_t jp_handle_irq_event(unsigned int irq, + struct irqaction *action) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} + +static void jp_tasklet_action(struct softirq_action *a) +{ + lkdtm_handler(); + jprobe_return(); +} + +static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) +{ + lkdtm_handler(); + jprobe_return(); +} + +struct scan_control; + +static unsigned long jp_shrink_inactive_list(unsigned long max_scan, + struct zone *zone, + struct scan_control *sc) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} + +static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim, + const enum hrtimer_mode mode) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} + +static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} + +# ifdef CONFIG_IDE +static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, + struct block_device *bdev, unsigned int cmd, + unsigned long arg) +{ + lkdtm_handler(); + jprobe_return(); + return 0; +} +# endif +#endif + +/* Crash points */ +struct crashpoint { + const char *name; + const struct file_operations fops; + struct jprobe jprobe; +}; + +#define CRASHPOINT(_name, _write, _symbol, _entry) \ + { \ + .name = _name, \ + .fops = { \ + .read = lkdtm_debugfs_read, \ + .llseek = generic_file_llseek, \ + .open = lkdtm_debugfs_open, \ + .write = _write, \ + }, \ + .jprobe = { \ + .kp.symbol_name = _symbol, \ + .entry = (kprobe_opcode_t *)_entry, \ + }, \ + } + +/* Define the possible places where we can trigger a crash point. 
*/ +struct crashpoint crashpoints[] = { + CRASHPOINT("DIRECT", direct_entry, + NULL, NULL), +#ifdef CONFIG_KPROBES + CRASHPOINT("INT_HARDWARE_ENTRY", lkdtm_debugfs_entry, + "do_IRQ", jp_do_irq), + CRASHPOINT("INT_HW_IRQ_EN", lkdtm_debugfs_entry, + "handle_IRQ_event", jp_handle_irq_event), + CRASHPOINT("INT_TASKLET_ENTRY", lkdtm_debugfs_entry, + "tasklet_action", jp_tasklet_action), + CRASHPOINT("FS_DEVRW", lkdtm_debugfs_entry, + "ll_rw_block", jp_ll_rw_block), + CRASHPOINT("MEM_SWAPOUT", lkdtm_debugfs_entry, + "shrink_inactive_list", jp_shrink_inactive_list), + CRASHPOINT("TIMERADD", lkdtm_debugfs_entry, + "hrtimer_start", jp_hrtimer_start), + CRASHPOINT("SCSI_DISPATCH_CMD", lkdtm_debugfs_entry, + "scsi_dispatch_cmd", jp_scsi_dispatch_cmd), +# ifdef CONFIG_IDE + CRASHPOINT("IDE_CORE_CP", lkdtm_debugfs_entry, + "generic_ide_ioctl", jp_generic_ide_ioctl), +# endif +#endif +}; + + +/* Crash types. */ +struct crashtype { + const char *name; + void (*func)(void); +}; + +#define CRASHTYPE(_name) \ + { \ + .name = __stringify(_name), \ + .func = lkdtm_ ## _name, \ + } + +/* Define the possible types of crashes that can be triggered. */ +struct crashtype crashtypes[] = { + CRASHTYPE(PANIC), + CRASHTYPE(BUG), + CRASHTYPE(WARNING), + CRASHTYPE(EXCEPTION), + CRASHTYPE(LOOP), + CRASHTYPE(OVERFLOW), + CRASHTYPE(CORRUPT_STACK), + CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE), + CRASHTYPE(OVERWRITE_ALLOCATION), + CRASHTYPE(WRITE_AFTER_FREE), + CRASHTYPE(READ_AFTER_FREE), + CRASHTYPE(WRITE_BUDDY_AFTER_FREE), + CRASHTYPE(READ_BUDDY_AFTER_FREE), + CRASHTYPE(SOFTLOCKUP), + CRASHTYPE(HARDLOCKUP), + CRASHTYPE(SPINLOCKUP), + CRASHTYPE(HUNG_TASK), + CRASHTYPE(EXEC_DATA), + CRASHTYPE(EXEC_STACK), + CRASHTYPE(EXEC_KMALLOC), + CRASHTYPE(EXEC_VMALLOC), + CRASHTYPE(EXEC_RODATA), + CRASHTYPE(EXEC_USERSPACE), + CRASHTYPE(ACCESS_USERSPACE), + CRASHTYPE(WRITE_RO), + CRASHTYPE(WRITE_RO_AFTER_INIT), + CRASHTYPE(WRITE_KERN), + CRASHTYPE(ATOMIC_UNDERFLOW), + CRASHTYPE(ATOMIC_OVERFLOW), + CRASHTYPE(USERCOPY_HEAP_SIZE_TO), + CRASHTYPE(USERCOPY_HEAP_SIZE_FROM), + CRASHTYPE(USERCOPY_HEAP_FLAG_TO), + CRASHTYPE(USERCOPY_HEAP_FLAG_FROM), + CRASHTYPE(USERCOPY_STACK_FRAME_TO), + CRASHTYPE(USERCOPY_STACK_FRAME_FROM), + CRASHTYPE(USERCOPY_STACK_BEYOND), + CRASHTYPE(USERCOPY_KERNEL), +}; + + +/* Global jprobe entry and crashtype. 
*/ +static struct jprobe *lkdtm_jprobe; +struct crashpoint *lkdtm_crashpoint; +struct crashtype *lkdtm_crashtype; + +/* Module parameters */ +static int recur_count = -1; +module_param(recur_count, int, 0644); +MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test"); + +static char* cpoint_name; +module_param(cpoint_name, charp, 0444); +MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed"); + +static char* cpoint_type; +module_param(cpoint_type, charp, 0444); +MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\ + "hitting the crash point"); + +static int cpoint_count = DEFAULT_COUNT; +module_param(cpoint_count, int, 0644); +MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\ + "crash point is to be hit to trigger action"); + + +/* Return the crashtype number or NULL if the name is invalid */ +static struct crashtype *find_crashtype(const char *name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(crashtypes); i++) { + if (!strcmp(name, crashtypes[i].name)) + return &crashtypes[i]; + } + + return NULL; +} + +/* + * This is forced noinline just so it distinctly shows up in the stackdump + * which makes validation of expected lkdtm crashes easier. + */ +static noinline void lkdtm_do_action(struct crashtype *crashtype) +{ + BUG_ON(!crashtype || !crashtype->func); + crashtype->func(); +} + +static int lkdtm_register_cpoint(struct crashpoint *crashpoint, + struct crashtype *crashtype) +{ + int ret; + + /* If this doesn't have a symbol, just call immediately. */ + if (!crashpoint->jprobe.kp.symbol_name) { + lkdtm_do_action(crashtype); + return 0; + } + + if (lkdtm_jprobe != NULL) + unregister_jprobe(lkdtm_jprobe); + + lkdtm_crashpoint = crashpoint; + lkdtm_crashtype = crashtype; + lkdtm_jprobe = &crashpoint->jprobe; + ret = register_jprobe(lkdtm_jprobe); + if (ret < 0) { + pr_info("Couldn't register jprobe %s\n", + crashpoint->jprobe.kp.symbol_name); + lkdtm_jprobe = NULL; + lkdtm_crashpoint = NULL; + lkdtm_crashtype = NULL; + } + + return ret; +} + +#ifdef CONFIG_KPROBES +/* Global crash counter and spinlock. */ +static int crash_count = DEFAULT_COUNT; +static DEFINE_SPINLOCK(crash_count_lock); + +/* Called by jprobe entry points. 
*/ +static void lkdtm_handler(void) +{ + unsigned long flags; + bool do_it = false; + + BUG_ON(!lkdtm_crashpoint || !lkdtm_crashtype); + + spin_lock_irqsave(&crash_count_lock, flags); + crash_count--; + pr_info("Crash point %s of type %s hit, trigger in %d rounds\n", + lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count); + + if (crash_count == 0) { + do_it = true; + crash_count = cpoint_count; + } + spin_unlock_irqrestore(&crash_count_lock, flags); + + if (do_it) + lkdtm_do_action(lkdtm_crashtype); +} + +static ssize_t lkdtm_debugfs_entry(struct file *f, + const char __user *user_buf, + size_t count, loff_t *off) +{ + struct crashpoint *crashpoint = file_inode(f)->i_private; + struct crashtype *crashtype = NULL; + char *buf; + int err; + + if (count >= PAGE_SIZE) + return -EINVAL; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (copy_from_user(buf, user_buf, count)) { + free_page((unsigned long) buf); + return -EFAULT; + } + /* NULL-terminate and remove enter */ + buf[count] = '\0'; + strim(buf); + + crashtype = find_crashtype(buf); + free_page((unsigned long)buf); + + if (!crashtype) + return -EINVAL; + + err = lkdtm_register_cpoint(crashpoint, crashtype); + if (err < 0) + return err; + + *off += count; + + return count; +} +#endif + +/* Generic read callback that just prints out the available crash types */ +static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, + size_t count, loff_t *off) +{ + char *buf; + int i, n, out; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + n = snprintf(buf, PAGE_SIZE, "Available crash types:\n"); + for (i = 0; i < ARRAY_SIZE(crashtypes); i++) { + n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", + crashtypes[i].name); + } + buf[n] = '\0'; + + out = simple_read_from_buffer(user_buf, count, off, + buf, n); + free_page((unsigned long) buf); + + return out; +} + +static int lkdtm_debugfs_open(struct inode *inode, struct file *file) +{ + return 0; +} + +/* Special entry to just crash directly. 
Available without KPROBEs */ +static ssize_t direct_entry(struct file *f, const char __user *user_buf, + size_t count, loff_t *off) +{ + struct crashtype *crashtype; + char *buf; + + if (count >= PAGE_SIZE) + return -EINVAL; + if (count < 1) + return -EINVAL; + + buf = (char *)__get_free_page(GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (copy_from_user(buf, user_buf, count)) { + free_page((unsigned long) buf); + return -EFAULT; + } + /* NULL-terminate and remove enter */ + buf[count] = '\0'; + strim(buf); + + crashtype = find_crashtype(buf); + free_page((unsigned long) buf); + if (!crashtype) + return -EINVAL; + + pr_info("Performing direct entry %s\n", crashtype->name); + lkdtm_do_action(crashtype); + *off += count; + + return count; +} + +static struct dentry *lkdtm_debugfs_root; + +static int __init lkdtm_module_init(void) +{ + struct crashpoint *crashpoint = NULL; + struct crashtype *crashtype = NULL; + int ret = -EINVAL; + int i; + + /* Neither or both of these need to be set */ + if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) { + pr_err("Need both cpoint_type and cpoint_name or neither\n"); + return -EINVAL; + } + + if (cpoint_type) { + crashtype = find_crashtype(cpoint_type); + if (!crashtype) { + pr_err("Unknown crashtype '%s'\n", cpoint_type); + return -EINVAL; + } + } + + if (cpoint_name) { + for (i = 0; i < ARRAY_SIZE(crashpoints); i++) { + if (!strcmp(cpoint_name, crashpoints[i].name)) + crashpoint = &crashpoints[i]; + } + + /* Refuse unknown crashpoints. */ + if (!crashpoint) { + pr_err("Invalid crashpoint %s\n", cpoint_name); + return -EINVAL; + } + } + +#ifdef CONFIG_KPROBES + /* Set crash count. */ + crash_count = cpoint_count; +#endif + + /* Handle test-specific initialization. */ + lkdtm_bugs_init(&recur_count); + lkdtm_perms_init(); + lkdtm_usercopy_init(); + + /* Register debugfs interface */ + lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL); + if (!lkdtm_debugfs_root) { + pr_err("creating root dir failed\n"); + return -ENODEV; + } + + /* Install debugfs trigger files. */ + for (i = 0; i < ARRAY_SIZE(crashpoints); i++) { + struct crashpoint *cur = &crashpoints[i]; + struct dentry *de; + + de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, + cur, &cur->fops); + if (de == NULL) { + pr_err("could not create crashpoint %s\n", cur->name); + goto out_err; + } + } + + /* Install crashpoint if one was selected. */ + if (crashpoint) { + ret = lkdtm_register_cpoint(crashpoint, crashtype); + if (ret < 0) { + pr_info("Invalid crashpoint %s\n", crashpoint->name); + goto out_err; + } + pr_info("Crash point %s of type %s registered\n", + crashpoint->name, cpoint_type); + } else { + pr_info("No crash points registered, enable through debugfs\n"); + } + + return 0; + +out_err: + debugfs_remove_recursive(lkdtm_debugfs_root); + return ret; +} + +static void __exit lkdtm_module_exit(void) +{ + debugfs_remove_recursive(lkdtm_debugfs_root); + + /* Handle test-specific clean-up. */ + lkdtm_usercopy_exit(); + + unregister_jprobe(lkdtm_jprobe); + pr_info("Crash point unregistered\n"); +} + +module_init(lkdtm_module_init); +module_exit(lkdtm_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Kernel crash testing module"); diff --git a/drivers/misc/lkdtm_heap.c b/drivers/misc/lkdtm_heap.c new file mode 100644 index 000000000000..0f1581664c1c --- /dev/null +++ b/drivers/misc/lkdtm_heap.c @@ -0,0 +1,142 @@ +/* + * This is for all the tests relating directly to heap memory, including + * page allocation and slab allocations. 
+ */ +#include "lkdtm.h" +#include <linux/slab.h> + +/* + * This tries to stay within the next largest power-of-2 kmalloc cache + * to avoid actually overwriting anything important if it's not detected + * correctly. + */ +void lkdtm_OVERWRITE_ALLOCATION(void) +{ + size_t len = 1020; + u32 *data = kmalloc(len, GFP_KERNEL); + + data[1024 / sizeof(u32)] = 0x12345678; + kfree(data); +} + +void lkdtm_WRITE_AFTER_FREE(void) +{ + int *base, *again; + size_t len = 1024; + /* + * The slub allocator uses the first word to store the free + * pointer in some configurations. Use the middle of the + * allocation to avoid running into the freelist + */ + size_t offset = (len / sizeof(*base)) / 2; + + base = kmalloc(len, GFP_KERNEL); + pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]); + pr_info("Attempting bad write to freed memory at %p\n", + &base[offset]); + kfree(base); + base[offset] = 0x0abcdef0; + /* Attempt to notice the overwrite. */ + again = kmalloc(len, GFP_KERNEL); + kfree(again); + if (again != base) + pr_info("Hmm, didn't get the same memory range.\n"); +} + +void lkdtm_READ_AFTER_FREE(void) +{ + int *base, *val, saw; + size_t len = 1024; + /* + * The slub allocator uses the first word to store the free + * pointer in some configurations. Use the middle of the + * allocation to avoid running into the freelist + */ + size_t offset = (len / sizeof(*base)) / 2; + + base = kmalloc(len, GFP_KERNEL); + if (!base) { + pr_info("Unable to allocate base memory.\n"); + return; + } + + val = kmalloc(len, GFP_KERNEL); + if (!val) { + pr_info("Unable to allocate val memory.\n"); + kfree(base); + return; + } + + *val = 0x12345678; + base[offset] = *val; + pr_info("Value in memory before free: %x\n", base[offset]); + + kfree(base); + + pr_info("Attempting bad read from freed memory\n"); + saw = base[offset]; + if (saw != *val) { + /* Good! Poisoning happened, so declare a win. */ + pr_info("Memory correctly poisoned (%x)\n", saw); + BUG(); + } + pr_info("Memory was not poisoned\n"); + + kfree(val); +} + +void lkdtm_WRITE_BUDDY_AFTER_FREE(void) +{ + unsigned long p = __get_free_page(GFP_KERNEL); + if (!p) { + pr_info("Unable to allocate free page\n"); + return; + } + + pr_info("Writing to the buddy page before free\n"); + memset((void *)p, 0x3, PAGE_SIZE); + free_page(p); + schedule(); + pr_info("Attempting bad write to the buddy page after free\n"); + memset((void *)p, 0x78, PAGE_SIZE); + /* Attempt to notice the overwrite. */ + p = __get_free_page(GFP_KERNEL); + free_page(p); + schedule(); +} + +void lkdtm_READ_BUDDY_AFTER_FREE(void) +{ + unsigned long p = __get_free_page(GFP_KERNEL); + int saw, *val; + int *base; + + if (!p) { + pr_info("Unable to allocate free page\n"); + return; + } + + val = kmalloc(1024, GFP_KERNEL); + if (!val) { + pr_info("Unable to allocate val memory.\n"); + free_page(p); + return; + } + + base = (int *)p; + + *val = 0x12345678; + base[0] = *val; + pr_info("Value in memory before free: %x\n", base[0]); + free_page(p); + pr_info("Attempting to read from freed memory\n"); + saw = base[0]; + if (saw != *val) { + /* Good! Poisoning happened, so declare a win. 
*/ + pr_info("Memory correctly poisoned (%x)\n", saw); + BUG(); + } + pr_info("Buddy page was not poisoned\n"); + + kfree(val); +} diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c new file mode 100644 index 000000000000..45f1c0f96612 --- /dev/null +++ b/drivers/misc/lkdtm_perms.c @@ -0,0 +1,199 @@ +/* + * This is for all the tests related to validating kernel memory + * permissions: non-executable regions, non-writable regions, and + * even non-readable regions. + */ +#include "lkdtm.h" +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/mman.h> +#include <linux/uaccess.h> +#include <asm/cacheflush.h> + +/* Whether or not to fill the target memory area with do_nothing(). */ +#define CODE_WRITE true +#define CODE_AS_IS false + +/* How many bytes to copy to be sure we've copied enough of do_nothing(). */ +#define EXEC_SIZE 64 + +/* This is non-const, so it will end up in the .data section. */ +static u8 data_area[EXEC_SIZE]; + +/* This is const, so it will end up in the .rodata section. */ +static const unsigned long rodata = 0xAA55AA55; + +/* This is marked __ro_after_init, so it should ultimately be .rodata. */ +static unsigned long ro_after_init __ro_after_init = 0x55AA5500; + +/* + * This just returns to the caller. It is designed to be copied into + * non-executable memory regions. + */ +static void do_nothing(void) +{ + return; +} + +/* Must immediately follow do_nothing for size calculations to work out. */ +static void do_overwritten(void) +{ + pr_info("do_overwritten wasn't overwritten!\n"); + return; +} + +static noinline void execute_location(void *dst, bool write) +{ + void (*func)(void) = dst; + + pr_info("attempting ok execution at %p\n", do_nothing); + do_nothing(); + + if (write == CODE_WRITE) { + memcpy(dst, do_nothing, EXEC_SIZE); + flush_icache_range((unsigned long)dst, + (unsigned long)dst + EXEC_SIZE); + } + pr_info("attempting bad execution at %p\n", func); + func(); +} + +static void execute_user_location(void *dst) +{ + /* Intentionally crossing kernel/user memory boundary. */ + void (*func)(void) = dst; + + pr_info("attempting ok execution at %p\n", do_nothing); + do_nothing(); + + if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE)) + return; + flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); + pr_info("attempting bad execution at %p\n", func); + func(); +} + +void lkdtm_WRITE_RO(void) +{ + /* Explicitly cast away "const" for the test. */ + unsigned long *ptr = (unsigned long *)&rodata; + + pr_info("attempting bad rodata write at %p\n", ptr); + *ptr ^= 0xabcd1234; +} + +void lkdtm_WRITE_RO_AFTER_INIT(void) +{ + unsigned long *ptr = &ro_after_init; + + /* + * Verify we were written to during init. Since an Oops + * is considered a "success", a failure is to just skip the + * real test. 
+ */ + if ((*ptr & 0xAA) != 0xAA) { + pr_info("%p was NOT written during init!?\n", ptr); + return; + } + + pr_info("attempting bad ro_after_init write at %p\n", ptr); + *ptr ^= 0xabcd1234; +} + +void lkdtm_WRITE_KERN(void) +{ + size_t size; + unsigned char *ptr; + + size = (unsigned long)do_overwritten - (unsigned long)do_nothing; + ptr = (unsigned char *)do_overwritten; + + pr_info("attempting bad %zu byte write at %p\n", size, ptr); + memcpy(ptr, (unsigned char *)do_nothing, size); + flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size)); + + do_overwritten(); +} + +void lkdtm_EXEC_DATA(void) +{ + execute_location(data_area, CODE_WRITE); +} + +void lkdtm_EXEC_STACK(void) +{ + u8 stack_area[EXEC_SIZE]; + execute_location(stack_area, CODE_WRITE); +} + +void lkdtm_EXEC_KMALLOC(void) +{ + u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL); + execute_location(kmalloc_area, CODE_WRITE); + kfree(kmalloc_area); +} + +void lkdtm_EXEC_VMALLOC(void) +{ + u32 *vmalloc_area = vmalloc(EXEC_SIZE); + execute_location(vmalloc_area, CODE_WRITE); + vfree(vmalloc_area); +} + +void lkdtm_EXEC_RODATA(void) +{ + execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS); +} + +void lkdtm_EXEC_USERSPACE(void) +{ + unsigned long user_addr; + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + return; + } + execute_user_location((void *)user_addr); + vm_munmap(user_addr, PAGE_SIZE); +} + +void lkdtm_ACCESS_USERSPACE(void) +{ + unsigned long user_addr, tmp = 0; + unsigned long *ptr; + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + return; + } + + if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) { + pr_warn("copy_to_user failed\n"); + vm_munmap(user_addr, PAGE_SIZE); + return; + } + + ptr = (unsigned long *)user_addr; + + pr_info("attempting bad read at %p\n", ptr); + tmp = *ptr; + tmp += 0xc0dec0de; + + pr_info("attempting bad write at %p\n", ptr); + *ptr = tmp; + + vm_munmap(user_addr, PAGE_SIZE); +} + +void __init lkdtm_perms_init(void) +{ + /* Make sure we can write to __ro_after_init values during __init */ + ro_after_init |= 0xAA; + +} diff --git a/drivers/misc/lkdtm_rodata.c b/drivers/misc/lkdtm_rodata.c new file mode 100644 index 000000000000..166b1db3969f --- /dev/null +++ b/drivers/misc/lkdtm_rodata.c @@ -0,0 +1,10 @@ +/* + * This includes functions that are meant to live entirely in .rodata + * (via objcopy tricks), to validate the non-executability of .rodata. + */ +#include "lkdtm.h" + +void lkdtm_rodata_do_nothing(void) +{ + /* Does nothing. We just want an architecture agnostic "return". */ +} diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c new file mode 100644 index 000000000000..5a3fd76eec27 --- /dev/null +++ b/drivers/misc/lkdtm_usercopy.c @@ -0,0 +1,313 @@ +/* + * This is for all the tests related to copy_to_user() and copy_from_user() + * hardening. 
+ */ +#include "lkdtm.h" +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/mman.h> +#include <linux/uaccess.h> +#include <asm/cacheflush.h> + +static size_t cache_size = 1024; +static struct kmem_cache *bad_cache; + +static const unsigned char test_text[] = "This is a test.\n"; + +/* + * Instead of adding -Wno-return-local-addr, just pass the stack address + * through a function to obfuscate it from the compiler. + */ +static noinline unsigned char *trick_compiler(unsigned char *stack) +{ + return stack + 0; +} + +static noinline unsigned char *do_usercopy_stack_callee(int value) +{ + unsigned char buf[32]; + int i; + + /* Exercise stack to avoid everything living in registers. */ + for (i = 0; i < sizeof(buf); i++) { + buf[i] = value & 0xff; + } + + return trick_compiler(buf); +} + +static noinline void do_usercopy_stack(bool to_user, bool bad_frame) +{ + unsigned long user_addr; + unsigned char good_stack[32]; + unsigned char *bad_stack; + int i; + + /* Exercise stack to avoid everything living in registers. */ + for (i = 0; i < sizeof(good_stack); i++) + good_stack[i] = test_text[i % sizeof(test_text)]; + + /* This is a pointer to outside our current stack frame. */ + if (bad_frame) { + bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack); + } else { + /* Put start address just inside stack. */ + bad_stack = task_stack_page(current) + THREAD_SIZE; + bad_stack -= sizeof(unsigned long); + } + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + return; + } + + if (to_user) { + pr_info("attempting good copy_to_user of local stack\n"); + if (copy_to_user((void __user *)user_addr, good_stack, + sizeof(good_stack))) { + pr_warn("copy_to_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_to_user of distant stack\n"); + if (copy_to_user((void __user *)user_addr, bad_stack, + sizeof(good_stack))) { + pr_warn("copy_to_user failed, but lacked Oops\n"); + goto free_user; + } + } else { + /* + * There isn't a safe way to not be protected by usercopy + * if we're going to write to another thread's stack. 
+ */ + if (!bad_frame) + goto free_user; + + pr_info("attempting good copy_from_user of local stack\n"); + if (copy_from_user(good_stack, (void __user *)user_addr, + sizeof(good_stack))) { + pr_warn("copy_from_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_from_user of distant stack\n"); + if (copy_from_user(bad_stack, (void __user *)user_addr, + sizeof(good_stack))) { + pr_warn("copy_from_user failed, but lacked Oops\n"); + goto free_user; + } + } + +free_user: + vm_munmap(user_addr, PAGE_SIZE); +} + +static void do_usercopy_heap_size(bool to_user) +{ + unsigned long user_addr; + unsigned char *one, *two; + const size_t size = 1024; + + one = kmalloc(size, GFP_KERNEL); + two = kmalloc(size, GFP_KERNEL); + if (!one || !two) { + pr_warn("Failed to allocate kernel memory\n"); + goto free_kernel; + } + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + goto free_kernel; + } + + memset(one, 'A', size); + memset(two, 'B', size); + + if (to_user) { + pr_info("attempting good copy_to_user of correct size\n"); + if (copy_to_user((void __user *)user_addr, one, size)) { + pr_warn("copy_to_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_to_user of too large size\n"); + if (copy_to_user((void __user *)user_addr, one, 2 * size)) { + pr_warn("copy_to_user failed, but lacked Oops\n"); + goto free_user; + } + } else { + pr_info("attempting good copy_from_user of correct size\n"); + if (copy_from_user(one, (void __user *)user_addr, size)) { + pr_warn("copy_from_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_from_user of too large size\n"); + if (copy_from_user(one, (void __user *)user_addr, 2 * size)) { + pr_warn("copy_from_user failed, but lacked Oops\n"); + goto free_user; + } + } + +free_user: + vm_munmap(user_addr, PAGE_SIZE); +free_kernel: + kfree(one); + kfree(two); +} + +static void do_usercopy_heap_flag(bool to_user) +{ + unsigned long user_addr; + unsigned char *good_buf = NULL; + unsigned char *bad_buf = NULL; + + /* Make sure cache was prepared. */ + if (!bad_cache) { + pr_warn("Failed to allocate kernel cache\n"); + return; + } + + /* + * Allocate one buffer from each cache (kmalloc will have the + * SLAB_USERCOPY flag already, but "bad_cache" won't). + */ + good_buf = kmalloc(cache_size, GFP_KERNEL); + bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL); + if (!good_buf || !bad_buf) { + pr_warn("Failed to allocate buffers from caches\n"); + goto free_alloc; + } + + /* Allocate user memory we'll poke at. 
*/ + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + goto free_alloc; + } + + memset(good_buf, 'A', cache_size); + memset(bad_buf, 'B', cache_size); + + if (to_user) { + pr_info("attempting good copy_to_user with SLAB_USERCOPY\n"); + if (copy_to_user((void __user *)user_addr, good_buf, + cache_size)) { + pr_warn("copy_to_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n"); + if (copy_to_user((void __user *)user_addr, bad_buf, + cache_size)) { + pr_warn("copy_to_user failed, but lacked Oops\n"); + goto free_user; + } + } else { + pr_info("attempting good copy_from_user with SLAB_USERCOPY\n"); + if (copy_from_user(good_buf, (void __user *)user_addr, + cache_size)) { + pr_warn("copy_from_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n"); + if (copy_from_user(bad_buf, (void __user *)user_addr, + cache_size)) { + pr_warn("copy_from_user failed, but lacked Oops\n"); + goto free_user; + } + } + +free_user: + vm_munmap(user_addr, PAGE_SIZE); +free_alloc: + if (bad_buf) + kmem_cache_free(bad_cache, bad_buf); + kfree(good_buf); +} + +/* Callable tests. */ +void lkdtm_USERCOPY_HEAP_SIZE_TO(void) +{ + do_usercopy_heap_size(true); +} + +void lkdtm_USERCOPY_HEAP_SIZE_FROM(void) +{ + do_usercopy_heap_size(false); +} + +void lkdtm_USERCOPY_HEAP_FLAG_TO(void) +{ + do_usercopy_heap_flag(true); +} + +void lkdtm_USERCOPY_HEAP_FLAG_FROM(void) +{ + do_usercopy_heap_flag(false); +} + +void lkdtm_USERCOPY_STACK_FRAME_TO(void) +{ + do_usercopy_stack(true, true); +} + +void lkdtm_USERCOPY_STACK_FRAME_FROM(void) +{ + do_usercopy_stack(false, true); +} + +void lkdtm_USERCOPY_STACK_BEYOND(void) +{ + do_usercopy_stack(true, false); +} + +void lkdtm_USERCOPY_KERNEL(void) +{ + unsigned long user_addr; + + user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, 0); + if (user_addr >= TASK_SIZE) { + pr_warn("Failed to allocate user memory\n"); + return; + } + + pr_info("attempting good copy_to_user from kernel rodata\n"); + if (copy_to_user((void __user *)user_addr, test_text, + sizeof(test_text))) { + pr_warn("copy_to_user failed unexpectedly?!\n"); + goto free_user; + } + + pr_info("attempting bad copy_to_user from kernel text\n"); + if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) { + pr_warn("copy_to_user failed, but lacked Oops\n"); + goto free_user; + } + +free_user: + vm_munmap(user_addr, PAGE_SIZE); +} + +void __init lkdtm_usercopy_init(void) +{ + /* Prepare cache that lacks SLAB_USERCOPY flag. */ + bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0, + 0, NULL); +} + +void __exit lkdtm_usercopy_exit(void) +{ + kmem_cache_destroy(bad_cache); +} diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 5aa606c8a827..085f3aafe6fa 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -132,6 +132,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) hdr->length = length; hdr->msg_complete = 1; hdr->reserved = 0; + hdr->internal = 0; } /** @@ -165,15 +166,15 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len) * Return: 0 on success, <0 on failure. 
*/ static inline -int mei_hbm_cl_write(struct mei_device *dev, - struct mei_cl *cl, u8 hbm_cmd, size_t len) +int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl, + u8 hbm_cmd, u8 *buf, size_t len) { - struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; + struct mei_msg_hdr mei_hdr; - mei_hbm_hdr(mei_hdr, len); - mei_hbm_cl_hdr(cl, hbm_cmd, dev->wr_msg.data, len); + mei_hbm_hdr(&mei_hdr, len); + mei_hbm_cl_hdr(cl, hbm_cmd, buf, len); - return mei_write_message(dev, mei_hdr, dev->wr_msg.data); + return mei_write_message(dev, &mei_hdr, buf); } /** @@ -250,24 +251,23 @@ int mei_hbm_start_wait(struct mei_device *dev) */ int mei_hbm_start_req(struct mei_device *dev) { - struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; - struct hbm_host_version_request *start_req; + struct mei_msg_hdr mei_hdr; + struct hbm_host_version_request start_req; const size_t len = sizeof(struct hbm_host_version_request); int ret; mei_hbm_reset(dev); - mei_hbm_hdr(mei_hdr, len); + mei_hbm_hdr(&mei_hdr, len); /* host start message */ - start_req = (struct hbm_host_version_request *)dev->wr_msg.data; - memset(start_req, 0, len); - start_req->hbm_cmd = HOST_START_REQ_CMD; - start_req->host_version.major_version = HBM_MAJOR_VERSION; - start_req->host_version.minor_version = HBM_MINOR_VERSION; + memset(&start_req, 0, len); + start_req.hbm_cmd = HOST_START_REQ_CMD; + start_req.host_version.major_version = HBM_MAJOR_VERSION; + start_req.host_version.minor_version = HBM_MINOR_VERSION; dev->hbm_state = MEI_HBM_IDLE; - ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + ret = mei_write_message(dev, &mei_hdr, &start_req); if (ret) { dev_err(dev->dev, "version message write failed: ret = %d\n", ret); @@ -288,23 +288,22 @@ int mei_hbm_start_req(struct mei_device *dev) */ static int mei_hbm_enum_clients_req(struct mei_device *dev) { - struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; - struct hbm_host_enum_request *enum_req; + struct mei_msg_hdr mei_hdr; + struct hbm_host_enum_request enum_req; const size_t len = sizeof(struct hbm_host_enum_request); int ret; /* enumerate clients */ - mei_hbm_hdr(mei_hdr, len); + mei_hbm_hdr(&mei_hdr, len); - enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data; - memset(enum_req, 0, len); - enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; - enum_req->flags |= dev->hbm_f_dc_supported ? - MEI_HBM_ENUM_F_ALLOW_ADD : 0; - enum_req->flags |= dev->hbm_f_ie_supported ? - MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0; + memset(&enum_req, 0, len); + enum_req.hbm_cmd = HOST_ENUM_REQ_CMD; + enum_req.flags |= dev->hbm_f_dc_supported ? + MEI_HBM_ENUM_F_ALLOW_ADD : 0; + enum_req.flags |= dev->hbm_f_ie_supported ? 
+ MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0; - ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + ret = mei_write_message(dev, &mei_hdr, &enum_req); if (ret) { dev_err(dev->dev, "enumeration request write failed: ret = %d.\n", ret); @@ -358,23 +357,21 @@ static int mei_hbm_me_cl_add(struct mei_device *dev, */ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status) { - struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; - struct hbm_add_client_response *resp; + struct mei_msg_hdr mei_hdr; + struct hbm_add_client_response resp; const size_t len = sizeof(struct hbm_add_client_response); int ret; dev_dbg(dev->dev, "adding client response\n"); - resp = (struct hbm_add_client_response *)dev->wr_msg.data; + mei_hbm_hdr(&mei_hdr, len); - mei_hbm_hdr(mei_hdr, len); - memset(resp, 0, sizeof(struct hbm_add_client_response)); + memset(&resp, 0, sizeof(struct hbm_add_client_response)); + resp.hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD; + resp.me_addr = addr; + resp.status = status; - resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD; - resp->me_addr = addr; - resp->status = status; - - ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + ret = mei_write_message(dev, &mei_hdr, &resp); if (ret) dev_err(dev->dev, "add client response write failed: ret = %d\n", ret); @@ -421,18 +418,17 @@ int mei_hbm_cl_notify_req(struct mei_device *dev, struct mei_cl *cl, u8 start) { - struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; - struct hbm_notification_request *req; + struct mei_msg_hdr mei_hdr; + struct hbm_notification_request req; const size_t len = sizeof(struct hbm_notification_request); int ret; - mei_hbm_hdr(mei_hdr, len); - mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, dev->wr_msg.data, len); + mei_hbm_hdr(&mei_hdr, len); + mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, len); - req = (struct hbm_notification_request *)dev->wr_msg.data; - req->start = start; + req.start = start; - ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + ret = mei_write_message(dev, &mei_hdr, &req); if (ret) dev_err(dev->dev, "notify request failed: ret = %d\n", ret); @@ -534,8 +530,8 @@ static void mei_hbm_cl_notify(struct mei_device *dev, */ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx) { - struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; - struct hbm_props_request *prop_req; + struct mei_msg_hdr mei_hdr; + struct hbm_props_request prop_req; const size_t len = sizeof(struct hbm_props_request); unsigned long addr; int ret; @@ -550,15 +546,14 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx) return 0; } - mei_hbm_hdr(mei_hdr, len); - prop_req = (struct hbm_props_request *)dev->wr_msg.data; + mei_hbm_hdr(&mei_hdr, len); - memset(prop_req, 0, sizeof(struct hbm_props_request)); + memset(&prop_req, 0, sizeof(struct hbm_props_request)); - prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; - prop_req->me_addr = addr; + prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; + prop_req.me_addr = addr; - ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + ret = mei_write_message(dev, &mei_hdr, &prop_req); if (ret) { dev_err(dev->dev, "properties request write failed: ret = %d\n", ret); @@ -581,21 +576,20 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx) */ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd) { - struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; - struct hbm_power_gate *req; + struct mei_msg_hdr mei_hdr; + struct hbm_power_gate req; const size_t len = sizeof(struct hbm_power_gate); int ret; if (!dev->hbm_f_pg_supported) 
return -EOPNOTSUPP; - mei_hbm_hdr(mei_hdr, len); + mei_hbm_hdr(&mei_hdr, len); - req = (struct hbm_power_gate *)dev->wr_msg.data; - memset(req, 0, len); - req->hbm_cmd = pg_cmd; + memset(&req, 0, len); + req.hbm_cmd = pg_cmd; - ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + ret = mei_write_message(dev, &mei_hdr, &req); if (ret) dev_err(dev->dev, "power gate command write failed.\n"); return ret; @@ -611,18 +605,17 @@ EXPORT_SYMBOL_GPL(mei_hbm_pg); */ static int mei_hbm_stop_req(struct mei_device *dev) { - struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; - struct hbm_host_stop_request *req = - (struct hbm_host_stop_request *)dev->wr_msg.data; + struct mei_msg_hdr mei_hdr; + struct hbm_host_stop_request req; const size_t len = sizeof(struct hbm_host_stop_request); - mei_hbm_hdr(mei_hdr, len); + mei_hbm_hdr(&mei_hdr, len); - memset(req, 0, len); - req->hbm_cmd = HOST_STOP_REQ_CMD; - req->reason = DRIVER_STOP_REQUEST; + memset(&req, 0, len); + req.hbm_cmd = HOST_STOP_REQ_CMD; + req.reason = DRIVER_STOP_REQUEST; - return mei_write_message(dev, mei_hdr, dev->wr_msg.data); + return mei_write_message(dev, &mei_hdr, &req); } /** @@ -636,9 +629,10 @@ static int mei_hbm_stop_req(struct mei_device *dev) int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl) { const size_t len = sizeof(struct hbm_flow_control); + u8 buf[len]; cl_dbg(dev, cl, "sending flow control\n"); - return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, len); + return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, buf, len); } /** @@ -714,8 +708,9 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev, int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) { const size_t len = sizeof(struct hbm_client_connect_request); + u8 buf[len]; - return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, len); + return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, buf, len); } /** @@ -729,8 +724,9 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl) int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl) { const size_t len = sizeof(struct hbm_client_connect_response); + u8 buf[len]; - return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, len); + return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, buf, len); } /** @@ -765,8 +761,9 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl, int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl) { const size_t len = sizeof(struct hbm_client_connect_request); + u8 buf[len]; - return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, len); + return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, buf, len); } /** diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index c9e01021eadf..e5e32503d4bc 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -382,7 +382,6 @@ const char *mei_pg_state_str(enum mei_pg_state state); * * @hbuf_depth : depth of hardware host/write buffer is slots * @hbuf_is_ready : query if the host host/write buffer is ready - * @wr_msg : the buffer for hbm control messages * * @version : HBM protocol version in use * @hbm_f_pg_supported : hbm feature pgi protocol @@ -467,12 +466,6 @@ struct mei_device { u8 hbuf_depth; bool hbuf_is_ready; - /* used for control messages */ - struct { - struct mei_msg_hdr hdr; - unsigned char data[128]; - } wr_msg; - struct hbm_version version; unsigned int hbm_f_pg_supported:1; unsigned int hbm_f_dc_supported:1; @@ -670,8 +663,7 @@ static inline 
size_t mei_hbuf_max_len(const struct mei_device *dev) } static inline int mei_write_message(struct mei_device *dev, - struct mei_msg_hdr *hdr, - unsigned char *buf) + struct mei_msg_hdr *hdr, void *buf) { return dev->ops->write(dev, hdr, buf); } diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index e047efd83f57..9599d732aff3 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c @@ -38,7 +38,7 @@ static inline struct mic_device *vpdev_to_mdev(struct device *dev) static dma_addr_t _mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { void *va = phys_to_virt(page_to_phys(page)) + offset; struct mic_device *mdev = vpdev_to_mdev(dev); @@ -48,7 +48,7 @@ _mic_dma_map_page(struct device *dev, struct page *page, static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct mic_device *mdev = vpdev_to_mdev(dev); @@ -144,7 +144,7 @@ static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev) static void *__mic_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -164,7 +164,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size, } static void __mic_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -176,7 +176,7 @@ static void __mic_dma_free(struct device *dev, size_t size, void *vaddr, static dma_addr_t __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { void *va = phys_to_virt(page_to_phys(page)) + offset; struct scif_hw_dev *scdev = dev_get_drvdata(dev); @@ -188,7 +188,7 @@ __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, static void __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -198,7 +198,7 @@ __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -229,7 +229,7 @@ err: static void __mic_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -327,7 +327,7 @@ static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev) static dma_addr_t mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { void *va = phys_to_virt(page_to_phys(page)) + 
offset; struct mic_device *mdev = dev_get_drvdata(dev->parent); @@ -338,7 +338,7 @@ mic_dma_map_page(struct device *dev, struct page *page, static void mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct mic_device *mdev = dev_get_drvdata(dev->parent); mic_unmap_single(mdev, dma_addr, size); } diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index dcdbd58672cc..00051590e00f 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c @@ -141,7 +141,7 @@ static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata) * This function is being called with spin lock held, protocol drivers are * only expected to complete their waits and do nothing more than that. */ -static void st_reg_complete(struct st_data_s *st_gdata, char err) +static void st_reg_complete(struct st_data_s *st_gdata, int err) { unsigned char i = 0; pr_info(" %s ", __func__);
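For reference, a minimal sketch of how a new crash test would plug into the table-driven lkdtm core introduced above. This is not part of the commit; the name NEW_TEST and its body are hypothetical and only illustrate how the lkdtm.h declaration, the per-topic lkdtm_*.c implementation, and the CRASHTYPE() entry in lkdtm_core.c fit together.

/* 1) Declare the handler in drivers/misc/lkdtm.h: */
void lkdtm_NEW_TEST(void);

/* 2) Implement it in the matching topic file, e.g. drivers/misc/lkdtm_bugs.c: */
void lkdtm_NEW_TEST(void)
{
	/* Whatever failure the test should provoke goes here; BUG() is just an example. */
	pr_info("triggering example failure\n");
	BUG();
}

/* 3) Add one entry to the crashtypes[] table in drivers/misc/lkdtm_core.c: */
	CRASHTYPE(NEW_TEST),

/*
 * The core then lists the new name when any file under
 * /sys/kernel/debug/provoke-crash/ is read, and the test can be fired
 * without a kprobe via: echo NEW_TEST > /sys/kernel/debug/provoke-crash/DIRECT
 */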