| author | Ingo Molnar <mingo@kernel.org> | 2020-03-21 09:23:40 +0100 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@kernel.org> | 2020-03-21 09:24:41 +0100 |
| commit | a4654e9bde4ecedb4921e6c8fe2088114bdff1b3 (patch) | |
| tree | 1b9970b520d7bc7176cc9460fe67f210be5ea181 /drivers/firmware | |
| parent | Merge branch 'kcsan.2020.01.07a' into locking/kcsan (diff) | |
| parent | x86/purgatory: Fail the build if purgatory.ro has missing symbols (diff) | |
Merge branch 'x86/kdump' into locking/kcsan, to resolve conflicts
Conflicts:
	arch/x86/purgatory/Makefile
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/firmware')
45 files changed, 2119 insertions, 2126 deletions
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index e40a77bfe821..ea869addc89b 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -239,14 +239,6 @@ config QCOM_SCM depends on ARM || ARM64 select RESET_CONTROLLER -config QCOM_SCM_32 - def_bool y - depends on QCOM_SCM && ARM - -config QCOM_SCM_64 - def_bool y - depends on QCOM_SCM && ARM64 - config QCOM_SCM_DOWNLOAD_MODE_DEFAULT bool "Qualcomm download mode enabled by default" depends on QCOM_SCM diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 3fcb91975bdc..e9fb838af4df 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -17,10 +17,7 @@ obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o -obj-$(CONFIG_QCOM_SCM) += qcom_scm.o -obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o -obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o -CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a +obj-$(CONFIG_QCOM_SCM) += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 7a30952b463d..db55c43a2cbd 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -28,8 +28,12 @@ scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv) return NULL; for (; id->protocol_id; id++) - if (id->protocol_id == scmi_dev->protocol_id) - return id; + if (id->protocol_id == scmi_dev->protocol_id) { + if (!id->name) + return id; + else if (!strcmp(id->name, scmi_dev->name)) + return id; + } return NULL; } @@ -56,6 +60,11 @@ static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle) return fn(handle); } +static int scmi_protocol_dummy_init(struct scmi_handle *handle) +{ + return 0; +} + static int scmi_dev_probe(struct device *dev) { struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); @@ -74,6 +83,10 @@ static int scmi_dev_probe(struct device *dev) if (ret) return ret; + /* Skip protocol initialisation for additional devices */ + idr_replace(&scmi_protocols, &scmi_protocol_dummy_init, + scmi_dev->protocol_id); + return scmi_drv->probe(scmi_dev); } @@ -125,7 +138,8 @@ static void scmi_device_release(struct device *dev) } struct scmi_device * -scmi_device_create(struct device_node *np, struct device *parent, int protocol) +scmi_device_create(struct device_node *np, struct device *parent, int protocol, + const char *name) { int id, retval; struct scmi_device *scmi_dev; @@ -134,8 +148,15 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) if (!scmi_dev) return NULL; + scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL); + if (!scmi_dev->name) { + kfree(scmi_dev); + return NULL; + } + id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); if (id < 0) { + kfree_const(scmi_dev->name); kfree(scmi_dev); return NULL; } @@ -154,6 +175,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) return scmi_dev; put_dev: + kfree_const(scmi_dev->name); put_device(&scmi_dev->dev); ida_simple_remove(&scmi_bus_id, id); return NULL; @@ -161,6 +183,7 @@ put_dev: void scmi_device_destroy(struct scmi_device *scmi_dev) { + kfree_const(scmi_dev->name); scmi_handle_put(scmi_dev->handle); 
ida_simple_remove(&scmi_bus_id, scmi_dev->id); device_unregister(&scmi_dev->dev); diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 32526a793f3a..4c2227662b26 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -65,6 +65,7 @@ struct scmi_clock_set_rate { }; struct clock_info { + u32 version; int num_clocks; int max_async_req; atomic_t cur_async_req; @@ -340,6 +341,7 @@ static int scmi_clock_protocol_init(struct scmi_handle *handle) scmi_clock_describe_rates_get(handle, clkid, clk); } + cinfo->version = version; handle->clk_ops = &clk_ops; handle->clk_priv = cinfo; diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 5237c2ff79fe..df35358ff324 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -81,6 +81,7 @@ struct scmi_msg { /** * struct scmi_xfer - Structure representing a message flow * + * @transfer_id: Unique ID for debug & profiling purpose * @hdr: Transmit message header * @tx: Transmit message * @rx: Receive message, the buffer should be pre-allocated to store @@ -90,6 +91,7 @@ struct scmi_msg { * @async: pointer to delayed response message received event completion */ struct scmi_xfer { + int transfer_id; struct scmi_msg_hdr hdr; struct scmi_msg tx; struct scmi_msg rx; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 3eb0382491ce..2c96f6b5a7d8 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -29,6 +29,9 @@ #include "common.h" +#define CREATE_TRACE_POINTS +#include <trace/events/scmi.h> + #define MSG_ID_MASK GENMASK(7, 0) #define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr)) #define MSG_TYPE_MASK GENMASK(9, 8) @@ -61,6 +64,8 @@ enum scmi_error_codes { static LIST_HEAD(scmi_list); /* Protection for the entire list */ static DEFINE_MUTEX(scmi_list_mutex); +/* Track the unique id for the transfers for debug & profiling purpose */ +static atomic_t transfer_last_id; /** * struct scmi_xfers_info - Structure to manage transfer information @@ -304,6 +309,7 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle, xfer = &minfo->xfer_block[xfer_id]; xfer->hdr.seq = xfer_id; reinit_completion(&xfer->done); + xfer->transfer_id = atomic_inc_return(&transfer_last_id); return xfer; } @@ -374,6 +380,10 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m) scmi_fetch_response(xfer, mem); + trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + msg_type); + if (msg_type == MSG_TYPE_DELAYED_RESP) complete(xfer->async_done); else @@ -439,6 +449,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) if (unlikely(!cinfo)) return -EINVAL; + trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + xfer->hdr.poll_completion); + ret = mbox_send_message(cinfo->chan, xfer); if (ret < 0) { dev_dbg(dev, "mbox send fail %d\n", ret); @@ -478,6 +492,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) */ mbox_client_txdone(cinfo->chan, ret); + trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, + xfer->hdr.protocol_id, xfer->hdr.seq, + xfer->hdr.status); + return ret; } @@ -735,6 +753,11 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, idx = tx ? 0 : 1; idr = tx ? 
&info->tx_idr : &info->rx_idr; + /* check if already allocated, used for multiple device per protocol */ + cinfo = idr_find(idr, prot_id); + if (cinfo) + return 0; + if (scmi_mailbox_check(np, idx)) { cinfo = idr_find(idr, SCMI_PROTOCOL_BASE); if (unlikely(!cinfo)) /* Possible only if platform has no Rx */ @@ -803,11 +826,11 @@ scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) static inline void scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, - int prot_id) + int prot_id, const char *name) { struct scmi_device *sdev; - sdev = scmi_device_create(np, info->dev, prot_id); + sdev = scmi_device_create(np, info->dev, prot_id, name); if (!sdev) { dev_err(info->dev, "failed to create %d protocol device\n", prot_id); @@ -824,6 +847,40 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info, scmi_set_handle(sdev); } +#define MAX_SCMI_DEV_PER_PROTOCOL 2 +struct scmi_prot_devnames { + int protocol_id; + char *names[MAX_SCMI_DEV_PER_PROTOCOL]; +}; + +static struct scmi_prot_devnames devnames[] = { + { SCMI_PROTOCOL_POWER, { "genpd" },}, + { SCMI_PROTOCOL_PERF, { "cpufreq" },}, + { SCMI_PROTOCOL_CLOCK, { "clocks" },}, + { SCMI_PROTOCOL_SENSOR, { "hwmon" },}, + { SCMI_PROTOCOL_RESET, { "reset" },}, +}; + +static inline void +scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info, + int prot_id) +{ + int loop, cnt; + + for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) { + if (devnames[loop].protocol_id != prot_id) + continue; + + for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) { + const char *name = devnames[loop].names[cnt]; + + if (name) + scmi_create_protocol_device(np, info, prot_id, + name); + } + } +} + static int scmi_probe(struct platform_device *pdev) { int ret; @@ -892,7 +949,7 @@ static int scmi_probe(struct platform_device *pdev) continue; } - scmi_create_protocol_device(child, info, prot_id); + scmi_create_protocol_devices(child, info, prot_id); } return 0; @@ -940,6 +997,52 @@ static int scmi_remove(struct platform_device *pdev) return ret; } +static ssize_t protocol_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "%u.%u\n", info->version.major_ver, + info->version.minor_ver); +} +static DEVICE_ATTR_RO(protocol_version); + +static ssize_t firmware_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "0x%x\n", info->version.impl_ver); +} +static DEVICE_ATTR_RO(firmware_version); + +static ssize_t vendor_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", info->version.vendor_id); +} +static DEVICE_ATTR_RO(vendor_id); + +static ssize_t sub_vendor_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scmi_info *info = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", info->version.sub_vendor_id); +} +static DEVICE_ATTR_RO(sub_vendor_id); + +static struct attribute *versions_attrs[] = { + &dev_attr_firmware_version.attr, + &dev_attr_protocol_version.attr, + &dev_attr_vendor_id.attr, + &dev_attr_sub_vendor_id.attr, + NULL, +}; +ATTRIBUTE_GROUPS(versions); + static const struct scmi_desc scmi_generic_desc = { .max_rx_timeout_ms = 30, /* We may increase this if required */ .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ @@ -958,6 +1061,7 @@ static 
struct platform_driver scmi_driver = { .driver = { .name = "arm-scmi", .of_match_table = scmi_of_match, + .dev_groups = versions_groups, }, .probe = scmi_probe, .remove = scmi_remove, diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 601af4edad5e..ec81e6f7e7a4 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -145,6 +145,7 @@ struct perf_dom_info { }; struct scmi_perf_info { + u32 version; int num_domains; bool power_scale_mw; u64 stats_addr; @@ -736,6 +737,7 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle) scmi_perf_domain_init_fc(handle, domain, &dom->fc_info); } + pinfo->version = version; handle->perf_ops = &perf_ops; handle->perf_priv = pinfo; diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index 5abef7079c0a..214886ce84f1 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -50,6 +50,7 @@ struct power_dom_info { }; struct scmi_power_info { + u32 version; int num_domains; u64 stats_addr; u32 stats_size; @@ -207,6 +208,7 @@ static int scmi_power_protocol_init(struct scmi_handle *handle) scmi_power_domain_attributes_get(handle, domain, dom); } + pinfo->version = version; handle->power_ops = &power_ops; handle->power_priv = pinfo; diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index ab42c21c5517..de73054554f3 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c @@ -48,6 +48,7 @@ struct reset_dom_info { }; struct scmi_reset_info { + u32 version; int num_domains; struct reset_dom_info *dom_info; }; @@ -217,6 +218,7 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle) scmi_reset_domain_attributes_get(handle, domain, dom); } + pinfo->version = version; handle->reset_ops = &reset_ops; handle->reset_priv = pinfo; diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index 87f737e01473..bafbfe358f97 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -112,7 +112,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) } static const struct scmi_device_id scmi_id_table[] = { - { SCMI_PROTOCOL_POWER }, + { SCMI_PROTOCOL_POWER, "genpd" }, { }, }; MODULE_DEVICE_TABLE(scmi, scmi_id_table); diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index a400ea805fc2..eba61b9c1f53 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -68,6 +68,7 @@ struct scmi_msg_sensor_reading_get { }; struct sensors_info { + u32 version; int num_sensors; int max_requests; u64 reg_addr; @@ -294,6 +295,7 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle) scmi_sensor_description_get(handle, sinfo); + sinfo->version = version; handle->sensor_ops = &sensor_ops; handle->sensor_priv = sinfo; diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c index da04fdae62a1..835ece9c00f1 100644 --- a/drivers/firmware/broadcom/bcm47xx_nvram.c +++ b/drivers/firmware/broadcom/bcm47xx_nvram.c @@ -120,7 +120,7 @@ int bcm47xx_nvram_init_from_mem(u32 base, u32 lim) void __iomem *iobase; int err; - iobase = ioremap_nocache(base, lim); + iobase = ioremap(base, lim); if (!iobase) return -ENOMEM; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index bcc378c19ebe..ecc83e2f032c 100644 --- a/drivers/firmware/efi/Kconfig +++ 
b/drivers/firmware/efi/Kconfig @@ -215,6 +215,28 @@ config EFI_RCI2_TABLE Say Y here for Dell EMC PowerEdge systems. +config EFI_DISABLE_PCI_DMA + bool "Clear Busmaster bit on PCI bridges during ExitBootServices()" + help + Disable the busmaster bit in the control register on all PCI bridges + while calling ExitBootServices() and passing control to the runtime + kernel. System firmware may configure the IOMMU to prevent malicious + PCI devices from being able to attack the OS via DMA. However, since + firmware can't guarantee that the OS is IOMMU-aware, it will tear + down IOMMU configuration when ExitBootServices() is called. This + leaves a window between where a hostile device could still cause + damage before Linux configures the IOMMU again. + + If you say Y here, the EFI stub will clear the busmaster bit on all + PCI bridges before ExitBootServices() is called. This will prevent + any malicious PCI devices from being able to perform DMA until the + kernel reenables busmastering after configuring the IOMMU. + + This option will cause failures with some poorly behaved hardware + and should not be enabled without testing. The kernel commandline + options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma" + may be used to override this option. + endmenu config UEFI_CPER diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 904fa09e6a6b..d99f5b0c8a09 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -10,10 +10,12 @@ #define pr_fmt(fmt) "efi: " fmt #include <linux/efi.h> +#include <linux/fwnode.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/mm_types.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/platform_device.h> #include <linux/screen_info.h> @@ -276,15 +278,112 @@ void __init efi_init(void) efi_memmap_unmap(); } +static bool efifb_overlaps_pci_range(const struct of_pci_range *range) +{ + u64 fb_base = screen_info.lfb_base; + + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) + fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32; + + return fb_base >= range->cpu_addr && + fb_base < (range->cpu_addr + range->size); +} + +static struct device_node *find_pci_overlap_node(void) +{ + struct device_node *np; + + for_each_node_by_type(np, "pci") { + struct of_pci_range_parser parser; + struct of_pci_range range; + int err; + + err = of_pci_range_parser_init(&parser, np); + if (err) { + pr_warn("of_pci_range_parser_init() failed: %d\n", err); + continue; + } + + for_each_of_pci_range(&parser, &range) + if (efifb_overlaps_pci_range(&range)) + return np; + } + return NULL; +} + +/* + * If the efifb framebuffer is backed by a PCI graphics controller, we have + * to ensure that this relation is expressed using a device link when + * running in DT mode, or the probe order may be reversed, resulting in a + * resource reservation conflict on the memory window that the efifb + * framebuffer steals from the PCIe host bridge. + */ +static int efifb_add_links(const struct fwnode_handle *fwnode, + struct device *dev) +{ + struct device_node *sup_np; + struct device *sup_dev; + + sup_np = find_pci_overlap_node(); + + /* + * If there's no PCI graphics controller backing the efifb, we are + * done here. + */ + if (!sup_np) + return 0; + + sup_dev = get_dev_from_fwnode(&sup_np->fwnode); + of_node_put(sup_np); + + /* + * Return -ENODEV if the PCI graphics controller device hasn't been + * registered yet. 
This ensures that efifb isn't allowed to probe + * and this function is retried again when new devices are + * registered. + */ + if (!sup_dev) + return -ENODEV; + + /* + * If this fails, retrying this function at a later point won't + * change anything. So, don't return an error after this. + */ + if (!device_link_add(dev, sup_dev, 0)) + dev_warn(dev, "device_link_add() failed\n"); + + put_device(sup_dev); + + return 0; +} + +static const struct fwnode_operations efifb_fwnode_ops = { + .add_links = efifb_add_links, +}; + +static struct fwnode_handle efifb_fwnode = { + .ops = &efifb_fwnode_ops, +}; + static int __init register_gop_device(void) { - void *pd; + struct platform_device *pd; + int err; if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) return 0; - pd = platform_device_register_data(NULL, "efi-framebuffer", 0, - &screen_info, sizeof(screen_info)); - return PTR_ERR_OR_ZERO(pd); + pd = platform_device_alloc("efi-framebuffer", 0); + if (!pd) + return -ENOMEM; + + if (IS_ENABLED(CONFIG_PCI)) + pd->dev.fwnode = &efifb_fwnode; + + err = platform_device_add_data(pd, &screen_info, sizeof(screen_info)); + if (err) + return err; + + return platform_device_add(pd); } subsys_initcall(register_gop_device); diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 899b803842bb..9dda2602c862 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -27,7 +27,7 @@ extern u64 efi_system_table; -#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS +#if defined(CONFIG_PTDUMP_DEBUGFS) && defined(CONFIG_ARM64) #include <asm/ptdump.h> static struct ptdump_info efi_ptdump_info = { diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index b1395133389e..d3067cbd5114 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/highmem.h> +#include <linux/io.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/efi.h> diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 2b02cb165f16..621220ab3d0e 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -908,7 +908,7 @@ u64 efi_mem_attributes(unsigned long phys_addr) * * Search in the EFI memory map for the region covering @phys_addr. * Returns the EFI memory type if the region was found in the memory - * map, EFI_RESERVED_TYPE (zero) otherwise. + * map, -EINVAL otherwise. 
*/ int efi_mem_type(unsigned long phys_addr) { diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c index bb9fc70d0cfa..6e0f34a38171 100644 --- a/drivers/firmware/efi/fake_mem.c +++ b/drivers/firmware/efi/fake_mem.c @@ -34,46 +34,45 @@ static int __init cmp_fake_mem(const void *x1, const void *x2) return 0; } -void __init efi_fake_memmap(void) +static void __init efi_fake_range(struct efi_mem_range *efi_range) { + struct efi_memory_map_data data = { 0 }; int new_nr_map = efi.memmap.nr_map; efi_memory_desc_t *md; - phys_addr_t new_memmap_phy; void *new_memmap; - int i; - - if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem) - return; /* count up the number of EFI memory descriptor */ - for (i = 0; i < nr_fake_mem; i++) { - for_each_efi_memory_desc(md) { - struct range *r = &efi_fake_mems[i].range; - - new_nr_map += efi_memmap_split_count(md, r); - } - } + for_each_efi_memory_desc(md) + new_nr_map += efi_memmap_split_count(md, &efi_range->range); /* allocate memory for new EFI memmap */ - new_memmap_phy = efi_memmap_alloc(new_nr_map); - if (!new_memmap_phy) + if (efi_memmap_alloc(new_nr_map, &data) != 0) return; /* create new EFI memmap */ - new_memmap = early_memremap(new_memmap_phy, - efi.memmap.desc_size * new_nr_map); + new_memmap = early_memremap(data.phys_map, data.size); if (!new_memmap) { - memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map); + __efi_memmap_free(data.phys_map, data.size, data.flags); return; } - for (i = 0; i < nr_fake_mem; i++) - efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]); + efi_memmap_insert(&efi.memmap, new_memmap, efi_range); /* swap into new EFI memmap */ - early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map); + early_memunmap(new_memmap, data.size); + + efi_memmap_install(&data); +} + +void __init efi_fake_memmap(void) +{ + int i; - efi_memmap_install(new_memmap_phy, new_nr_map); + if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem) + return; + + for (i = 0; i < nr_fake_mem; i++) + efi_fake_range(&efi_fake_mems[i]); /* print new EFI memmap */ efi_print_memmap(); diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index d54b8ff9c79a..9cf9173f948e 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -41,7 +41,7 @@ OBJECT_FILES_NON_STANDARD := y KCOV_INSTRUMENT := n lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \ - random.o + random.o pci.o # include the stub's generic dependencies from lib/ when building for ARM/arm64 arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 817237ce2420..7bbef4a67350 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -37,16 +37,14 @@ static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; -void efi_char16_printk(efi_system_table_t *sys_table_arg, - efi_char16_t *str) -{ - struct efi_simple_text_output_protocol *out; +static efi_system_table_t *__efistub_global sys_table; - out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out; - out->output_string(out, str); +__pure efi_system_table_t *efi_system_table(void) +{ + return sys_table; } -static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg) +static struct screen_info *setup_graphics(void) { efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; efi_status_t status; @@ -55,27 +53,27 @@ static struct screen_info 
*setup_graphics(efi_system_table_t *sys_table_arg) struct screen_info *si = NULL; size = 0; - status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL, - &gop_proto, NULL, &size, gop_handle); + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, + &gop_proto, NULL, &size, gop_handle); if (status == EFI_BUFFER_TOO_SMALL) { - si = alloc_screen_info(sys_table_arg); + si = alloc_screen_info(); if (!si) return NULL; - efi_setup_gop(sys_table_arg, si, &gop_proto, size); + efi_setup_gop(si, &gop_proto, size); } return si; } -void install_memreserve_table(efi_system_table_t *sys_table_arg) +void install_memreserve_table(void) { struct linux_efi_memreserve *rsv; efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; efi_status_t status; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), - (void **)&rsv); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), + (void **)&rsv); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n"); + pr_efi_err("Failed to allocate memreserve entry!\n"); return; } @@ -83,11 +81,10 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) rsv->size = 0; atomic_set(&rsv->count, 0); - status = efi_call_early(install_configuration_table, - &memreserve_table_guid, - rsv); + status = efi_bs_call(install_configuration_table, + &memreserve_table_guid, rsv); if (status != EFI_SUCCESS) - pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n"); + pr_efi_err("Failed to install memreserve config table!\n"); } @@ -97,8 +94,7 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) * must be reserved. On failure it is required to free all * all allocations it has made. */ -efi_status_t handle_kernel_image(efi_system_table_t *sys_table, - unsigned long *image_addr, +efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, @@ -110,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, * for both archictectures, with the arch-specific code provided in the * handle_kernel_image() function. */ -unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, +unsigned long efi_entry(void *handle, efi_system_table_t *sys_table_arg, unsigned long *image_addr) { efi_loaded_image_t *image; @@ -131,11 +127,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, enum efi_secureboot_mode secure_boot; struct screen_info *si; + sys_table = sys_table_arg; + /* Check if we were booted by the EFI firmware */ if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) goto fail; - status = check_platform_features(sys_table); + status = check_platform_features(); if (status != EFI_SUCCESS) goto fail; @@ -147,13 +145,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, status = sys_table->boottime->handle_protocol(handle, &loaded_image_proto, (void *)&image); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to get loaded image protocol\n"); + pr_efi_err("Failed to get loaded image protocol\n"); goto fail; } - dram_base = get_dram_base(sys_table); + dram_base = get_dram_base(); if (dram_base == EFI_ERROR) { - pr_efi_err(sys_table, "Failed to find DRAM base\n"); + pr_efi_err("Failed to find DRAM base\n"); goto fail; } @@ -162,9 +160,9 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, * protocol. 
We are going to copy the command line into the * device tree, so this can be allocated anywhere. */ - cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size); + cmdline_ptr = efi_convert_cmdline(image, &cmdline_size); if (!cmdline_ptr) { - pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n"); + pr_efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n"); goto fail; } @@ -176,25 +174,25 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) efi_parse_options(cmdline_ptr); - pr_efi(sys_table, "Booting Linux Kernel...\n"); + pr_efi("Booting Linux Kernel...\n"); - si = setup_graphics(sys_table); + si = setup_graphics(); - status = handle_kernel_image(sys_table, image_addr, &image_size, + status = handle_kernel_image(image_addr, &image_size, &reserve_addr, &reserve_size, dram_base, image); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to relocate kernel\n"); + pr_efi_err("Failed to relocate kernel\n"); goto fail_free_cmdline; } - efi_retrieve_tpm2_eventlog(sys_table); + efi_retrieve_tpm2_eventlog(); /* Ask the firmware to clear memory on unclean shutdown */ - efi_enable_reset_attack_mitigation(sys_table); + efi_enable_reset_attack_mitigation(); - secure_boot = efi_get_secureboot(sys_table); + secure_boot = efi_get_secureboot(); /* * Unauthenticated device tree data is a security hazard, so ignore @@ -204,39 +202,38 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) || secure_boot != efi_secureboot_mode_disabled) { if (strstr(cmdline_ptr, "dtb=")) - pr_efi(sys_table, "Ignoring DTB from command line.\n"); + pr_efi("Ignoring DTB from command line.\n"); } else { - status = handle_cmdline_files(sys_table, image, cmdline_ptr, - "dtb=", + status = handle_cmdline_files(image, cmdline_ptr, "dtb=", ~0UL, &fdt_addr, &fdt_size); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to load device tree!\n"); + pr_efi_err("Failed to load device tree!\n"); goto fail_free_image; } } if (fdt_addr) { - pr_efi(sys_table, "Using DTB from command line\n"); + pr_efi("Using DTB from command line\n"); } else { /* Look for a device tree configuration table entry. 
*/ - fdt_addr = (uintptr_t)get_fdt(sys_table, &fdt_size); + fdt_addr = (uintptr_t)get_fdt(&fdt_size); if (fdt_addr) - pr_efi(sys_table, "Using DTB from configuration table\n"); + pr_efi("Using DTB from configuration table\n"); } if (!fdt_addr) - pr_efi(sys_table, "Generating empty DTB\n"); + pr_efi("Generating empty DTB\n"); - status = handle_cmdline_files(sys_table, image, cmdline_ptr, "initrd=", + status = handle_cmdline_files(image, cmdline_ptr, "initrd=", efi_get_max_initrd_addr(dram_base, *image_addr), (unsigned long *)&initrd_addr, (unsigned long *)&initrd_size); if (status != EFI_SUCCESS) - pr_efi_err(sys_table, "Failed initrd from command line!\n"); + pr_efi_err("Failed initrd from command line!\n"); - efi_random_get_seed(sys_table); + efi_random_get_seed(); /* hibernation expects the runtime regions to stay in the same place */ if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) { @@ -251,18 +248,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, EFI_RT_VIRTUAL_SIZE; u32 rnd; - status = efi_get_random_bytes(sys_table, sizeof(rnd), - (u8 *)&rnd); + status = efi_get_random_bytes(sizeof(rnd), (u8 *)&rnd); if (status == EFI_SUCCESS) { virtmap_base = EFI_RT_VIRTUAL_BASE + (((headroom >> 21) * rnd) >> (32 - 21)); } } - install_memreserve_table(sys_table); + install_memreserve_table(); new_fdt_addr = fdt_addr; - status = allocate_new_fdt_and_exit_boot(sys_table, handle, + status = allocate_new_fdt_and_exit_boot(handle, &new_fdt_addr, efi_get_max_fdt_addr(dram_base), initrd_addr, initrd_size, cmdline_ptr, fdt_addr, fdt_size); @@ -275,17 +271,17 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (status == EFI_SUCCESS) return new_fdt_addr; - pr_efi_err(sys_table, "Failed to update FDT and exit boot services\n"); + pr_efi_err("Failed to update FDT and exit boot services\n"); - efi_free(sys_table, initrd_size, initrd_addr); - efi_free(sys_table, fdt_size, fdt_addr); + efi_free(initrd_size, initrd_addr); + efi_free(fdt_size, fdt_addr); fail_free_image: - efi_free(sys_table, image_size, *image_addr); - efi_free(sys_table, reserve_size, reserve_addr); + efi_free(image_size, *image_addr); + efi_free(reserve_size, reserve_addr); fail_free_cmdline: - free_screen_info(sys_table, si); - efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr); + free_screen_info(si); + efi_free(cmdline_size, (unsigned long)cmdline_ptr); fail: return EFI_ERROR; } diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c index 4566640de650..7b2a6382b647 100644 --- a/drivers/firmware/efi/libstub/arm32-stub.c +++ b/drivers/firmware/efi/libstub/arm32-stub.c @@ -7,7 +7,7 @@ #include "efistub.h" -efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) +efi_status_t check_platform_features(void) { int block; @@ -18,7 +18,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) /* LPAE kernels need compatible hardware */ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0); if (block < 5) { - pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n"); + pr_efi_err("This LPAE kernel is not supported by your CPU\n"); return EFI_UNSUPPORTED; } return EFI_SUCCESS; @@ -26,7 +26,7 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID; -struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg) +struct screen_info *alloc_screen_info(void) { struct screen_info *si; efi_status_t 
status; @@ -37,32 +37,31 @@ struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg) * its contents while we hand over to the kernel proper from the * decompressor. */ - status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA, - sizeof(*si), (void **)&si); + status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, + sizeof(*si), (void **)&si); if (status != EFI_SUCCESS) return NULL; - status = efi_call_early(install_configuration_table, - &screen_info_guid, si); + status = efi_bs_call(install_configuration_table, + &screen_info_guid, si); if (status == EFI_SUCCESS) return si; - efi_call_early(free_pool, si); + efi_bs_call(free_pool, si); return NULL; } -void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si) +void free_screen_info(struct screen_info *si) { if (!si) return; - efi_call_early(install_configuration_table, &screen_info_guid, NULL); - efi_call_early(free_pool, si); + efi_bs_call(install_configuration_table, &screen_info_guid, NULL); + efi_bs_call(free_pool, si); } -static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, - unsigned long dram_base, +static efi_status_t reserve_kernel_base(unsigned long dram_base, unsigned long *reserve_addr, unsigned long *reserve_size) { @@ -92,8 +91,8 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, */ alloc_addr = dram_base + MAX_UNCOMP_KERNEL_SIZE; nr_pages = MAX_UNCOMP_KERNEL_SIZE / EFI_PAGE_SIZE; - status = efi_call_early(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS, - EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS, + EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr); if (status == EFI_SUCCESS) { if (alloc_addr == dram_base) { *reserve_addr = alloc_addr; @@ -119,10 +118,9 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, * released to the OS after ExitBootServices(), the decompressor can * safely overwrite them. 
*/ - status = efi_get_memory_map(sys_table_arg, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, - "reserve_kernel_base(): Unable to retrieve memory map.\n"); + pr_efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n"); return status; } @@ -158,14 +156,13 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, start = max(start, (u64)dram_base); end = min(end, (u64)dram_base + MAX_UNCOMP_KERNEL_SIZE); - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, - (end - start) / EFI_PAGE_SIZE, - &start); + status = efi_bs_call(allocate_pages, + EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, + (end - start) / EFI_PAGE_SIZE, + &start); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, - "reserve_kernel_base(): alloc failed.\n"); + pr_efi_err("reserve_kernel_base(): alloc failed.\n"); goto out; } break; @@ -188,12 +185,11 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg, status = EFI_SUCCESS; out: - efi_call_early(free_pool, memory_map); + efi_bs_call(free_pool, memory_map); return status; } -efi_status_t handle_kernel_image(efi_system_table_t *sys_table, - unsigned long *image_addr, +efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, @@ -221,10 +217,9 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, */ kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE; - status = reserve_kernel_base(sys_table, kernel_base, reserve_addr, - reserve_size); + status = reserve_kernel_base(kernel_base, reserve_addr, reserve_size); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n"); + pr_efi_err("Unable to allocate memory for uncompressed kernel.\n"); return status; } @@ -233,12 +228,11 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, * memory window. */ *image_size = image->image_size; - status = efi_relocate_kernel(sys_table, image_addr, *image_size, - *image_size, + status = efi_relocate_kernel(image_addr, *image_size, *image_size, kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Failed to relocate kernel.\n"); - efi_free(sys_table, *reserve_size, *reserve_addr); + pr_efi_err("Failed to relocate kernel.\n"); + efi_free(*reserve_size, *reserve_addr); *reserve_size = 0; return status; } @@ -249,10 +243,10 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table, * address at which the zImage is loaded. 
*/ if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) { - pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n"); - efi_free(sys_table, *reserve_size, *reserve_addr); + pr_efi_err("Failed to relocate kernel, no low memory available.\n"); + efi_free(*reserve_size, *reserve_addr); *reserve_size = 0; - efi_free(sys_table, *image_size, *image_addr); + efi_free(*image_size, *image_addr); *image_size = 0; return EFI_LOAD_ERROR; } diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 1550d244e996..2915b44132e6 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -21,7 +21,7 @@ #include "efistub.h" -efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) +efi_status_t check_platform_features(void) { u64 tg; @@ -32,16 +32,15 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg) tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf; if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) { if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) - pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n"); + pr_efi_err("This 64 KB granular kernel is not supported by your CPU\n"); else - pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n"); + pr_efi_err("This 16 KB granular kernel is not supported by your CPU\n"); return EFI_UNSUPPORTED; } return EFI_SUCCESS; } -efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, - unsigned long *image_addr, +efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, unsigned long *reserve_size, @@ -56,17 +55,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { if (!nokaslr()) { - status = efi_get_random_bytes(sys_table_arg, - sizeof(phys_seed), + status = efi_get_random_bytes(sizeof(phys_seed), (u8 *)&phys_seed); if (status == EFI_NOT_FOUND) { - pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n"); + pr_efi("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n"); } else if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n"); + pr_efi_err("efi_get_random_bytes() failed\n"); return status; } } else { - pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n"); + pr_efi("KASLR disabled on kernel command line\n"); } } @@ -108,7 +106,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, * locate the kernel at a randomized offset in physical memory. 
*/ *reserve_size = kernel_memsize + offset; - status = efi_random_alloc(sys_table_arg, *reserve_size, + status = efi_random_alloc(*reserve_size, MIN_KIMG_ALIGN, reserve_addr, (u32)phys_seed); @@ -131,19 +129,19 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, *image_addr = *reserve_addr = preferred_offset; *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN); - status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, - *reserve_size / EFI_PAGE_SIZE, - (efi_physical_addr_t *)reserve_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, + *reserve_size / EFI_PAGE_SIZE, + (efi_physical_addr_t *)reserve_addr); } if (status != EFI_SUCCESS) { *reserve_size = kernel_memsize + TEXT_OFFSET; - status = efi_low_alloc(sys_table_arg, *reserve_size, + status = efi_low_alloc(*reserve_size, MIN_KIMG_ALIGN, reserve_addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to relocate kernel\n"); + pr_efi_err("Failed to relocate kernel\n"); *reserve_size = 0; return status; } diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index e02579907f2e..74ddfb496140 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -27,24 +27,26 @@ */ #define EFI_READ_CHUNK_SIZE (1024 * 1024) -static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE; +static unsigned long efi_chunk_size = EFI_READ_CHUNK_SIZE; -static int __section(.data) __nokaslr; -static int __section(.data) __quiet; -static int __section(.data) __novamap; -static bool __section(.data) efi_nosoftreserve; +static bool __efistub_global efi_nokaslr; +static bool __efistub_global efi_quiet; +static bool __efistub_global efi_novamap; +static bool __efistub_global efi_nosoftreserve; +static bool __efistub_global efi_disable_pci_dma = + IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA); -int __pure nokaslr(void) +bool __pure nokaslr(void) { - return __nokaslr; + return efi_nokaslr; } -int __pure is_quiet(void) +bool __pure is_quiet(void) { - return __quiet; + return efi_quiet; } -int __pure novamap(void) +bool __pure novamap(void) { - return __novamap; + return efi_novamap; } bool __pure __efi_soft_reserve_enabled(void) { @@ -58,7 +60,7 @@ struct file_info { u64 size; }; -void efi_printk(efi_system_table_t *sys_table_arg, char *str) +void efi_printk(char *str) { char *s8; @@ -68,10 +70,10 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str) ch[0] = *s8; if (*s8 == '\n') { efi_char16_t nl[2] = { '\r', 0 }; - efi_char16_printk(sys_table_arg, nl); + efi_char16_printk(nl); } - efi_char16_printk(sys_table_arg, ch); + efi_char16_printk(ch); } } @@ -84,8 +86,7 @@ static inline bool mmap_has_headroom(unsigned long buff_size, return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS; } -efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, - struct efi_boot_memmap *map) +efi_status_t efi_get_memory_map(struct efi_boot_memmap *map) { efi_memory_desc_t *m = NULL; efi_status_t status; @@ -96,19 +97,19 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, *map->map_size = *map->desc_size * 32; *map->buff_size = *map->map_size; again: - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - *map->map_size, (void **)&m); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + *map->map_size, (void **)&m); if (status != EFI_SUCCESS) goto fail; *map->desc_size = 0; key = 0; - status = efi_call_early(get_memory_map, map->map_size, m, - &key, 
map->desc_size, &desc_version); + status = efi_bs_call(get_memory_map, map->map_size, m, + &key, map->desc_size, &desc_version); if (status == EFI_BUFFER_TOO_SMALL || !mmap_has_headroom(*map->buff_size, *map->map_size, *map->desc_size)) { - efi_call_early(free_pool, m); + efi_bs_call(free_pool, m); /* * Make sure there is some entries of headroom so that the * buffer can be reused for a new map after allocations are @@ -122,7 +123,7 @@ again: } if (status != EFI_SUCCESS) - efi_call_early(free_pool, m); + efi_bs_call(free_pool, m); if (map->key_ptr && status == EFI_SUCCESS) *map->key_ptr = key; @@ -135,7 +136,7 @@ fail: } -unsigned long get_dram_base(efi_system_table_t *sys_table_arg) +unsigned long get_dram_base(void) { efi_status_t status; unsigned long map_size, buff_size; @@ -151,7 +152,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg) boot_map.key_ptr = NULL; boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &boot_map); + status = efi_get_memory_map(&boot_map); if (status != EFI_SUCCESS) return membase; @@ -164,7 +165,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg) } } - efi_call_early(free_pool, map.map); + efi_bs_call(free_pool, map.map); return membase; } @@ -172,8 +173,7 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg) /* * Allocate at the highest possible address that is not above 'max'. */ -efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, - unsigned long size, unsigned long align, +efi_status_t efi_high_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long max) { unsigned long map_size, desc_size, buff_size; @@ -191,7 +191,7 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, boot_map.key_ptr = NULL; boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &boot_map); + status = efi_get_memory_map(&boot_map); if (status != EFI_SUCCESS) goto fail; @@ -251,9 +251,8 @@ again: if (!max_addr) status = EFI_NOT_FOUND; else { - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, - nr_pages, &max_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, nr_pages, &max_addr); if (status != EFI_SUCCESS) { max = max_addr; max_addr = 0; @@ -263,7 +262,7 @@ again: *addr = max_addr; } - efi_call_early(free_pool, map); + efi_bs_call(free_pool, map); fail: return status; } @@ -271,8 +270,7 @@ fail: /* * Allocate at the lowest possible address that is not below 'min'. 
*/ -efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, - unsigned long size, unsigned long align, +efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, unsigned long *addr, unsigned long min) { unsigned long map_size, desc_size, buff_size; @@ -289,7 +287,7 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, boot_map.key_ptr = NULL; boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &boot_map); + status = efi_get_memory_map(&boot_map); if (status != EFI_SUCCESS) goto fail; @@ -331,9 +329,8 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, if ((start + size) > end) continue; - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, - nr_pages, &start); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, nr_pages, &start); if (status == EFI_SUCCESS) { *addr = start; break; @@ -343,13 +340,12 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg, if (i == map_size / desc_size) status = EFI_NOT_FOUND; - efi_call_early(free_pool, map); + efi_bs_call(free_pool, map); fail: return status; } -void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, - unsigned long addr) +void efi_free(unsigned long size, unsigned long addr) { unsigned long nr_pages; @@ -357,12 +353,11 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, return; nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; - efi_call_early(free_pages, addr, nr_pages); + efi_bs_call(free_pages, addr, nr_pages); } -static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, - efi_char16_t *filename_16, void **handle, - u64 *file_sz) +static efi_status_t efi_file_size(void *__fh, efi_char16_t *filename_16, + void **handle, u64 *file_sz) { efi_file_handle_t *h, *fh = __fh; efi_file_info_t *info; @@ -370,81 +365,75 @@ static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, efi_guid_t info_guid = EFI_FILE_INFO_ID; unsigned long info_sz; - status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16, - EFI_FILE_MODE_READ, (u64)0); + status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, 0); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to open file: "); - efi_char16_printk(sys_table_arg, filename_16); - efi_printk(sys_table_arg, "\n"); + efi_printk("Failed to open file: "); + efi_char16_printk(filename_16); + efi_printk("\n"); return status; } *handle = h; info_sz = 0; - status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, - &info_sz, NULL); + status = h->get_info(h, &info_guid, &info_sz, NULL); if (status != EFI_BUFFER_TOO_SMALL) { - efi_printk(sys_table_arg, "Failed to get file info size\n"); + efi_printk("Failed to get file info size\n"); return status; } grow: - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - info_sz, (void **)&info); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, info_sz, + (void **)&info); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to alloc mem for file info\n"); + efi_printk("Failed to alloc mem for file info\n"); return status; } - status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, - &info_sz, info); + status = h->get_info(h, &info_guid, &info_sz, info); if (status == EFI_BUFFER_TOO_SMALL) { - efi_call_early(free_pool, info); + efi_bs_call(free_pool, info); goto grow; } *file_sz = info->file_size; - efi_call_early(free_pool, info); + efi_bs_call(free_pool, info); if (status 
!= EFI_SUCCESS) - efi_printk(sys_table_arg, "Failed to get initrd info\n"); + efi_printk("Failed to get initrd info\n"); return status; } -static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr) +static efi_status_t efi_file_read(efi_file_handle_t *handle, + unsigned long *size, void *addr) { - return efi_call_proto(efi_file_handle, read, handle, size, addr); + return handle->read(handle, size, addr); } -static efi_status_t efi_file_close(void *handle) +static efi_status_t efi_file_close(efi_file_handle_t *handle) { - return efi_call_proto(efi_file_handle, close, handle); + return handle->close(handle); } -static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, - efi_loaded_image_t *image, +static efi_status_t efi_open_volume(efi_loaded_image_t *image, efi_file_handle_t **__fh) { efi_file_io_interface_t *io; efi_file_handle_t *fh; efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; efi_status_t status; - void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image, - device_handle, - image); + efi_handle_t handle = image->device_handle; - status = efi_call_early(handle_protocol, handle, - &fs_proto, (void **)&io); + status = efi_bs_call(handle_protocol, handle, &fs_proto, (void **)&io); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to handle fs_proto\n"); + efi_printk("Failed to handle fs_proto\n"); return status; } - status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh); + status = io->open_volume(io, &fh); if (status != EFI_SUCCESS) - efi_printk(sys_table_arg, "Failed to open volume\n"); + efi_printk("Failed to open volume\n"); else *__fh = fh; @@ -465,11 +454,11 @@ efi_status_t efi_parse_options(char const *cmdline) str = strstr(cmdline, "nokaslr"); if (str == cmdline || (str && str > cmdline && *(str - 1) == ' ')) - __nokaslr = 1; + efi_nokaslr = true; str = strstr(cmdline, "quiet"); if (str == cmdline || (str && str > cmdline && *(str - 1) == ' ')) - __quiet = 1; + efi_quiet = true; /* * If no EFI parameters were specified on the cmdline we've got @@ -489,18 +478,28 @@ efi_status_t efi_parse_options(char const *cmdline) while (*str && *str != ' ') { if (!strncmp(str, "nochunk", 7)) { str += strlen("nochunk"); - __chunk_size = -1UL; + efi_chunk_size = -1UL; } if (!strncmp(str, "novamap", 7)) { str += strlen("novamap"); - __novamap = 1; + efi_novamap = true; } if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) && !strncmp(str, "nosoftreserve", 7)) { str += strlen("nosoftreserve"); - efi_nosoftreserve = 1; + efi_nosoftreserve = true; + } + + if (!strncmp(str, "disable_early_pci_dma", 21)) { + str += strlen("disable_early_pci_dma"); + efi_disable_pci_dma = true; + } + + if (!strncmp(str, "no_disable_early_pci_dma", 24)) { + str += strlen("no_disable_early_pci_dma"); + efi_disable_pci_dma = false; } /* Group words together, delimited by "," */ @@ -520,8 +519,7 @@ efi_status_t efi_parse_options(char const *cmdline) * We only support loading a file from the same filesystem as * the kernel image. 
*/ -efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, - efi_loaded_image_t *image, +efi_status_t handle_cmdline_files(efi_loaded_image_t *image, char *cmd_line, char *option_string, unsigned long max_addr, unsigned long *load_addr, @@ -570,10 +568,10 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, if (!nr_files) return EFI_SUCCESS; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - nr_files * sizeof(*files), (void **)&files); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + nr_files * sizeof(*files), (void **)&files); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to alloc mem for file handle list\n"); + pr_efi_err("Failed to alloc mem for file handle list\n"); goto fail; } @@ -612,13 +610,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, /* Only open the volume once. */ if (!i) { - status = efi_open_volume(sys_table_arg, image, &fh); + status = efi_open_volume(image, &fh); if (status != EFI_SUCCESS) goto free_files; } - status = efi_file_size(sys_table_arg, fh, filename_16, - (void **)&file->handle, &file->size); + status = efi_file_size(fh, filename_16, (void **)&file->handle, + &file->size); if (status != EFI_SUCCESS) goto close_handles; @@ -633,16 +631,16 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, * so allocate enough memory for all the files. This is used * for loading multiple files. */ - status = efi_high_alloc(sys_table_arg, file_size_total, 0x1000, - &file_addr, max_addr); + status = efi_high_alloc(file_size_total, 0x1000, &file_addr, + max_addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to alloc highmem for files\n"); + pr_efi_err("Failed to alloc highmem for files\n"); goto close_handles; } /* We've run out of free low memory. */ if (file_addr > max_addr) { - pr_efi_err(sys_table_arg, "We've run out of free low memory\n"); + pr_efi_err("We've run out of free low memory\n"); status = EFI_INVALID_PARAMETER; goto free_file_total; } @@ -655,8 +653,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, while (size) { unsigned long chunksize; - if (IS_ENABLED(CONFIG_X86) && size > __chunk_size) - chunksize = __chunk_size; + if (IS_ENABLED(CONFIG_X86) && size > efi_chunk_size) + chunksize = efi_chunk_size; else chunksize = size; @@ -664,7 +662,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, &chunksize, (void *)addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to read file\n"); + pr_efi_err("Failed to read file\n"); goto free_file_total; } addr += chunksize; @@ -676,7 +674,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, } - efi_call_early(free_pool, files); + efi_bs_call(free_pool, files); *load_addr = file_addr; *load_size = file_size_total; @@ -684,13 +682,13 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, return status; free_file_total: - efi_free(sys_table_arg, file_size_total, file_addr); + efi_free(file_size_total, file_addr); close_handles: for (k = j; k < i; k++) efi_file_close(files[k].handle); free_files: - efi_call_early(free_pool, files); + efi_bs_call(free_pool, files); fail: *load_addr = 0; *load_size = 0; @@ -707,8 +705,7 @@ fail: * address is not available the lowest available address will * be used. 
*/ -efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, - unsigned long *image_addr, +efi_status_t efi_relocate_kernel(unsigned long *image_addr, unsigned long image_size, unsigned long alloc_size, unsigned long preferred_addr, @@ -737,20 +734,19 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, * as possible while respecting the required alignment. */ nr_pages = round_up(alloc_size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; - status = efi_call_early(allocate_pages, - EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA, - nr_pages, &efi_addr); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, nr_pages, &efi_addr); new_addr = efi_addr; /* * If preferred address allocation failed allocate as low as * possible. */ if (status != EFI_SUCCESS) { - status = efi_low_alloc_above(sys_table_arg, alloc_size, - alignment, &new_addr, min_addr); + status = efi_low_alloc_above(alloc_size, alignment, &new_addr, + min_addr); } if (status != EFI_SUCCESS) { - pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n"); + pr_efi_err("Failed to allocate usable memory for kernel.\n"); return status; } @@ -824,8 +820,7 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n) * Size of memory allocated return in *cmd_line_len. * Returns NULL on error. */ -char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, - efi_loaded_image_t *image, +char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len) { const u16 *s2; @@ -854,8 +849,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, options_bytes++; /* NUL termination */ - status = efi_high_alloc(sys_table_arg, options_bytes, 0, - &cmdline_addr, MAX_CMDLINE_ADDRESS); + status = efi_high_alloc(options_bytes, 0, &cmdline_addr, + MAX_CMDLINE_ADDRESS); if (status != EFI_SUCCESS) return NULL; @@ -877,24 +872,26 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, * specific structure may be passed to the function via priv. The client * function may be called multiple times. */ -efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, - void *handle, +efi_status_t efi_exit_boot_services(void *handle, struct efi_boot_memmap *map, void *priv, efi_exit_boot_map_processing priv_func) { efi_status_t status; - status = efi_get_memory_map(sys_table_arg, map); + status = efi_get_memory_map(map); if (status != EFI_SUCCESS) goto fail; - status = priv_func(sys_table_arg, map, priv); + status = priv_func(map, priv); if (status != EFI_SUCCESS) goto free_map; - status = efi_call_early(exit_boot_services, handle, *map->key_ptr); + if (efi_disable_pci_dma) + efi_pci_disable_bridge_busmaster(); + + status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); if (status == EFI_INVALID_PARAMETER) { /* @@ -911,23 +908,23 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, * to get_memory_map() is expected to succeed here. 
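A minimal caller sketch for the reworked efi_exit_boot_services() shown above (illustrative only; the example_* names are hypothetical). The callback runs against the final memory map, and once the call succeeds no further boot-services calls may be made:

struct example_priv {
        unsigned long entries;
};

static efi_status_t example_exit_boot_func(struct efi_boot_memmap *map,
                                           void *priv)
{
        struct example_priv *p = priv;

        /* Record how many descriptors the final memory map contains. */
        p->entries = *map->map_size / *map->desc_size;
        return EFI_SUCCESS;
}

static efi_status_t example_exit_boot(void *handle, struct efi_boot_memmap *map)
{
        struct example_priv priv = {};

        return efi_exit_boot_services(handle, map, &priv,
                                      example_exit_boot_func);
}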
*/ *map->map_size = *map->buff_size; - status = efi_call_early(get_memory_map, - map->map_size, - *map->map, - map->key_ptr, - map->desc_size, - map->desc_ver); + status = efi_bs_call(get_memory_map, + map->map_size, + *map->map, + map->key_ptr, + map->desc_size, + map->desc_ver); /* exit_boot_services() was called, thus cannot free */ if (status != EFI_SUCCESS) goto fail; - status = priv_func(sys_table_arg, map, priv); + status = priv_func(map, priv); /* exit_boot_services() was called, thus cannot free */ if (status != EFI_SUCCESS) goto fail; - status = efi_call_early(exit_boot_services, handle, *map->key_ptr); + status = efi_bs_call(exit_boot_services, handle, *map->key_ptr); } /* exit_boot_services() was called, thus cannot free */ @@ -937,38 +934,31 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg, return EFI_SUCCESS; free_map: - efi_call_early(free_pool, *map->map); + efi_bs_call(free_pool, *map->map); fail: return status; } -#define GET_EFI_CONFIG_TABLE(bits) \ -static void *get_efi_config_table##bits(efi_system_table_t *_sys_table, \ - efi_guid_t guid) \ -{ \ - efi_system_table_##bits##_t *sys_table; \ - efi_config_table_##bits##_t *tables; \ - int i; \ - \ - sys_table = (typeof(sys_table))_sys_table; \ - tables = (typeof(tables))(unsigned long)sys_table->tables; \ - \ - for (i = 0; i < sys_table->nr_tables; i++) { \ - if (efi_guidcmp(tables[i].guid, guid) != 0) \ - continue; \ - \ - return (void *)(unsigned long)tables[i].table; \ - } \ - \ - return NULL; \ +void *get_efi_config_table(efi_guid_t guid) +{ + unsigned long tables = efi_table_attr(efi_system_table(), tables); + int nr_tables = efi_table_attr(efi_system_table(), nr_tables); + int i; + + for (i = 0; i < nr_tables; i++) { + efi_config_table_t *t = (void *)tables; + + if (efi_guidcmp(t->guid, guid) == 0) + return efi_table_attr(t, table); + + tables += efi_is_native() ? 
sizeof(efi_config_table_t) + : sizeof(efi_config_table_32_t); + } + return NULL; } -GET_EFI_CONFIG_TABLE(32) -GET_EFI_CONFIG_TABLE(64) -void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid) +void efi_char16_printk(efi_char16_t *str) { - if (efi_is_64bit()) - return get_efi_config_table64(sys_table, guid); - else - return get_efi_config_table32(sys_table, guid); + efi_call_proto(efi_table_attr(efi_system_table(), con_out), + output_string, str); } diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 05739ae013c8..c244b165005e 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -25,22 +25,30 @@ #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE #endif -extern int __pure nokaslr(void); -extern int __pure is_quiet(void); -extern int __pure novamap(void); +#ifdef CONFIG_ARM +#define __efistub_global __section(.data) +#else +#define __efistub_global +#endif + +extern bool __pure nokaslr(void); +extern bool __pure is_quiet(void); +extern bool __pure novamap(void); + +extern __pure efi_system_table_t *efi_system_table(void); -#define pr_efi(sys_table, msg) do { \ - if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \ +#define pr_efi(msg) do { \ + if (!is_quiet()) efi_printk("EFI stub: "msg); \ } while (0) -#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg) +#define pr_efi_err(msg) efi_printk("EFI stub: ERROR: "msg) -void efi_char16_printk(efi_system_table_t *, efi_char16_t *); +void efi_char16_printk(efi_char16_t *); +void efi_char16_printk(efi_char16_t *); -unsigned long get_dram_base(efi_system_table_t *sys_table_arg); +unsigned long get_dram_base(void); -efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, - void *handle, +efi_status_t allocate_new_fdt_and_exit_boot(void *handle, unsigned long *new_fdt_addr, unsigned long max_addr, u64 initrd_addr, u64 initrd_size, @@ -48,22 +56,20 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, unsigned long fdt_addr, unsigned long fdt_size); -void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size); +void *get_fdt(unsigned long *fdt_size); void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, unsigned long desc_size, efi_memory_desc_t *runtime_map, int *count); -efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table, - unsigned long size, u8 *out); +efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); -efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, - unsigned long size, unsigned long align, +efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed); -efi_status_t check_platform_features(efi_system_table_t *sys_table_arg); +efi_status_t check_platform_features(void); -void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid); +void *get_efi_config_table(efi_guid_t guid); /* Helper macros for the usual case of using simple C variables: */ #ifndef fdt_setprop_inplace_var @@ -76,4 +82,12 @@ void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid); fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var)) #endif +#define get_efi_var(name, vendor, ...) \ + efi_rt_call(get_variable, (efi_char16_t *)(name), \ + (efi_guid_t *)(vendor), __VA_ARGS__) + +#define set_efi_var(name, vendor, ...) 
\ + efi_rt_call(set_variable, (efi_char16_t *)(name), \ + (efi_guid_t *)(vendor), __VA_ARGS__) + #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 0bf0190917e0..0a91e5232127 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -16,7 +16,7 @@ #define EFI_DT_ADDR_CELLS_DEFAULT 2 #define EFI_DT_SIZE_CELLS_DEFAULT 2 -static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt) +static void fdt_update_cell_size(void *fdt) { int offset; @@ -27,8 +27,7 @@ static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt) fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT); } -static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, - unsigned long orig_fdt_size, +static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size, void *fdt, int new_fdt_size, char *cmdline_ptr, u64 initrd_addr, u64 initrd_size) { @@ -40,7 +39,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, /* Do some checks on provided FDT, if it exists: */ if (orig_fdt) { if (fdt_check_header(orig_fdt)) { - pr_efi_err(sys_table, "Device Tree header not valid!\n"); + pr_efi_err("Device Tree header not valid!\n"); return EFI_LOAD_ERROR; } /* @@ -48,7 +47,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, * configuration table: */ if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) { - pr_efi_err(sys_table, "Truncated device tree! foo!\n"); + pr_efi_err("Truncated device tree! foo!\n"); return EFI_LOAD_ERROR; } } @@ -62,7 +61,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, * Any failure from the following function is * non-critical: */ - fdt_update_cell_size(sys_table, fdt); + fdt_update_cell_size(fdt); } } @@ -111,7 +110,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, /* Add FDT entries for EFI runtime services in chosen node. */ node = fdt_subnode_offset(fdt, 0, "chosen"); - fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table); + fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table()); status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64); if (status) @@ -140,7 +139,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { efi_status_t efi_status; - efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64), + efi_status = efi_get_random_bytes(sizeof(fdt_val64), (u8 *)&fdt_val64); if (efi_status == EFI_SUCCESS) { status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64); @@ -210,8 +209,7 @@ struct exit_boot_struct { void *new_fdt_addr; }; -static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, - struct efi_boot_memmap *map, +static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv) { struct exit_boot_struct *p = priv; @@ -244,8 +242,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, * with the final memory map in it. */ -efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, - void *handle, +efi_status_t allocate_new_fdt_and_exit_boot(void *handle, unsigned long *new_fdt_addr, unsigned long max_addr, u64 initrd_addr, u64 initrd_size, @@ -275,19 +272,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, * subsequent allocations adding entries, since they could not affect * the number of EFI_MEMORY_RUNTIME regions. 
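The "kaslr-seed" handling above is compact; as a hedged illustration (not part of the patch), here is the same flow pulled out into a stand-alone helper, assuming a valid /chosen node offset:

static int example_set_kaslr_seed(void *fdt, int chosen_node)
{
        u64 seed;

        /* Ask the firmware RNG for a seed; failure is non-fatal for boot. */
        if (efi_get_random_bytes(sizeof(seed), (u8 *)&seed) != EFI_SUCCESS)
                return 0;

        /* fdt_setprop_var() expands to fdt_setprop(..., &seed, sizeof(seed)). */
        return fdt_setprop_var(fdt, chosen_node, "kaslr-seed", seed);
}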
*/ - status = efi_get_memory_map(sys_table, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n"); + pr_efi_err("Unable to retrieve UEFI memory map.\n"); return status; } - pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n"); + pr_efi("Exiting boot services and installing virtual address map...\n"); map.map = &memory_map; - status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN, + status = efi_high_alloc(MAX_FDT_SIZE, EFI_FDT_ALIGN, new_fdt_addr, max_addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n"); + pr_efi_err("Unable to allocate memory for new device tree.\n"); goto fail; } @@ -295,16 +292,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, * Now that we have done our final memory allocation (and free) * we can get the memory map key needed for exit_boot_services(). */ - status = efi_get_memory_map(sys_table, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) goto fail_free_new_fdt; - status = update_fdt(sys_table, (void *)fdt_addr, fdt_size, + status = update_fdt((void *)fdt_addr, fdt_size, (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr, initrd_addr, initrd_size); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, "Unable to construct new device tree.\n"); + pr_efi_err("Unable to construct new device tree.\n"); goto fail_free_new_fdt; } @@ -313,7 +310,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, priv.runtime_entry_count = &runtime_entry_count; priv.new_fdt_addr = (void *)*new_fdt_addr; - status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func); + status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func); if (status == EFI_SUCCESS) { efi_set_virtual_address_map_t *svam; @@ -322,7 +319,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, return EFI_SUCCESS; /* Install the new virtual address map */ - svam = sys_table->runtime->set_virtual_address_map; + svam = efi_system_table()->runtime->set_virtual_address_map; status = svam(runtime_entry_count * desc_size, desc_size, desc_ver, runtime_map); @@ -350,28 +347,28 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, return EFI_SUCCESS; } - pr_efi_err(sys_table, "Exit boot services failed.\n"); + pr_efi_err("Exit boot services failed.\n"); fail_free_new_fdt: - efi_free(sys_table, MAX_FDT_SIZE, *new_fdt_addr); + efi_free(MAX_FDT_SIZE, *new_fdt_addr); fail: - sys_table->boottime->free_pool(runtime_map); + efi_system_table()->boottime->free_pool(runtime_map); return EFI_LOAD_ERROR; } -void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size) +void *get_fdt(unsigned long *fdt_size) { void *fdt; - fdt = get_efi_config_table(sys_table, DEVICE_TREE_GUID); + fdt = get_efi_config_table(DEVICE_TREE_GUID); if (!fdt) return NULL; if (fdt_check_header(fdt) != 0) { - pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n"); + pr_efi_err("Invalid header detected on UEFI supplied FDT, ignoring ...\n"); return NULL; } *fdt_size = fdt_totalsize(fdt); diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index b7bf1e993b8b..55e6b3f286fe 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -10,6 +10,8 @@ #include <asm/efi.h> #include <asm/setup.h> +#include "efistub.h" + static void 
find_bits(unsigned long mask, u8 *pos, u8 *size) { u8 first, len; @@ -35,7 +37,7 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) static void setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, - struct efi_pixel_bitmask pixel_info, int pixel_format) + efi_pixel_bitmask_t pixel_info, int pixel_format) { if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) { si->lfb_depth = 32; @@ -83,48 +85,42 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, } } -static efi_status_t -setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, - efi_guid_t *proto, unsigned long size, void **gop_handle) +static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, + unsigned long size, void **handles) { - struct efi_graphics_output_protocol_32 *gop32, *first_gop; - unsigned long nr_gops; + efi_graphics_output_protocol_t *gop, *first_gop; u16 width, height; u32 pixels_per_scan_line; u32 ext_lfb_base; - u64 fb_base; - struct efi_pixel_bitmask pixel_info; + efi_physical_addr_t fb_base; + efi_pixel_bitmask_t pixel_info; int pixel_format; efi_status_t status; - u32 *handles = (u32 *)(unsigned long)gop_handle; + efi_handle_t h; int i; first_gop = NULL; - gop32 = NULL; + gop = NULL; - nr_gops = size / sizeof(u32); - for (i = 0; i < nr_gops; i++) { - struct efi_graphics_output_protocol_mode_32 *mode; - struct efi_graphics_output_mode_info *info = NULL; + for_each_efi_handle(h, handles, size, i) { + efi_graphics_output_protocol_mode_t *mode; + efi_graphics_output_mode_info_t *info = NULL; efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; bool conout_found = false; void *dummy = NULL; - efi_handle_t h = (efi_handle_t)(unsigned long)handles[i]; - u64 current_fb_base; + efi_physical_addr_t current_fb_base; - status = efi_call_early(handle_protocol, h, - proto, (void **)&gop32); + status = efi_bs_call(handle_protocol, h, proto, (void **)&gop); if (status != EFI_SUCCESS) continue; - status = efi_call_early(handle_protocol, h, - &conout_proto, &dummy); + status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy); if (status == EFI_SUCCESS) conout_found = true; - mode = (void *)(unsigned long)gop32->mode; - info = (void *)(unsigned long)mode->info; - current_fb_base = mode->frame_buffer_base; + mode = efi_table_attr(gop, mode); + info = efi_table_attr(mode, info); + current_fb_base = efi_table_attr(mode, frame_buffer_base); if ((!first_gop || conout_found) && info->pixel_format != PIXEL_BLT_ONLY) { @@ -146,104 +142,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, * Once we've found a GOP supporting ConOut, * don't bother looking any further. */ - first_gop = gop32; - if (conout_found) - break; - } - } - - /* Did we find any GOPs? 
*/ - if (!first_gop) - return EFI_NOT_FOUND; - - /* EFI framebuffer */ - si->orig_video_isVGA = VIDEO_TYPE_EFI; - - si->lfb_width = width; - si->lfb_height = height; - si->lfb_base = fb_base; - - ext_lfb_base = (u64)(unsigned long)fb_base >> 32; - if (ext_lfb_base) { - si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE; - si->ext_lfb_base = ext_lfb_base; - } - - si->pages = 1; - - setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format); - - si->lfb_size = si->lfb_linelength * si->lfb_height; - - si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; - - return EFI_SUCCESS; -} - -static efi_status_t -setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, - efi_guid_t *proto, unsigned long size, void **gop_handle) -{ - struct efi_graphics_output_protocol_64 *gop64, *first_gop; - unsigned long nr_gops; - u16 width, height; - u32 pixels_per_scan_line; - u32 ext_lfb_base; - u64 fb_base; - struct efi_pixel_bitmask pixel_info; - int pixel_format; - efi_status_t status; - u64 *handles = (u64 *)(unsigned long)gop_handle; - int i; - - first_gop = NULL; - gop64 = NULL; - - nr_gops = size / sizeof(u64); - for (i = 0; i < nr_gops; i++) { - struct efi_graphics_output_protocol_mode_64 *mode; - struct efi_graphics_output_mode_info *info = NULL; - efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; - bool conout_found = false; - void *dummy = NULL; - efi_handle_t h = (efi_handle_t)(unsigned long)handles[i]; - u64 current_fb_base; - - status = efi_call_early(handle_protocol, h, - proto, (void **)&gop64); - if (status != EFI_SUCCESS) - continue; - - status = efi_call_early(handle_protocol, h, - &conout_proto, &dummy); - if (status == EFI_SUCCESS) - conout_found = true; - - mode = (void *)(unsigned long)gop64->mode; - info = (void *)(unsigned long)mode->info; - current_fb_base = mode->frame_buffer_base; - - if ((!first_gop || conout_found) && - info->pixel_format != PIXEL_BLT_ONLY) { - /* - * Systems that use the UEFI Console Splitter may - * provide multiple GOP devices, not all of which are - * backed by real hardware. The workaround is to search - * for a GOP implementing the ConOut protocol, and if - * one isn't found, to just fall back to the first GOP. - */ - width = info->horizontal_resolution; - height = info->vertical_resolution; - pixel_format = info->pixel_format; - pixel_info = info->pixel_information; - pixels_per_scan_line = info->pixels_per_scan_line; - fb_base = current_fb_base; - - /* - * Once we've found a GOP supporting ConOut, - * don't bother looking any further. 
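To make the consolidation explicit, a hedged sketch of the handle-walking pattern that lets the single setup_gop() above replace the 32/64-bit variants; count_usable_gops() is hypothetical, the accessors are the ones used in the hunk:

static int count_usable_gops(void **handles, unsigned long size,
                             efi_guid_t *proto)
{
        efi_graphics_output_protocol_t *gop;
        efi_handle_t h;
        int i, count = 0;

        for_each_efi_handle(h, handles, size, i) {
                efi_graphics_output_protocol_mode_t *mode;
                efi_graphics_output_mode_info_t *info;

                if (efi_bs_call(handle_protocol, h, proto,
                                (void **)&gop) != EFI_SUCCESS)
                        continue;

                /* efi_table_attr() hides 32-bit vs 64-bit firmware layouts. */
                mode = efi_table_attr(gop, mode);
                info = efi_table_attr(mode, info);

                if (info->pixel_format != PIXEL_BLT_ONLY)
                        count++;
        }

        return count;
}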
- */ - first_gop = gop64; + first_gop = gop; if (conout_found) break; } @@ -280,33 +179,25 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, /* * See if we have Graphics Output Protocol */ -efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, - struct screen_info *si, efi_guid_t *proto, +efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto, unsigned long size) { efi_status_t status; void **gop_handle = NULL; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - size, (void **)&gop_handle); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, + (void **)&gop_handle); if (status != EFI_SUCCESS) return status; - status = efi_call_early(locate_handle, - EFI_LOCATE_BY_PROTOCOL, - proto, NULL, &size, gop_handle); + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL, + &size, gop_handle); if (status != EFI_SUCCESS) goto free_handle; - if (efi_is_64bit()) { - status = setup_gop64(sys_table_arg, si, proto, size, - gop_handle); - } else { - status = setup_gop32(sys_table_arg, si, proto, size, - gop_handle); - } + status = setup_gop(si, proto, size, gop_handle); free_handle: - efi_call_early(free_pool, gop_handle); + efi_bs_call(free_pool, gop_handle); return status; } diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c new file mode 100644 index 000000000000..b025e59b94df --- /dev/null +++ b/drivers/firmware/efi/libstub/pci.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI-related functions used by the EFI stub on multiple + * architectures. + * + * Copyright 2019 Google, LLC + */ + +#include <linux/efi.h> +#include <linux/pci.h> + +#include <asm/efi.h> + +#include "efistub.h" + +void efi_pci_disable_bridge_busmaster(void) +{ + efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; + unsigned long pci_handle_size = 0; + efi_handle_t *pci_handle = NULL; + efi_handle_t handle; + efi_status_t status; + u16 class, command; + int i; + + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, + NULL, &pci_handle_size, NULL); + + if (status != EFI_BUFFER_TOO_SMALL) { + if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) + pr_efi_err("Failed to locate PCI I/O handles'\n"); + return; + } + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size, + (void **)&pci_handle); + if (status != EFI_SUCCESS) { + pr_efi_err("Failed to allocate memory for 'pci_handle'\n"); + return; + } + + status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, + NULL, &pci_handle_size, pci_handle); + if (status != EFI_SUCCESS) { + pr_efi_err("Failed to locate PCI I/O handles'\n"); + goto free_handle; + } + + for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { + efi_pci_io_protocol_t *pci; + unsigned long segment_nr, bus_nr, device_nr, func_nr; + + status = efi_bs_call(handle_protocol, handle, &pci_proto, + (void **)&pci); + if (status != EFI_SUCCESS) + continue; + + /* + * Disregard devices living on bus 0 - these are not behind a + * bridge so no point in disconnecting them from their drivers. + */ + status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr, + &device_nr, &func_nr); + if (status != EFI_SUCCESS || bus_nr == 0) + continue; + + /* + * Don't disconnect VGA controllers so we don't risk losing + * access to the framebuffer. 
Drivers for true PCIe graphics + * controllers that are behind a PCIe root port do not use + * DMA to implement the GOP framebuffer anyway [although they + * may use it in their implentation of Gop->Blt()], and so + * disabling DMA in the PCI bridge should not interfere with + * normal operation of the device. + */ + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_CLASS_DEVICE, 1, &class); + if (status != EFI_SUCCESS || class == PCI_CLASS_DISPLAY_VGA) + continue; + + /* Disconnect this handle from all its drivers */ + efi_bs_call(disconnect_controller, handle, NULL, NULL); + } + + for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { + efi_pci_io_protocol_t *pci; + + status = efi_bs_call(handle_protocol, handle, &pci_proto, + (void **)&pci); + if (status != EFI_SUCCESS || !pci) + continue; + + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_CLASS_DEVICE, 1, &class); + + if (status != EFI_SUCCESS || class != PCI_CLASS_BRIDGE_PCI) + continue; + + /* Disable busmastering */ + status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16, + PCI_COMMAND, 1, &command); + if (status != EFI_SUCCESS || !(command & PCI_COMMAND_MASTER)) + continue; + + command &= ~PCI_COMMAND_MASTER; + status = efi_call_proto(pci, pci.write, EfiPciIoWidthUint16, + PCI_COMMAND, 1, &command); + if (status != EFI_SUCCESS) + pr_efi_err("Failed to disable PCI busmastering\n"); + } + +free_handle: + efi_bs_call(free_pool, pci_handle); +} diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 97378cf96a2e..316ce9ff0193 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -9,38 +9,34 @@ #include "efistub.h" -typedef struct efi_rng_protocol efi_rng_protocol_t; - -typedef struct { - u32 get_info; - u32 get_rng; -} efi_rng_protocol_32_t; - -typedef struct { - u64 get_info; - u64 get_rng; -} efi_rng_protocol_64_t; - -struct efi_rng_protocol { - efi_status_t (*get_info)(struct efi_rng_protocol *, - unsigned long *, efi_guid_t *); - efi_status_t (*get_rng)(struct efi_rng_protocol *, - efi_guid_t *, unsigned long, u8 *out); +typedef union efi_rng_protocol efi_rng_protocol_t; + +union efi_rng_protocol { + struct { + efi_status_t (__efiapi *get_info)(efi_rng_protocol_t *, + unsigned long *, + efi_guid_t *); + efi_status_t (__efiapi *get_rng)(efi_rng_protocol_t *, + efi_guid_t *, unsigned long, + u8 *out); + }; + struct { + u32 get_info; + u32 get_rng; + } mixed_mode; }; -efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg, - unsigned long size, u8 *out) +efi_status_t efi_get_random_bytes(unsigned long size, u8 *out) { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_status_t status; - struct efi_rng_protocol *rng = NULL; + efi_rng_protocol_t *rng = NULL; - status = efi_call_early(locate_protocol, &rng_proto, NULL, - (void **)&rng); + status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); if (status != EFI_SUCCESS) return status; - return efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, size, out); + return efi_call_proto(rng, get_rng, NULL, size, out); } /* @@ -81,8 +77,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, */ #define MD_NUM_SLOTS(md) ((md)->virt_addr) -efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, - unsigned long size, +efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed) @@ -101,7 +96,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, 
map.key_ptr = NULL; map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &map); + status = efi_get_memory_map(&map); if (status != EFI_SUCCESS) return status; @@ -145,39 +140,38 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, target = round_up(md->phys_addr, align) + target_slot * align; pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; - status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, - EFI_LOADER_DATA, pages, &target); + status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS, + EFI_LOADER_DATA, pages, &target); if (status == EFI_SUCCESS) *addr = target; break; } - efi_call_early(free_pool, memory_map); + efi_bs_call(free_pool, memory_map); return status; } -efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) +efi_status_t efi_random_get_seed(void) { efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; - struct efi_rng_protocol *rng = NULL; + efi_rng_protocol_t *rng = NULL; struct linux_efi_random_seed *seed = NULL; efi_status_t status; - status = efi_call_early(locate_protocol, &rng_proto, NULL, - (void **)&rng); + status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); if (status != EFI_SUCCESS) return status; - status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA, - sizeof(*seed) + EFI_RANDOM_SEED_SIZE, - (void **)&seed); + status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, + sizeof(*seed) + EFI_RANDOM_SEED_SIZE, + (void **)&seed); if (status != EFI_SUCCESS) return status; - status = efi_call_proto(efi_rng_protocol, get_rng, rng, &rng_algo_raw, + status = efi_call_proto(rng, get_rng, &rng_algo_raw, EFI_RANDOM_SEED_SIZE, seed->bits); if (status == EFI_UNSUPPORTED) @@ -185,21 +179,20 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) * Use whatever algorithm we have available if the raw algorithm * is not implemented. */ - status = efi_call_proto(efi_rng_protocol, get_rng, rng, NULL, - EFI_RANDOM_SEED_SIZE, seed->bits); + status = efi_call_proto(rng, get_rng, NULL, + EFI_RANDOM_SEED_SIZE, seed->bits); if (status != EFI_SUCCESS) goto err_freepool; seed->size = EFI_RANDOM_SEED_SIZE; - status = efi_call_early(install_configuration_table, &rng_table_guid, - seed); + status = efi_bs_call(install_configuration_table, &rng_table_guid, seed); if (status != EFI_SUCCESS) goto err_freepool; return EFI_SUCCESS; err_freepool: - efi_call_early(free_pool, seed); + efi_bs_call(free_pool, seed); return status; } diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index edba5e7a3743..a765378ad18c 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -21,18 +21,13 @@ static const efi_char16_t efi_SetupMode_name[] = L"SetupMode"; static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; -#define get_efi_var(name, vendor, ...) \ - efi_call_runtime(get_variable, \ - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ - __VA_ARGS__); - /* * Determine whether we're in secure boot mode. * * Please keep the logic in sync with * arch/x86/xen/efi.c:xen_efi_get_secureboot(). 
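The union layout introduced for efi_rng_protocol above generalises to other protocols; here is a hedged sketch with a made-up protocol (example_proto_t and do_thing are hypothetical, while the native/mixed_mode split and the efi_call_proto() usage follow the pattern in this hunk):

typedef union example_proto example_proto_t;

union example_proto {
        struct {
                efi_status_t (__efiapi *do_thing)(example_proto_t *, u32);
        };
        struct {
                u32 do_thing;
        } mixed_mode;
};

static efi_status_t call_do_thing(example_proto_t *p)
{
        /* Dispatches to p->do_thing() natively, or via the u32 thunk path. */
        return efi_call_proto(p, do_thing, 42);
}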
*/ -enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) +enum efi_secureboot_mode efi_get_secureboot(void) { u32 attr; u8 secboot, setupmode, moksbstate; @@ -72,10 +67,10 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) return efi_secureboot_mode_disabled; secure_boot_enabled: - pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n"); + pr_efi("UEFI Secure Boot is enabled.\n"); return efi_secureboot_mode_enabled; out_efi_err: - pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); + pr_efi_err("Could not determine UEFI Secure Boot status.\n"); return efi_secureboot_mode_unknown; } diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index eb9af83e4d59..1d59e103a2e3 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c @@ -20,23 +20,13 @@ static const efi_char16_t efi_MemoryOverWriteRequest_name[] = #define MEMORY_ONLY_RESET_CONTROL_GUID \ EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29) -#define get_efi_var(name, vendor, ...) \ - efi_call_runtime(get_variable, \ - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ - __VA_ARGS__) - -#define set_efi_var(name, vendor, ...) \ - efi_call_runtime(set_variable, \ - (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ - __VA_ARGS__) - /* * Enable reboot attack mitigation. This requests that the firmware clear the * RAM on next reboot before proceeding with boot, ensuring that any secrets * are cleared. If userland has ensured that all secrets have been removed * from RAM before reboot it can simply reset this variable. */ -void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) +void efi_enable_reset_attack_mitigation(void) { u8 val = 1; efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID; @@ -57,7 +47,7 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) #endif -void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) +void efi_retrieve_tpm2_eventlog(void) { efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID; efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; @@ -69,23 +59,22 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) size_t log_size, last_entry_size; efi_bool_t truncated; int version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; - void *tcg2_protocol = NULL; + efi_tcg2_protocol_t *tcg2_protocol = NULL; int final_events_size = 0; - status = efi_call_early(locate_protocol, &tcg2_guid, NULL, - &tcg2_protocol); + status = efi_bs_call(locate_protocol, &tcg2_guid, NULL, + (void **)&tcg2_protocol); if (status != EFI_SUCCESS) return; - status = efi_call_proto(efi_tcg2_protocol, get_event_log, - tcg2_protocol, version, &log_location, - &log_last_entry, &truncated); + status = efi_call_proto(tcg2_protocol, get_event_log, version, + &log_location, &log_last_entry, &truncated); if (status != EFI_SUCCESS || !log_location) { version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; - status = efi_call_proto(efi_tcg2_protocol, get_event_log, - tcg2_protocol, version, &log_location, - &log_last_entry, &truncated); + status = efi_call_proto(tcg2_protocol, get_event_log, version, + &log_location, &log_last_entry, + &truncated); if (status != EFI_SUCCESS || !log_location) return; @@ -126,13 +115,11 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) } /* Allocate space for the logs and copy them. 
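Since the per-file get_efi_var()/set_efi_var() copies removed above now live in efistub.h, a hedged usage sketch follows; read_setup_mode_example() is hypothetical, and "SetupMode" with EFI_GLOBAL_VARIABLE_GUID are the standard UEFI variable name and GUID assumed here:

static efi_status_t read_setup_mode_example(u8 *setupmode)
{
        static const efi_char16_t name[] = L"SetupMode";
        efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
        unsigned long size = sizeof(*setupmode);

        /* Expands to efi_rt_call(get_variable, name, &guid, NULL, &size, ...) */
        return get_efi_var(name, &guid, NULL, &size, setupmode);
}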
*/ - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - sizeof(*log_tbl) + log_size, - (void **) &log_tbl); + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + sizeof(*log_tbl) + log_size, (void **)&log_tbl); if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, - "Unable to allocate memory for event log\n"); + efi_printk("Unable to allocate memory for event log\n"); return; } @@ -140,8 +127,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) * Figure out whether any events have already been logged to the * final events structure, and if so how much space they take up */ - final_events_table = get_efi_config_table(sys_table_arg, - LINUX_EFI_TPM_FINAL_LOG_GUID); + final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID); if (final_events_table && final_events_table->nr_events) { struct tcg_pcr_event2_head *header; int offset; @@ -169,12 +155,12 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg) log_tbl->version = version; memcpy(log_tbl->log, (void *) first_entry_addr, log_size); - status = efi_call_early(install_configuration_table, - &linux_eventlog_guid, log_tbl); + status = efi_bs_call(install_configuration_table, + &linux_eventlog_guid, log_tbl); if (status != EFI_SUCCESS) goto err_free; return; err_free: - efi_call_early(free_pool, log_tbl); + efi_bs_call(free_pool, log_tbl); } diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index 38b686c67b17..2ff1883dc788 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -29,9 +29,32 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) return PFN_PHYS(page_to_pfn(p)); } +void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags) +{ + if (flags & EFI_MEMMAP_MEMBLOCK) { + if (slab_is_available()) + memblock_free_late(phys, size); + else + memblock_free(phys, size); + } else if (flags & EFI_MEMMAP_SLAB) { + struct page *p = pfn_to_page(PHYS_PFN(phys)); + unsigned int order = get_order(size); + + free_pages((unsigned long) page_address(p), order); + } +} + +static void __init efi_memmap_free(void) +{ + __efi_memmap_free(efi.memmap.phys_map, + efi.memmap.desc_size * efi.memmap.nr_map, + efi.memmap.flags); +} + /** * efi_memmap_alloc - Allocate memory for the EFI memory map * @num_entries: Number of entries in the allocated map. + * @data: efi memmap installation parameters * * Depending on whether mm_init() has already been invoked or not, * either memblock or "normal" page allocation is used. @@ -39,34 +62,47 @@ static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) * Returns the physical address of the allocated memory map on * success, zero on failure. 
*/ -phys_addr_t __init efi_memmap_alloc(unsigned int num_entries) +int __init efi_memmap_alloc(unsigned int num_entries, + struct efi_memory_map_data *data) { - unsigned long size = num_entries * efi.memmap.desc_size; - - if (slab_is_available()) - return __efi_memmap_alloc_late(size); + /* Expect allocation parameters are zero initialized */ + WARN_ON(data->phys_map || data->size); + + data->size = num_entries * efi.memmap.desc_size; + data->desc_version = efi.memmap.desc_version; + data->desc_size = efi.memmap.desc_size; + data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK); + data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE; + + if (slab_is_available()) { + data->flags |= EFI_MEMMAP_SLAB; + data->phys_map = __efi_memmap_alloc_late(data->size); + } else { + data->flags |= EFI_MEMMAP_MEMBLOCK; + data->phys_map = __efi_memmap_alloc_early(data->size); + } - return __efi_memmap_alloc_early(size); + if (!data->phys_map) + return -ENOMEM; + return 0; } /** * __efi_memmap_init - Common code for mapping the EFI memory map * @data: EFI memory map data - * @late: Use early or late mapping function? * * This function takes care of figuring out which function to use to * map the EFI memory map in efi.memmap based on how far into the boot * we are. * - * During bootup @late should be %false since we only have access to - * the early_memremap*() functions as the vmalloc space isn't setup. - * Once the kernel is fully booted we can fallback to the more robust - * memremap*() API. + * During bootup EFI_MEMMAP_LATE in data->flags should be clear since we + * only have access to the early_memremap*() functions as the vmalloc + * space isn't setup. Once the kernel is fully booted we can fallback + * to the more robust memremap*() API. * * Returns zero on success, a negative error code on failure. 
*/ -static int __init -__efi_memmap_init(struct efi_memory_map_data *data, bool late) +static int __init __efi_memmap_init(struct efi_memory_map_data *data) { struct efi_memory_map map; phys_addr_t phys_map; @@ -76,7 +112,7 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late) phys_map = data->phys_map; - if (late) + if (data->flags & EFI_MEMMAP_LATE) map.map = memremap(phys_map, data->size, MEMREMAP_WB); else map.map = early_memremap(phys_map, data->size); @@ -86,13 +122,16 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late) return -ENOMEM; } + /* NOP if data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB) == 0 */ + efi_memmap_free(); + map.phys_map = data->phys_map; map.nr_map = data->size / data->desc_size; map.map_end = map.map + data->size; map.desc_version = data->desc_version; map.desc_size = data->desc_size; - map.late = late; + map.flags = data->flags; set_bit(EFI_MEMMAP, &efi.flags); @@ -111,9 +150,10 @@ __efi_memmap_init(struct efi_memory_map_data *data, bool late) int __init efi_memmap_init_early(struct efi_memory_map_data *data) { /* Cannot go backwards */ - WARN_ON(efi.memmap.late); + WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE); - return __efi_memmap_init(data, false); + data->flags = 0; + return __efi_memmap_init(data); } void __init efi_memmap_unmap(void) @@ -121,7 +161,7 @@ void __init efi_memmap_unmap(void) if (!efi_enabled(EFI_MEMMAP)) return; - if (!efi.memmap.late) { + if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) { unsigned long size; size = efi.memmap.desc_size * efi.memmap.nr_map; @@ -162,13 +202,14 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size) struct efi_memory_map_data data = { .phys_map = addr, .size = size, + .flags = EFI_MEMMAP_LATE, }; /* Did we forget to unmap the early EFI memmap? */ WARN_ON(efi.memmap.map); /* Were we already called? */ - WARN_ON(efi.memmap.late); + WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE); /* * It makes no sense to allow callers to register different @@ -178,13 +219,12 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size) data.desc_version = efi.memmap.desc_version; data.desc_size = efi.memmap.desc_size; - return __efi_memmap_init(&data, true); + return __efi_memmap_init(&data); } /** * efi_memmap_install - Install a new EFI memory map in efi.memmap - * @addr: Physical address of the memory map - * @nr_map: Number of entries in the memory map + * @ctx: map allocation parameters (address, size, flags) * * Unlike efi_memmap_init_*(), this function does not allow the caller * to switch from early to late mappings. It simply uses the existing @@ -192,18 +232,11 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size) * * Returns zero on success, a negative error code on failure. 
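Putting the two halves together, a hedged caller sketch of the reworked allocate-then-install flow; example_install_new_memmap() is hypothetical, the struct efi_memory_map_data fields are the ones populated by efi_memmap_alloc() above:

static int __init example_install_new_memmap(unsigned int nr_entries)
{
        struct efi_memory_map_data data = {};   /* alloc expects zeroed params */
        int ret;

        ret = efi_memmap_alloc(nr_entries, &data);
        if (ret)
                return ret;

        /*
         * A real caller fills in the descriptors at data.phys_map (data.size
         * bytes, data.desc_size per entry) before installing the new map.
         */
        return efi_memmap_install(&data);
}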
*/ -int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map) +int __init efi_memmap_install(struct efi_memory_map_data *data) { - struct efi_memory_map_data data; - efi_memmap_unmap(); - data.phys_map = addr; - data.size = efi.memmap.desc_size * nr_map; - data.desc_version = efi.memmap.desc_version; - data.desc_size = efi.memmap.desc_size; - - return __efi_memmap_init(&data, efi.memmap.late); + return __efi_memmap_init(data); } /** diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index 8d132e4f008a..0205987a4fd4 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -163,8 +163,15 @@ static int coreboot_table_probe(struct platform_device *pdev) return ret; } +static int __cb_dev_unregister(struct device *dev, void *dummy) +{ + device_unregister(dev); + return 0; +} + static int coreboot_table_remove(struct platform_device *pdev) { + bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister); bus_unregister(&coreboot_bus_type); return 0; } diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index edaa4e5d84ad..5b2011ebbe26 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -76,6 +76,7 @@ #define GSMI_CMD_LOG_S0IX_RESUME 0x0b #define GSMI_CMD_CLEAR_CONFIG 0x20 #define GSMI_CMD_HANDSHAKE_TYPE 0xC1 +#define GSMI_CMD_RESERVED 0xff /* Magic entry type for kernel events */ #define GSMI_LOG_ENTRY_TYPE_KERNEL 0xDEAD @@ -746,6 +747,7 @@ MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table); static __init int gsmi_system_valid(void) { u32 hash; + u16 cmd, result; if (!dmi_check_system(gsmi_dmi_table)) return -ENODEV; @@ -780,6 +782,23 @@ static __init int gsmi_system_valid(void) return -ENODEV; } + /* Test the smihandler with a bogus command. If it leaves the + * calling argument in %ax untouched, there is no handler for + * GSMI commands. 
+ */ + cmd = GSMI_CALLBACK | GSMI_CMD_RESERVED << 8; + asm volatile ( + "outb %%al, %%dx\n\t" + : "=a" (result) + : "0" (cmd), + "d" (acpi_gbl_FADT.smi_command) + : "memory", "cc" + ); + if (cmd == result) { + pr_info("gsmi: no gsmi handler in firmware\n"); + return -ENODEV; + } + /* Found */ return 0; } @@ -1016,6 +1035,9 @@ out_err: dma_pool_destroy(gsmi_dev.dma_pool); platform_device_unregister(gsmi_dev.pdev); pr_info("gsmi: failed to load: %d\n", ret); +#ifdef CONFIG_PM + platform_driver_unregister(&gsmi_driver_info); +#endif return ret; } @@ -1037,6 +1059,9 @@ static void __exit gsmi_exit(void) gsmi_buf_free(gsmi_dev.name_buf); dma_pool_destroy(gsmi_dev.dma_pool); platform_device_unregister(gsmi_dev.pdev); +#ifdef CONFIG_PM + platform_driver_unregister(&gsmi_driver_info); +#endif } module_init(gsmi_init); diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 0dbee32da4c6..1d2e5b85d7ca 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config IMX_DSP - bool "IMX DSP Protocol driver" + tristate "IMX DSP Protocol driver" depends on IMX_MBOX help This enables DSP IPC protocol between host AP (Linux) diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 7e12cbdf957c..96758b71a8db 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -104,6 +104,7 @@ struct ibft_control { u16 tgt0_off; u16 nic1_off; u16 tgt1_off; + u16 expansion[0]; } __attribute__((__packed__)); struct ibft_initiator { @@ -235,7 +236,7 @@ static int ibft_verify_hdr(char *t, struct ibft_hdr *hdr, int id, int length) "found %d instead!\n", t, id, hdr->id); return -ENODEV; } - if (hdr->length != length) { + if (length && hdr->length != length) { printk(KERN_ERR "iBFT error: We expected the %s " \ "field header.length to have %d but " \ "found %d instead!\n", t, length, hdr->length); @@ -749,16 +750,16 @@ static int __init ibft_register_kobjects(struct acpi_table_ibft *header) control = (void *)header + sizeof(*header); end = (void *)control + control->hdr.length; eot_offset = (void *)header + header->header.length - (void *)control; - rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control, - sizeof(*control)); + rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control, 0); /* iBFT table safety checking */ rc |= ((control->hdr.index) ? -ENODEV : 0); + rc |= ((control->hdr.length < sizeof(*control)) ? 
-ENODEV : 0); if (rc) { printk(KERN_ERR "iBFT error: Control header is invalid!\n"); return rc; } - for (ptr = &control->initiator_off; ptr < end; ptr += sizeof(u16)) { + for (ptr = &control->initiator_off; ptr + sizeof(u16) <= end; ptr += sizeof(u16)) { offset = *(u16 *)ptr; if (offset && offset < header->header.length && offset < eot_offset) { diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index b3b6c15e7b36..2937d44b5df4 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -97,7 +97,7 @@ static inline bool psci_has_ext_power_state(void) PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK; } -static inline bool psci_has_osi_support(void) +bool psci_has_osi_support(void) { return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED; } @@ -162,6 +162,15 @@ static u32 psci_get_version(void) return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); } +int psci_set_osi_mode(void) +{ + int err; + + err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, + PSCI_1_0_SUSPEND_MODE_OSI, 0, 0); + return psci_to_linux_errno(err); +} + static int psci_cpu_suspend(u32 state, unsigned long entry_point) { int err; @@ -544,9 +553,14 @@ static int __init psci_1_0_init(struct device_node *np) if (err) return err; - if (psci_has_osi_support()) + if (psci_has_osi_support()) { pr_info("OSI mode supported.\n"); + /* Default to PC mode. */ + invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, + PSCI_1_0_SUSPEND_MODE_PC, 0, 0); + } + return 0; } diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c deleted file mode 100644 index 48e2ef794ea3..000000000000 --- a/drivers/firmware/qcom_scm-32.c +++ /dev/null @@ -1,671 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. - * Copyright (C) 2015 Linaro Ltd. 
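For the PSCI change above, a hedged sketch of how a consumer (the example_enable_osi() wrapper is hypothetical) could use the two helpers this hunk exports from psci.c:

static int example_enable_osi(void)
{
        if (!psci_has_osi_support())
                return -EOPNOTSUPP;

        /* Switch PSCI from the default Platform-Coordinated mode to OSI. */
        return psci_set_osi_mode();
}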
- */ - -#include <linux/slab.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/qcom_scm.h> -#include <linux/dma-mapping.h> - -#include "qcom_scm.h" - -#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 -#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 -#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 -#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 - -#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 -#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 -#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 -#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 - -struct qcom_scm_entry { - int flag; - void *entry; -}; - -static struct qcom_scm_entry qcom_scm_wb[] = { - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, - { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, -}; - -static DEFINE_MUTEX(qcom_scm_lock); - -/** - * struct qcom_scm_command - one SCM command buffer - * @len: total available memory for command and response - * @buf_offset: start of command buffer - * @resp_hdr_offset: start of response buffer - * @id: command to be executed - * @buf: buffer returned from qcom_scm_get_command_buffer() - * - * An SCM command is laid out in memory as follows: - * - * ------------------- <--- struct qcom_scm_command - * | command header | - * ------------------- <--- qcom_scm_get_command_buffer() - * | command buffer | - * ------------------- <--- struct qcom_scm_response and - * | response header | qcom_scm_command_to_response() - * ------------------- <--- qcom_scm_get_response_buffer() - * | response buffer | - * ------------------- - * - * There can be arbitrary padding between the headers and buffers so - * you should always use the appropriate qcom_scm_get_*_buffer() routines - * to access the buffers in a safe manner. - */ -struct qcom_scm_command { - __le32 len; - __le32 buf_offset; - __le32 resp_hdr_offset; - __le32 id; - __le32 buf[0]; -}; - -/** - * struct qcom_scm_response - one SCM response buffer - * @len: total available memory for response - * @buf_offset: start of response data relative to start of qcom_scm_response - * @is_complete: indicates if the command has finished processing - */ -struct qcom_scm_response { - __le32 len; - __le32 buf_offset; - __le32 is_complete; -}; - -/** - * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response - * @cmd: command - * - * Returns a pointer to a response for a command. - */ -static inline struct qcom_scm_response *qcom_scm_command_to_response( - const struct qcom_scm_command *cmd) -{ - return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset); -} - -/** - * qcom_scm_get_command_buffer() - Get a pointer to a command buffer - * @cmd: command - * - * Returns a pointer to the command buffer of a command. - */ -static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd) -{ - return (void *)cmd->buf; -} - -/** - * qcom_scm_get_response_buffer() - Get a pointer to a response buffer - * @rsp: response - * - * Returns a pointer to a response buffer of a response. 
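As a hedged illustration of the buffer layout described in the comment above (for the legacy convention this file removal retires; example_buffers() is made up), the accessor helpers declared in this block resolve the two payload areas like so:

static void example_buffers(struct qcom_scm_command *cmd,
                            void **cmd_payload, void **resp_payload)
{
        struct qcom_scm_response *rsp = qcom_scm_command_to_response(cmd);

        *cmd_payload = qcom_scm_get_command_buffer(cmd);   /* right after the header */
        *resp_payload = qcom_scm_get_response_buffer(rsp); /* rsp + rsp->buf_offset  */
}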
- */ -static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp) -{ - return (void *)rsp + le32_to_cpu(rsp->buf_offset); -} - -static u32 smc(u32 cmd_addr) -{ - int context_id; - register u32 r0 asm("r0") = 1; - register u32 r1 asm("r1") = (u32)&context_id; - register u32 r2 asm("r2") = cmd_addr; - do { - asm volatile( - __asmeq("%0", "r0") - __asmeq("%1", "r0") - __asmeq("%2", "r1") - __asmeq("%3", "r2") -#ifdef REQUIRES_SEC - ".arch_extension sec\n" -#endif - "smc #0 @ switch to secure world\n" - : "=r" (r0) - : "r" (r0), "r" (r1), "r" (r2) - : "r3", "r12"); - } while (r0 == QCOM_SCM_INTERRUPTED); - - return r0; -} - -/** - * qcom_scm_call() - Send an SCM command - * @dev: struct device - * @svc_id: service identifier - * @cmd_id: command identifier - * @cmd_buf: command buffer - * @cmd_len: length of the command buffer - * @resp_buf: response buffer - * @resp_len: length of the response buffer - * - * Sends a command to the SCM and waits for the command to finish processing. - * - * A note on cache maintenance: - * Note that any buffers that are expected to be accessed by the secure world - * must be flushed before invoking qcom_scm_call and invalidated in the cache - * immediately after qcom_scm_call returns. Cache maintenance on the command - * and response buffers is taken care of by qcom_scm_call; however, callers are - * responsible for any other cached buffers passed over to the secure world. - */ -static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, - const void *cmd_buf, size_t cmd_len, void *resp_buf, - size_t resp_len) -{ - int ret; - struct qcom_scm_command *cmd; - struct qcom_scm_response *rsp; - size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len; - dma_addr_t cmd_phys; - - cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->len = cpu_to_le32(alloc_len); - cmd->buf_offset = cpu_to_le32(sizeof(*cmd)); - cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len); - - cmd->id = cpu_to_le32((svc_id << 10) | cmd_id); - if (cmd_buf) - memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len); - - rsp = qcom_scm_command_to_response(cmd); - - cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE); - if (dma_mapping_error(dev, cmd_phys)) { - kfree(cmd); - return -ENOMEM; - } - - mutex_lock(&qcom_scm_lock); - ret = smc(cmd_phys); - if (ret < 0) - ret = qcom_scm_remap_error(ret); - mutex_unlock(&qcom_scm_lock); - if (ret) - goto out; - - do { - dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len, - sizeof(*rsp), DMA_FROM_DEVICE); - } while (!rsp->is_complete); - - if (resp_buf) { - dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len + - le32_to_cpu(rsp->buf_offset), - resp_len, DMA_FROM_DEVICE); - memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), - resp_len); - } -out: - dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE); - kfree(cmd); - return ret; -} - -#define SCM_CLASS_REGISTER (0x2 << 8) -#define SCM_MASK_IRQS BIT(5) -#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \ - SCM_CLASS_REGISTER | \ - SCM_MASK_IRQS | \ - (n & 0xf)) - -/** - * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument - * @svc_id: service identifier - * @cmd_id: command identifier - * @arg1: first argument - * - * This shall only be used with commands that are guaranteed to be - * uninterruptable, atomic and SMP safe. 
- */ -static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1) -{ - int context_id; - - register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1); - register u32 r1 asm("r1") = (u32)&context_id; - register u32 r2 asm("r2") = arg1; - - asm volatile( - __asmeq("%0", "r0") - __asmeq("%1", "r0") - __asmeq("%2", "r1") - __asmeq("%3", "r2") -#ifdef REQUIRES_SEC - ".arch_extension sec\n" -#endif - "smc #0 @ switch to secure world\n" - : "=r" (r0) - : "r" (r0), "r" (r1), "r" (r2) - : "r3", "r12"); - return r0; -} - -/** - * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments - * @svc_id: service identifier - * @cmd_id: command identifier - * @arg1: first argument - * @arg2: second argument - * - * This shall only be used with commands that are guaranteed to be - * uninterruptable, atomic and SMP safe. - */ -static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2) -{ - int context_id; - - register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2); - register u32 r1 asm("r1") = (u32)&context_id; - register u32 r2 asm("r2") = arg1; - register u32 r3 asm("r3") = arg2; - - asm volatile( - __asmeq("%0", "r0") - __asmeq("%1", "r0") - __asmeq("%2", "r1") - __asmeq("%3", "r2") - __asmeq("%4", "r3") -#ifdef REQUIRES_SEC - ".arch_extension sec\n" -#endif - "smc #0 @ switch to secure world\n" - : "=r" (r0) - : "r" (r0), "r" (r1), "r" (r2), "r" (r3) - : "r12"); - return r0; -} - -u32 qcom_scm_get_version(void) -{ - int context_id; - static u32 version = -1; - register u32 r0 asm("r0"); - register u32 r1 asm("r1"); - - if (version != -1) - return version; - - mutex_lock(&qcom_scm_lock); - - r0 = 0x1 << 8; - r1 = (u32)&context_id; - do { - asm volatile( - __asmeq("%0", "r0") - __asmeq("%1", "r1") - __asmeq("%2", "r0") - __asmeq("%3", "r1") -#ifdef REQUIRES_SEC - ".arch_extension sec\n" -#endif - "smc #0 @ switch to secure world\n" - : "=r" (r0), "=r" (r1) - : "r" (r0), "r" (r1) - : "r2", "r3", "r12"); - } while (r0 == QCOM_SCM_INTERRUPTED); - - version = r1; - mutex_unlock(&qcom_scm_lock); - - return version; -} -EXPORT_SYMBOL(qcom_scm_get_version); - -/** - * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the cold boot address of the cpus. Any cpu outside the supported - * range would be removed from the cpu present mask. - */ -int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) -{ - int flags = 0; - int cpu; - int scm_cb_flags[] = { - QCOM_SCM_FLAG_COLDBOOT_CPU0, - QCOM_SCM_FLAG_COLDBOOT_CPU1, - QCOM_SCM_FLAG_COLDBOOT_CPU2, - QCOM_SCM_FLAG_COLDBOOT_CPU3, - }; - - if (!cpus || (cpus && cpumask_empty(cpus))) - return -EINVAL; - - for_each_cpu(cpu, cpus) { - if (cpu < ARRAY_SIZE(scm_cb_flags)) - flags |= scm_cb_flags[cpu]; - else - set_cpu_present(cpu, false); - } - - return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR, - flags, virt_to_phys(entry)); -} - -/** - * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the Linux entry point for the SCM to transfer control to when coming - * out of a power down. CPU power down may be executed on cpuidle or hotplug. 
- */ -int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry, - const cpumask_t *cpus) -{ - int ret; - int flags = 0; - int cpu; - struct { - __le32 flags; - __le32 addr; - } cmd; - - /* - * Reassign only if we are switching from hotplug entry point - * to cpuidle entry point or vice versa. - */ - for_each_cpu(cpu, cpus) { - if (entry == qcom_scm_wb[cpu].entry) - continue; - flags |= qcom_scm_wb[cpu].flag; - } - - /* No change in entry function */ - if (!flags) - return 0; - - cmd.addr = cpu_to_le32(virt_to_phys(entry)); - cmd.flags = cpu_to_le32(flags); - ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR, - &cmd, sizeof(cmd), NULL, 0); - if (!ret) { - for_each_cpu(cpu, cpus) - qcom_scm_wb[cpu].entry = entry; - } - - return ret; -} - -/** - * qcom_scm_cpu_power_down() - Power down the cpu - * @flags - Flags to flush cache - * - * This is an end point to power down cpu. If there was a pending interrupt, - * the control would return from this function, otherwise, the cpu jumps to the - * warm boot entry point set for this cpu upon reset. - */ -void __qcom_scm_cpu_power_down(u32 flags) -{ - qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC, - flags & QCOM_SCM_FLUSH_FLAG_MASK); -} - -int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id) -{ - int ret; - __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id); - __le32 ret_val = 0; - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, - &svc_cmd, sizeof(svc_cmd), &ret_val, - sizeof(ret_val)); - if (ret) - return ret; - - return le32_to_cpu(ret_val); -} - -int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req, - u32 req_cnt, u32 *resp) -{ - if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) - return -ERANGE; - - return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, - req, req_cnt * sizeof(*req), resp, sizeof(*resp)); -} - -int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size, - u32 mode) -{ - struct ocmem_tz_lock { - __le32 id; - __le32 offset; - __le32 size; - __le32 mode; - } request; - - request.id = cpu_to_le32(id); - request.offset = cpu_to_le32(offset); - request.size = cpu_to_le32(size); - request.mode = cpu_to_le32(mode); - - return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_LOCK_CMD, - &request, sizeof(request), NULL, 0); -} - -int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size) -{ - struct ocmem_tz_unlock { - __le32 id; - __le32 offset; - __le32 size; - } request; - - request.id = cpu_to_le32(id); - request.offset = cpu_to_le32(offset); - request.size = cpu_to_le32(size); - - return qcom_scm_call(dev, QCOM_SCM_OCMEM_SVC, QCOM_SCM_OCMEM_UNLOCK_CMD, - &request, sizeof(request), NULL, 0); -} - -void __qcom_scm_init(void) -{ -} - -bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral) -{ - __le32 out; - __le32 in; - int ret; - - in = cpu_to_le32(peripheral); - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_IS_SUPPORTED_CMD, - &in, sizeof(in), - &out, sizeof(out)); - - return ret ? false : !!out; -} - -int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, - dma_addr_t metadata_phys) -{ - __le32 scm_ret; - int ret; - struct { - __le32 proc; - __le32 image_addr; - } request; - - request.proc = cpu_to_le32(peripheral); - request.image_addr = cpu_to_le32(metadata_phys); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_INIT_IMAGE_CMD, - &request, sizeof(request), - &scm_ret, sizeof(scm_ret)); - - return ret ? 
: le32_to_cpu(scm_ret); -} - -int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral, - phys_addr_t addr, phys_addr_t size) -{ - __le32 scm_ret; - int ret; - struct { - __le32 proc; - __le32 addr; - __le32 len; - } request; - - request.proc = cpu_to_le32(peripheral); - request.addr = cpu_to_le32(addr); - request.len = cpu_to_le32(size); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_MEM_SETUP_CMD, - &request, sizeof(request), - &scm_ret, sizeof(scm_ret)); - - return ret ? : le32_to_cpu(scm_ret); -} - -int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral) -{ - __le32 out; - __le32 in; - int ret; - - in = cpu_to_le32(peripheral); - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_AUTH_AND_RESET_CMD, - &in, sizeof(in), - &out, sizeof(out)); - - return ret ? : le32_to_cpu(out); -} - -int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral) -{ - __le32 out; - __le32 in; - int ret; - - in = cpu_to_le32(peripheral); - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_SHUTDOWN_CMD, - &in, sizeof(in), - &out, sizeof(out)); - - return ret ? : le32_to_cpu(out); -} - -int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) -{ - __le32 out; - __le32 in = cpu_to_le32(reset); - int ret; - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, - &in, sizeof(in), - &out, sizeof(out)); - - return ret ? : le32_to_cpu(out); -} - -int __qcom_scm_set_dload_mode(struct device *dev, bool enable) -{ - return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE, - enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0); -} - -int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) -{ - struct { - __le32 state; - __le32 id; - } req; - __le32 scm_ret = 0; - int ret; - - req.state = cpu_to_le32(state); - req.id = cpu_to_le32(id); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE, - &req, sizeof(req), &scm_ret, sizeof(scm_ret)); - - return ret ? : le32_to_cpu(scm_ret); -} - -int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, - size_t mem_sz, phys_addr_t src, size_t src_sz, - phys_addr_t dest, size_t dest_sz) -{ - return -ENODEV; -} - -int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, - u32 spare) -{ - struct msm_scm_sec_cfg { - __le32 id; - __le32 ctx_bank_num; - } cfg; - int ret, scm_ret = 0; - - cfg.id = cpu_to_le32(device_id); - cfg.ctx_bank_num = cpu_to_le32(spare); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG, - &cfg, sizeof(cfg), &scm_ret, sizeof(scm_ret)); - - if (ret || scm_ret) - return ret ? ret : -EINVAL; - - return 0; -} - -int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, - size_t *size) -{ - return -ENODEV; -} - -int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size, - u32 spare) -{ - return -ENODEV; -} - -int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, - unsigned int *val) -{ - int ret; - - ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr); - if (ret >= 0) - *val = ret; - - return ret < 0 ? 
ret : 0; -} - -int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val) -{ - return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, - addr, val); -} - -int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable) -{ - return -ENODEV; -} diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c deleted file mode 100644 index 3c5850350974..000000000000 --- a/drivers/firmware/qcom_scm-64.c +++ /dev/null @@ -1,579 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. - */ - -#include <linux/io.h> -#include <linux/errno.h> -#include <linux/delay.h> -#include <linux/mutex.h> -#include <linux/slab.h> -#include <linux/types.h> -#include <linux/qcom_scm.h> -#include <linux/arm-smccc.h> -#include <linux/dma-mapping.h> - -#include "qcom_scm.h" - -#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF)) - -#define MAX_QCOM_SCM_ARGS 10 -#define MAX_QCOM_SCM_RETS 3 - -enum qcom_scm_arg_types { - QCOM_SCM_VAL, - QCOM_SCM_RO, - QCOM_SCM_RW, - QCOM_SCM_BUFVAL, -}; - -#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\ - (((a) & 0x3) << 4) | \ - (((b) & 0x3) << 6) | \ - (((c) & 0x3) << 8) | \ - (((d) & 0x3) << 10) | \ - (((e) & 0x3) << 12) | \ - (((f) & 0x3) << 14) | \ - (((g) & 0x3) << 16) | \ - (((h) & 0x3) << 18) | \ - (((i) & 0x3) << 20) | \ - (((j) & 0x3) << 22) | \ - ((num) & 0xf)) - -#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) - -/** - * struct qcom_scm_desc - * @arginfo: Metadata describing the arguments in args[] - * @args: The array of arguments for the secure syscall - * @res: The values returned by the secure syscall - */ -struct qcom_scm_desc { - u32 arginfo; - u64 args[MAX_QCOM_SCM_ARGS]; -}; - -static u64 qcom_smccc_convention = -1; -static DEFINE_MUTEX(qcom_scm_lock); - -#define QCOM_SCM_EBUSY_WAIT_MS 30 -#define QCOM_SCM_EBUSY_MAX_RETRY 20 - -#define N_EXT_QCOM_SCM_ARGS 7 -#define FIRST_EXT_ARG_IDX 3 -#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1) - -static void __qcom_scm_call_do(const struct qcom_scm_desc *desc, - struct arm_smccc_res *res, u32 fn_id, - u64 x5, u32 type) -{ - u64 cmd; - struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 }; - - cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention, - ARM_SMCCC_OWNER_SIP, fn_id); - - quirk.state.a6 = 0; - - do { - arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0], - desc->args[1], desc->args[2], x5, - quirk.state.a6, 0, res, &quirk); - - if (res->a0 == QCOM_SCM_INTERRUPTED) - cmd = res->a0; - - } while (res->a0 == QCOM_SCM_INTERRUPTED); -} - -static void qcom_scm_call_do(const struct qcom_scm_desc *desc, - struct arm_smccc_res *res, u32 fn_id, - u64 x5, bool atomic) -{ - int retry_count = 0; - - if (atomic) { - __qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL); - return; - } - - do { - mutex_lock(&qcom_scm_lock); - - __qcom_scm_call_do(desc, res, fn_id, x5, - ARM_SMCCC_STD_CALL); - - mutex_unlock(&qcom_scm_lock); - - if (res->a0 == QCOM_SCM_V2_EBUSY) { - if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY) - break; - msleep(QCOM_SCM_EBUSY_WAIT_MS); - } - } while (res->a0 == QCOM_SCM_V2_EBUSY); -} - -static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, - const struct qcom_scm_desc *desc, - struct arm_smccc_res *res, bool atomic) -{ - int arglen = desc->arginfo & 0xf; - int i; - u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id); - u64 x5 = desc->args[FIRST_EXT_ARG_IDX]; - dma_addr_t args_phys 
= 0; - void *args_virt = NULL; - size_t alloc_len; - gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; - - if (unlikely(arglen > N_REGISTER_ARGS)) { - alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64); - args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); - - if (!args_virt) - return -ENOMEM; - - if (qcom_smccc_convention == ARM_SMCCC_SMC_32) { - __le32 *args = args_virt; - - for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++) - args[i] = cpu_to_le32(desc->args[i + - FIRST_EXT_ARG_IDX]); - } else { - __le64 *args = args_virt; - - for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++) - args[i] = cpu_to_le64(desc->args[i + - FIRST_EXT_ARG_IDX]); - } - - args_phys = dma_map_single(dev, args_virt, alloc_len, - DMA_TO_DEVICE); - - if (dma_mapping_error(dev, args_phys)) { - kfree(args_virt); - return -ENOMEM; - } - - x5 = args_phys; - } - - qcom_scm_call_do(desc, res, fn_id, x5, atomic); - - if (args_virt) { - dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); - kfree(args_virt); - } - - if ((long)res->a0 < 0) - return qcom_scm_remap_error(res->a0); - - return 0; -} - -/** - * qcom_scm_call() - Invoke a syscall in the secure world - * @dev: device - * @svc_id: service identifier - * @cmd_id: command identifier - * @desc: Descriptor structure containing arguments and return values - * - * Sends a command to the SCM and waits for the command to finish processing. - * This should *only* be called in pre-emptible context. - */ -static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, - const struct qcom_scm_desc *desc, - struct arm_smccc_res *res) -{ - might_sleep(); - return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false); -} - -/** - * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() - * @dev: device - * @svc_id: service identifier - * @cmd_id: command identifier - * @desc: Descriptor structure containing arguments and return values - * @res: Structure containing results from SMC/HVC call - * - * Sends a command to the SCM and waits for the command to finish processing. - * This can be called in atomic context. - */ -static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id, - const struct qcom_scm_desc *desc, - struct arm_smccc_res *res) -{ - return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true); -} - -/** - * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the cold boot address of the cpus. Any cpu outside the supported - * range would be removed from the cpu present mask. - */ -int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) -{ - return -ENOTSUPP; -} - -/** - * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus - * @dev: Device pointer - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the Linux entry point for the SCM to transfer control to when coming - * out of a power down. CPU power down may be executed on cpuidle or hotplug. - */ -int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry, - const cpumask_t *cpus) -{ - return -ENOTSUPP; -} - -/** - * qcom_scm_cpu_power_down() - Power down the cpu - * @flags - Flags to flush cache - * - * This is an end point to power down cpu. If there was a pending interrupt, - * the control would return from this function, otherwise, the cpu jumps to the - * warm boot entry point set for this cpu upon reset. 
- */ -void __qcom_scm_cpu_power_down(u32 flags) -{ -} - -int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.arginfo = QCOM_SCM_ARGS(1); - desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) | - (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req, - u32 req_cnt, u32 *resp) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT) - return -ERANGE; - - desc.args[0] = req[0].addr; - desc.args[1] = req[0].val; - desc.args[2] = req[1].addr; - desc.args[3] = req[1].val; - desc.args[4] = req[2].addr; - desc.args[5] = req[2].val; - desc.args[6] = req[3].addr; - desc.args[7] = req[3].val; - desc.args[8] = req[4].addr; - desc.args[9] = req[4].val; - desc.arginfo = QCOM_SCM_ARGS(10); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc, - &res); - *resp = res.a1; - - return ret; -} - -int __qcom_scm_ocmem_lock(struct device *dev, uint32_t id, uint32_t offset, - uint32_t size, uint32_t mode) -{ - return -ENOTSUPP; -} - -int __qcom_scm_ocmem_unlock(struct device *dev, uint32_t id, uint32_t offset, - uint32_t size) -{ - return -ENOTSUPP; -} - -void __qcom_scm_init(void) -{ - u64 cmd; - struct arm_smccc_res res; - u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD); - - /* First try a SMC64 call */ - cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, - ARM_SMCCC_OWNER_SIP, function); - - arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)), - 0, 0, 0, 0, 0, &res); - - if (!res.a0 && res.a1) - qcom_smccc_convention = ARM_SMCCC_SMC_64; - else - qcom_smccc_convention = ARM_SMCCC_SMC_32; -} - -bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = peripheral; - desc.arginfo = QCOM_SCM_ARGS(1); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_IS_SUPPORTED_CMD, - &desc, &res); - - return ret ? false : !!res.a1; -} - -int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, - dma_addr_t metadata_phys) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = peripheral; - desc.args[1] = metadata_phys; - desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral, - phys_addr_t addr, phys_addr_t size) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = peripheral; - desc.args[1] = addr; - desc.args[2] = size; - desc.arginfo = QCOM_SCM_ARGS(3); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = peripheral; - desc.arginfo = QCOM_SCM_ARGS(1); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_AUTH_AND_RESET_CMD, - &desc, &res); - - return ret ? 
: res.a1; -} - -int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = peripheral; - desc.arginfo = QCOM_SCM_ARGS(1); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = reset; - desc.args[1] = 0; - desc.arginfo = QCOM_SCM_ARGS(2); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc, - &res); - - return ret ? : res.a1; -} - -int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = state; - desc.args[1] = id; - desc.arginfo = QCOM_SCM_ARGS(2); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, - size_t mem_sz, phys_addr_t src, size_t src_sz, - phys_addr_t dest, size_t dest_sz) -{ - int ret; - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = mem_region; - desc.args[1] = mem_sz; - desc.args[2] = src; - desc.args[3] = src_sz; - desc.args[4] = dest; - desc.args[5] = dest_sz; - desc.args[6] = 0; - - desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL, - QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO, - QCOM_SCM_VAL, QCOM_SCM_VAL); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, - QCOM_MEM_PROT_ASSIGN_ID, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = device_id; - desc.args[1] = spare; - desc.arginfo = QCOM_SCM_ARGS(2); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG, - &desc, &res); - - return ret ? : res.a1; -} - -int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, - size_t *size) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = spare; - desc.arginfo = QCOM_SCM_ARGS(1); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, - QCOM_SCM_IOMMU_SECURE_PTBL_SIZE, &desc, &res); - - if (size) - *size = res.a1; - - return ret ? : res.a2; -} - -int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size, - u32 spare) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = addr; - desc.args[1] = size; - desc.args[2] = spare; - desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, - QCOM_SCM_VAL); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, - QCOM_SCM_IOMMU_SECURE_PTBL_INIT, &desc, &res); - - /* the pg table has been initialized already, ignore the error */ - if (ret == -EPERM) - ret = 0; - - return ret; -} - -int __qcom_scm_set_dload_mode(struct device *dev, bool enable) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = QCOM_SCM_SET_DLOAD_MODE; - desc.args[1] = enable ? 
QCOM_SCM_SET_DLOAD_MODE : 0; - desc.arginfo = QCOM_SCM_ARGS(2); - - return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE, - &desc, &res); -} - -int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, - unsigned int *val) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - int ret; - - desc.args[0] = addr; - desc.arginfo = QCOM_SCM_ARGS(1); - - ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, - &desc, &res); - if (ret >= 0) - *val = res.a1; - - return ret < 0 ? ret : 0; -} - -int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = addr; - desc.args[1] = val; - desc.arginfo = QCOM_SCM_ARGS(2); - - return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE, - &desc, &res); -} - -int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en) -{ - struct qcom_scm_desc desc = {0}; - struct arm_smccc_res res; - - desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL; - desc.args[1] = en; - desc.arginfo = QCOM_SCM_ARGS(2); - - return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM, - QCOM_SCM_CONFIG_ERRATA1, &desc, &res); -} diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c new file mode 100644 index 000000000000..8532e7c78ef7 --- /dev/null +++ b/drivers/firmware/qcom_scm-legacy.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. + * Copyright (C) 2015 Linaro Ltd. + */ + +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/qcom_scm.h> +#include <linux/arm-smccc.h> +#include <linux/dma-mapping.h> + +#include "qcom_scm.h" + +static DEFINE_MUTEX(qcom_scm_lock); + + +/** + * struct arm_smccc_args + * @args: The array of values used in registers in smc instruction + */ +struct arm_smccc_args { + unsigned long args[8]; +}; + + +/** + * struct scm_legacy_command - one SCM command buffer + * @len: total available memory for command and response + * @buf_offset: start of command buffer + * @resp_hdr_offset: start of response buffer + * @id: command to be executed + * @buf: buffer returned from scm_legacy_get_command_buffer() + * + * An SCM command is laid out in memory as follows: + * + * ------------------- <--- struct scm_legacy_command + * | command header | + * ------------------- <--- scm_legacy_get_command_buffer() + * | command buffer | + * ------------------- <--- struct scm_legacy_response and + * | response header | scm_legacy_command_to_response() + * ------------------- <--- scm_legacy_get_response_buffer() + * | response buffer | + * ------------------- + * + * There can be arbitrary padding between the headers and buffers so + * you should always use the appropriate scm_legacy_get_*_buffer() routines + * to access the buffers in a safe manner. 
+ */ +struct scm_legacy_command { + __le32 len; + __le32 buf_offset; + __le32 resp_hdr_offset; + __le32 id; + __le32 buf[0]; +}; + +/** + * struct scm_legacy_response - one SCM response buffer + * @len: total available memory for response + * @buf_offset: start of response data relative to start of scm_legacy_response + * @is_complete: indicates if the command has finished processing + */ +struct scm_legacy_response { + __le32 len; + __le32 buf_offset; + __le32 is_complete; +}; + +/** + * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response + * @cmd: command + * + * Returns a pointer to a response for a command. + */ +static inline struct scm_legacy_response *scm_legacy_command_to_response( + const struct scm_legacy_command *cmd) +{ + return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset); +} + +/** + * scm_legacy_get_command_buffer() - Get a pointer to a command buffer + * @cmd: command + * + * Returns a pointer to the command buffer of a command. + */ +static inline void *scm_legacy_get_command_buffer( + const struct scm_legacy_command *cmd) +{ + return (void *)cmd->buf; +} + +/** + * scm_legacy_get_response_buffer() - Get a pointer to a response buffer + * @rsp: response + * + * Returns a pointer to a response buffer of a response. + */ +static inline void *scm_legacy_get_response_buffer( + const struct scm_legacy_response *rsp) +{ + return (void *)rsp + le32_to_cpu(rsp->buf_offset); +} + +static void __scm_legacy_do(const struct arm_smccc_args *smc, + struct arm_smccc_res *res) +{ + do { + arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2], + smc->args[3], smc->args[4], smc->args[5], + smc->args[6], smc->args[7], res); + } while (res->a0 == QCOM_SCM_INTERRUPTED); +} + +/** + * qcom_scm_call() - Sends a command to the SCM and waits for the command to + * finish processing. + * + * A note on cache maintenance: + * Note that any buffers that are expected to be accessed by the secure world + * must be flushed before invoking qcom_scm_call and invalidated in the cache + * immediately after qcom_scm_call returns. Cache maintenance on the command + * and response buffers is taken care of by qcom_scm_call; however, callers are + * responsible for any other cached buffers passed over to the secure world. 
+ */ +int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res) +{ + u8 arglen = desc->arginfo & 0xf; + int ret = 0, context_id; + unsigned int i; + struct scm_legacy_command *cmd; + struct scm_legacy_response *rsp; + struct arm_smccc_args smc = {0}; + struct arm_smccc_res smc_res; + const size_t cmd_len = arglen * sizeof(__le32); + const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32); + size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len; + dma_addr_t cmd_phys; + __le32 *arg_buf; + const __le32 *res_buf; + + cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->len = cpu_to_le32(alloc_len); + cmd->buf_offset = cpu_to_le32(sizeof(*cmd)); + cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len); + cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd)); + + arg_buf = scm_legacy_get_command_buffer(cmd); + for (i = 0; i < arglen; i++) + arg_buf[i] = cpu_to_le32(desc->args[i]); + + rsp = scm_legacy_command_to_response(cmd); + + cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, cmd_phys)) { + kfree(cmd); + return -ENOMEM; + } + + smc.args[0] = 1; + smc.args[1] = (unsigned long)&context_id; + smc.args[2] = cmd_phys; + + mutex_lock(&qcom_scm_lock); + __scm_legacy_do(&smc, &smc_res); + if (smc_res.a0) + ret = qcom_scm_remap_error(smc_res.a0); + mutex_unlock(&qcom_scm_lock); + if (ret) + goto out; + + do { + dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len, + sizeof(*rsp), DMA_FROM_DEVICE); + } while (!rsp->is_complete); + + dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len + + le32_to_cpu(rsp->buf_offset), + resp_len, DMA_FROM_DEVICE); + + if (res) { + res_buf = scm_legacy_get_response_buffer(rsp); + for (i = 0; i < MAX_QCOM_SCM_RETS; i++) + res->result[i] = le32_to_cpu(res_buf[i]); + } +out: + dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE); + kfree(cmd); + return ret; +} + +#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5 +#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2 +#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8) +#define SCM_LEGACY_MASK_IRQS BIT(5) +#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \ + ((SCM_LEGACY_FNID(svc, cmd) << 12) | \ + SCM_LEGACY_CLASS_REGISTER | \ + SCM_LEGACY_MASK_IRQS | \ + (n & 0xf)) + +/** + * qcom_scm_call_atomic() - Send an atomic SCM command with up to 5 arguments + * and 3 return values + * @desc: SCM call descriptor containing arguments + * @res: SCM call return values + * + * This shall only be used with commands that are guaranteed to be + * uninterruptable, atomic and SMP safe. + */ +int scm_legacy_call_atomic(struct device *unused, + const struct qcom_scm_desc *desc, + struct qcom_scm_res *res) +{ + int context_id; + struct arm_smccc_res smc_res; + size_t arglen = desc->arginfo & 0xf; + + BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS); + + arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen), + (unsigned long)&context_id, + desc->args[0], desc->args[1], desc->args[2], + desc->args[3], desc->args[4], 0, &smc_res); + + if (res) { + res->result[0] = smc_res.a1; + res->result[1] = smc_res.a2; + res->result[2] = smc_res.a3; + } + + return smc_res.a0; +} diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c new file mode 100644 index 000000000000..497c13ba98d6 --- /dev/null +++ b/drivers/firmware/qcom_scm-smc.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/io.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/qcom_scm.h> +#include <linux/arm-smccc.h> +#include <linux/dma-mapping.h> + +#include "qcom_scm.h" + +/** + * struct arm_smccc_args + * @args: The array of values used in registers in smc instruction + */ +struct arm_smccc_args { + unsigned long args[8]; +}; + +static DEFINE_MUTEX(qcom_scm_lock); + +#define QCOM_SCM_EBUSY_WAIT_MS 30 +#define QCOM_SCM_EBUSY_MAX_RETRY 20 + +#define SCM_SMC_N_REG_ARGS 4 +#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1) +#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1) +#define SCM_SMC_FIRST_REG_IDX 2 +#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1) + +static void __scm_smc_do_quirk(const struct arm_smccc_args *smc, + struct arm_smccc_res *res) +{ + unsigned long a0 = smc->args[0]; + struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 }; + + quirk.state.a6 = 0; + + do { + arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2], + smc->args[3], smc->args[4], smc->args[5], + quirk.state.a6, smc->args[7], res, &quirk); + + if (res->a0 == QCOM_SCM_INTERRUPTED) + a0 = res->a0; + + } while (res->a0 == QCOM_SCM_INTERRUPTED); +} + +static void __scm_smc_do(const struct arm_smccc_args *smc, + struct arm_smccc_res *res, bool atomic) +{ + int retry_count = 0; + + if (atomic) { + __scm_smc_do_quirk(smc, res); + return; + } + + do { + mutex_lock(&qcom_scm_lock); + + __scm_smc_do_quirk(smc, res); + + mutex_unlock(&qcom_scm_lock); + + if (res->a0 == QCOM_SCM_V2_EBUSY) { + if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY) + break; + msleep(QCOM_SCM_EBUSY_WAIT_MS); + } + } while (res->a0 == QCOM_SCM_V2_EBUSY); +} + +int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res, bool atomic) +{ + int arglen = desc->arginfo & 0xf; + int i; + dma_addr_t args_phys = 0; + void *args_virt = NULL; + size_t alloc_len; + gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL; + u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL; + u32 qcom_smccc_convention = + (qcom_scm_convention == SMC_CONVENTION_ARM_32) ? 
+ ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64; + struct arm_smccc_res smc_res; + struct arm_smccc_args smc = {0}; + + smc.args[0] = ARM_SMCCC_CALL_VAL( + smccc_call_type, + qcom_smccc_convention, + desc->owner, + SCM_SMC_FNID(desc->svc, desc->cmd)); + smc.args[1] = desc->arginfo; + for (i = 0; i < SCM_SMC_N_REG_ARGS; i++) + smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i]; + + if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) { + alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64); + args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag); + + if (!args_virt) + return -ENOMEM; + + if (qcom_smccc_convention == ARM_SMCCC_SMC_32) { + __le32 *args = args_virt; + + for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++) + args[i] = cpu_to_le32(desc->args[i + + SCM_SMC_FIRST_EXT_IDX]); + } else { + __le64 *args = args_virt; + + for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++) + args[i] = cpu_to_le64(desc->args[i + + SCM_SMC_FIRST_EXT_IDX]); + } + + args_phys = dma_map_single(dev, args_virt, alloc_len, + DMA_TO_DEVICE); + + if (dma_mapping_error(dev, args_phys)) { + kfree(args_virt); + return -ENOMEM; + } + + smc.args[SCM_SMC_LAST_REG_IDX] = args_phys; + } + + __scm_smc_do(&smc, &smc_res, atomic); + + if (args_virt) { + dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE); + kfree(args_virt); + } + + if (res) { + res->result[0] = smc_res.a1; + res->result[1] = smc_res.a2; + res->result[2] = smc_res.a3; + } + + return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0; +} diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 1ba0df4b97ab..059bb0fbae9e 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -1,8 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * Qualcomm SCM driver - * - * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. */ #include <linux/platform_device.h> @@ -19,6 +16,7 @@ #include <linux/of_platform.h> #include <linux/clk.h> #include <linux/reset-controller.h> +#include <linux/arm-smccc.h> #include "qcom_scm.h" @@ -52,6 +50,35 @@ struct qcom_scm_mem_map_info { __le64 mem_size; }; +#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00 +#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01 +#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08 +#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20 + +#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04 +#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02 +#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10 +#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40 + +struct qcom_scm_wb_entry { + int flag; + void *entry; +}; + +static struct qcom_scm_wb_entry qcom_scm_wb[] = { + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 }, + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 }, + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 }, + { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, +}; + +static const char *qcom_scm_convention_names[] = { + [SMC_CONVENTION_UNKNOWN] = "unknown", + [SMC_CONVENTION_ARM_32] = "smc arm 32", + [SMC_CONVENTION_ARM_64] = "smc arm 64", + [SMC_CONVENTION_LEGACY] = "smc legacy", +}; + static struct qcom_scm *__scm; static int qcom_scm_clk_enable(void) @@ -87,149 +114,308 @@ static void qcom_scm_clk_disable(void) clk_disable_unprepare(__scm->bus_clk); } -/** - * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the cold boot address of the cpus. Any cpu outside the supported - * range would be removed from the cpu present mask. 
- */ -int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) +static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, + u32 cmd_id); + +enum qcom_scm_convention qcom_scm_convention; +static bool has_queried __read_mostly; +static DEFINE_SPINLOCK(query_lock); + +static void __query_convention(void) { - return __qcom_scm_set_cold_boot_addr(entry, cpus); + unsigned long flags; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_INFO, + .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, + .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO, + QCOM_SCM_INFO_IS_CALL_AVAIL) | + (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT), + .arginfo = QCOM_SCM_ARGS(1), + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + spin_lock_irqsave(&query_lock, flags); + if (has_queried) + goto out; + + qcom_scm_convention = SMC_CONVENTION_ARM_64; + // Device isn't required as there is only one argument - no device + // needed to dma_map_single to secure world + ret = scm_smc_call(NULL, &desc, &res, true); + if (!ret && res.result[0] == 1) + goto out; + + qcom_scm_convention = SMC_CONVENTION_ARM_32; + ret = scm_smc_call(NULL, &desc, &res, true); + if (!ret && res.result[0] == 1) + goto out; + + qcom_scm_convention = SMC_CONVENTION_LEGACY; +out: + has_queried = true; + spin_unlock_irqrestore(&query_lock, flags); + pr_info("qcom_scm: convention: %s\n", + qcom_scm_convention_names[qcom_scm_convention]); } -EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); -/** - * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus - * @entry: Entry point function for the cpus - * @cpus: The cpumask of cpus that will use the entry point - * - * Set the Linux entry point for the SCM to transfer control to when coming - * out of a power down. CPU power down may be executed on cpuidle or hotplug. - */ -int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) +static inline enum qcom_scm_convention __get_convention(void) { - return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus); + if (unlikely(!has_queried)) + __query_convention(); + return qcom_scm_convention; } -EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); /** - * qcom_scm_cpu_power_down() - Power down the cpu - * @flags - Flags to flush cache + * qcom_scm_call() - Invoke a syscall in the secure world + * @dev: device + * @svc_id: service identifier + * @cmd_id: command identifier + * @desc: Descriptor structure containing arguments and return values * - * This is an end point to power down cpu. If there was a pending interrupt, - * the control would return from this function, otherwise, the cpu jumps to the - * warm boot entry point set for this cpu upon reset. + * Sends a command to the SCM and waits for the command to finish processing. + * This should *only* be called in pre-emptible context. */ -void qcom_scm_cpu_power_down(u32 flags) +static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc, + struct qcom_scm_res *res) { - __qcom_scm_cpu_power_down(flags); + might_sleep(); + switch (__get_convention()) { + case SMC_CONVENTION_ARM_32: + case SMC_CONVENTION_ARM_64: + return scm_smc_call(dev, desc, res, false); + case SMC_CONVENTION_LEGACY: + return scm_legacy_call(dev, desc, res); + default: + pr_err("Unknown current SCM calling convention.\n"); + return -EINVAL; + } } -EXPORT_SYMBOL(qcom_scm_cpu_power_down); /** - * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. 
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call() + * @dev: device + * @svc_id: service identifier + * @cmd_id: command identifier + * @desc: Descriptor structure containing arguments and return values + * @res: Structure containing results from SMC/HVC call * - * Return true if HDCP is supported, false if not. + * Sends a command to the SCM and waits for the command to finish processing. + * This can be called in atomic context. */ -bool qcom_scm_hdcp_available(void) +static int qcom_scm_call_atomic(struct device *dev, + const struct qcom_scm_desc *desc, + struct qcom_scm_res *res) { - int ret = qcom_scm_clk_enable(); - - if (ret) - return ret; + switch (__get_convention()) { + case SMC_CONVENTION_ARM_32: + case SMC_CONVENTION_ARM_64: + return scm_smc_call(dev, desc, res, true); + case SMC_CONVENTION_LEGACY: + return scm_legacy_call_atomic(dev, desc, res); + default: + pr_err("Unknown current SCM calling convention.\n"); + return -EINVAL; + } +} - ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, - QCOM_SCM_CMD_HDCP); +static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, + u32 cmd_id) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_INFO, + .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + desc.arginfo = QCOM_SCM_ARGS(1); + switch (__get_convention()) { + case SMC_CONVENTION_ARM_32: + case SMC_CONVENTION_ARM_64: + desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) | + (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT); + break; + case SMC_CONVENTION_LEGACY: + desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id); + break; + default: + pr_err("Unknown SMC convention being used\n"); + return -EINVAL; + } - qcom_scm_clk_disable(); + ret = qcom_scm_call(dev, &desc, &res); - return ret > 0 ? true : false; + return ret ? : res.result[0]; } -EXPORT_SYMBOL(qcom_scm_hdcp_available); /** - * qcom_scm_hdcp_req() - Send HDCP request. - * @req: HDCP request array - * @req_cnt: HDCP request array count - * @resp: response buffer passed to SCM + * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus + * @entry: Entry point function for the cpus + * @cpus: The cpumask of cpus that will use the entry point * - * Write HDCP register(s) through SCM. + * Set the Linux entry point for the SCM to transfer control to when coming + * out of a power down. CPU power down may be executed on cpuidle or hotplug. */ -int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) +int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus) { - int ret = qcom_scm_clk_enable(); + int ret; + int flags = 0; + int cpu; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_ADDR, + .arginfo = QCOM_SCM_ARGS(2), + }; - if (ret) - return ret; + /* + * Reassign only if we are switching from hotplug entry point + * to cpuidle entry point or vice versa. 
+ */ + for_each_cpu(cpu, cpus) { + if (entry == qcom_scm_wb[cpu].entry) + continue; + flags |= qcom_scm_wb[cpu].flag; + } + + /* No change in entry function */ + if (!flags) + return 0; + + desc.args[0] = flags; + desc.args[1] = virt_to_phys(entry); + + ret = qcom_scm_call(__scm->dev, &desc, NULL); + if (!ret) { + for_each_cpu(cpu, cpus) + qcom_scm_wb[cpu].entry = entry; + } - ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp); - qcom_scm_clk_disable(); return ret; } -EXPORT_SYMBOL(qcom_scm_hdcp_req); +EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); /** - * qcom_scm_pas_supported() - Check if the peripheral authentication service is - * available for the given peripherial - * @peripheral: peripheral id + * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus + * @entry: Entry point function for the cpus + * @cpus: The cpumask of cpus that will use the entry point * - * Returns true if PAS is supported for this peripheral, otherwise false. + * Set the cold boot address of the cpus. Any cpu outside the supported + * range would be removed from the cpu present mask. */ -bool qcom_scm_pas_supported(u32 peripheral) +int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) { - int ret; + int flags = 0; + int cpu; + int scm_cb_flags[] = { + QCOM_SCM_FLAG_COLDBOOT_CPU0, + QCOM_SCM_FLAG_COLDBOOT_CPU1, + QCOM_SCM_FLAG_COLDBOOT_CPU2, + QCOM_SCM_FLAG_COLDBOOT_CPU3, + }; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_ADDR, + .arginfo = QCOM_SCM_ARGS(2), + .owner = ARM_SMCCC_OWNER_SIP, + }; + + if (!cpus || (cpus && cpumask_empty(cpus))) + return -EINVAL; - ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, - QCOM_SCM_PAS_IS_SUPPORTED_CMD); - if (ret <= 0) - return false; + for_each_cpu(cpu, cpus) { + if (cpu < ARRAY_SIZE(scm_cb_flags)) + flags |= scm_cb_flags[cpu]; + else + set_cpu_present(cpu, false); + } + + desc.args[0] = flags; + desc.args[1] = virt_to_phys(entry); - return __qcom_scm_pas_supported(__scm->dev, peripheral); + return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_pas_supported); +EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); /** - * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available + * qcom_scm_cpu_power_down() - Power down the cpu + * @flags - Flags to flush cache + * + * This is an end point to power down cpu. If there was a pending interrupt, + * the control would return from this function, otherwise, the cpu jumps to the + * warm boot entry point set for this cpu upon reset. */ -bool qcom_scm_ocmem_lock_available(void) +void qcom_scm_cpu_power_down(u32 flags) { - return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC, - QCOM_SCM_OCMEM_LOCK_CMD); + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_TERMINATE_PC, + .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK, + .arginfo = QCOM_SCM_ARGS(1), + .owner = ARM_SMCCC_OWNER_SIP, + }; + + qcom_scm_call_atomic(__scm ? 
__scm->dev : NULL, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_ocmem_lock_available); +EXPORT_SYMBOL(qcom_scm_cpu_power_down); -/** - * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM - * region to the specified initiator - * - * @id: tz initiator id - * @offset: OCMEM offset - * @size: OCMEM size - * @mode: access mode (WIDE/NARROW) - */ -int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, - u32 mode) +int qcom_scm_set_remote_state(u32 state, u32 id) { - return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode); + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = state, + .args[1] = id, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + return ret ? : res.result[0]; } -EXPORT_SYMBOL(qcom_scm_ocmem_lock); +EXPORT_SYMBOL(qcom_scm_set_remote_state); -/** - * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM - * region from the specified initiator - * - * @id: tz initiator id - * @offset: OCMEM offset - * @size: OCMEM size - */ -int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) +static int __qcom_scm_set_dload_mode(struct device *dev, bool enable) { - return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size); + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_BOOT, + .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} + +static void qcom_scm_set_download_mode(bool enable) +{ + bool avail; + int ret = 0; + + avail = __qcom_scm_is_call_available(__scm->dev, + QCOM_SCM_SVC_BOOT, + QCOM_SCM_BOOT_SET_DLOAD_MODE); + if (avail) { + ret = __qcom_scm_set_dload_mode(__scm->dev, enable); + } else if (__scm->dload_mode_addr) { + ret = qcom_scm_io_writel(__scm->dload_mode_addr, + enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0); + } else { + dev_err(__scm->dev, + "No available mechanism for setting download mode\n"); + } + + if (ret) + dev_err(__scm->dev, "failed to set download mode: %d\n", ret); } -EXPORT_SYMBOL(qcom_scm_ocmem_unlock); /** * qcom_scm_pas_init_image() - Initialize peripheral authentication service @@ -248,6 +434,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) dma_addr_t mdata_phys; void *mdata_buf; int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE, + .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW), + .args[0] = peripheral, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; /* * During the scm call memory protection will be enabled for the meta @@ -266,14 +460,16 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size) if (ret) goto free_metadata; - ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys); + desc.args[1] = mdata_phys; + + ret = qcom_scm_call(__scm->dev, &desc, &res); qcom_scm_clk_disable(); free_metadata: dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys); - return ret; + return ret ? 
: res.result[0]; } EXPORT_SYMBOL(qcom_scm_pas_init_image); @@ -289,15 +485,25 @@ EXPORT_SYMBOL(qcom_scm_pas_init_image); int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) { int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP, + .arginfo = QCOM_SCM_ARGS(3), + .args[0] = peripheral, + .args[1] = addr, + .args[2] = size, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; ret = qcom_scm_clk_enable(); if (ret) return ret; - ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size); + ret = qcom_scm_call(__scm->dev, &desc, &res); qcom_scm_clk_disable(); - return ret; + return ret ? : res.result[0]; } EXPORT_SYMBOL(qcom_scm_pas_mem_setup); @@ -311,15 +517,23 @@ EXPORT_SYMBOL(qcom_scm_pas_mem_setup); int qcom_scm_pas_auth_and_reset(u32 peripheral) { int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = peripheral, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; ret = qcom_scm_clk_enable(); if (ret) return ret; - ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral); + ret = qcom_scm_call(__scm->dev, &desc, &res); qcom_scm_clk_disable(); - return ret; + return ret ? : res.result[0]; } EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset); @@ -332,18 +546,75 @@ EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset); int qcom_scm_pas_shutdown(u32 peripheral) { int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = peripheral, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; ret = qcom_scm_clk_enable(); if (ret) return ret; - ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral); + ret = qcom_scm_call(__scm->dev, &desc, &res); + qcom_scm_clk_disable(); - return ret; + return ret ? : res.result[0]; } EXPORT_SYMBOL(qcom_scm_pas_shutdown); +/** + * qcom_scm_pas_supported() - Check if the peripheral authentication service is + * available for the given peripherial + * @peripheral: peripheral id + * + * Returns true if PAS is supported for this peripheral, otherwise false. + */ +bool qcom_scm_pas_supported(u32 peripheral) +{ + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = peripheral, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL, + QCOM_SCM_PIL_PAS_IS_SUPPORTED); + if (ret <= 0) + return false; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + return ret ? false : !!res.result[0]; +} +EXPORT_SYMBOL(qcom_scm_pas_supported); + +static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_PIL, + .cmd = QCOM_SCM_PIL_PAS_MSS_RESET, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = reset, + .args[1] = 0, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + ret = qcom_scm_call(__scm->dev, &desc, &res); + + return ret ? 
: res.result[0]; +} + static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev, unsigned long idx) { @@ -367,6 +638,43 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = { .deassert = qcom_scm_pas_reset_deassert, }; +int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_IO, + .cmd = QCOM_SCM_IO_READ, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = addr, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; + + + ret = qcom_scm_call(__scm->dev, &desc, &res); + if (ret >= 0) + *val = res.result[0]; + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL(qcom_scm_io_readl); + +int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_IO, + .cmd = QCOM_SCM_IO_WRITE, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = addr, + .args[1] = val, + .owner = ARM_SMCCC_OWNER_SIP, + }; + + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_io_writel); + /** * qcom_scm_restore_sec_cfg_available() - Check if secure environment * supports restore security config interface. @@ -376,108 +684,106 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = { bool qcom_scm_restore_sec_cfg_available(void) { return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, - QCOM_SCM_RESTORE_SEC_CFG); + QCOM_SCM_MP_RESTORE_SEC_CFG); } EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available); int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { - return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare); -} -EXPORT_SYMBOL(qcom_scm_restore_sec_cfg); - -int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) -{ - return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size); -} -EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size); - -int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) -{ - return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare); -} -EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG, + .arginfo = QCOM_SCM_ARGS(2), + .args[0] = device_id, + .args[1] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; -int qcom_scm_qsmmu500_wait_safe_toggle(bool en) -{ - return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en); -} -EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle); + ret = qcom_scm_call(__scm->dev, &desc, &res); -int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) -{ - return __qcom_scm_io_readl(__scm->dev, addr, val); + return ret ? 
: res.result[0]; } -EXPORT_SYMBOL(qcom_scm_io_readl); +EXPORT_SYMBOL(qcom_scm_restore_sec_cfg); -int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) +int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { - return __qcom_scm_io_writel(__scm->dev, addr, val); -} -EXPORT_SYMBOL(qcom_scm_io_writel); + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE, + .arginfo = QCOM_SCM_ARGS(1), + .args[0] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + int ret; -static void qcom_scm_set_download_mode(bool enable) -{ - bool avail; - int ret = 0; + ret = qcom_scm_call(__scm->dev, &desc, &res); - avail = __qcom_scm_is_call_available(__scm->dev, - QCOM_SCM_SVC_BOOT, - QCOM_SCM_SET_DLOAD_MODE); - if (avail) { - ret = __qcom_scm_set_dload_mode(__scm->dev, enable); - } else if (__scm->dload_mode_addr) { - ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr, - enable ? QCOM_SCM_SET_DLOAD_MODE : 0); - } else { - dev_err(__scm->dev, - "No available mechanism for setting download mode\n"); - } + if (size) + *size = res.result[0]; - if (ret) - dev_err(__scm->dev, "failed to set download mode: %d\n", ret); + return ret ? : res.result[1]; } +EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size); -static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) +int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { - struct device_node *tcsr; - struct device_node *np = dev->of_node; - struct resource res; - u32 offset; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT, + .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, + QCOM_SCM_VAL), + .args[0] = addr, + .args[1] = size, + .args[2] = spare, + .owner = ARM_SMCCC_OWNER_SIP, + }; int ret; - tcsr = of_parse_phandle(np, "qcom,dload-mode", 0); - if (!tcsr) - return 0; - - ret = of_address_to_resource(tcsr, 0, &res); - of_node_put(tcsr); - if (ret) - return ret; - - ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset); - if (ret < 0) - return ret; + desc.args[0] = addr; + desc.args[1] = size; + desc.args[2] = spare; + desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, + QCOM_SCM_VAL); - *addr = res.start + offset; + ret = qcom_scm_call(__scm->dev, &desc, NULL); - return 0; -} + /* the pg table has been initialized already, ignore the error */ + if (ret == -EPERM) + ret = 0; -/** - * qcom_scm_is_available() - Checks if SCM is available - */ -bool qcom_scm_is_available(void) -{ - return !!__scm; + return ret; } -EXPORT_SYMBOL(qcom_scm_is_available); +EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); -int qcom_scm_set_remote_state(u32 state, u32 id) +static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, + size_t mem_sz, phys_addr_t src, size_t src_sz, + phys_addr_t dest, size_t dest_sz) { - return __qcom_scm_set_remote_state(__scm->dev, state, id); + int ret; + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_MP, + .cmd = QCOM_SCM_MP_ASSIGN, + .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL, + QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO, + QCOM_SCM_VAL, QCOM_SCM_VAL), + .args[0] = mem_region, + .args[1] = mem_sz, + .args[2] = src, + .args[3] = src_sz, + .args[4] = dest, + .args[5] = dest_sz, + .args[6] = 0, + .owner = ARM_SMCCC_OWNER_SIP, + }; + struct qcom_scm_res res; + + ret = qcom_scm_call(dev, &desc, &res); + + return ret ? 
: res.result[0]; } -EXPORT_SYMBOL(qcom_scm_set_remote_state); /** * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership @@ -561,6 +867,184 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, } EXPORT_SYMBOL(qcom_scm_assign_mem); +/** + * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available + */ +bool qcom_scm_ocmem_lock_available(void) +{ + return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM, + QCOM_SCM_OCMEM_LOCK_CMD); +} +EXPORT_SYMBOL(qcom_scm_ocmem_lock_available); + +/** + * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM + * region to the specified initiator + * + * @id: tz initiator id + * @offset: OCMEM offset + * @size: OCMEM size + * @mode: access mode (WIDE/NARROW) + */ +int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, + u32 mode) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_OCMEM, + .cmd = QCOM_SCM_OCMEM_LOCK_CMD, + .args[0] = id, + .args[1] = offset, + .args[2] = size, + .args[3] = mode, + .arginfo = QCOM_SCM_ARGS(4), + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_ocmem_lock); + +/** + * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM + * region from the specified initiator + * + * @id: tz initiator id + * @offset: OCMEM offset + * @size: OCMEM size + */ +int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) +{ + struct qcom_scm_desc desc = { + .svc = QCOM_SCM_SVC_OCMEM, + .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD, + .args[0] = id, + .args[1] = offset, + .args[2] = size, + .arginfo = QCOM_SCM_ARGS(3), + }; + + return qcom_scm_call(__scm->dev, &desc, NULL); +} +EXPORT_SYMBOL(qcom_scm_ocmem_unlock); + +/** + * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. + * + * Return true if HDCP is supported, false if not. + */ +bool qcom_scm_hdcp_available(void) +{ + int ret = qcom_scm_clk_enable(); + + if (ret) + return ret; + + ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP, + QCOM_SCM_HDCP_INVOKE); + + qcom_scm_clk_disable(); + + return ret > 0 ? true : false; +} +EXPORT_SYMBOL(qcom_scm_hdcp_available); + +/** + * qcom_scm_hdcp_req() - Send HDCP request. + * @req: HDCP request array + * @req_cnt: HDCP request array count + * @resp: response buffer passed to SCM + * + * Write HDCP register(s) through SCM. 
+ */
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_HDCP,
+		.cmd = QCOM_SCM_HDCP_INVOKE,
+		.arginfo = QCOM_SCM_ARGS(10),
+		.args = {
+			req[0].addr,
+			req[0].val,
+			req[1].addr,
+			req[1].val,
+			req[2].addr,
+			req[2].val,
+			req[3].addr,
+			req[3].val,
+			req[4].addr,
+			req[4].val
+		},
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+		return -ERANGE;
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		return ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+	*resp = res.result[0];
+
+	qcom_scm_clk_disable();
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
+		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
+		.args[1] = en,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+
+	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+	struct device_node *tcsr;
+	struct device_node *np = dev->of_node;
+	struct resource res;
+	u32 offset;
+	int ret;
+
+	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+	if (!tcsr)
+		return 0;
+
+	ret = of_address_to_resource(tcsr, 0, &res);
+	of_node_put(tcsr);
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+	if (ret < 0)
+		return ret;
+
+	*addr = res.start + offset;
+
+	return 0;
+}
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+	return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
 static int qcom_scm_probe(struct platform_device *pdev)
 {
 	struct qcom_scm *scm;
@@ -631,7 +1115,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
 	__scm = scm;
 	__scm->dev = &pdev->dev;
-	__qcom_scm_init();
+	__query_convention();
 	/*
 	 * If requested enable "download mode", from this point on warmboot
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 81dcf5f1138e..d9ed670da222 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -1,71 +1,116 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015,2019 The Linux Foundation. All rights reserved.
 */
 #ifndef __QCOM_SCM_INT_H
 #define __QCOM_SCM_INT_H
-#define QCOM_SCM_SVC_BOOT		0x1
-#define QCOM_SCM_BOOT_ADDR		0x1
-#define QCOM_SCM_SET_DLOAD_MODE		0x10
-#define QCOM_SCM_BOOT_ADDR_MC		0x11
-#define QCOM_SCM_SET_REMOTE_STATE	0xa
-extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id);
-extern int __qcom_scm_set_dload_mode(struct device *dev, bool enable);
-
-#define QCOM_SCM_FLAG_HLOS		0x01
-#define QCOM_SCM_FLAG_COLDBOOT_MC	0x02
-#define QCOM_SCM_FLAG_WARMBOOT_MC	0x04
-extern int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
-					 const cpumask_t *cpus);
-extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
-
-#define QCOM_SCM_CMD_TERMINATE_PC	0x2
+enum qcom_scm_convention {
+	SMC_CONVENTION_UNKNOWN,
+	SMC_CONVENTION_LEGACY,
+	SMC_CONVENTION_ARM_32,
+	SMC_CONVENTION_ARM_64,
+};
+
+extern enum qcom_scm_convention qcom_scm_convention;
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+	QCOM_SCM_VAL,
+	QCOM_SCM_RO,
+	QCOM_SCM_RW,
+	QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+			   (((a) & 0x3) << 4) | \
+			   (((b) & 0x3) << 6) | \
+			   (((c) & 0x3) << 8) | \
+			   (((d) & 0x3) << 10) | \
+			   (((e) & 0x3) << 12) | \
+			   (((f) & 0x3) << 14) | \
+			   (((g) & 0x3) << 16) | \
+			   (((h) & 0x3) << 18) | \
+			   (((i) & 0x3) << 20) | \
+			   (((j) & 0x3) << 22) | \
+			   ((num) & 0xf))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+
+/**
+ * struct qcom_scm_desc
+ * @arginfo:	Metadata describing the arguments in args[]
+ * @args:	The array of arguments for the secure syscall
+ */
+struct qcom_scm_desc {
+	u32 svc;
+	u32 cmd;
+	u32 arginfo;
+	u64 args[MAX_QCOM_SCM_ARGS];
+	u32 owner;
+};
+
+/**
+ * struct qcom_scm_res
+ * @result:	The values returned by the secure syscall
+ */
+struct qcom_scm_res {
+	u64 result[MAX_QCOM_SCM_RETS];
+};
+
+#define SCM_SMC_FNID(s, c)	((((s) & 0xFF) << 8) | ((c) & 0xFF))
+extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
+			struct qcom_scm_res *res, bool atomic);
+
+#define SCM_LEGACY_FNID(s, c)	(((s) << 10) | ((c) & 0x3ff))
+extern int scm_legacy_call_atomic(struct device *dev,
+				  const struct qcom_scm_desc *desc,
+				  struct qcom_scm_res *res);
+extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
+			   struct qcom_scm_res *res);
+
+#define QCOM_SCM_SVC_BOOT		0x01
+#define QCOM_SCM_BOOT_SET_ADDR		0x01
+#define QCOM_SCM_BOOT_TERMINATE_PC	0x02
+#define QCOM_SCM_BOOT_SET_DLOAD_MODE	0x10
+#define QCOM_SCM_BOOT_SET_REMOTE_STATE	0x0a
 #define QCOM_SCM_FLUSH_FLAG_MASK	0x3
-#define QCOM_SCM_CMD_CORE_HOTPLUGGED	0x10
-extern void __qcom_scm_cpu_power_down(u32 flags);
-#define QCOM_SCM_SVC_IO			0x5
-#define QCOM_SCM_IO_READ		0x1
-#define QCOM_SCM_IO_WRITE		0x2
-extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val);
-extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val);
+#define QCOM_SCM_SVC_PIL		0x02
+#define QCOM_SCM_PIL_PAS_INIT_IMAGE	0x01
+#define QCOM_SCM_PIL_PAS_MEM_SETUP	0x02
+#define QCOM_SCM_PIL_PAS_AUTH_AND_RESET	0x05
+#define QCOM_SCM_PIL_PAS_SHUTDOWN	0x06
+#define QCOM_SCM_PIL_PAS_IS_SUPPORTED	0x07
+#define QCOM_SCM_PIL_PAS_MSS_RESET	0x0a
+
+#define QCOM_SCM_SVC_IO			0x05
+#define QCOM_SCM_IO_READ		0x01
+#define QCOM_SCM_IO_WRITE		0x02
+
+#define QCOM_SCM_SVC_INFO		0x06
+#define QCOM_SCM_INFO_IS_CALL_AVAIL	0x01
-#define QCOM_SCM_SVC_INFO		0x6
-#define QCOM_IS_CALL_AVAIL_CMD		0x1
-extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
-					u32 cmd_id);
+#define QCOM_SCM_SVC_MP				0x0c
+#define QCOM_SCM_MP_RESTORE_SEC_CFG		0x02
+#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE	0x03
+#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT	0x04
+#define QCOM_SCM_MP_ASSIGN			0x16
+
+#define QCOM_SCM_SVC_OCMEM		0x0f
+#define QCOM_SCM_OCMEM_LOCK_CMD		0x01
+#define QCOM_SCM_OCMEM_UNLOCK_CMD	0x02
 #define QCOM_SCM_SVC_HDCP		0x11
-#define QCOM_SCM_CMD_HDCP		0x01
-extern int __qcom_scm_hdcp_req(struct device *dev,
-			       struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+#define QCOM_SCM_HDCP_INVOKE		0x01
-extern void __qcom_scm_init(void);
+#define QCOM_SCM_SVC_SMMU_PROGRAM		0x15
+#define QCOM_SCM_SMMU_CONFIG_ERRATA1		0x03
+#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL	0x02
-#define QCOM_SCM_OCMEM_SVC		0xf
-#define QCOM_SCM_OCMEM_LOCK_CMD		0x1
-#define QCOM_SCM_OCMEM_UNLOCK_CMD	0x2
-
-extern int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset,
-				 u32 size, u32 mode);
-extern int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset,
-				   u32 size);
-
-#define QCOM_SCM_SVC_PIL		0x2
-#define QCOM_SCM_PAS_INIT_IMAGE_CMD	0x1
-#define QCOM_SCM_PAS_MEM_SETUP_CMD	0x2
-#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD	0x5
-#define QCOM_SCM_PAS_SHUTDOWN_CMD	0x6
-#define QCOM_SCM_PAS_IS_SUPPORTED_CMD	0x7
-#define QCOM_SCM_PAS_MSS_RESET		0xa
-extern bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral);
-extern int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
-				     dma_addr_t metadata_phys);
-extern int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
-				    phys_addr_t addr, phys_addr_t size);
-extern int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral);
-extern int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral);
-extern int __qcom_scm_pas_mss_reset(struct device *dev, bool reset);
+extern void __qcom_scm_init(void);
 /* common error codes */
 #define QCOM_SCM_V2_EBUSY	-12
@@ -94,25 +139,4 @@ static inline int qcom_scm_remap_error(int err)
 	return -EINVAL;
 }
-#define QCOM_SCM_SVC_MP			0xc
-#define QCOM_SCM_RESTORE_SEC_CFG	2
-extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
-				      u32 spare);
-#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE	3
-#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT	4
-#define QCOM_SCM_SVC_SMMU_PROGRAM	0x15
-#define QCOM_SCM_CONFIG_ERRATA1		0x3
-#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL	0x2
-extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
-					     size_t *size);
-extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
-					     u32 size, u32 spare);
-extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
-						bool enable);
-#define QCOM_MEM_PROT_ASSIGN_ID	0x16
-extern int __qcom_scm_assign_mem(struct device *dev,
-				 phys_addr_t mem_region, size_t mem_sz,
-				 phys_addr_t src, size_t src_sz,
-				 phys_addr_t dest, size_t dest_sz);
-
 #endif
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index c6c31402848d..7ffb42b0775e 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -268,7 +268,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
 		 */
 		msleep(1000);
 		count_in_sec--;
-	};
+	}
 	if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
 		cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
@@ -512,7 +512,7 @@ static int svc_normal_to_secure_thread(void *data)
 			break;
 		}
-	};
+	}
 	kfree(cbdata);
 	kfree(pdata);
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 72be58960e54..e27f68437b56 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -197,7 +197,7 @@ static int mox_get_board_info(struct mox_rwtm *rwtm)
 		rwtm->serial_number = reply->status[1];
 		rwtm->serial_number <<= 32;
 		rwtm->serial_number |= reply->status[0];
-		rwtm->board_version = reply->status[2];
+		rwtm->board_version = reply->status[2];
 		rwtm->ram_size = reply->status[3];
 		reply_to_mac_addr(rwtm->mac_address1, reply->status[4],
 				  reply->status[5]);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 75bdfaa08380..ecc339d846de 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -26,6 +26,9 @@
 static const struct zynqmp_eemi_ops *eemi_ops_tbl;
+static bool feature_check_enabled;
+static u32 zynqmp_pm_features[PM_API_MAX];
+
 static const struct mfd_cell firmware_devs[] = {
 	{
 		.name = "zynqmp_power_controller",
@@ -44,10 +47,14 @@ static int zynqmp_pm_ret_code(u32 ret_status)
 	case XST_PM_SUCCESS:
 	case XST_PM_DOUBLE_REQ:
 		return 0;
+	case XST_PM_NO_FEATURE:
+		return -ENOTSUPP;
 	case XST_PM_NO_ACCESS:
 		return -EACCES;
 	case XST_PM_ABORT_SUSPEND:
 		return -ECANCELED;
+	case XST_PM_MULT_USER:
+		return -EUSERS;
 	case XST_PM_INTERNAL:
 	case XST_PM_CONFLICT:
 	case XST_PM_INVALID_NODE:
@@ -127,6 +134,39 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
 }
 /**
+ * zynqmp_pm_feature() - Check weather given feature is supported or not
+ * @api_id: API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_feature(u32 api_id)
+{
+	int ret;
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	u64 smc_arg[2];
+
+	if (!feature_check_enabled)
+		return 0;
+
+	/* Return value if feature is already checked */
+	if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
+		return zynqmp_pm_features[api_id];
+
+	smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+	smc_arg[1] = api_id;
+
+	ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+	if (ret) {
+		zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
+		return PM_FEATURE_INVALID;
+	}
+
+	zynqmp_pm_features[api_id] = ret_payload[1];
+
+	return zynqmp_pm_features[api_id];
+}
+
+/**
  * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
  * caller function depending on the configuration
  * @pm_api_id: Requested PM-API call
@@ -160,6 +200,9 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
 	 */
 	u64 smc_arg[4];
+	if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
+		return -ENOTSUPP;
+
 	smc_arg[0] = PM_SIP_SVC | pm_api_id;
 	smc_arg[1] = ((u64)arg1 << 32) | arg0;
 	smc_arg[2] = ((u64)arg3 << 32) | arg2;
@@ -715,6 +758,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
 		np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
 		if (!np)
 			return 0;
+
+		feature_check_enabled = true;
 	}
 	of_node_put(np);