author     Bogdan Purcareata <bogdan.purcareata@nxp.com>    2018-02-05 15:07:42 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-02-22 15:10:50 +0100
commit     6bd067c48efed50ac0200c4a83a415bd524254e0 (patch)
tree       5c89f752c11aff0a51fccbd36098e939800c3f2b /drivers/bus/fsl-mc
parent     staging: wilc1000: rename _WPARxGtk_end_case_ label to avoid camelCase (diff)
staging: fsl-mc: Move core bus out of staging
Move the source files out of staging into their final locations:
- the mc.h include file in drivers/staging/fsl-mc/include goes to include/linux/fsl
- source files in drivers/staging/fsl-mc/bus go to drivers/bus/fsl-mc
- overview.rst, providing an overview of DPAA2, goes to Documentation/networking/dpaa2/overview.rst

Update or delete the other remaining staging files -- Makefile, Kconfig, TODO.
Update the dpaa2_eth and dpio staging drivers. Add integration bits for the
documentation build system.

Signed-off-by: Stuart Yoder <stuyoder@gmail.com>
[rebased, add dpaa2_eth and dpio #include updates]
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
[rebased, split irqchip to separate patch]
Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
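For reference, the dpaa2_eth and dpio #include updates mentioned above boil down to
switching consumers from the old staging-relative header to the regular kernel header;
a minimal before/after sketch (the exact old relative path shown here is an assumption,
not taken from this patch):

	/* before the move: mc.h lived under drivers/staging/fsl-mc/include (assumed path) */
	#include "../fsl-mc/include/mc.h"

	/* after the move: mc.h is a normal kernel header */
	#include <linux/fsl/mc.h>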
Diffstat (limited to 'drivers/bus/fsl-mc')
-rw-r--r--  drivers/bus/fsl-mc/Kconfig              16
-rw-r--r--  drivers/bus/fsl-mc/Makefile             16
-rw-r--r--  drivers/bus/fsl-mc/dpmcp.c              99
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c       809
-rw-r--r--  drivers/bus/fsl-mc/dprc.c              532
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-allocator.c  648
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c        948
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-msi.c        285
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h    475
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c             268
-rw-r--r--  drivers/bus/fsl-mc/mc-sys.c            296
11 files changed, 4392 insertions, 0 deletions
diff --git a/drivers/bus/fsl-mc/Kconfig b/drivers/bus/fsl-mc/Kconfig
new file mode 100644
index 000000000000..bcca64486fd3
--- /dev/null
+++ b/drivers/bus/fsl-mc/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DPAA2 fsl-mc bus
+#
+# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+#
+
+config FSL_MC_BUS
+ bool "QorIQ DPAA2 fsl-mc bus driver"
+ depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC)))
+ select GENERIC_MSI_IRQ_DOMAIN
+ help
+ Driver to enable the bus infrastructure for the QorIQ DPAA2
+ architecture. The fsl-mc bus driver handles discovery of
+ DPAA2 objects (which are represented as Linux devices) and
+ binding objects to drivers.
diff --git a/drivers/bus/fsl-mc/Makefile b/drivers/bus/fsl-mc/Makefile
new file mode 100644
index 000000000000..6a97f2c3a065
--- /dev/null
+++ b/drivers/bus/fsl-mc/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Freescale Management Complex (MC) bus drivers
+#
+# Copyright (C) 2014 Freescale Semiconductor, Inc.
+#
+obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o
+
+mc-bus-driver-objs := fsl-mc-bus.o \
+ mc-sys.o \
+ mc-io.o \
+ dprc.o \
+ dprc-driver.o \
+ fsl-mc-allocator.o \
+ fsl-mc-msi.o \
+ dpmcp.o
diff --git a/drivers/bus/fsl-mc/dpmcp.c b/drivers/bus/fsl-mc/dpmcp.c
new file mode 100644
index 000000000000..8d997b0f6ba3
--- /dev/null
+++ b/drivers/bus/fsl-mc/dpmcp.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dpmcp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmcp_id: DPMCP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmcp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmcp_id,
+ u16 *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
+ cmd_flags, 0);
+ cmd_params = (struct dpmcp_cmd_open *)cmd.params;
+ cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+
+/**
+ * dpmcp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMCP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMCP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmcp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
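The three DPMCP commands above follow the usual open/operate/close flow against an MC
portal. A minimal sketch of a caller, assuming a valid struct fsl_mc_io and a known
DPMCP id (the function name and the terse error handling are illustrative only):

	static int example_reset_dpmcp(struct fsl_mc_io *mc_io, int dpmcp_id)
	{
		u16 token;
		int err;

		/* open a control session and obtain the authentication token */
		err = dpmcp_open(mc_io, 0, dpmcp_id, &token);
		if (err)
			return err;

		/* return the object to its initial state */
		err = dpmcp_reset(mc_io, 0, token);

		/* always close the session, even if the reset failed */
		dpmcp_close(mc_io, 0, token);
		return err;
	}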
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
new file mode 100644
index 000000000000..52c7e15143d6
--- /dev/null
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -0,0 +1,809 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale data path resource container (DPRC) driver
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc"
+
+struct fsl_mc_child_objs {
+ int child_count;
+ struct fsl_mc_obj_desc *child_array;
+};
+
+static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ return mc_dev->obj_desc.id == obj_desc->id &&
+ strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
+
+}
+
+static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
+{
+ int i;
+ struct fsl_mc_child_objs *objs;
+ struct fsl_mc_device *mc_dev;
+
+ mc_dev = to_fsl_mc_device(dev);
+ objs = data;
+
+ for (i = 0; i < objs->child_count; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &objs->child_array[i];
+
+ if (strlen(obj_desc->type) != 0 &&
+ fsl_mc_device_match(mc_dev, obj_desc))
+ break;
+ }
+
+ if (i == objs->child_count)
+ fsl_mc_device_remove(mc_dev);
+
+ return 0;
+}
+
+static int __fsl_mc_device_remove(struct device *dev, void *data)
+{
+ fsl_mc_device_remove(to_fsl_mc_device(dev));
+ return 0;
+}
+
+/**
+ * dprc_remove_devices - Removes devices for objects removed from a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @obj_desc_array: array of object descriptors for child objects currently
+ * present in the DPRC in the MC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+ *
+ * Synchronizes the state of the Linux bus driver with the actual state of
+ * the MC by removing devices that represent MC objects that have
+ * been dynamically removed in the physical DPRC.
+ */
+static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+{
+ if (num_child_objects_in_mc != 0) {
+ /*
+ * Remove child objects that are in the DPRC in Linux,
+ * but not in the MC:
+ */
+ struct fsl_mc_child_objs objs;
+
+ objs.child_count = num_child_objects_in_mc;
+ objs.child_array = obj_desc_array;
+ device_for_each_child(&mc_bus_dev->dev, &objs,
+ __fsl_mc_device_remove_if_not_in_mc);
+ } else {
+ /*
+ * There are no child objects for this DPRC in the MC.
+ * So, remove all the child devices from Linux:
+ */
+ device_for_each_child(&mc_bus_dev->dev, NULL,
+ __fsl_mc_device_remove);
+ }
+}
+
+static int __fsl_mc_device_match(struct device *dev, void *data)
+{
+ struct fsl_mc_obj_desc *obj_desc = data;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return fsl_mc_device_match(mc_dev, obj_desc);
+}
+
+static struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc
+ *obj_desc,
+ struct fsl_mc_device
+ *mc_bus_dev)
+{
+ struct device *dev;
+
+ dev = device_find_child(&mc_bus_dev->dev, obj_desc,
+ __fsl_mc_device_match);
+
+ return dev ? to_fsl_mc_device(dev) : NULL;
+}
+
+/**
+ * check_plugged_state_change - Check change in an MC object's plugged state
+ *
+ * @mc_dev: pointer to the fsl-mc device for a given MC object
+ * @obj_desc: pointer to the MC object's descriptor in the MC
+ *
+ * If the plugged state has changed from unplugged to plugged, the fsl-mc
+ * device is bound to the corresponding device driver.
+ * If the plugged state has changed from plugged to unplugged, the fsl-mc
+ * device is unbound from the corresponding device driver.
+ */
+static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ u32 plugged_flag_at_mc =
+ obj_desc->state & FSL_MC_OBJ_STATE_PLUGGED;
+
+ if (plugged_flag_at_mc !=
+ (mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED)) {
+ if (plugged_flag_at_mc) {
+ mc_dev->obj_desc.state |= FSL_MC_OBJ_STATE_PLUGGED;
+ error = device_attach(&mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "device_attach() failed: %d\n",
+ error);
+ }
+ } else {
+ mc_dev->obj_desc.state &= ~FSL_MC_OBJ_STATE_PLUGGED;
+ device_release_driver(&mc_dev->dev);
+ }
+ }
+}
+
+/**
+ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @obj_desc_array: array of device descriptors for child devices currently
+ * present in the physical DPRC.
+ * @num_child_objects_in_mc: number of entries in obj_desc_array
+ *
+ * Synchronizes the state of the Linux bus driver with the actual
+ * state of the MC by adding objects that have been newly discovered
+ * in the physical DPRC.
+ */
+static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
+{
+ int error;
+ int i;
+
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_device *child_dev;
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
+
+ if (strlen(obj_desc->type) == 0)
+ continue;
+
+ /*
+ * Check if device is already known to Linux:
+ */
+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+ if (child_dev) {
+ check_plugged_state_change(child_dev, obj_desc);
+ put_device(&child_dev->dev);
+ continue;
+ }
+
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+ &child_dev);
+ if (error < 0)
+ continue;
+ }
+}
+
+/**
+ * dprc_scan_objects - Discover objects in a DPRC
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @total_irq_count: If argument is provided the function populates the
+ * total number of IRQs created by objects in the DPRC.
+ *
+ * Detects objects added and removed from a DPRC and synchronizes the
+ * state of the Linux bus driver with the MC by adding and removing
+ * devices accordingly.
+ * Two types of devices can be found in a DPRC: allocatable objects (e.g.,
+ * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni).
+ * All allocatable devices need to be probed before all non-allocatable
+ * devices, to ensure that device drivers for non-allocatable
+ * devices can allocate any type of allocatable devices.
+ * That is, we need to ensure that the corresponding resource pools are
+ * populated before they can get allocation requests from probe callbacks
+ * of the device drivers for the non-allocatable devices.
+ */
+static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ unsigned int *total_irq_count)
+{
+ int num_child_objects;
+ int dprc_get_obj_failures;
+ int error;
+ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
+ struct fsl_mc_obj_desc *child_obj_desc_array = NULL;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ error = dprc_get_obj_count(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ &num_child_objects);
+ if (error < 0) {
+ dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n",
+ error);
+ return error;
+ }
+
+ if (num_child_objects != 0) {
+ int i;
+
+ child_obj_desc_array =
+ devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects,
+ sizeof(*child_obj_desc_array),
+ GFP_KERNEL);
+ if (!child_obj_desc_array)
+ return -ENOMEM;
+
+ /*
+ * Discover objects currently present in the physical DPRC:
+ */
+ dprc_get_obj_failures = 0;
+ for (i = 0; i < num_child_objects; i++) {
+ struct fsl_mc_obj_desc *obj_desc =
+ &child_obj_desc_array[i];
+
+ error = dprc_get_obj(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ i, obj_desc);
+ if (error < 0) {
+ dev_err(&mc_bus_dev->dev,
+ "dprc_get_obj(i=%d) failed: %d\n",
+ i, error);
+ /*
+ * Mark the obj entry as "invalid", by using the
+ * empty string as obj type:
+ */
+ obj_desc->type[0] = '\0';
+ obj_desc->id = error;
+ dprc_get_obj_failures++;
+ continue;
+ }
+
+ /*
+			 * add a quirk for all versions of dpseci < 4.0...none
+ * are coherent regardless of what the MC reports.
+ */
+ if ((strcmp(obj_desc->type, "dpseci") == 0) &&
+ (obj_desc->ver_major < 4))
+ obj_desc->flags |=
+ FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY;
+
+ irq_count += obj_desc->irq_count;
+ dev_dbg(&mc_bus_dev->dev,
+ "Discovered object: type %s, id %d\n",
+ obj_desc->type, obj_desc->id);
+ }
+
+ if (dprc_get_obj_failures != 0) {
+ dev_err(&mc_bus_dev->dev,
+ "%d out of %d devices could not be retrieved\n",
+ dprc_get_obj_failures, num_child_objects);
+ }
+ }
+
+ /*
+ * Allocate IRQ's before binding the scanned devices with their
+ * respective drivers.
+ */
+ if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+ dev_warn(&mc_bus_dev->dev,
+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ }
+
+ error = fsl_mc_populate_irq_pool(mc_bus,
+ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ if (error < 0)
+ return error;
+ }
+
+ if (total_irq_count)
+ *total_irq_count = irq_count;
+
+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+ dprc_add_new_devices(mc_bus_dev, child_obj_desc_array,
+ num_child_objects);
+
+ if (child_obj_desc_array)
+ devm_kfree(&mc_bus_dev->dev, child_obj_desc_array);
+
+ return 0;
+}
+
+/**
+ * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state
+ *
+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ *
+ * Scans the physical DPRC and synchronizes the state of the Linux
+ * bus driver with the actual state of the MC by adding and removing
+ * devices as appropriate.
+ */
+static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
+{
+ int error;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ fsl_mc_init_all_resource_pools(mc_bus_dev);
+
+ /*
+ * Discover objects in the DPRC:
+ */
+ mutex_lock(&mc_bus->scan_mutex);
+ error = dprc_scan_objects(mc_bus_dev, NULL);
+ mutex_unlock(&mc_bus->scan_mutex);
+ if (error < 0) {
+ fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
+{
+ int error;
+ u32 status;
+ struct device *dev = arg;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
+ struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
+
+ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
+ irq_num, smp_processor_id());
+
+ if (!(mc_dev->flags & FSL_MC_IS_DPRC))
+ return IRQ_HANDLED;
+
+ mutex_lock(&mc_bus->scan_mutex);
+ if (!msi_desc || msi_desc->irq != (u32)irq_num)
+ goto out;
+
+ status = 0;
+ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ &status);
+ if (error < 0) {
+ dev_err(dev,
+ "dprc_get_irq_status() failed: %d\n", error);
+ goto out;
+ }
+
+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ status);
+ if (error < 0) {
+ dev_err(dev,
+ "dprc_clear_irq_status() failed: %d\n", error);
+ goto out;
+ }
+
+ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
+ DPRC_IRQ_EVENT_OBJ_REMOVED |
+ DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
+ DPRC_IRQ_EVENT_OBJ_DESTROYED |
+ DPRC_IRQ_EVENT_OBJ_CREATED)) {
+ unsigned int irq_count;
+
+ error = dprc_scan_objects(mc_dev, &irq_count);
+ if (error < 0) {
+ /*
+ * If the error is -ENXIO, we ignore it, as it indicates
+ * that the object scan was aborted, as we detected that
+ * an object was removed from the DPRC in the MC, while
+ * we were scanning the DPRC.
+ */
+ if (error != -ENXIO) {
+ dev_err(dev, "dprc_scan_objects() failed: %d\n",
+ error);
+ }
+
+ goto out;
+ }
+
+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+ dev_warn(dev,
+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ }
+ }
+
+out:
+ mutex_unlock(&mc_bus->scan_mutex);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Disable and clear interrupt for a given DPRC object
+ */
+static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
+
+ /*
+ * Disable generation of interrupt, while we configure it:
+ */
+ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Disable all interrupt causes for the interrupt:
+ */
+ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Clear any leftover interrupts:
+ */
+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
+{
+ int error;
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
+ /*
+ * NOTE: devm_request_threaded_irq() invokes the device-specific
+ * function that programs the MSI physically in the device
+ */
+ error = devm_request_threaded_irq(&mc_dev->dev,
+ irq->msi_desc->irq,
+ dprc_irq0_handler,
+ dprc_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&mc_dev->dev),
+ &mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "devm_request_threaded_irq() failed: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ /*
+ * Enable all interrupt causes for the interrupt:
+ */
+ error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
+ ~0x0u);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+ error);
+
+ return error;
+ }
+
+ /*
+ * Enable generation of the interrupt:
+ */
+ error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+ error);
+
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Setup interrupt for a given DPRC device
+ */
+static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ error = fsl_mc_allocate_irqs(mc_dev);
+ if (error < 0)
+ return error;
+
+ error = disable_dprc_irq(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ error = register_dprc_irq_handler(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ error = enable_dprc_irq(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ return 0;
+
+error_free_irqs:
+ fsl_mc_free_irqs(mc_dev);
+ return error;
+}
+
+/**
+ * dprc_probe - callback invoked when a DPRC is being bound to this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
+ *
+ * It opens the physical DPRC in the MC.
+ * It scans the DPRC to discover the MC objects contained in it.
+ * It creates the interrupt pool for the MC bus associated with the DPRC.
+ * It configures the interrupts for the DPRC device itself.
+ */
+static int dprc_probe(struct fsl_mc_device *mc_dev)
+{
+ int error;
+ size_t region_size;
+ struct device *parent_dev = mc_dev->dev.parent;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ bool mc_io_created = false;
+ bool msi_domain_set = false;
+ u16 major_ver, minor_ver;
+
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ return -EINVAL;
+
+ if (!mc_dev->mc_io) {
+ /*
+ * This is a child DPRC:
+ */
+ if (!dev_is_fsl_mc(parent_dev))
+ return -EINVAL;
+
+ if (mc_dev->obj_desc.region_count == 0)
+ return -EINVAL;
+
+ region_size = resource_size(mc_dev->regions);
+
+ error = fsl_create_mc_io(&mc_dev->dev,
+ mc_dev->regions[0].start,
+ region_size,
+ NULL,
+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &mc_dev->mc_io);
+ if (error < 0)
+ return error;
+
+ mc_io_created = true;
+
+ /*
+ * Inherit parent MSI domain:
+ */
+ dev_set_msi_domain(&mc_dev->dev,
+ dev_get_msi_domain(parent_dev));
+ msi_domain_set = true;
+ } else {
+ /*
+ * This is a root DPRC
+ */
+ struct irq_domain *mc_msi_domain;
+
+ if (dev_is_fsl_mc(parent_dev))
+ return -EINVAL;
+
+ error = fsl_mc_find_msi_domain(parent_dev,
+ &mc_msi_domain);
+ if (error < 0) {
+ dev_warn(&mc_dev->dev,
+ "WARNING: MC bus without interrupt support\n");
+ } else {
+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+ msi_domain_set = true;
+ }
+ }
+
+ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+ &mc_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
+ goto error_cleanup_msi_domain;
+ }
+
+ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ &mc_bus->dprc_attr);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ error = dprc_get_api_version(mc_dev->mc_io, 0,
+ &major_ver,
+ &minor_ver);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ if (major_ver < DPRC_MIN_VER_MAJOR ||
+ (major_ver == DPRC_MIN_VER_MAJOR &&
+ minor_ver < DPRC_MIN_VER_MINOR)) {
+ dev_err(&mc_dev->dev,
+ "ERROR: DPRC version %d.%d not supported\n",
+ major_ver, minor_ver);
+ error = -ENOTSUPP;
+ goto error_cleanup_open;
+ }
+
+ mutex_init(&mc_bus->scan_mutex);
+
+ /*
+ * Discover MC objects in DPRC object:
+ */
+ error = dprc_scan_container(mc_dev);
+ if (error < 0)
+ goto error_cleanup_open;
+
+ /*
+ * Configure interrupt for the DPRC object associated with this MC bus:
+ */
+ error = dprc_setup_irq(mc_dev);
+ if (error < 0)
+ goto error_cleanup_open;
+
+ dev_info(&mc_dev->dev, "DPRC device bound to driver");
+ return 0;
+
+error_cleanup_open:
+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+
+error_cleanup_msi_domain:
+ if (msi_domain_set)
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+
+ if (mc_io_created) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ }
+
+ return error;
+}
+
+/*
+ * Tear down interrupt for a given DPRC object
+ */
+static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
+ (void)disable_dprc_irq(mc_dev);
+
+ devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+
+ fsl_mc_free_irqs(mc_dev);
+}
+
+/**
+ * dprc_remove - callback invoked when a DPRC is being unbound from this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing the DPRC
+ *
+ * It removes the DPRC's child objects from Linux (not from the MC) and
+ * closes the DPRC device in the MC.
+ * It tears down the interrupts that were configured for the DPRC device.
+ * It destroys the interrupt pool associated with this MC bus.
+ */
+static int dprc_remove(struct fsl_mc_device *mc_dev)
+{
+ int error;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+ if (!mc_dev->mc_io)
+ return -EINVAL;
+
+ if (!mc_bus->irq_resources)
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ dprc_teardown_irq(mc_dev);
+
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+
+ if (dev_get_msi_domain(&mc_dev->dev)) {
+ fsl_mc_cleanup_irq_pool(mc_bus);
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+ }
+
+ fsl_mc_cleanup_all_resource_pools(mc_dev);
+
+ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+ if (error < 0)
+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
+
+ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ }
+
+ dev_info(&mc_dev->dev, "DPRC device unbound from driver");
+ return 0;
+}
+
+static const struct fsl_mc_device_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dprc"},
+ {.vendor = 0x0},
+};
+
+static struct fsl_mc_driver dprc_driver = {
+ .driver = {
+ .name = FSL_MC_DPRC_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ },
+ .match_id_table = match_id_table,
+ .probe = dprc_probe,
+ .remove = dprc_remove,
+};
+
+int __init dprc_driver_init(void)
+{
+ return fsl_mc_driver_register(&dprc_driver);
+}
+
+void dprc_driver_exit(void)
+{
+ fsl_mc_driver_unregister(&dprc_driver);
+}
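Besides the DPRC-specific logic, dprc-driver.c also shows the generic fsl-mc driver
registration pattern: a match table keyed on vendor and object type, probe/remove
callbacks, and fsl_mc_driver_register()/fsl_mc_driver_unregister(). A hedged sketch of
a minimal driver for a hypothetical "dpfoo" object type, following that same pattern:

	#include <linux/module.h>
	#include <linux/fsl/mc.h>

	static int dpfoo_probe(struct fsl_mc_device *mc_dev)
	{
		dev_info(&mc_dev->dev, "bound to dpfoo object %d\n",
			 mc_dev->obj_desc.id);
		return 0;
	}

	static int dpfoo_remove(struct fsl_mc_device *mc_dev)
	{
		return 0;
	}

	static const struct fsl_mc_device_id dpfoo_match_id_table[] = {
		{
			.vendor = FSL_MC_VENDOR_FREESCALE,
			.obj_type = "dpfoo",
		},
		{ .vendor = 0x0 },
	};

	static struct fsl_mc_driver dpfoo_driver = {
		.driver = {
			.name = "dpfoo_driver",
		},
		.match_id_table = dpfoo_match_id_table,
		.probe = dpfoo_probe,
		.remove = dpfoo_remove,
	};

	/* registration mirrors dprc_driver_init()/dprc_driver_exit() above */
	static int __init dpfoo_driver_init(void)
	{
		return fsl_mc_driver_register(&dpfoo_driver);
	}
	module_init(dpfoo_driver_init);

	static void __exit dpfoo_driver_exit(void)
	{
		fsl_mc_driver_unregister(&dpfoo_driver);
	}
	module_exit(dpfoo_driver_exit);

	MODULE_LICENSE("GPL");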
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
new file mode 100644
index 000000000000..5c23e8d4ddb3
--- /dev/null
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * dprc_open() - Open DPRC object for use
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Container ID to open
+ * @token: Returned token of DPRC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Required before any operation on the object.
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+ u16 *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
+ 0);
+ cmd_params = (struct dprc_cmd_open *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_open);
+
+/**
+ * dprc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_close);
+
+/**
+ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: Identifies the interrupt index to configure
+ * @irq_cfg: IRQ configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable setting controls
+ * the overall interrupt state; if the interrupt is disabled, none of the causes
+ * can trigger an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en & DPRC_ENABLE;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @mask: event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting irq
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq_status *cmd_params;
+ struct dprc_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dprc_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_get_attributes() - Obtains container attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @attr:	Returned container attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attr)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_rsp_get_attributes *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
+ attr->container_id = le32_to_cpu(rsp_params->container_id);
+ attr->icid = le16_to_cpu(rsp_params->icid);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->portal_id = le32_to_cpu(rsp_params->portal_id);
+
+ return 0;
+}
+
+/**
+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_count: Number of objects assigned to the DPRC
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_rsp_get_obj_count *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
+ *obj_count = le32_to_cpu(rsp_params->obj_count);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj_count);
+
+/**
+ * dprc_get_obj() - Get general information on an object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_index: Index of the object to be queried (< obj_count)
+ * @obj_desc: Returns the requested object descriptor
+ *
+ * The object descriptors are retrieved one by one by incrementing
+ * obj_index up to (not including) the value of obj_count returned
+ * from dprc_get_obj_count(). dprc_get_obj_count() must
+ * be called prior to dprc_get_obj().
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj *cmd_params;
+ struct dprc_rsp_get_obj *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
+ cmd_params->obj_index = cpu_to_le32(obj_index);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
+ obj_desc->id = le32_to_cpu(rsp_params->id);
+ obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+ obj_desc->irq_count = rsp_params->irq_count;
+ obj_desc->region_count = rsp_params->region_count;
+ obj_desc->state = le32_to_cpu(rsp_params->state);
+ obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+ obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+ obj_desc->flags = le16_to_cpu(rsp_params->flags);
+ strncpy(obj_desc->type, rsp_params->type, 16);
+ obj_desc->type[15] = '\0';
+ strncpy(obj_desc->label, rsp_params->label, 16);
+ obj_desc->label[15] = '\0';
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj);
+
+/**
+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_type: Type of the object to set its IRQ
+ * @obj_id: ID of the object to set its IRQ
+ * @irq_index: The interrupt index to configure
+ * @irq_cfg: IRQ configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_obj_irq *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_set_obj_irq);
+
+/**
+ * dprc_get_obj_region() - Get region information for a specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @obj_type:	Object type as returned in dprc_get_obj()
+ * @obj_id: Unique object instance as returned in dprc_get_obj()
+ * @region_index: The specific region to query
+ * @region_desc: Returns the requested region descriptor
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj_region *cmd_params;
+ struct dprc_rsp_get_obj_region *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
+ cmd_flags, token);
+ cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ cmd_params->region_index = region_index;
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
+ region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
+ region_desc->size = le32_to_cpu(rsp_params->size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_get_obj_region);
+
+/**
+ * dprc_get_api_version - Get Data Path Resource Container API version
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of Data Path Resource Container API
+ * @minor_ver: Minor version of Data Path Resource Container API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
+
+/**
+ * dprc_get_container_id - Get container ID associated with a given portal.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Requested container id
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *container_id = (int)mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
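The command wrappers above are typically combined to enumerate a container: open the
DPRC, read the object count, then fetch the descriptors one by one, exactly as the
dprc_get_obj() comment describes. A minimal sketch, assuming a valid MC portal and
container id (the function name and printout are illustrative):

	static int example_list_dprc_objects(struct fsl_mc_io *mc_io, int container_id)
	{
		struct fsl_mc_obj_desc obj_desc;
		int obj_count;
		u16 token;
		int err;
		int i;

		err = dprc_open(mc_io, 0, container_id, &token);
		if (err)
			return err;

		err = dprc_get_obj_count(mc_io, 0, token, &obj_count);
		if (err)
			goto out_close;

		for (i = 0; i < obj_count; i++) {
			/* skip entries that cannot be read, as dprc_scan_objects() does */
			if (dprc_get_obj(mc_io, 0, token, i, &obj_desc))
				continue;

			pr_info("object %s.%d: %d IRQs, %d regions\n",
				obj_desc.type, obj_desc.id,
				obj_desc.irq_count, obj_desc.region_count);
		}

	out_close:
		dprc_close(mc_io, 0, token);
		return err;
	}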
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
new file mode 100644
index 000000000000..452c5d7a12e9
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fsl-mc object allocator driver
+ *
+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static bool __must_check fsl_mc_is_allocatable(struct fsl_mc_device *mc_dev)
+{
+ return is_fsl_mc_bus_dpbp(mc_dev) ||
+ is_fsl_mc_bus_dpmcp(mc_dev) ||
+ is_fsl_mc_bus_dpcon(mc_dev);
+}
+
+/**
+ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
+ * pool of a given fsl-mc bus
+ *
+ * @mc_bus: pointer to the fsl-mc bus
+ * @pool_type: pool type
+ * @mc_dev: pointer to allocatable fsl-mc device
+ */
+static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
+ *mc_bus,
+ enum fsl_mc_pool_type
+ pool_type,
+ struct fsl_mc_device
+ *mc_dev)
+{
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ int error = -EINVAL;
+
+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
+ goto out;
+ if (!fsl_mc_is_allocatable(mc_dev))
+ goto out;
+ if (mc_dev->resource)
+ goto out;
+
+ res_pool = &mc_bus->resource_pools[pool_type];
+ if (res_pool->type != pool_type)
+ goto out;
+ if (res_pool->mc_bus != mc_bus)
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+
+ if (res_pool->max_count < 0)
+ goto out_unlock;
+ if (res_pool->free_count < 0 ||
+ res_pool->free_count > res_pool->max_count)
+ goto out_unlock;
+
+ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
+ GFP_KERNEL);
+ if (!resource) {
+ error = -ENOMEM;
+ dev_err(&mc_bus_dev->dev,
+ "Failed to allocate memory for fsl_mc_resource\n");
+ goto out_unlock;
+ }
+
+ resource->type = pool_type;
+ resource->id = mc_dev->obj_desc.id;
+ resource->data = mc_dev;
+ resource->parent_pool = res_pool;
+ INIT_LIST_HEAD(&resource->node);
+ list_add_tail(&resource->node, &res_pool->free_list);
+ mc_dev->resource = resource;
+ res_pool->free_count++;
+ res_pool->max_count++;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+out:
+ return error;
+}
+
+/**
+ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
+ * resource pool
+ *
+ * @mc_dev: pointer to allocatable fsl-mc device
+ *
+ * It permanently removes an allocatable fsl-mc device from the resource
+ * pool. It's an error if the device is in use.
+ */
+static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
+ *mc_dev)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ int error = -EINVAL;
+
+ if (!fsl_mc_is_allocatable(mc_dev))
+ goto out;
+
+ resource = mc_dev->resource;
+ if (!resource || resource->data != mc_dev)
+ goto out;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ res_pool = resource->parent_pool;
+ if (res_pool != &mc_bus->resource_pools[resource->type])
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+
+ if (res_pool->max_count <= 0)
+ goto out_unlock;
+ if (res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count)
+ goto out_unlock;
+
+ /*
+ * If the device is currently allocated, its resource is not
+ * in the free list and thus, the device cannot be removed.
+ */
+ if (list_empty(&resource->node)) {
+ error = -EBUSY;
+ dev_err(&mc_bus_dev->dev,
+ "Device %s cannot be removed from resource pool\n",
+ dev_name(&mc_dev->dev));
+ goto out_unlock;
+ }
+
+ list_del_init(&resource->node);
+ res_pool->free_count--;
+ res_pool->max_count--;
+
+ devm_kfree(&mc_bus_dev->dev, resource);
+ mc_dev->resource = NULL;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+out:
+ return error;
+}
+
+static const char *const fsl_mc_pool_type_strings[] = {
+ [FSL_MC_POOL_DPMCP] = "dpmcp",
+ [FSL_MC_POOL_DPBP] = "dpbp",
+ [FSL_MC_POOL_DPCON] = "dpcon",
+ [FSL_MC_POOL_IRQ] = "irq",
+};
+
+static int __must_check object_type_to_pool_type(const char *object_type,
+ enum fsl_mc_pool_type
+ *pool_type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) {
+ if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) {
+ *pool_type = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_resource **new_resource)
+{
+ struct fsl_mc_resource_pool *res_pool;
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ int error = -EINVAL;
+
+ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
+ FSL_MC_NUM_POOL_TYPES);
+
+ *new_resource = NULL;
+ if (pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)
+ goto out;
+
+ res_pool = &mc_bus->resource_pools[pool_type];
+ if (res_pool->mc_bus != mc_bus)
+ goto out;
+
+ mutex_lock(&res_pool->mutex);
+ resource = list_first_entry_or_null(&res_pool->free_list,
+ struct fsl_mc_resource, node);
+
+ if (!resource) {
+ error = -ENXIO;
+ dev_err(&mc_bus_dev->dev,
+ "No more resources of type %s left\n",
+ fsl_mc_pool_type_strings[pool_type]);
+ goto out_unlock;
+ }
+
+ if (resource->type != pool_type)
+ goto out_unlock;
+ if (resource->parent_pool != res_pool)
+ goto out_unlock;
+ if (res_pool->free_count <= 0 ||
+ res_pool->free_count > res_pool->max_count)
+ goto out_unlock;
+
+ list_del_init(&resource->node);
+
+ res_pool->free_count--;
+ error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+ *new_resource = resource;
+out:
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
+
+void fsl_mc_resource_free(struct fsl_mc_resource *resource)
+{
+ struct fsl_mc_resource_pool *res_pool;
+
+ res_pool = resource->parent_pool;
+ if (resource->type != res_pool->type)
+ return;
+
+ mutex_lock(&res_pool->mutex);
+ if (res_pool->free_count < 0 ||
+ res_pool->free_count >= res_pool->max_count)
+ goto out_unlock;
+
+ if (!list_empty(&resource->node))
+ goto out_unlock;
+
+ list_add_tail(&resource->node, &res_pool->free_list);
+ res_pool->free_count++;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
+
+/**
+ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
+ * pool type from a given fsl-mc bus instance
+ *
+ * @mc_dev: fsl-mc device which is used in conjunction with the
+ * allocated object
+ * @pool_type: pool type
+ * @new_mc_dev: pointer to area where the pointer to the allocated device
+ * is to be returned
+ *
+ * Allocatable objects are always used in conjunction with some functional
+ * device. This function allocates an object of the specified type from
+ * the DPRC containing the functional device.
+ *
+ * NOTE: pool_type must be different from FSL_MC_POOL_DPMCP, since MC
+ * portals are allocated using fsl_mc_portal_allocate(), instead of
+ * this function.
+ */
+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_device **new_mc_adev)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_device *mc_adev;
+ int error = -EINVAL;
+ struct fsl_mc_resource *resource = NULL;
+
+ *new_mc_adev = NULL;
+ if (mc_dev->flags & FSL_MC_IS_DPRC)
+ goto error;
+
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
+ goto error;
+
+ if (pool_type == FSL_MC_POOL_DPMCP)
+ goto error;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource);
+ if (error < 0)
+ goto error;
+
+ mc_adev = resource->data;
+ if (!mc_adev)
+ goto error;
+
+ *new_mc_adev = mc_adev;
+ return 0;
+error:
+ if (resource)
+ fsl_mc_resource_free(resource);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
+
+/**
+ * fsl_mc_object_free - Returns an fsl-mc object to the resource
+ * pool where it came from.
+ * @mc_adev: Pointer to the fsl-mc device
+ */
+void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
+{
+ struct fsl_mc_resource *resource;
+
+ resource = mc_adev->resource;
+ if (resource->type == FSL_MC_POOL_DPMCP)
+ return;
+ if (resource->data != mc_adev)
+ return;
+
+ fsl_mc_resource_free(resource);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_object_free);
+
+/*
+ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
+ * ID. A block of IRQs is pre-allocated and maintained in a pool
+ * from which devices can allocate them when needed.
+ */
+
+/*
+ * Initialize the interrupt pool associated with an fsl-mc bus.
+ * It allocates a block of IRQs from the GIC-ITS.
+ */
+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+ unsigned int irq_count)
+{
+ unsigned int i;
+ struct msi_desc *msi_desc;
+ struct fsl_mc_device_irq *irq_resources;
+ struct fsl_mc_device_irq *mc_dev_irq;
+ int error;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+ if (irq_count == 0 ||
+ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)
+ return -EINVAL;
+
+ error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
+ if (error < 0)
+ return error;
+
+ irq_resources = devm_kzalloc(&mc_bus_dev->dev,
+ sizeof(*irq_resources) * irq_count,
+ GFP_KERNEL);
+ if (!irq_resources) {
+ error = -ENOMEM;
+ goto cleanup_msi_irqs;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ mc_dev_irq = &irq_resources[i];
+
+ /*
+ * NOTE: This mc_dev_irq's MSI addr/value pair will be set
+ * by the fsl_mc_msi_write_msg() callback
+ */
+ mc_dev_irq->resource.type = res_pool->type;
+ mc_dev_irq->resource.data = mc_dev_irq;
+ mc_dev_irq->resource.parent_pool = res_pool;
+ INIT_LIST_HEAD(&mc_dev_irq->resource.node);
+ list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
+ }
+
+ for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
+ mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
+ mc_dev_irq->msi_desc = msi_desc;
+ mc_dev_irq->resource.id = msi_desc->irq;
+ }
+
+ res_pool->max_count = irq_count;
+ res_pool->free_count = irq_count;
+ mc_bus->irq_resources = irq_resources;
+ return 0;
+
+cleanup_msi_irqs:
+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
+
+/**
+ * Tear down the interrupt pool associated with an fsl-mc bus.
+ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
+ */
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
+{
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+ if (!mc_bus->irq_resources)
+ return;
+
+ if (res_pool->max_count == 0)
+ return;
+
+ if (res_pool->free_count != res_pool->max_count)
+ return;
+
+ INIT_LIST_HEAD(&res_pool->free_list);
+ res_pool->max_count = 0;
+ res_pool->free_count = 0;
+ mc_bus->irq_resources = NULL;
+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
+
+/**
+ * Allocate the IRQs required by a given fsl-mc device.
+ */
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+{
+ int i;
+ int irq_count;
+ int res_allocated_count = 0;
+ int error = -EINVAL;
+ struct fsl_mc_device_irq **irqs = NULL;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_resource_pool *res_pool;
+
+ if (mc_dev->irqs)
+ return -EINVAL;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+ if (irq_count == 0)
+ return -EINVAL;
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ mc_bus = to_fsl_mc_bus(mc_dev);
+ else
+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+ if (!mc_bus->irq_resources)
+ return -EINVAL;
+
+ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+ if (res_pool->free_count < irq_count) {
+ dev_err(&mc_dev->dev,
+ "Not able to allocate %u irqs for device\n", irq_count);
+ return -ENOSPC;
+ }
+
+ irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]),
+ GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < irq_count; i++) {
+ struct fsl_mc_resource *resource;
+
+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
+ &resource);
+ if (error < 0)
+ goto error_resource_alloc;
+
+ irqs[i] = to_fsl_mc_irq(resource);
+ res_allocated_count++;
+
+ irqs[i]->mc_dev = mc_dev;
+ irqs[i]->dev_irq_index = i;
+ }
+
+ mc_dev->irqs = irqs;
+ return 0;
+
+error_resource_alloc:
+ for (i = 0; i < res_allocated_count; i++) {
+ irqs[i]->mc_dev = NULL;
+ fsl_mc_resource_free(&irqs[i]->resource);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
+
+/*
+ * Frees the IRQs that were allocated for an fsl-mc device.
+ */
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
+{
+ int i;
+ int irq_count;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_device_irq **irqs = mc_dev->irqs;
+
+ if (!irqs)
+ return;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ mc_bus = to_fsl_mc_bus(mc_dev);
+ else
+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+ if (!mc_bus->irq_resources)
+ return;
+
+ for (i = 0; i < irq_count; i++) {
+ irqs[i]->mc_dev = NULL;
+ fsl_mc_resource_free(&irqs[i]->resource);
+ }
+
+ mc_dev->irqs = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
+
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
+ int pool_type;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+
+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) {
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[pool_type];
+
+ res_pool->type = pool_type;
+ res_pool->max_count = 0;
+ res_pool->free_count = 0;
+ res_pool->mc_bus = mc_bus;
+ INIT_LIST_HEAD(&res_pool->free_list);
+ mutex_init(&res_pool->mutex);
+ }
+}
+
+static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
+ enum fsl_mc_pool_type pool_type)
+{
+ struct fsl_mc_resource *resource;
+ struct fsl_mc_resource *next;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[pool_type];
+ int free_count = 0;
+
+ list_for_each_entry_safe(resource, next, &res_pool->free_list, node) {
+ free_count++;
+ devm_kfree(&mc_bus_dev->dev, resource);
+ }
+}
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
+{
+ int pool_type;
+
+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
+ fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
+}
+
+/**
+ * fsl_mc_allocator_probe - callback invoked when an allocatable device is
+ * being added to the system
+ */
+static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
+{
+ enum fsl_mc_pool_type pool_type;
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ int error;
+
+ if (!fsl_mc_is_allocatable(mc_dev))
+ return -EINVAL;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ if (!dev_is_fsl_mc(&mc_bus_dev->dev))
+ return -EINVAL;
+
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type);
+ if (error < 0)
+ return error;
+
+ error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev);
+ if (error < 0)
+ return error;
+
+ dev_dbg(&mc_dev->dev,
+ "Allocatable fsl-mc device bound to fsl_mc_allocator driver");
+ return 0;
+}
+
+/**
+ * fsl_mc_allocator_remove - callback invoked when an allocatable device is
+ * being removed from the system
+ */
+static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ if (!fsl_mc_is_allocatable(mc_dev))
+ return -EINVAL;
+
+ if (mc_dev->resource) {
+ error = fsl_mc_resource_pool_remove_device(mc_dev);
+ if (error < 0)
+ return error;
+ }
+
+ dev_dbg(&mc_dev->dev,
+ "Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
+ return 0;
+}
+
+static const struct fsl_mc_device_id match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpbp",
+ },
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpmcp",
+ },
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpcon",
+ },
+ {.vendor = 0x0},
+};
+
+static struct fsl_mc_driver fsl_mc_allocator_driver = {
+ .driver = {
+ .name = "fsl_mc_allocator",
+ .pm = NULL,
+ },
+ .match_id_table = match_id_table,
+ .probe = fsl_mc_allocator_probe,
+ .remove = fsl_mc_allocator_remove,
+};
+
+int __init fsl_mc_allocator_driver_init(void)
+{
+ return fsl_mc_driver_register(&fsl_mc_allocator_driver);
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
new file mode 100644
index 000000000000..1b333c43aae9
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -0,0 +1,948 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#define pr_fmt(fmt) "fsl-mc: " fmt
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/limits.h>
+#include <linux/bitops.h>
+#include <linux/msi.h>
+#include <linux/dma-mapping.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * Default DMA mask for devices on a fsl-mc bus
+ */
+#define FSL_MC_DEFAULT_DMA_MASK (~0ULL)
+
+/**
+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
+ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
+ * @num_translation_ranges: number of entries in addr_translation_ranges
+ * @translation_ranges: array of bus to system address translation ranges
+ */
+struct fsl_mc {
+ struct fsl_mc_device *root_mc_bus_dev;
+ u8 num_translation_ranges;
+ struct fsl_mc_addr_translation_range *translation_ranges;
+};
+
+/**
+ * struct fsl_mc_addr_translation_range - bus to system address translation
+ * range
+ * @mc_region_type: Type of MC region for the range being translated
+ * @start_mc_offset: Start MC offset of the range being translated
+ * @end_mc_offset: MC offset of the first byte after the range (last MC
+ * offset of the range is end_mc_offset - 1)
+ * @start_phys_addr: system physical address corresponding to start_mc_offset
+ */
+struct fsl_mc_addr_translation_range {
+ enum dprc_region_type mc_region_type;
+ u64 start_mc_offset;
+ u64 end_mc_offset;
+ phys_addr_t start_phys_addr;
+};
+
+/**
+ * struct mc_version
+ * @major: Major version number: incremented on API compatibility changes
+ * @minor: Minor version number: incremented on API additions (that are
+ * backward compatible); reset when major version is incremented
+ * @revision: Internal revision number: incremented on implementation changes
+ * and/or bug fixes that have no impact on API
+ */
+struct mc_version {
+ u32 major;
+ u32 minor;
+ u32 revision;
+};
+
+/**
+ * fsl_mc_bus_match - device to driver matching callback
+ * @dev: the fsl-mc device to match against
+ * @drv: the device driver to search for matching fsl-mc object type
+ * structures
+ *
+ * Returns 1 on success, 0 otherwise.
+ */
+static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+{
+ const struct fsl_mc_device_id *id;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ bool found = false;
+
+ if (!mc_drv->match_id_table)
+ goto out;
+
+ /*
+ * If the object is not 'plugged' don't match.
+ * Only exception is the root DPRC, which is a special case.
+ */
+ if ((mc_dev->obj_desc.state & FSL_MC_OBJ_STATE_PLUGGED) == 0 &&
+ !fsl_mc_is_root_dprc(&mc_dev->dev))
+ goto out;
+
+ /*
+ * Traverse the match_id table of the given driver, trying to find
+ * a match for the given device.
+ */
+ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
+ if (id->vendor == mc_dev->obj_desc.vendor &&
+ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
+ found = true;
+
+ break;
+ }
+ }
+
+out:
+ dev_dbg(dev, "%smatched\n", found ? "" : "not ");
+ return found;
+}
+
+/**
+ * fsl_mc_bus_uevent - callback invoked when a device uevent is generated
+ */
+static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
+ mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type))
+ return -ENOMEM;
+
+ return 0;
+}
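+
+/*
+ * Illustrative example (not from this patch): assuming
+ * FSL_MC_VENDOR_FREESCALE is 0x1957, a "dpni" object yields the
+ * uevent/modalias string
+ *
+ *	fsl-mc:v00001957ddpni
+ *
+ * which is what user space and module autoloading match against.
+ */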
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *fsl_mc_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fsl_mc_dev);
+
+struct bus_type fsl_mc_bus_type = {
+ .name = "fsl-mc",
+ .match = fsl_mc_bus_match,
+ .uevent = fsl_mc_bus_uevent,
+ .dev_groups = fsl_mc_dev_groups,
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
+
+struct device_type fsl_mc_bus_dprc_type = {
+ .name = "fsl_mc_bus_dprc"
+};
+
+struct device_type fsl_mc_bus_dpni_type = {
+ .name = "fsl_mc_bus_dpni"
+};
+
+struct device_type fsl_mc_bus_dpio_type = {
+ .name = "fsl_mc_bus_dpio"
+};
+
+struct device_type fsl_mc_bus_dpsw_type = {
+ .name = "fsl_mc_bus_dpsw"
+};
+
+struct device_type fsl_mc_bus_dpbp_type = {
+ .name = "fsl_mc_bus_dpbp"
+};
+
+struct device_type fsl_mc_bus_dpcon_type = {
+ .name = "fsl_mc_bus_dpcon"
+};
+
+struct device_type fsl_mc_bus_dpmcp_type = {
+ .name = "fsl_mc_bus_dpmcp"
+};
+
+struct device_type fsl_mc_bus_dpmac_type = {
+ .name = "fsl_mc_bus_dpmac"
+};
+
+struct device_type fsl_mc_bus_dprtc_type = {
+ .name = "fsl_mc_bus_dprtc"
+};
+
+static struct device_type *fsl_mc_get_device_type(const char *type)
+{
+ static const struct {
+ struct device_type *dev_type;
+ const char *type;
+ } dev_types[] = {
+ { &fsl_mc_bus_dprc_type, "dprc" },
+ { &fsl_mc_bus_dpni_type, "dpni" },
+ { &fsl_mc_bus_dpio_type, "dpio" },
+ { &fsl_mc_bus_dpsw_type, "dpsw" },
+ { &fsl_mc_bus_dpbp_type, "dpbp" },
+ { &fsl_mc_bus_dpcon_type, "dpcon" },
+ { &fsl_mc_bus_dpmcp_type, "dpmcp" },
+ { &fsl_mc_bus_dpmac_type, "dpmac" },
+ { &fsl_mc_bus_dprtc_type, "dprtc" },
+ { NULL, NULL }
+ };
+ int i;
+
+ for (i = 0; dev_types[i].dev_type; i++)
+ if (!strcmp(dev_types[i].type, type))
+ return dev_types[i].dev_type;
+
+ return NULL;
+}
+
+static int fsl_mc_driver_probe(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ int error;
+
+ mc_drv = to_fsl_mc_driver(dev->driver);
+
+ error = mc_drv->probe(mc_dev);
+ if (error < 0) {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int fsl_mc_driver_remove(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ int error;
+
+ error = mc_drv->remove(mc_dev);
+ if (error < 0) {
+ dev_err(dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void fsl_mc_driver_shutdown(struct device *dev)
+{
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ mc_drv->shutdown(mc_dev);
+}
+
+/**
+ * __fsl_mc_driver_register - registers a child device driver with the
+ * MC bus
+ *
+ * This function is implicitly invoked from the registration function of
+ * fsl_mc device drivers, which is generated by the
+ * module_fsl_mc_driver() macro.
+ */
+int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
+ struct module *owner)
+{
+ int error;
+
+ mc_driver->driver.owner = owner;
+ mc_driver->driver.bus = &fsl_mc_bus_type;
+
+ if (mc_driver->probe)
+ mc_driver->driver.probe = fsl_mc_driver_probe;
+
+ if (mc_driver->remove)
+ mc_driver->driver.remove = fsl_mc_driver_remove;
+
+ if (mc_driver->shutdown)
+ mc_driver->driver.shutdown = fsl_mc_driver_shutdown;
+
+ error = driver_register(&mc_driver->driver);
+ if (error < 0) {
+ pr_err("driver_register() failed for %s: %d\n",
+ mc_driver->driver.name, error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
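+
+/*
+ * A minimal sketch of a client driver registering itself through the
+ * module_fsl_mc_driver() macro mentioned above. The "dpfoo" object type
+ * and the callback bodies are hypothetical placeholders, shown only to
+ * illustrate the expected wiring of struct fsl_mc_driver:
+ *
+ *	static int dpfoo_probe(struct fsl_mc_device *mc_dev)
+ *	{
+ *		dev_info(&mc_dev->dev, "dpfoo bound\n");
+ *		return 0;
+ *	}
+ *
+ *	static int dpfoo_remove(struct fsl_mc_device *mc_dev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static const struct fsl_mc_device_id dpfoo_match_id_table[] = {
+ *		{ .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpfoo" },
+ *		{ .vendor = 0x0 },
+ *	};
+ *
+ *	static struct fsl_mc_driver dpfoo_driver = {
+ *		.driver = {
+ *			.name = "dpfoo",
+ *		},
+ *		.match_id_table = dpfoo_match_id_table,
+ *		.probe = dpfoo_probe,
+ *		.remove = dpfoo_remove,
+ *	};
+ *	module_fsl_mc_driver(dpfoo_driver);
+ */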
+
+/**
+ * fsl_mc_driver_unregister - unregisters a device driver from the
+ * MC bus
+ */
+void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver)
+{
+ driver_unregister(&mc_driver->driver);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister);
+
+/**
+ * mc_get_version() - Retrieves the Management Complex firmware
+ * version information
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @mc_ver_info: Returned version information structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+static int mc_get_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ struct mc_version *mc_ver_info)
+{
+ struct mc_command cmd = { 0 };
+ struct dpmng_rsp_get_version *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION,
+ cmd_flags,
+ 0);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
+
+/**
+ * fsl_mc_get_root_dprc - function to traverse to the root dprc
+ */
+static void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev)
+{
+ if (!dev) {
+ *root_dprc_dev = NULL;
+ } else if (!dev_is_fsl_mc(dev)) {
+ *root_dprc_dev = NULL;
+ } else {
+ *root_dprc_dev = dev;
+ while (dev_is_fsl_mc((*root_dprc_dev)->parent))
+ *root_dprc_dev = (*root_dprc_dev)->parent;
+ }
+}
+
+static int get_dprc_attr(struct fsl_mc_io *mc_io,
+ int container_id, struct dprc_attributes *attr)
+{
+ u16 dprc_handle;
+ int error;
+
+ error = dprc_open(mc_io, 0, container_id, &dprc_handle);
+ if (error < 0) {
+ dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
+ return error;
+ }
+
+ memset(attr, 0, sizeof(struct dprc_attributes));
+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
+ if (error < 0) {
+ dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto common_cleanup;
+ }
+
+ error = 0;
+
+common_cleanup:
+ (void)dprc_close(mc_io, 0, dprc_handle);
+ return error;
+}
+
+static int get_dprc_icid(struct fsl_mc_io *mc_io,
+ int container_id, u16 *icid)
+{
+ struct dprc_attributes attr;
+ int error;
+
+ error = get_dprc_attr(mc_io, container_id, &attr);
+ if (error == 0)
+ *icid = attr.icid;
+
+ return error;
+}
+
+static int translate_mc_addr(struct fsl_mc_device *mc_dev,
+ enum dprc_region_type mc_region_type,
+ u64 mc_offset, phys_addr_t *phys_addr)
+{
+ int i;
+ struct device *root_dprc_dev;
+ struct fsl_mc *mc;
+
+ fsl_mc_get_root_dprc(&mc_dev->dev, &root_dprc_dev);
+ mc = dev_get_drvdata(root_dprc_dev->parent);
+
+ if (mc->num_translation_ranges == 0) {
+ /*
+ * Do identity mapping:
+ */
+ *phys_addr = mc_offset;
+ return 0;
+ }
+
+ for (i = 0; i < mc->num_translation_ranges; i++) {
+ struct fsl_mc_addr_translation_range *range =
+ &mc->translation_ranges[i];
+
+ if (mc_region_type == range->mc_region_type &&
+ mc_offset >= range->start_mc_offset &&
+ mc_offset < range->end_mc_offset) {
+ *phys_addr = range->start_phys_addr +
+ (mc_offset - range->start_mc_offset);
+ return 0;
+ }
+ }
+
+ return -EFAULT;
+}
+
+static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
+ struct fsl_mc_device *mc_bus_dev)
+{
+ int i;
+ int error;
+ struct resource *regions;
+ struct fsl_mc_obj_desc *obj_desc = &mc_dev->obj_desc;
+ struct device *parent_dev = mc_dev->dev.parent;
+ enum dprc_region_type mc_region_type;
+
+ if (is_fsl_mc_bus_dprc(mc_dev) ||
+ is_fsl_mc_bus_dpmcp(mc_dev)) {
+ mc_region_type = DPRC_REGION_TYPE_MC_PORTAL;
+ } else if (is_fsl_mc_bus_dpio(mc_dev)) {
+ mc_region_type = DPRC_REGION_TYPE_QBMAN_PORTAL;
+ } else {
+ /*
+ * This function should not have been called for this MC object
+ * type, as this object type is not supposed to have MMIO
+ * regions
+ */
+ return -EINVAL;
+ }
+
+ regions = kmalloc_array(obj_desc->region_count,
+ sizeof(regions[0]), GFP_KERNEL);
+ if (!regions)
+ return -ENOMEM;
+
+ for (i = 0; i < obj_desc->region_count; i++) {
+ struct dprc_region_desc region_desc;
+
+ error = dprc_get_obj_region(mc_bus_dev->mc_io,
+ 0,
+ mc_bus_dev->mc_handle,
+ obj_desc->type,
+ obj_desc->id, i, &region_desc);
+ if (error < 0) {
+ dev_err(parent_dev,
+ "dprc_get_obj_region() failed: %d\n", error);
+ goto error_cleanup_regions;
+ }
+
+ error = translate_mc_addr(mc_dev, mc_region_type,
+ region_desc.base_offset,
+ &regions[i].start);
+ if (error < 0) {
+ dev_err(parent_dev,
+ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n",
+ region_desc.base_offset,
+ obj_desc->type, obj_desc->id, i);
+ goto error_cleanup_regions;
+ }
+
+ regions[i].end = regions[i].start + region_desc.size - 1;
+ regions[i].name = "fsl-mc object MMIO region";
+ regions[i].flags = IORESOURCE_IO;
+ if (region_desc.flags & DPRC_REGION_CACHEABLE)
+ regions[i].flags |= IORESOURCE_CACHEABLE;
+ }
+
+ mc_dev->regions = regions;
+ return 0;
+
+error_cleanup_regions:
+ kfree(regions);
+ return error;
+}
+
+/**
+ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
+ */
+bool fsl_mc_is_root_dprc(struct device *dev)
+{
+ struct device *root_dprc_dev;
+
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ if (!root_dprc_dev)
+ return false;
+ return dev == root_dprc_dev;
+}
+
+static void fsl_mc_device_release(struct device *dev)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ kfree(mc_dev->regions);
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ kfree(to_fsl_mc_bus(mc_dev));
+ else
+ kfree(mc_dev);
+}
+
+/**
+ * Add a newly discovered fsl-mc device and make it visible to Linux
+ */
+int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
+ struct fsl_mc_device **new_mc_dev)
+{
+ int error;
+ struct fsl_mc_device *mc_dev = NULL;
+ struct fsl_mc_bus *mc_bus = NULL;
+ struct fsl_mc_device *parent_mc_dev;
+
+ if (dev_is_fsl_mc(parent_dev))
+ parent_mc_dev = to_fsl_mc_device(parent_dev);
+ else
+ parent_mc_dev = NULL;
+
+ if (strcmp(obj_desc->type, "dprc") == 0) {
+ /*
+ * Allocate an MC bus device object:
+ */
+ mc_bus = kzalloc(sizeof(*mc_bus), GFP_KERNEL);
+ if (!mc_bus)
+ return -ENOMEM;
+
+ mc_dev = &mc_bus->mc_dev;
+ } else {
+ /*
+ * Allocate a regular fsl_mc_device object:
+ */
+ mc_dev = kzalloc(sizeof(*mc_dev), GFP_KERNEL);
+ if (!mc_dev)
+ return -ENOMEM;
+ }
+
+ mc_dev->obj_desc = *obj_desc;
+ mc_dev->mc_io = mc_io;
+ device_initialize(&mc_dev->dev);
+ mc_dev->dev.parent = parent_dev;
+ mc_dev->dev.bus = &fsl_mc_bus_type;
+ mc_dev->dev.release = fsl_mc_device_release;
+ mc_dev->dev.type = fsl_mc_get_device_type(obj_desc->type);
+ if (!mc_dev->dev.type) {
+ error = -ENODEV;
+ dev_err(parent_dev, "unknown device type %s\n", obj_desc->type);
+ goto error_cleanup_dev;
+ }
+ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id);
+
+ if (strcmp(obj_desc->type, "dprc") == 0) {
+ struct fsl_mc_io *mc_io2;
+
+ mc_dev->flags |= FSL_MC_IS_DPRC;
+
+ /*
+ * To get the DPRC's ICID, we need to open the DPRC
+ * in get_dprc_icid(). For child DPRCs, we do so using the
+ * parent DPRC's MC portal instead of the child DPRC's MC
+ * portal, in case the child DPRC is already opened with
+ * its own portal (e.g., the DPRC used by AIOP).
+ *
+ * NOTE: There cannot be more than one active open for a
+ * given MC object, using the same MC portal.
+ */
+ if (parent_mc_dev) {
+ /*
+ * device being added is a child DPRC device
+ */
+ mc_io2 = parent_mc_dev->mc_io;
+ } else {
+ /*
+ * device being added is the root DPRC device
+ */
+ if (!mc_io) {
+ error = -EINVAL;
+ goto error_cleanup_dev;
+ }
+
+ mc_io2 = mc_io;
+ }
+
+ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid);
+ if (error < 0)
+ goto error_cleanup_dev;
+ } else {
+ /*
+ * A non-DPRC object has to be a child of a DPRC, so use the
+ * parent's ICID and interrupt domain.
+ */
+ mc_dev->icid = parent_mc_dev->icid;
+ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
+ mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+ dev_set_msi_domain(&mc_dev->dev,
+ dev_get_msi_domain(&parent_mc_dev->dev));
+ }
+
+ /*
+ * Get MMIO regions for the device from the MC:
+ *
+ * NOTE: the root DPRC is a special case as its MMIO region is
+ * obtained from the device tree
+ */
+ if (parent_mc_dev && obj_desc->region_count != 0) {
+ error = fsl_mc_device_get_mmio_regions(mc_dev,
+ parent_mc_dev);
+ if (error < 0)
+ goto error_cleanup_dev;
+ }
+
+ /* Objects are coherent, unless 'no shareability' flag set. */
+ if (!(obj_desc->flags & FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY))
+ arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
+
+ /*
+ * The device-specific probe callback will get invoked by device_add()
+ */
+ error = device_add(&mc_dev->dev);
+ if (error < 0) {
+ dev_err(parent_dev,
+ "device_add() failed for device %s: %d\n",
+ dev_name(&mc_dev->dev), error);
+ goto error_cleanup_dev;
+ }
+
+ dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
+
+ *new_mc_dev = mc_dev;
+ return 0;
+
+error_cleanup_dev:
+ kfree(mc_dev->regions);
+ kfree(mc_bus);
+ kfree(mc_dev);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_device_add);
+
+/**
+ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
+ * Linux
+ *
+ * @mc_dev: Pointer to an fsl-mc device
+ */
+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
+{
+ /*
+ * The device-specific remove callback will get invoked by device_del()
+ */
+ device_del(&mc_dev->dev);
+ put_device(&mc_dev->dev);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_device_remove);
+
+static int parse_mc_ranges(struct device *dev,
+ int *paddr_cells,
+ int *mc_addr_cells,
+ int *mc_size_cells,
+ const __be32 **ranges_start)
+{
+ const __be32 *prop;
+ int range_tuple_cell_count;
+ int ranges_len;
+ int tuple_len;
+ struct device_node *mc_node = dev->of_node;
+
+ *ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
+ if (!(*ranges_start) || !ranges_len) {
+ dev_warn(dev,
+ "missing or empty ranges property for device tree node '%s'\n",
+ mc_node->name);
+ return 0;
+ }
+
+ *paddr_cells = of_n_addr_cells(mc_node);
+
+ prop = of_get_property(mc_node, "#address-cells", NULL);
+ if (prop)
+ *mc_addr_cells = be32_to_cpup(prop);
+ else
+ *mc_addr_cells = *paddr_cells;
+
+ prop = of_get_property(mc_node, "#size-cells", NULL);
+ if (prop)
+ *mc_size_cells = be32_to_cpup(prop);
+ else
+ *mc_size_cells = of_n_size_cells(mc_node);
+
+ range_tuple_cell_count = *paddr_cells + *mc_addr_cells +
+ *mc_size_cells;
+
+ tuple_len = range_tuple_cell_count * sizeof(__be32);
+ if (ranges_len % tuple_len != 0) {
+ dev_err(dev, "malformed ranges property '%s'\n", mc_node->name);
+ return -EINVAL;
+ }
+
+ return ranges_len / tuple_len;
+}
+
+static int get_mc_addr_translation_ranges(struct device *dev,
+ struct fsl_mc_addr_translation_range
+ **ranges,
+ u8 *num_ranges)
+{
+ int ret;
+ int paddr_cells;
+ int mc_addr_cells;
+ int mc_size_cells;
+ int i;
+ const __be32 *ranges_start;
+ const __be32 *cell;
+
+ ret = parse_mc_ranges(dev,
+ &paddr_cells,
+ &mc_addr_cells,
+ &mc_size_cells,
+ &ranges_start);
+ if (ret < 0)
+ return ret;
+
+ *num_ranges = ret;
+ if (!ret) {
+ /*
+ * Missing or empty ranges property ("ranges;") for the
+ * 'fsl,qoriq-mc' node. In this case, identity mapping
+ * will be used.
+ */
+ *ranges = NULL;
+ return 0;
+ }
+
+ *ranges = devm_kcalloc(dev, *num_ranges,
+ sizeof(struct fsl_mc_addr_translation_range),
+ GFP_KERNEL);
+ if (!(*ranges))
+ return -ENOMEM;
+
+ cell = ranges_start;
+ for (i = 0; i < *num_ranges; ++i) {
+ struct fsl_mc_addr_translation_range *range = &(*ranges)[i];
+
+ range->mc_region_type = of_read_number(cell, 1);
+ range->start_mc_offset = of_read_number(cell + 1,
+ mc_addr_cells - 1);
+ cell += mc_addr_cells;
+ range->start_phys_addr = of_read_number(cell, paddr_cells);
+ cell += paddr_cells;
+ range->end_mc_offset = range->start_mc_offset +
+ of_read_number(cell, mc_size_cells);
+
+ cell += mc_size_cells;
+ }
+
+ return 0;
+}
+
+/**
+ * fsl_mc_bus_probe - callback invoked when the root MC bus is being
+ * added
+ */
+static int fsl_mc_bus_probe(struct platform_device *pdev)
+{
+ struct fsl_mc_obj_desc obj_desc;
+ int error;
+ struct fsl_mc *mc;
+ struct fsl_mc_device *mc_bus_dev = NULL;
+ struct fsl_mc_io *mc_io = NULL;
+ int container_id;
+ phys_addr_t mc_portal_phys_addr;
+ u32 mc_portal_size;
+ struct mc_version mc_version;
+ struct resource res;
+
+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
+ if (!mc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mc);
+
+ /*
+ * Get physical address of MC portal for the root DPRC:
+ */
+ error = of_address_to_resource(pdev->dev.of_node, 0, &res);
+ if (error < 0) {
+ dev_err(&pdev->dev,
+ "of_address_to_resource() failed for %pOF\n",
+ pdev->dev.of_node);
+ return error;
+ }
+
+ mc_portal_phys_addr = res.start;
+ mc_portal_size = resource_size(&res);
+ error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
+ mc_portal_size, NULL,
+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
+ if (error < 0)
+ return error;
+
+ error = mc_get_version(mc_io, 0, &mc_version);
+ if (error != 0) {
+ dev_err(&pdev->dev,
+ "mc_get_version() failed with error %d\n", error);
+ goto error_cleanup_mc_io;
+ }
+
+ dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
+ mc_version.major, mc_version.minor, mc_version.revision);
+
+ error = get_mc_addr_translation_ranges(&pdev->dev,
+ &mc->translation_ranges,
+ &mc->num_translation_ranges);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+ error = dprc_get_container_id(mc_io, 0, &container_id);
+ if (error < 0) {
+ dev_err(&pdev->dev,
+ "dprc_get_container_id() failed: %d\n", error);
+ goto error_cleanup_mc_io;
+ }
+
+ memset(&obj_desc, 0, sizeof(struct fsl_mc_obj_desc));
+ error = dprc_get_api_version(mc_io, 0,
+ &obj_desc.ver_major,
+ &obj_desc.ver_minor);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
+ strcpy(obj_desc.type, "dprc");
+ obj_desc.id = container_id;
+ obj_desc.irq_count = 1;
+ obj_desc.region_count = 0;
+
+ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, &mc_bus_dev);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
+ mc->root_mc_bus_dev = mc_bus_dev;
+ return 0;
+
+error_cleanup_mc_io:
+ fsl_destroy_mc_io(mc_io);
+ return error;
+}
+
+/**
+ * fsl_mc_bus_remove - callback invoked when the root MC bus is being
+ * removed
+ */
+static int fsl_mc_bus_remove(struct platform_device *pdev)
+{
+ struct fsl_mc *mc = platform_get_drvdata(pdev);
+
+ if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
+ return -EINVAL;
+
+ fsl_mc_device_remove(mc->root_mc_bus_dev);
+
+ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
+ mc->root_mc_bus_dev->mc_io = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id fsl_mc_bus_match_table[] = {
+ {.compatible = "fsl,qoriq-mc",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
+
+static struct platform_driver fsl_mc_bus_driver = {
+ .driver = {
+ .name = "fsl_mc_bus",
+ .pm = NULL,
+ .of_match_table = fsl_mc_bus_match_table,
+ },
+ .probe = fsl_mc_bus_probe,
+ .remove = fsl_mc_bus_remove,
+};
+
+static int __init fsl_mc_bus_driver_init(void)
+{
+ int error;
+
+ error = bus_register(&fsl_mc_bus_type);
+ if (error < 0) {
+ pr_err("bus type registration failed: %d\n", error);
+ goto error_cleanup_cache;
+ }
+
+ error = platform_driver_register(&fsl_mc_bus_driver);
+ if (error < 0) {
+ pr_err("platform_driver_register() failed: %d\n", error);
+ goto error_cleanup_bus;
+ }
+
+ error = dprc_driver_init();
+ if (error < 0)
+ goto error_cleanup_driver;
+
+ error = fsl_mc_allocator_driver_init();
+ if (error < 0)
+ goto error_cleanup_dprc_driver;
+
+ return 0;
+
+error_cleanup_dprc_driver:
+ dprc_driver_exit();
+
+error_cleanup_driver:
+ platform_driver_unregister(&fsl_mc_bus_driver);
+
+error_cleanup_bus:
+ bus_unregister(&fsl_mc_bus_type);
+
+error_cleanup_cache:
+ return error;
+}
+postcore_initcall(fsl_mc_bus_driver_init);
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
new file mode 100644
index 000000000000..ec35e255b496
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ */
+
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+
+#include "fsl-mc-private.h"
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Generate a unique ID identifying the interrupt (only used within the MSI
+ * irqdomain); combine the ICID with the interrupt index.
+ */
+static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
+ struct msi_desc *desc)
+{
+ /*
+ * Make the base hwirq value ICID * 10000 so it is readable
+ * as a decimal value in /proc/interrupts.
+ */
+ return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+}
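+
+/*
+ * Worked example: for a device with ICID 25 and MSI index 3 the value
+ * computed above is 25 * 10000 + 3 = 250003, so both the ICID and the
+ * interrupt index can be read directly off /proc/interrupts.
+ */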
+
+static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
+ desc);
+}
+#else
+#define fsl_mc_msi_set_desc NULL
+#endif
+
+static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ if (!ops)
+ return;
+
+ /*
+ * set_desc should not be set by the caller
+ */
+ if (!ops->set_desc)
+ ops->set_desc = fsl_mc_msi_set_desc;
+}
+
+static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_device_irq *mc_dev_irq)
+{
+ int error;
+ struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
+ struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
+ struct dprc_irq_cfg irq_cfg;
+
+ /*
+ * msi_desc->msg.address is 0x0 when this function is invoked in
+ * the free_irq() code path. In this case, for the MC, we don't
+ * really need to "unprogram" the MSI, so we just return.
+ */
+ if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
+ return;
+
+ if (!owner_mc_dev)
+ return;
+
+ irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
+ msi_desc->msg.address_lo;
+ irq_cfg.val = msi_desc->msg.data;
+ irq_cfg.irq_num = msi_desc->irq;
+
+ if (owner_mc_dev == mc_bus_dev) {
+ /*
+ * IRQ is for the mc_bus_dev's DPRC itself
+ */
+ error = dprc_set_irq(mc_bus_dev->mc_io,
+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+ mc_bus_dev->mc_handle,
+ mc_dev_irq->dev_irq_index,
+ &irq_cfg);
+ if (error < 0) {
+ dev_err(&owner_mc_dev->dev,
+ "dprc_set_irq() failed: %d\n", error);
+ }
+ } else {
+ /*
+ * IRQ is for a child device of mc_bus_dev
+ */
+ error = dprc_set_obj_irq(mc_bus_dev->mc_io,
+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+ mc_bus_dev->mc_handle,
+ owner_mc_dev->obj_desc.type,
+ owner_mc_dev->obj_desc.id,
+ mc_dev_irq->dev_irq_index,
+ &irq_cfg);
+ if (error < 0) {
+ dev_err(&owner_mc_dev->dev,
+ "dprc_obj_set_irq() failed: %d\n", error);
+ }
+ }
+}
+
+/*
+ * NOTE: This function is invoked with interrupts disabled
+ */
+static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
+ struct msi_msg *msg)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_device_irq *mc_dev_irq =
+ &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
+
+ msi_desc->msg = *msg;
+
+ /*
+ * Program the MSI (paddr, value) pair in the device:
+ */
+ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
+}
+
+static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!chip)
+ return;
+
+ /*
+ * irq_write_msi_msg should not be set by the caller
+ */
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+}
+
+/**
+ * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
+ * @fwnode: Optional firmware node handle of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a fsl-mc MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ fsl_mc_msi_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ fsl_mc_msi_update_chip_ops(info);
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (domain)
+ irq_domain_update_bus_token(domain, DOMAIN_BUS_FSL_MC_MSI);
+
+ return domain;
+}
+
+int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
+ struct irq_domain **mc_msi_domain)
+{
+ struct irq_domain *msi_domain;
+ struct device_node *mc_of_node = mc_platform_dev->of_node;
+
+ msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
+ DOMAIN_BUS_FSL_MC_MSI);
+ if (!msi_domain) {
+ pr_err("Unable to find fsl-mc MSI domain for %pOF\n",
+ mc_of_node);
+
+ return -ENOENT;
+ }
+
+ *mc_msi_domain = msi_domain;
+ return 0;
+}
+
+static void fsl_mc_msi_free_descs(struct device *dev)
+{
+ struct msi_desc *desc, *tmp;
+
+ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+ list_del(&desc->list);
+ free_msi_entry(desc);
+ }
+}
+
+static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
+{
+ unsigned int i;
+ int error;
+ struct msi_desc *msi_desc;
+
+ for (i = 0; i < irq_count; i++) {
+ msi_desc = alloc_msi_entry(dev, 1, NULL);
+ if (!msi_desc) {
+ dev_err(dev, "Failed to allocate msi entry\n");
+ error = -ENOMEM;
+ goto cleanup_msi_descs;
+ }
+
+ msi_desc->fsl_mc.msi_index = i;
+ INIT_LIST_HEAD(&msi_desc->list);
+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+ }
+
+ return 0;
+
+cleanup_msi_descs:
+ fsl_mc_msi_free_descs(dev);
+ return error;
+}
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+ unsigned int irq_count)
+{
+ struct irq_domain *msi_domain;
+ int error;
+
+ if (!list_empty(dev_to_msi_list(dev)))
+ return -EINVAL;
+
+ error = fsl_mc_msi_alloc_descs(dev, irq_count);
+ if (error < 0)
+ return error;
+
+ msi_domain = dev_get_msi_domain(dev);
+ if (!msi_domain) {
+ error = -EINVAL;
+ goto cleanup_msi_descs;
+ }
+
+ /*
+ * NOTE: Calling this function will trigger the invocation of the
+ * its_fsl_mc_msi_prepare() callback
+ */
+ error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
+
+ if (error) {
+ dev_err(dev, "Failed to allocate IRQs\n");
+ goto cleanup_msi_descs;
+ }
+
+ return 0;
+
+cleanup_msi_descs:
+ fsl_mc_msi_free_descs(dev);
+ return error;
+}
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev)
+{
+ struct irq_domain *msi_domain;
+
+ msi_domain = dev_get_msi_domain(dev);
+ if (!msi_domain)
+ return;
+
+ msi_domain_free_irqs(msi_domain, dev);
+
+ if (list_empty(dev_to_msi_list(dev)))
+ return;
+
+ fsl_mc_msi_free_descs(dev);
+}
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
new file mode 100644
index 000000000000..bed990c517f3
--- /dev/null
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale Management Complex (MC) bus private declarations
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef _FSL_MC_PRIVATE_H_
+#define _FSL_MC_PRIVATE_H_
+
+#include <linux/fsl/mc.h>
+#include <linux/mutex.h>
+
+/*
+ * Data Path Management Complex (DPMNG) General API
+ */
+
+/* DPMNG command versioning */
+#define DPMNG_CMD_BASE_VERSION 1
+#define DPMNG_CMD_ID_OFFSET 4
+
+#define DPMNG_CMD(id) (((id) << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
+/* DPMNG command IDs */
+#define DPMNG_CMDID_GET_VERSION DPMNG_CMD(0x831)
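+
+/*
+ * For example, DPMNG_CMDID_GET_VERSION above expands to
+ * (0x831 << 4) | 1 = 0x8311: the low nibble carries the command base
+ * version and the remaining bits the command ID. The DPMCP_CMD() and
+ * DPRC_CMD() macros below follow the same scheme.
+ */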
+
+struct dpmng_rsp_get_version {
+ __le32 revision;
+ __le32 version_major;
+ __le32 version_minor;
+};
+
+/*
+ * Data Path Management Command Portal (DPMCP) API
+ */
+
+/* Minimal supported DPMCP Version */
+#define DPMCP_MIN_VER_MAJOR 3
+#define DPMCP_MIN_VER_MINOR 0
+
+/* DPMCP command versioning */
+#define DPMCP_CMD_BASE_VERSION 1
+#define DPMCP_CMD_ID_OFFSET 4
+
+#define DPMCP_CMD(id) (((id) << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
+
+/* DPMCP command IDs */
+#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
+#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
+#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
+
+struct dpmcp_cmd_open {
+ __le32 dpmcp_id;
+};
+
+/*
+ * Initialization and runtime control APIs for DPMCP
+ */
+int dpmcp_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmcp_id,
+ u16 *token);
+
+int dpmcp_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpmcp_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/*
+ * Data Path Resource Container (DPRC) API
+ */
+
+/* Minimal supported DPRC Version */
+#define DPRC_MIN_VER_MAJOR 6
+#define DPRC_MIN_VER_MINOR 0
+
+/* DPRC command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+
+/* DPRC command IDs */
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
+#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
+
+#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
+
+#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
+#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
+#define DPRC_CMDID_SET_IRQ_MASK DPRC_CMD(0x014)
+#define DPRC_CMDID_GET_IRQ_STATUS DPRC_CMD(0x016)
+#define DPRC_CMDID_CLEAR_IRQ_STATUS DPRC_CMD(0x017)
+
+#define DPRC_CMDID_GET_CONT_ID DPRC_CMD(0x830)
+#define DPRC_CMDID_GET_OBJ_COUNT DPRC_CMD(0x159)
+#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
+#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
+#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
+
+struct dprc_cmd_open {
+ __le32 container_id;
+};
+
+struct dprc_cmd_set_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+#define DPRC_ENABLE 0x1
+
+struct dprc_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_attributes {
+ /* response word 0 */
+ __le32 container_id;
+ __le16 icid;
+ __le16 pad;
+ /* response word 1 */
+ __le32 options;
+ __le32 portal_id;
+};
+
+struct dprc_rsp_get_obj_count {
+ __le32 pad;
+ __le32 obj_count;
+};
+
+struct dprc_cmd_get_obj {
+ __le32 obj_index;
+};
+
+struct dprc_rsp_get_obj {
+ /* response word 0 */
+ __le32 pad0;
+ __le32 id;
+ /* response word 1 */
+ __le16 vendor;
+ u8 irq_count;
+ u8 region_count;
+ __le32 state;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 flags;
+ __le16 pad1;
+ /* response word 3-4 */
+ u8 type[16];
+ /* response word 5-6 */
+ u8 label[16];
+};
+
+struct dprc_cmd_get_obj_region {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le16 pad0;
+ u8 region_index;
+ u8 pad1;
+ /* cmd word 1-2 */
+ __le64 pad2[2];
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_region {
+ /* response word 0 */
+ __le64 pad;
+ /* response word 1 */
+ __le64 base_addr;
+ /* response word 2 */
+ __le32 size;
+};
+
+struct dprc_cmd_set_obj_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+ __le32 obj_id;
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+/*
+ * DPRC API for managing and querying DPAA resources
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int container_id,
+ u16 *token);
+
+int dprc_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/* DPRC IRQ events */
+
+/* IRQ event - Indicates that a new object was added to the container */
+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001
+/* IRQ event - Indicates that an object was removed from the container */
+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002
+/*
+ * IRQ event - Indicates that one of the descendant containers opened by
+ * this container was destroyed
+ */
+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010
+
+/*
+ * IRQ event - Indicates that one of the objects opened by this container
+ * was destroyed
+ */
+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020
+
+/* IRQ event - Indicates that an object was created in the container */
+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040
+
+/**
+ * struct dprc_irq_cfg - IRQ configuration
+ * @paddr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dprc_irq_cfg {
+ phys_addr_t paddr;
+ u32 val;
+ int irq_num;
+};
+
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dprc_attributes - Container attributes
+ * @container_id: Container's ID
+ * @icid: Container's ICID
+ * @portal_id: Container's portal ID
+ * @options: Container's options as set at container's creation
+ */
+struct dprc_attributes {
+ int container_id;
+ u16 icid;
+ int portal_id;
+ u64 options;
+};
+
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dprc_attributes *attributes);
+
+int dprc_get_obj_count(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *obj_count);
+
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int obj_index,
+ struct fsl_mc_obj_desc *obj_desc);
+
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 irq_index,
+ struct dprc_irq_cfg *irq_cfg);
+
+/* Region flags */
+/* Cacheable - Indicates that region should be mapped as cacheable */
+#define DPRC_REGION_CACHEABLE 0x00000001
+
+/**
+ * enum dprc_region_type - Region type
+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region
+ */
+enum dprc_region_type {
+ DPRC_REGION_TYPE_MC_PORTAL,
+ DPRC_REGION_TYPE_QBMAN_PORTAL
+};
+
+/**
+ * struct dprc_region_desc - Mappable region descriptor
+ * @base_offset: Region offset from region's base address.
+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals
+ * base address; for DPIO, region base is offset from SoC QMan portals
+ * base address
+ * @size: Region size (in bytes)
+ * @flags: Region attributes
+ * @type: Portal region type
+ */
+struct dprc_region_desc {
+ u32 base_offset;
+ u32 size;
+ u32 flags;
+ enum dprc_region_type type;
+};
+
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ char *obj_type,
+ int obj_id,
+ u8 region_index,
+ struct dprc_region_desc *region_desc);
+
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int *container_id);
+
+/**
+ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
+ * IRQ pool
+ */
+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
+
+/**
+ * struct fsl_mc_resource_pool - Pool of MC resources of a given
+ * type
+ * @type: type of resources in the pool
+ * @max_count: maximum number of resources in the pool
+ * @free_count: number of free resources in the pool
+ * @mutex: mutex to serialize access to the pool's free list
+ * @free_list: anchor node of list of free resources in the pool
+ * @mc_bus: pointer to the MC bus that owns this resource pool
+ */
+struct fsl_mc_resource_pool {
+ enum fsl_mc_pool_type type;
+ int max_count;
+ int free_count;
+ struct mutex mutex; /* serializes access to free_list */
+ struct list_head free_list;
+ struct fsl_mc_bus *mc_bus;
+};
+
+/**
+ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC
+ * @mc_dev: fsl-mc device for the bus device itself.
+ * @resource_pools: array of resource pools (one pool per resource type)
+ * for this MC bus. These resources represent allocatable entities
+ * from the physical DPRC.
+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
+ * @scan_mutex: Serializes bus scanning
+ * @dprc_attr: DPRC attributes
+ */
+struct fsl_mc_bus {
+ struct fsl_mc_device mc_dev;
+ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
+ struct fsl_mc_device_irq *irq_resources;
+ struct mutex scan_mutex; /* serializes bus scanning */
+ struct dprc_attributes dprc_attr;
+};
+
+#define to_fsl_mc_bus(_mc_dev) \
+ container_of(_mc_dev, struct fsl_mc_bus, mc_dev)
+
+int __must_check fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
+ struct fsl_mc_io *mc_io,
+ struct device *parent_dev,
+ struct fsl_mc_device **new_mc_dev);
+
+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
+
+int __init dprc_driver_init(void);
+
+void dprc_driver_exit(void);
+
+int __init fsl_mc_allocator_driver_init(void);
+
+void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
+
+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
+ enum fsl_mc_pool_type pool_type,
+ struct fsl_mc_resource
+ **new_resource);
+
+void fsl_mc_resource_free(struct fsl_mc_resource *resource);
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+ unsigned int irq_count);
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev);
+
+int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
+ struct irq_domain **mc_msi_domain);
+
+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+ unsigned int irq_count);
+
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
+
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io);
+
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
+
+bool fsl_mc_is_root_dprc(struct device *dev);
+
+#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
new file mode 100644
index 000000000000..7226cfc49b6f
--- /dev/null
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
+ struct fsl_mc_device *dpmcp_dev)
+{
+ int error;
+
+ if (mc_io->dpmcp_dev)
+ return -EINVAL;
+
+ if (dpmcp_dev->mc_io)
+ return -EINVAL;
+
+ error = dpmcp_open(mc_io,
+ 0,
+ dpmcp_dev->obj_desc.id,
+ &dpmcp_dev->mc_handle);
+ if (error < 0)
+ return error;
+
+ mc_io->dpmcp_dev = dpmcp_dev;
+ dpmcp_dev->mc_io = mc_io;
+ return 0;
+}
+
+static void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
+{
+ int error;
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ error = dpmcp_close(mc_io,
+ 0,
+ dpmcp_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
+ error);
+ }
+
+ mc_io->dpmcp_dev = NULL;
+ dpmcp_dev->mc_io = NULL;
+}
+
+/**
+ * Creates an MC I/O object
+ *
+ * @dev: device to be associated with the MC I/O object
+ * @mc_portal_phys_addr: physical address of the MC portal to use
+ * @mc_portal_size: size in bytes of the MC portal
+ * @dpmcp_dev: Pointer to the DPMCP object associated with this MC I/O
+ * object or NULL if none.
+ * @flags: flags for the new MC I/O object
+ * @new_mc_io: Area to return pointer to newly created MC I/O object
+ *
+ * Returns '0' on Success; Error code otherwise.
+ */
+int __must_check fsl_create_mc_io(struct device *dev,
+ phys_addr_t mc_portal_phys_addr,
+ u32 mc_portal_size,
+ struct fsl_mc_device *dpmcp_dev,
+ u32 flags, struct fsl_mc_io **new_mc_io)
+{
+ int error;
+ struct fsl_mc_io *mc_io;
+ void __iomem *mc_portal_virt_addr;
+ struct resource *res;
+
+ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
+ if (!mc_io)
+ return -ENOMEM;
+
+ mc_io->dev = dev;
+ mc_io->flags = flags;
+ mc_io->portal_phys_addr = mc_portal_phys_addr;
+ mc_io->portal_size = mc_portal_size;
+ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ spin_lock_init(&mc_io->spinlock);
+ else
+ mutex_init(&mc_io->mutex);
+
+ res = devm_request_mem_region(dev,
+ mc_portal_phys_addr,
+ mc_portal_size,
+ "mc_portal");
+ if (!res) {
+ dev_err(dev,
+ "devm_request_mem_region failed for MC portal %pa\n",
+ &mc_portal_phys_addr);
+ return -EBUSY;
+ }
+
+ mc_portal_virt_addr = devm_ioremap_nocache(dev,
+ mc_portal_phys_addr,
+ mc_portal_size);
+ if (!mc_portal_virt_addr) {
+ dev_err(dev,
+ "devm_ioremap_nocache failed for MC portal %pa\n",
+ &mc_portal_phys_addr);
+ return -ENXIO;
+ }
+
+ mc_io->portal_virt_addr = mc_portal_virt_addr;
+ if (dpmcp_dev) {
+ error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
+ if (error < 0)
+ goto error_destroy_mc_io;
+ }
+
+ *new_mc_io = mc_io;
+ return 0;
+
+error_destroy_mc_io:
+ fsl_destroy_mc_io(mc_io);
+ return error;
+}
+
+/**
+ * Destroys an MC I/O object
+ *
+ * @mc_io: MC I/O object to destroy
+ */
+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ if (dpmcp_dev)
+ fsl_mc_io_unset_dpmcp(mc_io);
+
+ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
+ devm_release_mem_region(mc_io->dev,
+ mc_io->portal_phys_addr,
+ mc_io->portal_size);
+
+ mc_io->portal_virt_addr = NULL;
+ devm_kfree(mc_io->dev, mc_io);
+}
+
+/**
+ * fsl_mc_portal_allocate - Allocates an MC portal
+ *
+ * @mc_dev: MC device for which the MC portal is to be allocated
+ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated
+ * MC portal.
+ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object
+ * that wraps the allocated MC portal is to be returned
+ *
+ * This function allocates an MC portal from the device's parent DPRC,
+ * from the corresponding MC bus' pool of MC portals and wraps
+ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the
+ * portal is allocated from its own MC bus.
+ */
+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
+ u16 mc_io_flags,
+ struct fsl_mc_io **new_mc_io)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct fsl_mc_bus *mc_bus;
+ phys_addr_t mc_portal_phys_addr;
+ size_t mc_portal_size;
+ struct fsl_mc_device *dpmcp_dev;
+ int error = -EINVAL;
+ struct fsl_mc_resource *resource = NULL;
+ struct fsl_mc_io *mc_io = NULL;
+
+ if (mc_dev->flags & FSL_MC_IS_DPRC) {
+ mc_bus_dev = mc_dev;
+ } else {
+ if (!dev_is_fsl_mc(mc_dev->dev.parent))
+ return error;
+
+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+ }
+
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ *new_mc_io = NULL;
+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource);
+ if (error < 0)
+ return error;
+
+ error = -EINVAL;
+ dpmcp_dev = resource->data;
+
+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
+ dev_err(&dpmcp_dev->dev,
+ "ERROR: Version %d.%d of DPMCP not supported.\n",
+ dpmcp_dev->obj_desc.ver_major,
+ dpmcp_dev->obj_desc.ver_minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_resource;
+ }
+
+ mc_portal_phys_addr = dpmcp_dev->regions[0].start;
+ mc_portal_size = resource_size(dpmcp_dev->regions);
+
+ error = fsl_create_mc_io(&mc_bus_dev->dev,
+ mc_portal_phys_addr,
+ mc_portal_size, dpmcp_dev,
+ mc_io_flags, &mc_io);
+ if (error < 0)
+ goto error_cleanup_resource;
+
+ *new_mc_io = mc_io;
+ return 0;
+
+error_cleanup_resource:
+ fsl_mc_resource_free(resource);
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate);
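+
+/*
+ * Typical usage sketch (hypothetical "dpfoo" driver, not part of this
+ * patch): an object driver allocates a portal in its probe callback,
+ * resets it before use, and returns it to the pool on teardown with
+ * fsl_mc_portal_free():
+ *
+ *	static int dpfoo_probe(struct fsl_mc_device *mc_dev)
+ *	{
+ *		int err;
+ *
+ *		err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+ *		if (err)
+ *			return err;
+ *
+ *		err = fsl_mc_portal_reset(mc_dev->mc_io);
+ *		if (err) {
+ *			fsl_mc_portal_free(mc_dev->mc_io);
+ *			return err;
+ *		}
+ *
+ *		... send object-specific MC commands via mc_dev->mc_io ...
+ *		return 0;
+ *	}
+ */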
+
+/**
+ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals
+ * of a given MC bus
+ *
+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
+ */
+void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
+{
+ struct fsl_mc_device *dpmcp_dev;
+ struct fsl_mc_resource *resource;
+
+ /*
+ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed
+ * to have a DPMCP object associated with it.
+ */
+ dpmcp_dev = mc_io->dpmcp_dev;
+
+ resource = dpmcp_dev->resource;
+ if (!resource || resource->type != FSL_MC_POOL_DPMCP)
+ return;
+
+ if (resource->data != dpmcp_dev)
+ return;
+
+ fsl_destroy_mc_io(mc_io);
+ fsl_mc_resource_free(resource);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
+
+/**
+ * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
+ *
+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to reset
+ */
+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
+{
+ int error;
+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+
+ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
+ if (error < 0) {
+ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
new file mode 100644
index 000000000000..bd03f1524ecb
--- /dev/null
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * I/O services to send MC commands to the MC hardware
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+/**
+ * Timeout in milliseconds to wait for the completion of an MC command
+ */
+#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+
+/*
+ * usleep_range() min and max values used to throttle down polling
+ * iterations while waiting for MC command completion
+ */
+#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
+#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
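+
+/*
+ * With the values above, the atomic polling path below retries at most
+ * 500 ms / 500 us = 1000 times before timing out; the BUILD_BUG_ON in
+ * mc_polling_wait_atomic() depends on the timeout being an exact
+ * multiple of the maximum sleep interval.
+ */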
+
+static enum mc_cmd_status mc_cmd_hdr_read_status(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+
+ return (enum mc_cmd_status)hdr->status;
+}
+
+static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 cmd_id = le16_to_cpu(hdr->cmd_id);
+
+ return cmd_id;
+}
+
+static int mc_status_to_error(enum mc_cmd_status status)
+{
+ static const int mc_status_to_error_map[] = {
+ [MC_CMD_STATUS_OK] = 0,
+ [MC_CMD_STATUS_AUTH_ERR] = -EACCES,
+ [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
+ [MC_CMD_STATUS_DMA_ERR] = -EIO,
+ [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
+ [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
+ [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
+ [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
+ [MC_CMD_STATUS_BUSY] = -EBUSY,
+ [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
+ [MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
+ };
+
+ if ((u32)status >= ARRAY_SIZE(mc_status_to_error_map))
+ return -EINVAL;
+
+ return mc_status_to_error_map[status];
+}
+
+static const char *mc_status_to_string(enum mc_cmd_status status)
+{
+ static const char *const status_strings[] = {
+ [MC_CMD_STATUS_OK] = "Command completed successfully",
+ [MC_CMD_STATUS_READY] = "Command ready to be processed",
+ [MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
+ [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
+ [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
+ [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
+ [MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
+ [MC_CMD_STATUS_NO_RESOURCE] = "No resources",
+ [MC_CMD_STATUS_NO_MEMORY] = "No memory available",
+ [MC_CMD_STATUS_BUSY] = "Device is busy",
+ [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
+ [MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
+ };
+
+ if ((unsigned int)status >= ARRAY_SIZE(status_strings))
+ return "Unknown MC error";
+
+ return status_strings[status];
+}
+
+/**
+ * mc_write_command - writes a command to a Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @cmd: pointer to a filled command
+ */
+static inline void mc_write_command(struct mc_command __iomem *portal,
+ struct mc_command *cmd)
+{
+ int i;
+
+ /* copy command parameters into the portal */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ /*
+ * Data is already in the expected LE byte-order. Do an
+ * extra LE -> CPU conversion so that the CPU -> LE done in
+ * the device io write api puts it back in the right order.
+ */
+ writeq_relaxed(le64_to_cpu(cmd->params[i]), &portal->params[i]);
+
+ /* submit the command by writing the header */
+ writeq(le64_to_cpu(cmd->header), &portal->header);
+}
+
+/**
+ * mc_read_response - reads the response for the last MC command from a
+ * Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @resp: pointer to command response buffer
+ *
+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
+ */
+static inline enum mc_cmd_status
+mc_read_response(struct mc_command __iomem *portal, struct mc_command *resp)
+{
+ int i;
+ enum mc_cmd_status status;
+
+ /* Copy command response header from MC portal: */
+ resp->header = cpu_to_le64(readq_relaxed(&portal->header));
+ status = mc_cmd_hdr_read_status(resp);
+ if (status != MC_CMD_STATUS_OK)
+ return status;
+
+ /* Copy command response data from MC portal: */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ /*
+ * Data is expected to be in LE byte-order. Do an
+ * extra CPU -> LE to revert the LE -> CPU done in
+ * the device io read api.
+ */
+ resp->params[i] =
+ cpu_to_le64(readq_relaxed(&portal->params[i]));
+
+ return status;
+}
+
+/**
+ * Waits for the completion of an MC command doing preemptible polling.
+ * usleep_range() is called between polling iterations.
+ *
+ * @mc_io: MC I/O object to be used
+ * @cmd: command buffer to receive MC response
+ * @mc_status: MC command completion status
+ */
+static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
+ struct mc_command *cmd,
+ enum mc_cmd_status *mc_status)
+{
+ enum mc_cmd_status status;
+ unsigned long jiffies_until_timeout =
+ jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS);
+
+ /*
+ * Wait for response from the MC hardware:
+ */
+ for (;;) {
+ status = mc_read_response(mc_io->portal_virt_addr, cmd);
+ if (status != MC_CMD_STATUS_READY)
+ break;
+
+ /*
+ * TODO: When MC command completion interrupts are supported
+ * call wait function here instead of usleep_range()
+ */
+ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+
+ if (time_after_eq(jiffies, jiffies_until_timeout)) {
+ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+
+ return -ETIMEDOUT;
+ }
+ }
+
+ *mc_status = status;
+ return 0;
+}
+
+/**
+ * Waits for the completion of an MC command doing atomic polling.
+ * udelay() is called between polling iterations.
+ *
+ * @mc_io: MC I/O object to be used
+ * @cmd: command buffer to receive MC response
+ * @mc_status: MC command completion status
+ */
+static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
+ struct mc_command *cmd,
+ enum mc_cmd_status *mc_status)
+{
+ enum mc_cmd_status status;
+ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
+
+ BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) %
+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0);
+
+ for (;;) {
+ status = mc_read_response(mc_io->portal_virt_addr, cmd);
+ if (status != MC_CMD_STATUS_READY)
+ break;
+
+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+ if (timeout_usecs == 0) {
+ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %pa, dprc handle: %#x, command: %#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
+
+ return -ETIMEDOUT;
+ }
+ }
+
+ *mc_status = status;
+ return 0;
+}
+
+/**
+ * Sends a command to the MC device using the given MC I/O object
+ *
+ * @mc_io: MC I/O object to be used
+ * @cmd: command to be sent
+ *
+ * Returns '0' on Success; Error code otherwise.
+ */
+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
+{
+ int error;
+ enum mc_cmd_status status;
+ unsigned long irq_flags = 0;
+
+ if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ return -EINVAL;
+
+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ spin_lock_irqsave(&mc_io->spinlock, irq_flags);
+ else
+ mutex_lock(&mc_io->mutex);
+
+ /*
+ * Send command to the MC hardware:
+ */
+ mc_write_command(mc_io->portal_virt_addr, cmd);
+
+ /*
+ * Wait for response from the MC hardware:
+ */
+ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ error = mc_polling_wait_preemptible(mc_io, cmd, &status);
+ else
+ error = mc_polling_wait_atomic(mc_io, cmd, &status);
+
+ if (error < 0)
+ goto common_exit;
+
+ if (status != MC_CMD_STATUS_OK) {
+ dev_dbg(mc_io->dev,
+ "MC command failed: portal: %pa, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
+ &mc_io->portal_phys_addr,
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
+ mc_status_to_string(status),
+ (unsigned int)status);
+
+ error = mc_status_to_error(status);
+ goto common_exit;
+ }
+
+ error = 0;
+common_exit:
+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
+ spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
+ else
+ mutex_unlock(&mc_io->mutex);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(mc_send_command);