author    Olof Johansson <olof@lixom.net>  2019-09-29 20:19:18 +0200
committer Olof Johansson <olof@lixom.net>  2019-09-29 20:20:41 +0200
commit    a4207a1c5edc3112b3538769024c7b2ad42b9fe9
tree      739b56fe8aba751719cb7c19f528ce2bd0093f24
parent    ARM: aspeed: ast2500 is ARMv6K
parent    reset: reset-scmi: add missing handle initialisation
Merge tag 'scmi-fixes-5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into arm/fixes
ARM SCMI fixes for v5.4

A couple of fixes: one in the scmi reset driver initialising the missing
scmi handle, and another in the scmi reset API implementation fixing the
assignment of the reset state.

* tag 'scmi-fixes-5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  reset: reset-scmi: add missing handle initialisation
  firmware: arm_scmi: reset: fix reset_state assignment in scmi_domain_reset

Link: https://lore.kernel.org/r/20190918142139.GA4370@bogus
Signed-off-by: Olof Johansson <olof@lixom.net>
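For context, the reset_state fix referenced above is visible in the new reset.c at the bottom of this diff: scmi_domain_reset() must marshal the caller's state argument into the reset_state field of the RESET payload. A minimal sketch of the corrected marshalling, using names from reset.c (t is the scmi_xfer for the request):

	struct scmi_msg_reset_domain_reset *dom = t->tx.buf;

	/* One argument per payload field; the fix ensures the state value
	 * lands in reset_state (the pre-merge code reportedly wrote it to
	 * the wrong field). */
	dom->domain_id   = cpu_to_le32(domain);
	dom->flags       = cpu_to_le32(flags);
	dom->reset_state = cpu_to_le32(state);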
Diffstat (limited to 'drivers/firmware')
-rw-r--r--  drivers/firmware/arm_scmi/Makefile  |   2
-rw-r--r--  drivers/firmware/arm_scmi/base.c    |   2
-rw-r--r--  drivers/firmware/arm_scmi/clock.c   |  33
-rw-r--r--  drivers/firmware/arm_scmi/common.h  |  18
-rw-r--r--  drivers/firmware/arm_scmi/driver.c  | 366
-rw-r--r--  drivers/firmware/arm_scmi/perf.c    | 264
-rw-r--r--  drivers/firmware/arm_scmi/power.c   |   6
-rw-r--r--  drivers/firmware/arm_scmi/reset.c   | 231
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c |  57
9 files changed, 788 insertions(+), 191 deletions(-)
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index c47d28d556b6..5f298f00a82e 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -2,5 +2,5 @@
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
-scmi-protocols-y = base.o clock.o perf.o power.o sensors.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index 204390297f4b..f804e8af6521 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -204,7 +204,7 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(id);
+ put_unaligned_le32(id, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret)
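A pattern repeated throughout this series is replacing direct __le32 casts on the message buffer with the helpers from <asm/unaligned.h>, since the Tx/Rx payload area is not guaranteed to be naturally aligned for the cast type. A small before/after sketch; scmi_msg_set_id is a hypothetical wrapper for illustration, not a driver function:

	#include <asm/unaligned.h>

	static void scmi_msg_set_id(void *tx_buf, u32 id)
	{
		/* old: *(__le32 *)tx_buf = cpu_to_le32(id);  (assumes alignment) */
		put_unaligned_le32(id, tx_buf);		/* safe at any offset */
	}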
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 0a194af92438..32526a793f3a 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -56,7 +56,7 @@ struct scmi_msg_resp_clock_describe_rates {
struct scmi_clock_set_rate {
__le32 flags;
#define CLOCK_SET_ASYNC BIT(0)
-#define CLOCK_SET_DELAYED BIT(1)
+#define CLOCK_SET_IGNORE_RESP BIT(1)
#define CLOCK_SET_ROUND_UP BIT(2)
#define CLOCK_SET_ROUND_AUTO BIT(3)
__le32 id;
@@ -67,6 +67,7 @@ struct scmi_clock_set_rate {
struct clock_info {
int num_clocks;
int max_async_req;
+ atomic_t cur_async_req;
struct scmi_clock_info *clk;
};
@@ -106,7 +107,7 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle,
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+ put_unaligned_le32(clk_id, t->tx.buf);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
@@ -203,39 +204,47 @@ scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
+ put_unaligned_le32(clk_id, t->tx.buf);
ret = scmi_do_xfer(handle, t);
- if (!ret) {
- __le32 *pval = t->rx.buf;
-
- *value = le32_to_cpu(*pval);
- *value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
- }
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
scmi_xfer_put(handle, t);
return ret;
}
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
- u32 config, u64 rate)
+ u64 rate)
{
int ret;
+ u32 flags = 0;
struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg;
+ struct clock_info *ci = handle->clk_priv;
ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
+ if (ci->max_async_req &&
+ atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
+ flags |= CLOCK_SET_ASYNC;
+
cfg = t->tx.buf;
- cfg->flags = cpu_to_le32(config);
+ cfg->flags = cpu_to_le32(flags);
cfg->id = cpu_to_le32(clk_id);
cfg->value_low = cpu_to_le32(rate & 0xffffffff);
cfg->value_high = cpu_to_le32(rate >> 32);
- ret = scmi_do_xfer(handle, t);
+ if (flags & CLOCK_SET_ASYNC)
+ ret = scmi_do_xfer_with_response(handle, t);
+ else
+ ret = scmi_do_xfer(handle, t);
+
+ if (ci->max_async_req)
+ atomic_dec(&ci->cur_async_req);
scmi_xfer_put(handle, t);
return ret;
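The clock.c change above drops the config argument from the rate-set call: whether a request goes out asynchronously is now decided inside the protocol layer, throttled by the new cur_async_req counter against the platform's max_async_req limit. A condensed sketch of that decision, with names from the diff above:

	u32 flags = 0;

	/* Claim an async slot if the platform still has headroom ... */
	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	ret = (flags & CLOCK_SET_ASYNC) ?
	      scmi_do_xfer_with_response(handle, t) :	/* wait for delayed response */
	      scmi_do_xfer(handle, t);			/* ordinary synchronous xfer */

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);		/* ... and release it either way */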
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 44fd4f9404a9..5237c2ff79fe 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -15,6 +15,8 @@
#include <linux/scmi_protocol.h>
#include <linux/types.h>
+#include <asm/unaligned.h>
+
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
@@ -48,11 +50,11 @@ struct scmi_msg_resp_prot_version {
/**
* struct scmi_msg_hdr - Message(Tx/Rx) header
*
- * @id: The identifier of the command being sent
- * @protocol_id: The identifier of the protocol used to send @id command
- * @seq: The token to identify the message. when a message/command returns,
- * the platform returns the whole message header unmodified including
- * the token
+ * @id: The identifier of the message being sent
+ * @protocol_id: The identifier of the protocol used to send @id message
+ * @seq: The token to identify the message. When a message returns, the
+ * platform returns the whole message header unmodified including the
+ * token
* @status: Status of the transfer once it's complete
* @poll_completion: Indicate if the transfer needs to be polled for
* completion or interrupt mode is used
@@ -84,17 +86,21 @@ struct scmi_msg {
* @rx: Receive message, the buffer should be pre-allocated to store
* message. If request-ACK protocol is used, we can reuse the same
* buffer for the rx path as we use for the tx path.
- * @done: completion event
+ * @done: command message transmit completion event
+ * @async_done: pointer to delayed response message received event completion
*/
struct scmi_xfer {
struct scmi_msg_hdr hdr;
struct scmi_msg tx;
struct scmi_msg rx;
struct completion done;
+ struct completion *async_done;
};
void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
+int scmi_do_xfer_with_response(const struct scmi_handle *h,
+ struct scmi_xfer *xfer);
int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p);
int scmi_handle_put(const struct scmi_handle *handle);
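The new async_done pointer gives a single scmi_xfer two completion points: done fires when the immediate response arrives, async_done when the platform later delivers the delayed response. The scmi_do_xfer_with_response() implementation in driver.c below reduces to roughly:

	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;
	ret = scmi_do_xfer(handle, xfer);	/* immediate response path, as before */
	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
		ret = -ETIMEDOUT;		/* delayed response never arrived */
	xfer->async_done = NULL;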
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index b5bc4c7a8fab..3eb0382491ce 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -30,8 +30,14 @@
#include "common.h"
#define MSG_ID_MASK GENMASK(7, 0)
+#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
+#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
+#define MSG_TYPE_COMMAND 0
+#define MSG_TYPE_DELAYED_RESP 2
+#define MSG_TYPE_NOTIFICATION 3
#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
+#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
@@ -86,7 +92,7 @@ struct scmi_desc {
};
/**
- * struct scmi_chan_info - Structure representing a SCMI channel informfation
+ * struct scmi_chan_info - Structure representing a SCMI channel information
*
* @cl: Mailbox Client
* @chan: Transmit/Receive mailbox channel
@@ -111,8 +117,9 @@ struct scmi_chan_info {
* @handle: Instance of SCMI handle to send to clients
* @version: SCMI revision information containing protocol version,
* implementation version and (sub-)vendor identification.
- * @minfo: Message info
- * @tx_idr: IDR object to map protocol id to channel info pointer
+ * @tx_minfo: Universal Transmit Message management info
+ * @tx_idr: IDR object to map protocol id to Tx channel info pointer
+ * @rx_idr: IDR object to map protocol id to Rx channel info pointer
* @protocols_imp: List of protocols implemented, currently maximum of
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
* @node: List head
@@ -123,8 +130,9 @@ struct scmi_info {
const struct scmi_desc *desc;
struct scmi_revision_info version;
struct scmi_handle handle;
- struct scmi_xfers_info minfo;
+ struct scmi_xfers_info tx_minfo;
struct idr tx_idr;
+ struct idr rx_idr;
u8 *protocols_imp;
struct list_head node;
int users;
@@ -182,7 +190,7 @@ static inline int scmi_to_linux_errno(int errno)
static inline void scmi_dump_header_dbg(struct device *dev,
struct scmi_msg_hdr *hdr)
{
- dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
+ dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
hdr->id, hdr->seq, hdr->protocol_id);
}
@@ -190,7 +198,7 @@ static void scmi_fetch_response(struct scmi_xfer *xfer,
struct scmi_shared_mem __iomem *mem)
{
xfer->hdr.status = ioread32(mem->msg_payload);
- /* Skip the length of header and statues in payload area i.e 8 bytes*/
+ /* Skip the length of header and status in payload area i.e 8 bytes */
xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
/* Take a copy to the rx buffer.. */
@@ -198,56 +206,12 @@ static void scmi_fetch_response(struct scmi_xfer *xfer,
}
/**
- * scmi_rx_callback() - mailbox client callback for receive messages
- *
- * @cl: client pointer
- * @m: mailbox message
- *
- * Processes one received message to appropriate transfer information and
- * signals completion of the transfer.
- *
- * NOTE: This function will be invoked in IRQ context, hence should be
- * as optimal as possible.
- */
-static void scmi_rx_callback(struct mbox_client *cl, void *m)
-{
- u16 xfer_id;
- struct scmi_xfer *xfer;
- struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
- struct device *dev = cinfo->dev;
- struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- struct scmi_xfers_info *minfo = &info->minfo;
- struct scmi_shared_mem __iomem *mem = cinfo->payload;
-
- xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
-
- /* Are we even expecting this? */
- if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
- dev_err(dev, "message for %d is not expected!\n", xfer_id);
- return;
- }
-
- xfer = &minfo->xfer_block[xfer_id];
-
- scmi_dump_header_dbg(dev, &xfer->hdr);
- /* Is the message of valid length? */
- if (xfer->rx.len > info->desc->max_msg_size) {
- dev_err(dev, "unable to handle %zu xfer(max %d)\n",
- xfer->rx.len, info->desc->max_msg_size);
- return;
- }
-
- scmi_fetch_response(xfer, mem);
- complete(&xfer->done);
-}
-
-/**
* pack_scmi_header() - packs and returns 32-bit header
*
* @hdr: pointer to header containing all the information on message id,
* protocol id and sequence id.
*
- * Return: 32-bit packed command header to be sent to the platform.
+ * Return: 32-bit packed message header to be sent to the platform.
*/
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
@@ -257,6 +221,18 @@ static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
}
/**
+ * unpack_scmi_header() - unpacks and records message and protocol id
+ *
+ * @msg_hdr: 32-bit packed message header sent from the platform
+ * @hdr: pointer to header to fetch message and protocol id.
+ */
+static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
+{
+ hdr->id = MSG_XTRACT_ID(msg_hdr);
+ hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+}
+
+/**
* scmi_tx_prepare() - mailbox client callback to prepare for the transfer
*
* @cl: client pointer
@@ -271,6 +247,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct scmi_shared_mem __iomem *mem = cinfo->payload;
+ /*
+ * Ideally the channel must be free by now, unless the OS timed out the
+ * last request and the platform continued to process it; wait until it
+ * releases the shared memory, otherwise we may end up overwriting its
+ * response with a new message payload or vice versa.
+ */
+ spin_until_cond(ioread32(&mem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
/* Mark channel busy + clear error */
iowrite32(0x0, &mem->channel_status);
iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
@@ -285,8 +269,9 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
* scmi_xfer_get() - Allocate one message
*
* @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
*
- * Helper function which is used by various command functions that are
+ * Helper function which is used by various message functions that are
* exposed to clients of this driver for allocating a message traffic event.
*
* This function can sleep depending on pending requests already in the system
@@ -295,13 +280,13 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
*
* Return: 0 if all went fine, else corresponding error.
*/
-static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
+static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
+ struct scmi_xfers_info *minfo)
{
u16 xfer_id;
struct scmi_xfer *xfer;
unsigned long flags, bit_pos;
struct scmi_info *info = handle_to_scmi_info(handle);
- struct scmi_xfers_info *minfo = &info->minfo;
/* Keep the locked section as small as possible */
spin_lock_irqsave(&minfo->xfer_lock, flags);
@@ -324,18 +309,17 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
}
/**
- * scmi_xfer_put() - Release a message
+ * __scmi_xfer_put() - Release a message
*
- * @handle: Pointer to SCMI entity handle
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
* @xfer: message that was reserved by scmi_xfer_get
*
* This holds a spinlock to maintain integrity of internal data structures.
*/
-void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+static void
+__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
unsigned long flags;
- struct scmi_info *info = handle_to_scmi_info(handle);
- struct scmi_xfers_info *minfo = &info->minfo;
/*
* Keep the locked section as small as possible
@@ -347,6 +331,68 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
+/**
+ * scmi_rx_callback() - mailbox client callback for receive messages
+ *
+ * @cl: client pointer
+ * @m: mailbox message
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+static void scmi_rx_callback(struct mbox_client *cl, void *m)
+{
+ u8 msg_type;
+ u32 msg_hdr;
+ u16 xfer_id;
+ struct scmi_xfer *xfer;
+ struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
+ struct scmi_shared_mem __iomem *mem = cinfo->payload;
+
+ msg_hdr = ioread32(&mem->msg_header);
+ msg_type = MSG_XTRACT_TYPE(msg_hdr);
+ xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+
+ if (msg_type == MSG_TYPE_NOTIFICATION)
+ return; /* Notifications not yet supported */
+
+ /* Are we even expecting this? */
+ if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+ dev_err(dev, "message for %d is not expected!\n", xfer_id);
+ return;
+ }
+
+ xfer = &minfo->xfer_block[xfer_id];
+
+ scmi_dump_header_dbg(dev, &xfer->hdr);
+
+ scmi_fetch_response(xfer, mem);
+
+ if (msg_type == MSG_TYPE_DELAYED_RESP)
+ complete(xfer->async_done);
+ else
+ complete(&xfer->done);
+}
+
+/**
+ * scmi_xfer_put() - Release a transmit message
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: message that was reserved by scmi_xfer_get
+ */
+void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ __scmi_xfer_put(&info->tx_minfo, xfer);
+}
+
static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
@@ -435,8 +481,36 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
return ret;
}
+#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+
+/**
+ * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
+ * response is received
+ *
+ * @handle: Pointer to SCMI entity handle
+ * @xfer: Transfer to initiate and wait for response
+ *
+ * Return: 0 if all goes well, a corresponding error on transmit failure,
+ * or -ETIMEDOUT if the delayed response is not received.
+ */
+int scmi_do_xfer_with_response(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer)
+{
+ int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+ DECLARE_COMPLETION_ONSTACK(async_response);
+
+ xfer->async_done = &async_response;
+
+ ret = scmi_do_xfer(handle, xfer);
+ if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
+ ret = -ETIMEDOUT;
+
+ xfer->async_done = NULL;
+ return ret;
+}
+
/**
- * scmi_xfer_get_init() - Allocate and initialise one message
+ * scmi_xfer_get_init() - Allocate and initialise one message for transmit
*
* @handle: Pointer to SCMI entity handle
* @msg_id: Message identifier
@@ -457,6 +531,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
int ret;
struct scmi_xfer *xfer;
struct scmi_info *info = handle_to_scmi_info(handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
struct device *dev = info->dev;
/* Ensure we have sane transfer sizes */
@@ -464,7 +539,7 @@ int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
tx_size > info->desc->max_msg_size)
return -ERANGE;
- xfer = scmi_xfer_get(handle);
+ xfer = scmi_xfer_get(handle, minfo);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
@@ -597,27 +672,13 @@ int scmi_handle_put(const struct scmi_handle *handle)
return 0;
}
-static const struct scmi_desc scmi_generic_desc = {
- .max_rx_timeout_ms = 30, /* We may increase this if required */
- .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
- .max_msg_size = 128,
-};
-
-/* Each compatible listed below must have descriptor associated with it */
-static const struct of_device_id scmi_of_match[] = {
- { .compatible = "arm,scmi", .data = &scmi_generic_desc },
- { /* Sentinel */ },
-};
-
-MODULE_DEVICE_TABLE(of, scmi_of_match);
-
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
int i;
struct scmi_xfer *xfer;
struct device *dev = sinfo->dev;
const struct scmi_desc *desc = sinfo->desc;
- struct scmi_xfers_info *info = &sinfo->minfo;
+ struct scmi_xfers_info *info = &sinfo->tx_minfo;
/* Pre-allocated messages, no more than what hdr.seq can support */
if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
@@ -652,61 +713,32 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return 0;
}
-static int scmi_mailbox_check(struct device_node *np)
+static int scmi_mailbox_check(struct device_node *np, int idx)
{
- return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL);
+ return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
+ idx, NULL);
}
-static int scmi_mbox_free_channel(int id, void *p, void *data)
+static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
+ int prot_id, bool tx)
{
- struct scmi_chan_info *cinfo = p;
- struct idr *idr = data;
-
- if (!IS_ERR_OR_NULL(cinfo->chan)) {
- mbox_free_channel(cinfo->chan);
- cinfo->chan = NULL;
- }
-
- idr_remove(idr, id);
-
- return 0;
-}
-
-static int scmi_remove(struct platform_device *pdev)
-{
- int ret = 0;
- struct scmi_info *info = platform_get_drvdata(pdev);
- struct idr *idr = &info->tx_idr;
-
- mutex_lock(&scmi_list_mutex);
- if (info->users)
- ret = -EBUSY;
- else
- list_del(&info->node);
- mutex_unlock(&scmi_list_mutex);
-
- if (ret)
- return ret;
-
- /* Safe to free channels since no more users */
- ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
- idr_destroy(&info->tx_idr);
-
- return ret;
-}
-
-static inline int
-scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
-{
- int ret;
+ int ret, idx;
struct resource res;
resource_size_t size;
struct device_node *shmem, *np = dev->of_node;
struct scmi_chan_info *cinfo;
struct mbox_client *cl;
+ struct idr *idr;
+ const char *desc = tx ? "Tx" : "Rx";
- if (scmi_mailbox_check(np)) {
- cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
+ /* Transmit channel is first entry i.e. index 0 */
+ idx = tx ? 0 : 1;
+ idr = tx ? &info->tx_idr : &info->rx_idr;
+
+ if (scmi_mailbox_check(np, idx)) {
+ cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
+ if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
+ return -EINVAL;
goto idr_alloc;
}
@@ -719,36 +751,36 @@ scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
cl = &cinfo->cl;
cl->dev = dev;
cl->rx_callback = scmi_rx_callback;
- cl->tx_prepare = scmi_tx_prepare;
+ cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
cl->tx_block = false;
- cl->knows_txdone = true;
+ cl->knows_txdone = tx;
- shmem = of_parse_phandle(np, "shmem", 0);
+ shmem = of_parse_phandle(np, "shmem", idx);
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret) {
- dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
+ dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
return ret;
}
size = resource_size(&res);
cinfo->payload = devm_ioremap(info->dev, res.start, size);
if (!cinfo->payload) {
- dev_err(dev, "failed to ioremap SCMI Tx payload\n");
+ dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
return -EADDRNOTAVAIL;
}
- /* Transmit channel is first entry i.e. index 0 */
- cinfo->chan = mbox_request_channel(cl, 0);
+ cinfo->chan = mbox_request_channel(cl, idx);
if (IS_ERR(cinfo->chan)) {
ret = PTR_ERR(cinfo->chan);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to request SCMI Tx mailbox\n");
+ dev_err(dev, "failed to request SCMI %s mailbox\n",
+ desc);
return ret;
}
idr_alloc:
- ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
+ ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
return ret;
@@ -758,6 +790,17 @@ idr_alloc:
return 0;
}
+static inline int
+scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
+{
+ int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);
+
+ if (!ret) /* Rx is optional, hence no error check */
+ scmi_mbox_chan_setup(info, dev, prot_id, false);
+
+ return ret;
+}
+
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
int prot_id)
@@ -771,7 +814,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
return;
}
- if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
+ if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
dev_err(&sdev->dev, "failed to setup transport\n");
scmi_device_destroy(sdev);
return;
@@ -791,7 +834,7 @@ static int scmi_probe(struct platform_device *pdev)
struct device_node *child, *np = dev->of_node;
/* Only mailbox method supported, check for the presence of one */
- if (scmi_mailbox_check(np)) {
+ if (scmi_mailbox_check(np, 0)) {
dev_err(dev, "no mailbox found in %pOF\n", np);
return -EINVAL;
}
@@ -814,12 +857,13 @@ static int scmi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
+ idr_init(&info->rx_idr);
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
- ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
+ ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
if (ret)
return ret;
@@ -854,6 +898,62 @@ static int scmi_probe(struct platform_device *pdev)
return 0;
}
+static int scmi_mbox_free_channel(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct idr *idr = data;
+
+ if (!IS_ERR_OR_NULL(cinfo->chan)) {
+ mbox_free_channel(cinfo->chan);
+ cinfo->chan = NULL;
+ }
+
+ idr_remove(idr, id);
+
+ return 0;
+}
+
+static int scmi_remove(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct scmi_info *info = platform_get_drvdata(pdev);
+ struct idr *idr = &info->tx_idr;
+
+ mutex_lock(&scmi_list_mutex);
+ if (info->users)
+ ret = -EBUSY;
+ else
+ list_del(&info->node);
+ mutex_unlock(&scmi_list_mutex);
+
+ if (ret)
+ return ret;
+
+ /* Safe to free channels since no more users */
+ ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+ idr_destroy(&info->tx_idr);
+
+ idr = &info->rx_idr;
+ ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
+ idr_destroy(&info->rx_idr);
+
+ return ret;
+}
+
+static const struct scmi_desc scmi_generic_desc = {
+ .max_rx_timeout_ms = 30, /* We may increase this if required */
+ .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
+ .max_msg_size = 128,
+};
+
+/* Each compatible listed below must have descriptor associated with it */
+static const struct of_device_id scmi_of_match[] = {
+ { .compatible = "arm,scmi", .data = &scmi_generic_desc },
+ { /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, scmi_of_match);
+
static struct platform_driver scmi_driver = {
.driver = {
.name = "arm-scmi",
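With scmi_rx_callback() now inspecting the message type, one mailbox callback serves all incoming traffic. The dispatch in the hunk above, rewritten as an equivalent switch for clarity (response fetching omitted):

	switch (MSG_XTRACT_TYPE(msg_hdr)) {
	case MSG_TYPE_NOTIFICATION:
		return;				/* notifications not yet supported */
	case MSG_TYPE_DELAYED_RESP:
		complete(xfer->async_done);	/* wakes scmi_do_xfer_with_response() */
		break;
	default:
		complete(&xfer->done);		/* immediate command response */
	}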
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 3c8ae7cc35de..4a8012e3cb8c 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -5,7 +5,10 @@
* Copyright (C) 2018 ARM Ltd.
*/
+#include <linux/bits.h>
#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/sort.h>
@@ -21,6 +24,7 @@ enum scmi_performance_protocol_cmd {
PERF_LEVEL_GET = 0x8,
PERF_NOTIFY_LIMITS = 0x9,
PERF_NOTIFY_LEVEL = 0xa,
+ PERF_DESCRIBE_FASTCHANNEL = 0xb,
};
struct scmi_opp {
@@ -44,6 +48,7 @@ struct scmi_msg_resp_perf_domain_attributes {
#define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
+#define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
__le32 rate_limit_us;
__le32 sustained_freq_khz;
__le32 sustained_perf_level;
@@ -87,17 +92,56 @@ struct scmi_msg_resp_perf_describe_levels {
} opp[0];
};
+struct scmi_perf_get_fc_info {
+ __le32 domain;
+ __le32 message_id;
+};
+
+struct scmi_msg_resp_perf_desc_fc {
+ __le32 attr;
+#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
+#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
+ __le32 rate_limit;
+ __le32 chan_addr_low;
+ __le32 chan_addr_high;
+ __le32 chan_size;
+ __le32 db_addr_low;
+ __le32 db_addr_high;
+ __le32 db_set_lmask;
+ __le32 db_set_hmask;
+ __le32 db_preserve_lmask;
+ __le32 db_preserve_hmask;
+};
+
+struct scmi_fc_db_info {
+ int width;
+ u64 set;
+ u64 mask;
+ void __iomem *addr;
+};
+
+struct scmi_fc_info {
+ void __iomem *level_set_addr;
+ void __iomem *limit_set_addr;
+ void __iomem *level_get_addr;
+ void __iomem *limit_get_addr;
+ struct scmi_fc_db_info *level_set_db;
+ struct scmi_fc_db_info *limit_set_db;
+};
+
struct perf_dom_info {
bool set_limits;
bool set_perf;
bool perf_limit_notify;
bool perf_level_notify;
+ bool perf_fastchannels;
u32 opp_count;
u32 sustained_freq_khz;
u32 sustained_perf_level;
u32 mult_factor;
char name[SCMI_MAX_STR_SIZE];
struct scmi_opp opp[MAX_OPPS];
+ struct scmi_fc_info *fc_info;
};
struct scmi_perf_info {
@@ -151,7 +195,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
@@ -162,6 +206,7 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+ dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
dom_info->sustained_freq_khz =
le32_to_cpu(attr->sustained_freq_khz);
dom_info->sustained_perf_level =
@@ -249,8 +294,42 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
- u32 max_perf, u32 min_perf)
+#define SCMI_PERF_FC_RING_DB(w) \
+do { \
+ u##w val = 0; \
+ \
+ if (db->mask) \
+ val = ioread##w(db->addr) & db->mask; \
+ iowrite##w((u##w)db->set | val, db->addr); \
+} while (0)
+
+static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
+{
+ if (!db || !db->addr)
+ return;
+
+ if (db->width == 1)
+ SCMI_PERF_FC_RING_DB(8);
+ else if (db->width == 2)
+ SCMI_PERF_FC_RING_DB(16);
+ else if (db->width == 4)
+ SCMI_PERF_FC_RING_DB(32);
+ else /* db->width == 8 */
+#ifdef CONFIG_64BIT
+ SCMI_PERF_FC_RING_DB(64);
+#else
+ {
+ u64 val = 0;
+
+ if (db->mask)
+ val = ioread64_hi_lo(db->addr) & db->mask;
+ iowrite64_hi_lo(db->set, db->addr);
+ }
+#endif
+}
+
+static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
+ u32 max_perf, u32 min_perf)
{
int ret;
struct scmi_xfer *t;
@@ -272,8 +351,24 @@ static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
- u32 *max_perf, u32 *min_perf)
+static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
+ u32 max_perf, u32 min_perf)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->limit_set_addr) {
+ iowrite32(max_perf, dom->fc_info->limit_set_addr);
+ iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
+ scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
+ return 0;
+ }
+
+ return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
+}
+
+static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
+ u32 *max_perf, u32 *min_perf)
{
int ret;
struct scmi_xfer *t;
@@ -284,7 +379,7 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret) {
@@ -298,8 +393,23 @@ static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
- u32 level, bool poll)
+static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
+ u32 *max_perf, u32 *min_perf)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->limit_get_addr) {
+ *max_perf = ioread32(dom->fc_info->limit_get_addr);
+ *min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
+ return 0;
+ }
+
+ return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
+}
+
+static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
+ u32 level, bool poll)
{
int ret;
struct scmi_xfer *t;
@@ -321,8 +431,23 @@ static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
return ret;
}
-static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
- u32 *level, bool poll)
+static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
+ u32 level, bool poll)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->level_set_addr) {
+ iowrite32(level, dom->fc_info->level_set_addr);
+ scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
+ return 0;
+ }
+
+ return scmi_perf_mb_level_set(handle, domain, level, poll);
+}
+
+static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
+ u32 *level, bool poll)
{
int ret;
struct scmi_xfer *t;
@@ -333,16 +458,128 @@ static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
return ret;
t->hdr.poll_completion = poll;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret)
- *level = le32_to_cpu(*(__le32 *)t->rx.buf);
+ *level = get_unaligned_le32(t->rx.buf);
scmi_xfer_put(handle, t);
return ret;
}
+static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
+ u32 *level, bool poll)
+{
+ struct scmi_perf_info *pi = handle->perf_priv;
+ struct perf_dom_info *dom = pi->dom_info + domain;
+
+ if (dom->fc_info && dom->fc_info->level_get_addr) {
+ *level = ioread32(dom->fc_info->level_get_addr);
+ return 0;
+ }
+
+ return scmi_perf_mb_level_get(handle, domain, level, poll);
+}
+
+static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
+{
+ if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
+ return true;
+ if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
+ return true;
+ return false;
+}
+
+static void
+scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
+ u32 message_id, void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db)
+{
+ int ret;
+ u32 flags;
+ u64 phys_addr;
+ u8 size;
+ void __iomem *addr;
+ struct scmi_xfer *t;
+ struct scmi_fc_db_info *db;
+ struct scmi_perf_get_fc_info *info;
+ struct scmi_msg_resp_perf_desc_fc *resp;
+
+ if (!p_addr)
+ return;
+
+ ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
+ SCMI_PROTOCOL_PERF,
+ sizeof(*info), sizeof(*resp), &t);
+ if (ret)
+ return;
+
+ info = t->tx.buf;
+ info->domain = cpu_to_le32(domain);
+ info->message_id = cpu_to_le32(message_id);
+
+ ret = scmi_do_xfer(handle, t);
+ if (ret)
+ goto err_xfer;
+
+ resp = t->rx.buf;
+ flags = le32_to_cpu(resp->attr);
+ size = le32_to_cpu(resp->chan_size);
+ if (!scmi_perf_fc_size_is_valid(message_id, size))
+ goto err_xfer;
+
+ phys_addr = le32_to_cpu(resp->chan_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
+ addr = devm_ioremap(handle->dev, phys_addr, size);
+ if (!addr)
+ goto err_xfer;
+ *p_addr = addr;
+
+ if (p_db && SUPPORTS_DOORBELL(flags)) {
+ db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
+ if (!db)
+ goto err_xfer;
+
+ size = 1 << DOORBELL_REG_WIDTH(flags);
+ phys_addr = le32_to_cpu(resp->db_addr_low);
+ phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
+ addr = devm_ioremap(handle->dev, phys_addr, size);
+ if (!addr)
+ goto err_xfer;
+
+ db->addr = addr;
+ db->width = size;
+ db->set = le32_to_cpu(resp->db_set_lmask);
+ db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
+ db->mask = le32_to_cpu(resp->db_preserve_lmask);
+ db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
+ *p_db = db;
+ }
+err_xfer:
+ scmi_xfer_put(handle, t);
+}
+
+static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
+ u32 domain, struct scmi_fc_info **p_fc)
+{
+ struct scmi_fc_info *fc;
+
+ fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return;
+
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
+ &fc->level_set_addr, &fc->level_set_db);
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
+ &fc->level_get_addr, NULL);
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
+ &fc->limit_set_addr, &fc->limit_set_db);
+ scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
+ &fc->limit_get_addr, NULL);
+ *p_fc = fc;
+}
+
/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
@@ -494,6 +731,9 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
scmi_perf_domain_attributes_get(handle, domain, dom);
scmi_perf_describe_levels_get(handle, domain, dom);
+
+ if (dom->perf_fastchannels)
+ scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
}
handle->perf_ops = &perf_ops;
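The SCMI_PERF_FC_RING_DB(w) macro above is a width-generic read-modify-write: bits covered by db->mask are preserved from the current register contents and the set mask is OR-ed on top, so ringing the doorbell disturbs nothing else. Expanded by hand for the 32-bit case:

	u32 val = 0;

	if (db->mask)					/* preserve the masked bits ... */
		val = ioread32(db->addr) & db->mask;
	iowrite32((u32)db->set | val, db->addr);	/* ... then ring the doorbell */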
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 62f3401a1f01..5abef7079c0a 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -96,7 +96,7 @@ scmi_power_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
ret = scmi_do_xfer(handle, t);
@@ -147,11 +147,11 @@ scmi_power_state_get(const struct scmi_handle *handle, u32 domain, u32 *state)
if (ret)
return ret;
- *(__le32 *)t->tx.buf = cpu_to_le32(domain);
+ put_unaligned_le32(domain, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (!ret)
- *state = le32_to_cpu(*(__le32 *)t->rx.buf);
+ *state = get_unaligned_le32(t->rx.buf);
scmi_xfer_put(handle, t);
return ret;
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
new file mode 100644
index 000000000000..ab42c21c5517
--- /dev/null
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Reset Protocol
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ */
+
+#include "common.h"
+
+enum scmi_reset_protocol_cmd {
+ RESET_DOMAIN_ATTRIBUTES = 0x3,
+ RESET = 0x4,
+ RESET_NOTIFY = 0x5,
+};
+
+enum scmi_reset_protocol_notify {
+ RESET_ISSUED = 0x0,
+};
+
+#define NUM_RESET_DOMAIN_MASK 0xffff
+#define RESET_NOTIFY_ENABLE BIT(0)
+
+struct scmi_msg_resp_reset_domain_attributes {
+ __le32 attributes;
+#define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31))
+#define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30))
+ __le32 latency;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_msg_reset_domain_reset {
+ __le32 domain_id;
+ __le32 flags;
+#define AUTONOMOUS_RESET BIT(0)
+#define EXPLICIT_RESET_ASSERT BIT(1)
+#define ASYNCHRONOUS_RESET BIT(2)
+ __le32 reset_state;
+#define ARCH_RESET_TYPE BIT(31)
+#define COLD_RESET_STATE BIT(0)
+#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE)
+};
+
+struct reset_dom_info {
+ bool async_reset;
+ bool reset_notify;
+ u32 latency_us;
+ char name[SCMI_MAX_STR_SIZE];
+};
+
+struct scmi_reset_info {
+ int num_domains;
+ struct reset_dom_info *dom_info;
+};
+
+static int scmi_reset_attributes_get(const struct scmi_handle *handle,
+ struct scmi_reset_info *pi)
+{
+ int ret;
+ struct scmi_xfer *t;
+ u32 attr;
+
+ ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
+ SCMI_PROTOCOL_RESET, 0, sizeof(attr), &t);
+ if (ret)
+ return ret;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ attr = get_unaligned_le32(t->rx.buf);
+ pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int
+scmi_reset_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
+ struct reset_dom_info *dom_info)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_reset_domain_attributes *attr;
+
+ ret = scmi_xfer_get_init(handle, RESET_DOMAIN_ATTRIBUTES,
+ SCMI_PROTOCOL_RESET, sizeof(domain),
+ sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(domain, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ u32 attributes = le32_to_cpu(attr->attributes);
+
+ dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
+ dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
+ dom_info->latency_us = le32_to_cpu(attr->latency);
+ if (dom_info->latency_us == U32_MAX)
+ dom_info->latency_us = 0;
+ strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_reset_num_domains_get(const struct scmi_handle *handle)
+{
+ struct scmi_reset_info *pi = handle->reset_priv;
+
+ return pi->num_domains;
+}
+
+static char *scmi_reset_name_get(const struct scmi_handle *handle, u32 domain)
+{
+ struct scmi_reset_info *pi = handle->reset_priv;
+ struct reset_dom_info *dom = pi->dom_info + domain;
+
+ return dom->name;
+}
+
+static int scmi_reset_latency_get(const struct scmi_handle *handle, u32 domain)
+{
+ struct scmi_reset_info *pi = handle->reset_priv;
+ struct reset_dom_info *dom = pi->dom_info + domain;
+
+ return dom->latency_us;
+}
+
+static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
+ u32 flags, u32 state)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_domain_reset *dom;
+ struct scmi_reset_info *pi = handle->reset_priv;
+ struct reset_dom_info *rdom = pi->dom_info + domain;
+
+ if (rdom->async_reset)
+ flags |= ASYNCHRONOUS_RESET;
+
+ ret = scmi_xfer_get_init(handle, RESET, SCMI_PROTOCOL_RESET,
+ sizeof(*dom), 0, &t);
+ if (ret)
+ return ret;
+
+ dom = t->tx.buf;
+ dom->domain_id = cpu_to_le32(domain);
+ dom->flags = cpu_to_le32(flags);
+ dom->reset_state = cpu_to_le32(state);
+
+ if (rdom->async_reset)
+ ret = scmi_do_xfer_with_response(handle, t);
+ else
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_reset_domain_reset(const struct scmi_handle *handle, u32 domain)
+{
+ return scmi_domain_reset(handle, domain, AUTONOMOUS_RESET,
+ ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_assert(const struct scmi_handle *handle, u32 domain)
+{
+ return scmi_domain_reset(handle, domain, EXPLICIT_RESET_ASSERT,
+ ARCH_COLD_RESET);
+}
+
+static int
+scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain)
+{
+ return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
+}
+
+static struct scmi_reset_ops reset_ops = {
+ .num_domains_get = scmi_reset_num_domains_get,
+ .name_get = scmi_reset_name_get,
+ .latency_get = scmi_reset_latency_get,
+ .reset = scmi_reset_domain_reset,
+ .assert = scmi_reset_domain_assert,
+ .deassert = scmi_reset_domain_deassert,
+};
+
+static int scmi_reset_protocol_init(struct scmi_handle *handle)
+{
+ int domain;
+ u32 version;
+ struct scmi_reset_info *pinfo;
+
+ scmi_version_get(handle, SCMI_PROTOCOL_RESET, &version);
+
+ dev_dbg(handle->dev, "Reset Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ scmi_reset_attributes_get(handle, pinfo);
+
+ pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
+ sizeof(*pinfo->dom_info), GFP_KERNEL);
+ if (!pinfo->dom_info)
+ return -ENOMEM;
+
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ struct reset_dom_info *dom = pinfo->dom_info + domain;
+
+ scmi_reset_domain_attributes_get(handle, domain, dom);
+ }
+
+ handle->reset_ops = &reset_ops;
+ handle->reset_priv = pinfo;
+
+ return 0;
+}
+
+static int __init scmi_reset_init(void)
+{
+ return scmi_protocol_register(SCMI_PROTOCOL_RESET,
+ &scmi_reset_protocol_init);
+}
+subsys_initcall(scmi_reset_init);
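Protocol users reach these operations through handle->reset_ops; the real consumer is drivers/reset/reset-scmi.c, outside this diffstat. A hypothetical caller-side sketch:

	static int example_reset_cycle(const struct scmi_handle *handle, u32 domain)
	{
		int ret = handle->reset_ops->assert(handle, domain);

		if (ret)
			return ret;
		return handle->reset_ops->deassert(handle, domain);
	}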
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 0e94ab56f679..a400ea805fc2 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -9,8 +9,8 @@
enum scmi_sensor_protocol_cmd {
SENSOR_DESCRIPTION_GET = 0x3,
- SENSOR_CONFIG_SET = 0x4,
- SENSOR_TRIP_POINT_SET = 0x5,
+ SENSOR_TRIP_POINT_NOTIFY = 0x4,
+ SENSOR_TRIP_POINT_CONFIG = 0x5,
SENSOR_READING_GET = 0x6,
};
@@ -42,9 +42,10 @@ struct scmi_msg_resp_sensor_description {
} desc[0];
};
-struct scmi_msg_set_sensor_config {
+struct scmi_msg_sensor_trip_point_notify {
__le32 id;
__le32 event_control;
+#define SENSOR_TP_NOTIFY_ALL BIT(0)
};
struct scmi_msg_set_sensor_trip_point {
@@ -119,7 +120,7 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
do {
/* Set the number of sensors to be skipped/already read */
- *(__le32 *)t->tx.buf = cpu_to_le32(desc_index);
+ put_unaligned_le32(desc_index, t->tx.buf);
ret = scmi_do_xfer(handle, t);
if (ret)
@@ -135,9 +136,10 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
}
for (cnt = 0; cnt < num_returned; cnt++) {
- u32 attrh;
+ u32 attrh, attrl;
struct scmi_sensor_info *s;
+ attrl = le32_to_cpu(buf->desc[cnt].attributes_low);
attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
s = &si->sensors[desc_index + cnt];
s->id = le32_to_cpu(buf->desc[cnt].id);
@@ -146,6 +148,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
/* Sign extend to a full s8 */
if (s->scale & SENSOR_SCALE_SIGN)
s->scale |= SENSOR_SCALE_EXTEND;
+ s->async = SUPPORTS_ASYNC_READ(attrl);
+ s->num_trip_points = NUM_TRIP_POINTS(attrl);
strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
}
@@ -160,15 +164,15 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
return ret;
}
-static int
-scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
+static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
+ u32 sensor_id, bool enable)
{
int ret;
- u32 evt_cntl = BIT(0);
+ u32 evt_cntl = enable ? SENSOR_TP_NOTIFY_ALL : 0;
struct scmi_xfer *t;
- struct scmi_msg_set_sensor_config *cfg;
+ struct scmi_msg_sensor_trip_point_notify *cfg;
- ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
+ ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_NOTIFY,
SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -183,15 +187,16 @@ scmi_sensor_configuration_set(const struct scmi_handle *handle, u32 sensor_id)
return ret;
}
-static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
- u32 sensor_id, u8 trip_id, u64 trip_value)
+static int
+scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
+ u8 trip_id, u64 trip_value)
{
int ret;
u32 evt_cntl = SENSOR_TP_BOTH;
struct scmi_xfer *t;
struct scmi_msg_set_sensor_trip_point *trip;
- ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_SET,
+ ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_CONFIG,
SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t);
if (ret)
return ret;
@@ -209,11 +214,13 @@ static int scmi_sensor_trip_point_set(const struct scmi_handle *handle,
}
static int scmi_sensor_reading_get(const struct scmi_handle *handle,
- u32 sensor_id, bool async, u64 *value)
+ u32 sensor_id, u64 *value)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
@@ -223,14 +230,18 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
- sensor->flags = cpu_to_le32(async ? SENSOR_READ_ASYNC : 0);
- ret = scmi_do_xfer(handle, t);
- if (!ret) {
- __le32 *pval = t->rx.buf;
-
- *value = le32_to_cpu(*pval);
- *value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
+ if (s->async) {
+ sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
+ ret = scmi_do_xfer_with_response(handle, t);
+ if (!ret)
+ *value = get_unaligned_le64((void *)
+ ((__le32 *)t->rx.buf + 1));
+ } else {
+ sensor->flags = cpu_to_le32(0);
+ ret = scmi_do_xfer(handle, t);
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
}
scmi_xfer_put(handle, t);
@@ -255,8 +266,8 @@ static int scmi_sensor_count_get(const struct scmi_handle *handle)
static struct scmi_sensor_ops sensor_ops = {
.count_get = scmi_sensor_count_get,
.info_get = scmi_sensor_info_get,
- .configuration_set = scmi_sensor_configuration_set,
- .trip_point_set = scmi_sensor_trip_point_set,
+ .trip_point_notify = scmi_sensor_trip_point_notify,
+ .trip_point_config = scmi_sensor_trip_point_config,
.reading_get = scmi_sensor_reading_get,
};
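Finally, the sensor read path now chooses sync versus async per sensor (s->async from the descriptor) instead of per call. Note the differing payload offsets in the hunk above: the delayed response appears to carry the echoed 32-bit sensor id ahead of the 64-bit value, hence the one-word offset, while the synchronous reply holds the value at offset 0:

	if (s->async) {
		ret = scmi_do_xfer_with_response(handle, t);
		if (!ret)	/* skip the leading sensor id word */
			*value = get_unaligned_le64((__le32 *)t->rx.buf + 1);
	} else {
		ret = scmi_do_xfer(handle, t);
		if (!ret)
			*value = get_unaligned_le64(t->rx.buf);
	}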