Diffstat (limited to 'drivers/net/wwan')
-rw-r--r--  drivers/net/wwan/Kconfig         |  37
-rw-r--r--  drivers/net/wwan/Makefile        |   9
-rw-r--r--  drivers/net/wwan/mhi_wwan_ctrl.c | 284
-rw-r--r--  drivers/net/wwan/wwan_core.c     | 557
4 files changed, 887 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
new file mode 100644
index 000000000000..7ad1920120bc
--- /dev/null
+++ b/drivers/net/wwan/Kconfig
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Wireless WAN device configuration
+#
+
+menuconfig WWAN
+ bool "Wireless WAN"
+ help
+ This section contains the configuration for the Wireless WAN (WWAN)
+ framework and drivers.
+
+if WWAN
+
+config WWAN_CORE
+ tristate "WWAN Driver Core"
+ help
+ Say Y here if you want to use the WWAN driver core. This driver
+ provides a common framework for WWAN drivers.
+
+ To compile this driver as a module, choose M here: the module will be
+ called wwan.
+
+config MHI_WWAN_CTRL
+ tristate "MHI WWAN control driver for QCOM-based PCIe modems"
+ select WWAN_CORE
+ depends on MHI_BUS
+ help
+ MHI WWAN CTRL allows QCOM-based PCIe modems to expose different modem
+ control protocols/ports to userspace, including AT, MBIM, QMI, DIAG
+ and FIREHOSE. These protocols can be accessed directly from userspace
+ (e.g. AT commands) or via libraries/tools (e.g. libmbim, libqmi,
+ libqcdm...).
+
+ To compile this driver as a module, choose M here: the module will be
+ called mhi_wwan_ctrl.
+
+endif # WWAN
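
For reference, a minimal kernel configuration fragment enabling the core and the
MHI control driver as modules might look as follows (a sketch; MHI_BUS comes
from the separate MHI bus layer and must itself be enabled):

    CONFIG_WWAN=y
    CONFIG_WWAN_CORE=m
    CONFIG_MHI_BUS=m
    CONFIG_MHI_WWAN_CTRL=m

This builds the wwan.ko and mhi_wwan_ctrl.ko modules named in the help texts above.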
diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile
new file mode 100644
index 000000000000..556cd90958ca
--- /dev/null
+++ b/drivers/net/wwan/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux WWAN device drivers.
+#
+
+obj-$(CONFIG_WWAN_CORE) += wwan.o
+wwan-objs += wwan_core.o
+
+obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o
diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c
new file mode 100644
index 000000000000..1bc6b69aa530
--- /dev/null
+++ b/drivers/net/wwan/mhi_wwan_ctrl.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/wwan.h>
+
+/* MHI wwan flags */
+enum mhi_wwan_flags {
+ MHI_WWAN_DL_CAP,
+ MHI_WWAN_UL_CAP,
+ MHI_WWAN_RX_REFILL,
+};
+
+#define MHI_WWAN_MAX_MTU 0x8000
+
+struct mhi_wwan_dev {
+ /* Lower level is an MHI device, upper level is a WWAN port */
+ struct mhi_device *mhi_dev;
+ struct wwan_port *wwan_port;
+
+ /* State and capabilities */
+ unsigned long flags;
+ size_t mtu;
+
+ /* Protect against concurrent TX and TX-completion (bh) */
+ spinlock_t tx_lock;
+
+ /* Protect RX budget and rx_refill scheduling */
+ spinlock_t rx_lock;
+ struct work_struct rx_refill;
+
+ /* RX budget is initially set to the size of the MHI RX queue and is
+ * used to limit the number of allocated and queued packets. It is
+ * decremented on data queueing and incremented on data release.
+ */
+ unsigned int rx_budget;
+};
+
+/* Increment RX budget and schedule RX refill if necessary */
+static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
+{
+ spin_lock(&mhiwwan->rx_lock);
+
+ mhiwwan->rx_budget++;
+
+ if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
+ schedule_work(&mhiwwan->rx_refill);
+
+ spin_unlock(&mhiwwan->rx_lock);
+}
+
+/* Decrement RX budget if non-zero and return true on success */
+static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
+{
+ bool ret = false;
+
+ spin_lock(&mhiwwan->rx_lock);
+
+ if (mhiwwan->rx_budget) {
+ mhiwwan->rx_budget--;
+ if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
+ ret = true;
+ }
+
+ spin_unlock(&mhiwwan->rx_lock);
+
+ return ret;
+}
+
+static void __mhi_skb_destructor(struct sk_buff *skb)
+{
+ /* RX buffer has been consumed, increase the allowed budget */
+ mhi_wwan_rx_budget_inc(skb_shinfo(skb)->destructor_arg);
+}
+
+static void mhi_wwan_ctrl_refill_work(struct work_struct *work)
+{
+ struct mhi_wwan_dev *mhiwwan = container_of(work, struct mhi_wwan_dev, rx_refill);
+ struct mhi_device *mhi_dev = mhiwwan->mhi_dev;
+
+ while (mhi_wwan_rx_budget_dec(mhiwwan)) {
+ struct sk_buff *skb;
+
+ skb = alloc_skb(mhiwwan->mtu, GFP_KERNEL);
+ if (!skb) {
+ mhi_wwan_rx_budget_inc(mhiwwan);
+ break;
+ }
+
+ /* To prevent unbounded buffer allocation when nothing consumes the
+ * RX buffers (passed to the WWAN core), track their lifespan so that
+ * no more than the allowed budget is ever allocated.
+ */
+ skb->destructor = __mhi_skb_destructor;
+ skb_shinfo(skb)->destructor_arg = mhiwwan;
+
+ if (mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, mhiwwan->mtu, MHI_EOT)) {
+ dev_err(&mhi_dev->dev, "Failed to queue buffer\n");
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
+static int mhi_wwan_ctrl_start(struct wwan_port *port)
+{
+ struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ /* Start mhi device's channel(s) */
+ ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev);
+ if (ret)
+ return ret;
+
+ /* Don't allocate more buffers than MHI channel queue size */
+ mhiwwan->rx_budget = mhi_get_free_desc_count(mhiwwan->mhi_dev, DMA_FROM_DEVICE);
+
+ /* Add buffers to the MHI inbound queue */
+ if (test_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags)) {
+ set_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
+ mhi_wwan_ctrl_refill_work(&mhiwwan->rx_refill);
+ }
+
+ return 0;
+}
+
+static void mhi_wwan_ctrl_stop(struct wwan_port *port)
+{
+ struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
+
+ spin_lock(&mhiwwan->rx_lock);
+ clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
+ spin_unlock(&mhiwwan->rx_lock);
+
+ cancel_work_sync(&mhiwwan->rx_refill);
+
+ mhi_unprepare_from_transfer(mhiwwan->mhi_dev);
+}
+
+static int mhi_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
+ int ret;
+
+ if (skb->len > mhiwwan->mtu)
+ return -EMSGSIZE;
+
+ if (!test_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags))
+ return -EOPNOTSUPP;
+
+ /* Queue the packet for MHI transfer and check fullness of the queue */
+ spin_lock_bh(&mhiwwan->tx_lock);
+ ret = mhi_queue_skb(mhiwwan->mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+ if (mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
+ wwan_port_txoff(port);
+ spin_unlock_bh(&mhiwwan->tx_lock);
+
+ return ret;
+}
+
+static const struct wwan_port_ops wwan_pops = {
+ .start = mhi_wwan_ctrl_start,
+ .stop = mhi_wwan_ctrl_stop,
+ .tx = mhi_wwan_ctrl_tx,
+};
+
+static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
+ struct wwan_port *port = mhiwwan->wwan_port;
+ struct sk_buff *skb = mhi_result->buf_addr;
+
+ dev_dbg(&mhi_dev->dev, "%s: status: %d xfer_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+ /* The MHI core is done with the buffer, release it */
+ consume_skb(skb);
+
+ /* There is likely a new slot available in the MHI queue, re-allow TX */
+ spin_lock_bh(&mhiwwan->tx_lock);
+ if (!mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
+ wwan_port_txon(port);
+ spin_unlock_bh(&mhiwwan->tx_lock);
+}
+
+static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
+ struct wwan_port *port = mhiwwan->wwan_port;
+ struct sk_buff *skb = mhi_result->buf_addr;
+
+ dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+ if (mhi_result->transaction_status &&
+ mhi_result->transaction_status != -EOVERFLOW) {
+ kfree_skb(skb);
+ return;
+ }
+
+ /* The MHI core does not update skb->len, do it before forwarding */
+ skb_put(skb, mhi_result->bytes_xferd);
+ wwan_port_rx(port, skb);
+
+ /* Do not increment the RX budget nor refill RX buffers now; wait for
+ * the buffer to be consumed. This is done from __mhi_skb_destructor().
+ */
+}
+
+static int mhi_wwan_ctrl_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_wwan_dev *mhiwwan;
+ struct wwan_port *port;
+
+ mhiwwan = kzalloc(sizeof(*mhiwwan), GFP_KERNEL);
+ if (!mhiwwan)
+ return -ENOMEM;
+
+ mhiwwan->mhi_dev = mhi_dev;
+ mhiwwan->mtu = MHI_WWAN_MAX_MTU;
+ INIT_WORK(&mhiwwan->rx_refill, mhi_wwan_ctrl_refill_work);
+ spin_lock_init(&mhiwwan->tx_lock);
+ spin_lock_init(&mhiwwan->rx_lock);
+
+ if (mhi_dev->dl_chan)
+ set_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags);
+ if (mhi_dev->ul_chan)
+ set_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags);
+
+ dev_set_drvdata(&mhi_dev->dev, mhiwwan);
+
+ /* Register as a WWAN port; id->driver_data contains the WWAN port type */
+ port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data,
+ &wwan_pops, mhiwwan);
+ if (IS_ERR(port)) {
+ kfree(mhiwwan);
+ return PTR_ERR(port);
+ }
+
+ mhiwwan->wwan_port = port;
+
+ return 0;
+}
+
+static void mhi_wwan_ctrl_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
+
+ wwan_remove_port(mhiwwan->wwan_port);
+ kfree(mhiwwan);
+}
+
+static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = {
+ { .chan = "DUN", .driver_data = WWAN_PORT_AT },
+ { .chan = "MBIM", .driver_data = WWAN_PORT_MBIM },
+ { .chan = "QMI", .driver_data = WWAN_PORT_QMI },
+ { .chan = "DIAG", .driver_data = WWAN_PORT_QCDM },
+ { .chan = "FIREHOSE", .driver_data = WWAN_PORT_FIREHOSE },
+ {},
+};
+MODULE_DEVICE_TABLE(mhi, mhi_wwan_ctrl_match_table);
+
+static struct mhi_driver mhi_wwan_ctrl_driver = {
+ .id_table = mhi_wwan_ctrl_match_table,
+ .remove = mhi_wwan_ctrl_remove,
+ .probe = mhi_wwan_ctrl_probe,
+ .ul_xfer_cb = mhi_ul_xfer_cb,
+ .dl_xfer_cb = mhi_dl_xfer_cb,
+ .driver = {
+ .name = "mhi_wwan_ctrl",
+ },
+};
+
+module_mhi_driver(mhi_wwan_ctrl_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI WWAN CTRL Driver");
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
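
For illustration, a minimal userspace exchange with one of these control ports
might look like the sketch below. It assumes the DUN channel probed as the first
port of device wwan0, so the node is /dev/wwan0p1AT following the naming scheme
in wwan_core.c; the actual name depends on probe order.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[256];
        ssize_t n;
        /* Hypothetical node name, see dev_set_name() in wwan_core.c */
        int fd = open("/dev/wwan0p1AT", O_RDWR);

        if (fd < 0)
            return 1;

        /* write() invokes the port's tx op (MHI queueing in this driver) */
        if (write(fd, "ATI\r", 4) < 0) {
            close(fd);
            return 1;
        }

        /* read() blocks until wwan_port_rx() queues a response buffer */
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            printf("%s", buf);
        }

        close(fd);
        return 0;
    }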
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
new file mode 100644
index 000000000000..cff04e532c1e
--- /dev/null
+++ b/drivers/net/wwan/wwan_core.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/wwan.h>
+
+#define WWAN_MAX_MINORS 256 /* 256 minors allowed with register_chrdev() */
+
+static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
+static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
+static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
+static struct class *wwan_class;
+static int wwan_major;
+
+#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
+#define to_wwan_port(d) container_of(d, struct wwan_port, dev)
+
+/* WWAN port flags */
+#define WWAN_PORT_TX_OFF 0
+
+/**
+ * struct wwan_device - The structure that defines a WWAN device
+ *
+ * @id: WWAN device unique ID.
+ * @dev: Underlying device.
+ * @port_id: Counter used to assign the next port ID.
+ */
+struct wwan_device {
+ unsigned int id;
+ struct device dev;
+ atomic_t port_id;
+};
+
+/**
+ * struct wwan_port - The structure that defines a WWAN port
+ * @type: Port type
+ * @start_count: Port start counter
+ * @flags: Store port state and capabilities
+ * @ops: Pointer to WWAN port operations
+ * @ops_lock: Protect port ops
+ * @dev: Underlying device
+ * @rxq: Buffer inbound queue
+ * @waitqueue: The waitqueue for port fops (read/write/poll)
+ */
+struct wwan_port {
+ enum wwan_port_type type;
+ unsigned int start_count;
+ unsigned long flags;
+ const struct wwan_port_ops *ops;
+ struct mutex ops_lock; /* Serialize ops + protect against removal */
+ struct device dev;
+ struct sk_buff_head rxq;
+ wait_queue_head_t waitqueue;
+};
+
+static void wwan_dev_destroy(struct device *dev)
+{
+ struct wwan_device *wwandev = to_wwan_dev(dev);
+
+ ida_free(&wwan_dev_ids, wwandev->id);
+ kfree(wwandev);
+}
+
+static const struct device_type wwan_dev_type = {
+ .name = "wwan_dev",
+ .release = wwan_dev_destroy,
+};
+
+static int wwan_dev_parent_match(struct device *dev, const void *parent)
+{
+ return (dev->type == &wwan_dev_type && dev->parent == parent);
+}
+
+static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
+/* This function allocates and registers a new WWAN device, OR, if a WWAN
+ * device already exists for the given parent, takes a reference and returns
+ * it. It is not exported (for now) and is called indirectly via
+ * wwan_create_port().
+ */
+static struct wwan_device *wwan_create_dev(struct device *parent)
+{
+ struct wwan_device *wwandev;
+ int err, id;
+
+ /* The 'find-alloc-register' operation must be protected against
+ * concurrent execution, as a WWAN device may be shared between
+ * multiple callers or concurrently unregistered by wwan_remove_dev().
+ */
+ mutex_lock(&wwan_register_lock);
+
+ /* If wwandev already exists, return it */
+ wwandev = wwan_dev_get_by_parent(parent);
+ if (!IS_ERR(wwandev))
+ goto done_unlock;
+
+ id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
+ if (id < 0) {
+ wwandev = ERR_PTR(id);
+ goto done_unlock;
+ }
+
+ wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
+ if (!wwandev) {
+ wwandev = ERR_PTR(-ENOMEM);
+ ida_free(&wwan_dev_ids, id);
+ goto done_unlock;
+ }
+
+ wwandev->dev.parent = parent;
+ wwandev->dev.class = wwan_class;
+ wwandev->dev.type = &wwan_dev_type;
+ wwandev->id = id;
+ dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
+
+ err = device_register(&wwandev->dev);
+ if (err) {
+ put_device(&wwandev->dev);
+ wwandev = ERR_PTR(err);
+ }
+
+done_unlock:
+ mutex_unlock(&wwan_register_lock);
+
+ return wwandev;
+}
+
+static int is_wwan_child(struct device *dev, void *data)
+{
+ return dev->class == wwan_class;
+}
+
+static void wwan_remove_dev(struct wwan_device *wwandev)
+{
+ int ret;
+
+ /* Prevent concurrent picking from wwan_create_dev */
+ mutex_lock(&wwan_register_lock);
+
+ /* WWAN device is created and registered (get+add) along with its first
+ * child port, and subsequent port registrations only grab a reference
+ * (get). The WWAN device must then be unregistered (del+put) along with
+ * its last port; otherwise the reference is simply dropped (put).
+ */
+ ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
+ if (!ret)
+ device_unregister(&wwandev->dev);
+ else
+ put_device(&wwandev->dev);
+
+ mutex_unlock(&wwan_register_lock);
+}
+
+/* ------- WWAN port management ------- */
+
+static void wwan_port_destroy(struct device *dev)
+{
+ struct wwan_port *port = to_wwan_port(dev);
+
+ ida_free(&minors, MINOR(port->dev.devt));
+ skb_queue_purge(&port->rxq);
+ mutex_destroy(&port->ops_lock);
+ kfree(port);
+}
+
+static const struct device_type wwan_port_dev_type = {
+ .name = "wwan_port",
+ .release = wwan_port_destroy,
+};
+
+static int wwan_port_minor_match(struct device *dev, const void *minor)
+{
+ return (dev->type == &wwan_port_dev_type &&
+ MINOR(dev->devt) == *(unsigned int *)minor);
+}
+
+static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_port(dev);
+}
+
+/* Keep aligned with wwan_port_type enum */
+static const char * const wwan_port_type_str[] = {
+ "AT",
+ "MBIM",
+ "QMI",
+ "QCDM",
+ "FIREHOSE"
+};
+
+struct wwan_port *wwan_create_port(struct device *parent,
+ enum wwan_port_type type,
+ const struct wwan_port_ops *ops,
+ void *drvdata)
+{
+ struct wwan_device *wwandev;
+ struct wwan_port *port;
+ int minor, err = -ENOMEM;
+
+ if (type >= WWAN_PORT_MAX || !ops)
+ return ERR_PTR(-EINVAL);
+
+ /* A port is always a child of a WWAN device, retrieve (allocate or
+ * pick) the WWAN device based on the provided parent device.
+ */
+ wwandev = wwan_create_dev(parent);
+ if (IS_ERR(wwandev))
+ return ERR_CAST(wwandev);
+
+ /* A port is exposed as a character device, get a minor */
+ minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
+ if (minor < 0)
+ goto error_wwandev_remove;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ ida_free(&minors, minor);
+ goto error_wwandev_remove;
+ }
+
+ port->type = type;
+ port->ops = ops;
+ mutex_init(&port->ops_lock);
+ skb_queue_head_init(&port->rxq);
+ init_waitqueue_head(&port->waitqueue);
+
+ port->dev.parent = &wwandev->dev;
+ port->dev.class = wwan_class;
+ port->dev.type = &wwan_port_dev_type;
+ port->dev.devt = MKDEV(wwan_major, minor);
+ dev_set_drvdata(&port->dev, drvdata);
+
+ /* Create a unique name from WWAN device id, port index and type, e.g. "wwan0p1AT" */
+ dev_set_name(&port->dev, "wwan%up%u%s", wwandev->id,
+ atomic_inc_return(&wwandev->port_id),
+ wwan_port_type_str[port->type]);
+
+ err = device_register(&port->dev);
+ if (err)
+ goto error_put_device;
+
+ return port;
+
+error_put_device:
+ put_device(&port->dev);
+error_wwandev_remove:
+ wwan_remove_dev(wwandev);
+
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(wwan_create_port);
+
+void wwan_remove_port(struct wwan_port *port)
+{
+ struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
+
+ mutex_lock(&port->ops_lock);
+ if (port->start_count)
+ port->ops->stop(port);
+ port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
+ mutex_unlock(&port->ops_lock);
+
+ wake_up_interruptible(&port->waitqueue);
+
+ skb_queue_purge(&port->rxq);
+ dev_set_drvdata(&port->dev, NULL);
+ device_unregister(&port->dev);
+
+ /* Release related wwan device */
+ wwan_remove_dev(wwandev);
+}
+EXPORT_SYMBOL_GPL(wwan_remove_port);
+
+void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
+{
+ skb_queue_tail(&port->rxq, skb);
+ wake_up_interruptible(&port->waitqueue);
+}
+EXPORT_SYMBOL_GPL(wwan_port_rx);
+
+void wwan_port_txon(struct wwan_port *port)
+{
+ clear_bit(WWAN_PORT_TX_OFF, &port->flags);
+ wake_up_interruptible(&port->waitqueue);
+}
+EXPORT_SYMBOL_GPL(wwan_port_txon);
+
+void wwan_port_txoff(struct wwan_port *port)
+{
+ set_bit(WWAN_PORT_TX_OFF, &port->flags);
+}
+EXPORT_SYMBOL_GPL(wwan_port_txoff);
+
+void *wwan_port_get_drvdata(struct wwan_port *port)
+{
+ return dev_get_drvdata(&port->dev);
+}
+EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);
+
+static int wwan_port_op_start(struct wwan_port *port)
+{
+ int ret = 0;
+
+ mutex_lock(&port->ops_lock);
+ if (!port->ops) { /* Port got unplugged */
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ /* If port is already started, don't start again */
+ if (!port->start_count)
+ ret = port->ops->start(port);
+
+ if (!ret)
+ port->start_count++;
+
+out_unlock:
+ mutex_unlock(&port->ops_lock);
+
+ return ret;
+}
+
+static void wwan_port_op_stop(struct wwan_port *port)
+{
+ mutex_lock(&port->ops_lock);
+ port->start_count--;
+ if (port->ops && !port->start_count)
+ port->ops->stop(port);
+ mutex_unlock(&port->ops_lock);
+}
+
+static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ int ret;
+
+ mutex_lock(&port->ops_lock);
+ if (!port->ops) { /* Port got unplugged */
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = port->ops->tx(port, skb);
+
+out_unlock:
+ mutex_unlock(&port->ops_lock);
+
+ return ret;
+}
+
+static bool is_read_blocked(struct wwan_port *port)
+{
+ return skb_queue_empty(&port->rxq) && port->ops;
+}
+
+static bool is_write_blocked(struct wwan_port *port)
+{
+ return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
+}
+
+static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
+{
+ if (!is_read_blocked(port))
+ return 0;
+
+ if (nonblock)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
+static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
+{
+ if (!is_write_blocked(port))
+ return 0;
+
+ if (nonblock)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
+static int wwan_port_fops_open(struct inode *inode, struct file *file)
+{
+ struct wwan_port *port;
+ int err = 0;
+
+ port = wwan_port_get_by_minor(iminor(inode));
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ file->private_data = port;
+ stream_open(inode, file);
+
+ err = wwan_port_op_start(port);
+ if (err)
+ put_device(&port->dev);
+
+ return err;
+}
+
+static int wwan_port_fops_release(struct inode *inode, struct file *filp)
+{
+ struct wwan_port *port = filp->private_data;
+
+ wwan_port_op_stop(port);
+ put_device(&port->dev);
+
+ return 0;
+}
+
+static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_port *port = filp->private_data;
+ struct sk_buff *skb;
+ size_t copied;
+ int ret;
+
+ ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ skb = skb_dequeue(&port->rxq);
+ if (!skb)
+ return -EIO;
+
+ copied = min_t(size_t, count, skb->len);
+ if (copy_to_user(buf, skb->data, copied)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+ skb_pull(skb, copied);
+
+ /* skb is not fully consumed, keep it in the queue */
+ if (skb->len)
+ skb_queue_head(&port->rxq, skb);
+ else
+ consume_skb(skb);
+
+ return copied;
+}
+
+static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct wwan_port *port = filp->private_data;
+ struct sk_buff *skb;
+ int ret;
+
+ ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ skb = alloc_skb(count, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ if (copy_from_user(skb_put(skb, count), buf, count)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ ret = wwan_port_op_tx(port, skb);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return count;
+}
+
+static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
+{
+ struct wwan_port *port = filp->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(filp, &port->waitqueue, wait);
+
+ if (!is_write_blocked(port))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ if (!is_read_blocked(port))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (!port->ops)
+ mask |= EPOLLHUP | EPOLLERR;
+
+ return mask;
+}
+
+static const struct file_operations wwan_port_fops = {
+ .owner = THIS_MODULE,
+ .open = wwan_port_fops_open,
+ .release = wwan_port_fops_release,
+ .read = wwan_port_fops_read,
+ .write = wwan_port_fops_write,
+ .poll = wwan_port_fops_poll,
+ .llseek = noop_llseek,
+};
+
+static int __init wwan_init(void)
+{
+ wwan_class = class_create(THIS_MODULE, "wwan");
+ if (IS_ERR(wwan_class))
+ return PTR_ERR(wwan_class);
+
+ /* chrdev used for wwan ports */
+ wwan_major = register_chrdev(0, "wwan_port", &wwan_port_fops);
+ if (wwan_major < 0) {
+ class_destroy(wwan_class);
+ return wwan_major;
+ }
+
+ return 0;
+}
+
+static void __exit wwan_exit(void)
+{
+ unregister_chrdev(wwan_major, "wwan_port");
+ class_destroy(wwan_class);
+}
+
+module_init(wwan_init);
+module_exit(wwan_exit);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("WWAN core");
+MODULE_LICENSE("GPL v2");
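
To summarize the driver-facing API introduced here, a hypothetical minimal port
driver could look roughly like the sketch below (all foo_* names are invented
for illustration; the wwan_* calls and struct wwan_port_ops are the API from
this patch):

    #include <linux/skbuff.h>
    #include <linux/wwan.h>

    struct foo_modem {
        struct wwan_port *port;
    };

    /* Hypothetical hardware hook, stubbed for the sketch */
    static int foo_hw_send(struct foo_modem *foo, struct sk_buff *skb)
    {
        consume_skb(skb);
        return 0;
    }

    static int foo_start(struct wwan_port *port)
    {
        /* Called on first open: bring up the control channel here */
        return 0;
    }

    static void foo_stop(struct wwan_port *port)
    {
        /* Called on last close: tear down the control channel here */
    }

    static int foo_tx(struct wwan_port *port, struct sk_buff *skb)
    {
        struct foo_modem *foo = wwan_port_get_drvdata(port);

        /* Use wwan_port_txoff()/wwan_port_txon() around a full HW
         * queue to throttle writers, as mhi_wwan_ctrl.c does.
         */
        return foo_hw_send(foo, skb);
    }

    static const struct wwan_port_ops foo_port_ops = {
        .start = foo_start,
        .stop = foo_stop,
        .tx = foo_tx,
    };

    /* At probe time: attach an AT port under the physical device */
    static int foo_attach_port(struct device *parent, struct foo_modem *foo)
    {
        foo->port = wwan_create_port(parent, WWAN_PORT_AT,
                                     &foo_port_ops, foo);
        if (IS_ERR(foo->port))
            return PTR_ERR(foo->port);
        return 0;
    }

    /* On received control data: forward an skb to chardev readers */
    static void foo_rx_done(struct foo_modem *foo, struct sk_buff *skb)
    {
        wwan_port_rx(foo->port, skb);
    }

    /* At remove time */
    static void foo_detach_port(struct foo_modem *foo)
    {
        wwan_remove_port(foo->port);
    }

The core takes care of the rest: the parent "wwanX" device is created or reused
on the first wwan_create_port() call, and the chardev plumbing (open/read/write/
poll) is shared across all port drivers.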