Diffstat (limited to 'drivers/net/can/ti_hecc.c')
-rw-r--r--  drivers/net/can/ti_hecc.c | 268
1 file changed, 92 insertions(+), 176 deletions(-)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index db6ea936dc3f..f8b19eef5d26 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -5,6 +5,7 @@
  * specs for the same is available at <http://www.ti.com>
  *
  * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -34,6 +35,7 @@
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 #include <linux/can/led.h>
+#include <linux/can/rx-offload.h>
 
 #define DRV_NAME "ti_hecc"
 #define HECC_MODULE_VERSION	"0.7"
@@ -44,8 +46,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
 #define HECC_MAX_MAILBOXES	32	/* hardware mailboxes - do not change */
 #define MAX_TX_PRIO		0x3F	/* hardware value - do not change */
 
-/*
- * Important Note: TX mailbox configuration
+/* Important Note: TX mailbox configuration
  * TX mailboxes should be restricted to the number of SKB buffers to avoid
  * maintaining SKB buffers separately. TX mailboxes should be a power of 2
  * for the mailbox logic to work. Top mailbox numbers are reserved for RX
@@ -63,29 +64,15 @@ MODULE_VERSION(HECC_MODULE_VERSION);
 #define HECC_TX_PRIO_MASK	(MAX_TX_PRIO << HECC_MB_TX_SHIFT)
 #define HECC_TX_MB_MASK		(HECC_MAX_TX_MBOX - 1)
 #define HECC_TX_MASK		((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
-#define HECC_TX_MBOX_MASK	(~(BIT(HECC_MAX_TX_MBOX) - 1))
-#define HECC_DEF_NAPI_WEIGHT	HECC_MAX_RX_MBOX
 
-/*
- * Important Note: RX mailbox configuration
- * RX mailboxes are further logically split into two - main and buffer
- * mailboxes. The goal is to get all packets into main mailboxes as
- * driven by mailbox number and receive priority (higher to lower) and
- * buffer mailboxes are used to receive pkts while main mailboxes are being
- * processed. This ensures in-order packet reception.
+/* RX mailbox configuration
  *
- * Here are the recommended values for buffer mailbox. Note that RX mailboxes
- * start after TX mailboxes:
- *
- * HECC_MAX_RX_MBOX	HECC_RX_BUFFER_MBOX	No of buffer mailboxes
- * 28			12			8
- * 16			20			4
+ * The remaining mailboxes are used for reception and are delivered
+ * based on their timestamp, to avoid a hardware race when CANME is
+ * changed while CAN-bus traffic is being received.
  */
-
 #define HECC_MAX_RX_MBOX	(HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
-#define HECC_RX_BUFFER_MBOX	12 /* as per table above */
 #define HECC_RX_FIRST_MBOX	(HECC_MAX_MAILBOXES - 1)
-#define HECC_RX_HIGH_MBOX_MASK	(~(BIT(HECC_RX_BUFFER_MBOX) - 1))
 
 /* TI HECC module registers */
 #define HECC_CANME		0x0	/* Mailbox enable */
@@ -117,6 +104,9 @@ MODULE_VERSION(HECC_MODULE_VERSION);
 #define HECC_CANTIOCE		0x68	/* SCC only:Enhanced TX I/O control */
 #define HECC_CANRIOCE		0x6C	/* SCC only:Enhanced RX I/O control */
 
+/* TI HECC RAM registers */
+#define HECC_CANMOTS		0x80	/* Message object time stamp */
+
 /* Mailbox registers */
 #define HECC_CANMID		0x0
 #define HECC_CANMCF		0x4
@@ -193,7 +183,7 @@ static const struct can_bittiming_const ti_hecc_bittiming_const = {
 
 struct ti_hecc_priv {
 	struct can_priv can;	/* MUST be first member/field */
-	struct napi_struct napi;
+	struct can_rx_offload offload;
 	struct net_device *ndev;
 	struct clk *clk;
 	void __iomem *base;
@@ -203,7 +193,6 @@ struct ti_hecc_priv {
 	spinlock_t mbx_lock; /* CANME register needs protection */
 	u32 tx_head;
 	u32 tx_tail;
-	u32 rx_next;
 	struct regulator *reg_xceiver;
 };
 
@@ -227,8 +216,13 @@ static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
 	__raw_writel(val, priv->hecc_ram + mbxno * 4);
 }
 
+static inline u32 hecc_read_stamp(struct ti_hecc_priv *priv, u32 mbxno)
+{
+	return __raw_readl(priv->hecc_ram + HECC_CANMOTS + mbxno * 4);
+}
+
 static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
-		u32 reg, u32 val)
+				  u32 reg, u32 val)
 {
 	__raw_writel(val, priv->mbx + mbxno * 0x10 + reg);
 }
@@ -249,13 +243,13 @@ static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg)
 }
 
 static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg,
-		u32 bit_mask)
+				u32 bit_mask)
 {
 	hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask);
 }
 
 static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg,
-		u32 bit_mask)
+				  u32 bit_mask)
 {
 	hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask);
 }
@@ -277,8 +271,8 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
 		if (bit_timing->brp > 4)
 			can_btc |= HECC_CANBTC_SAM;
 		else
-			netdev_warn(priv->ndev, "WARN: Triple"
-				"sampling not set due to h/w limitations");
+			netdev_warn(priv->ndev,
+				    "WARN: Triple sampling not set due to h/w limitations");
 	}
 	can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
 	can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16;
@@ -314,8 +308,7 @@ static void ti_hecc_reset(struct net_device *ndev)
 	/* Set change control request and wait till enabled */
 	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
 
-	/*
-	 * INFO: It has been observed that at times CCE bit may not be
+	/* INFO: It has been observed that at times CCE bit may not be
 	 * set and hw seems to be ok even if this bit is not set so
 	 * timing out with a timing of 1ms to respect the specs
 	 */
@@ -325,8 +318,7 @@ static void ti_hecc_reset(struct net_device *ndev)
 		udelay(10);
 	}
 
-	/*
-	 * Note: On HECC, BTC can be programmed only in initialization mode, so
+	/* Note: On HECC, BTC can be programmed only in initialization mode, so
 	 * it is expected that the can bittiming parameters are set via ip
 	 * utility before the device is opened
 	 */
@@ -335,13 +327,11 @@ static void ti_hecc_reset(struct net_device *ndev)
 	/* Clear CCR (and CANMC register) and wait for CCE = 0 enable */
 	hecc_write(priv, HECC_CANMC, 0);
 
-	/*
-	 * INFO: CAN net stack handles bus off and hence disabling auto-bus-on
+	/* INFO: CAN net stack handles bus off and hence disabling auto-bus-on
 	 * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO);
 	 */
 
-	/*
-	 * INFO: It has been observed that at times CCE bit may not be
+	/* INFO: It has been observed that at times CCE bit may not be
 	 * set and hw seems to be ok even if this bit is not set so
 	 */
 	cnt = HECC_CCE_WAIT_COUNT;
@@ -374,8 +364,8 @@ static void ti_hecc_start(struct net_device *ndev)
 	/* put HECC in initialization mode and set btc */
 	ti_hecc_reset(ndev);
 
-	priv->tx_head = priv->tx_tail = HECC_TX_MASK;
-	priv->rx_next = HECC_RX_FIRST_MBOX;
+	priv->tx_head = HECC_TX_MASK;
+	priv->tx_tail = HECC_TX_MASK;
 
 	/* Enable local and global acceptance mask registers */
 	hecc_write(priv, HECC_CANGAM, HECC_SET_REG);
@@ -401,7 +391,7 @@ static void ti_hecc_start(struct net_device *ndev)
 	} else {
 		hecc_write(priv, HECC_CANMIL, 0);
 		hecc_write(priv, HECC_CANGIM,
-			HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
+			   HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
 	}
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 }
@@ -435,7 +425,7 @@ static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode)
 }
 
 static int ti_hecc_get_berr_counter(const struct net_device *ndev,
-					struct can_berr_counter *bec)
+				    struct can_berr_counter *bec)
 {
 	struct ti_hecc_priv *priv = netdev_priv(ndev);
 
@@ -445,8 +435,7 @@ static int ti_hecc_get_berr_counter(const struct net_device *ndev,
 	return 0;
 }
 
-/*
- * ti_hecc_xmit: HECC Transmit
+/* ti_hecc_xmit: HECC Transmit
  *
  * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
  * priority of the mailbox for tranmission is dependent upon priority setting
@@ -484,8 +473,8 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
 		spin_unlock_irqrestore(&priv->mbx_lock, flags);
 		netif_stop_queue(ndev);
 		netdev_err(priv->ndev,
-			"BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
-			priv->tx_head, priv->tx_tail);
+			   "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
+			   priv->tx_head, priv->tx_tail);
 		return NETDEV_TX_BUSY;
 	}
 	spin_unlock_irqrestore(&priv->mbx_lock, flags);
@@ -502,10 +491,10 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
 		data = (cf->can_id & CAN_SFF_MASK) << 18;
 	hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
 	hecc_write_mbx(priv, mbxno, HECC_CANMDL,
-		be32_to_cpu(*(__be32 *)(cf->data)));
+		       be32_to_cpu(*(__be32 *)(cf->data)));
 	if (cf->can_dlc > 4)
 		hecc_write_mbx(priv, mbxno, HECC_CANMDH,
-			be32_to_cpu(*(__be32 *)(cf->data + 4)));
+			       be32_to_cpu(*(__be32 *)(cf->data + 4)));
 	else
 		*(u32 *)(cf->data + 4) = 0;
 	can_put_echo_skb(skb, ndev, mbxno);
@@ -513,7 +502,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
 	spin_lock_irqsave(&priv->mbx_lock, flags);
 	--priv->tx_head;
 	if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
-		(priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
+	    (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
 		netif_stop_queue(ndev);
 	}
 	hecc_set_bit(priv, HECC_CANME, mbx_mask);
@@ -526,139 +515,57 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
-static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
+static inline
+struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
 {
-	struct net_device_stats *stats = &priv->ndev->stats;
-	struct can_frame *cf;
-	struct sk_buff *skb;
-	u32 data, mbx_mask;
-	unsigned long flags;
+	return container_of(offload, struct ti_hecc_priv, offload);
+}
 
-	skb = alloc_can_skb(priv->ndev, &cf);
-	if (!skb) {
-		if (printk_ratelimit())
-			netdev_err(priv->ndev,
-				"ti_hecc_rx_pkt: alloc_can_skb() failed\n");
-		return -ENOMEM;
-	}
+static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
+					 struct can_frame *cf,
+					 u32 *timestamp, unsigned int mbxno)
+{
+	struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
+	u32 data;
 
-	mbx_mask = BIT(mbxno);
 	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
 	if (data & HECC_CANMID_IDE)
 		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
 	else
 		cf->can_id = (data >> 18) & CAN_SFF_MASK;
+
 	data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
 	if (data & HECC_CANMCF_RTR)
 		cf->can_id |= CAN_RTR_FLAG;
 	cf->can_dlc = get_can_dlc(data & 0xF);
+
 	data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
 	*(__be32 *)(cf->data) = cpu_to_be32(data);
 	if (cf->can_dlc > 4) {
 		data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
 		*(__be32 *)(cf->data + 4) = cpu_to_be32(data);
 	}
-	spin_lock_irqsave(&priv->mbx_lock, flags);
-	hecc_clear_bit(priv, HECC_CANME, mbx_mask);
-	hecc_write(priv, HECC_CANRMP, mbx_mask);
-	/* enable mailbox only if it is part of rx buffer mailboxes */
-	if (priv->rx_next < HECC_RX_BUFFER_MBOX)
-		hecc_set_bit(priv, HECC_CANME, mbx_mask);
-	spin_unlock_irqrestore(&priv->mbx_lock, flags);
 
-	stats->rx_bytes += cf->can_dlc;
-	can_led_event(priv->ndev, CAN_LED_EVENT_RX);
-	netif_receive_skb(skb);
-	stats->rx_packets++;
+	*timestamp = hecc_read_stamp(priv, mbxno);
 
-	return 0;
-}
-
-/*
- * ti_hecc_rx_poll - HECC receive pkts
- *
- * The receive mailboxes start from highest numbered mailbox till last xmit
- * mailbox. On CAN frame reception the hardware places the data into highest
- * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes
- * have same filtering (ALL CAN frames) packets will arrive in the highest
- * available RX mailbox and we need to ensure in-order packet reception.
- *
- * To ensure the packets are received in the right order we logically divide
- * the RX mailboxes into main and buffer mailboxes. Packets are received as per
- * mailbox priotity (higher to lower) in the main bank and once it is full we
- * disable further reception into main mailboxes. While the main mailboxes are
- * processed in NAPI, further packets are received in buffer mailboxes.
- *
- * We maintain a RX next mailbox counter to process packets and once all main
- * mailboxe packets are passed to the upper stack we enable all of them but
- * continue to process packets received in buffer mailboxes. With each packet
- * received from buffer mailbox we enable it immediately so as to handle the
- * overflow from higher mailboxes.
- */
-static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
-{
-	struct net_device *ndev = napi->dev;
-	struct ti_hecc_priv *priv = netdev_priv(ndev);
-	u32 num_pkts = 0;
-	u32 mbx_mask;
-	unsigned long pending_pkts, flags;
-
-	if (!netif_running(ndev))
-		return 0;
-
-	while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) &&
-		num_pkts < quota) {
-		mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */
-		if (mbx_mask & pending_pkts) {
-			if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0)
-				return num_pkts;
-			++num_pkts;
-		} else if (priv->rx_next > HECC_RX_BUFFER_MBOX) {
-			break; /* pkt not received yet */
-		}
-		--priv->rx_next;
-		if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
-			/* enable high bank mailboxes */
-			spin_lock_irqsave(&priv->mbx_lock, flags);
-			mbx_mask = hecc_read(priv, HECC_CANME);
-			mbx_mask |= HECC_RX_HIGH_MBOX_MASK;
-			hecc_write(priv, HECC_CANME, mbx_mask);
-			spin_unlock_irqrestore(&priv->mbx_lock, flags);
-		} else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) {
-			priv->rx_next = HECC_RX_FIRST_MBOX;
-			break;
-		}
-	}
-
-	/* Enable packet interrupt if all pkts are handled */
-	if (hecc_read(priv, HECC_CANRMP) == 0) {
-		napi_complete(napi);
-		/* Re-enable RX mailbox interrupts */
-		mbx_mask = hecc_read(priv, HECC_CANMIM);
-		mbx_mask |= HECC_TX_MBOX_MASK;
-		hecc_write(priv, HECC_CANMIM, mbx_mask);
-	} else {
-		/* repoll is done only if whole budget is used */
-		num_pkts = quota;
-	}
-
-	return num_pkts;
+	return 1;
 }
 
 static int ti_hecc_error(struct net_device *ndev, int int_status,
-		int err_status)
+			 int err_status)
 {
 	struct ti_hecc_priv *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &ndev->stats;
 	struct can_frame *cf;
 	struct sk_buff *skb;
+	u32 timestamp;
 
 	/* propagate the error condition to the can stack */
 	skb = alloc_can_err_skb(ndev, &cf);
 	if (!skb) {
 		if (printk_ratelimit())
 			netdev_err(priv->ndev,
-				"ti_hecc_error: alloc_can_err_skb() failed\n");
+				   "%s: alloc_can_err_skb() failed\n",
+				   __func__);
 		return -ENOMEM;
 	}
 
@@ -692,8 +599,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
 		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
 	}
 
-	/*
-	 * Need to check busoff condition in error status register too to
+	/* Need to check busoff condition in error status register too to
 	 * ensure warning interrupts don't hog the system
 	 */
 	if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
@@ -732,9 +638,8 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
 		}
 	}
 
-	stats->rx_packets++;
-	stats->rx_bytes += cf->can_dlc;
-	netif_rx(skb);
+	timestamp = hecc_read(priv, HECC_CANLNT);
+	can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
 
 	return 0;
 }
@@ -744,19 +649,20 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
 	struct net_device *ndev = (struct net_device *)dev_id;
 	struct ti_hecc_priv *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &ndev->stats;
-	u32 mbxno, mbx_mask, int_status, err_status;
-	unsigned long ack, flags;
+	u32 mbxno, mbx_mask, int_status, err_status, stamp;
+	unsigned long flags, rx_pending;
 
 	int_status = hecc_read(priv,
-		(priv->use_hecc1int) ? HECC_CANGIF1 : HECC_CANGIF0);
+			       priv->use_hecc1int ?
+			       HECC_CANGIF1 : HECC_CANGIF0);
 
 	if (!int_status)
 		return IRQ_NONE;
 
 	err_status = hecc_read(priv, HECC_CANES);
 	if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
-		HECC_CANES_EP | HECC_CANES_EW))
-			ti_hecc_error(ndev, int_status, err_status);
+			  HECC_CANES_EP | HECC_CANES_EW))
+		ti_hecc_error(ndev, int_status, err_status);
 
 	if (int_status & HECC_CANGIF_GMIF) {
 		while (priv->tx_tail - priv->tx_head > 0) {
@@ -769,27 +675,27 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
 			spin_lock_irqsave(&priv->mbx_lock, flags);
 			hecc_clear_bit(priv, HECC_CANME, mbx_mask);
 			spin_unlock_irqrestore(&priv->mbx_lock, flags);
-			stats->tx_bytes += hecc_read_mbx(priv, mbxno,
-						HECC_CANMCF) & 0xF;
+			stamp = hecc_read_stamp(priv, mbxno);
+			stats->tx_bytes +=
+				can_rx_offload_get_echo_skb(&priv->offload,
+							    mbxno, stamp);
 			stats->tx_packets++;
 			can_led_event(ndev, CAN_LED_EVENT_TX);
-			can_get_echo_skb(ndev, mbxno);
 			--priv->tx_tail;
 		}
 
 		/* restart queue if wrap-up or if queue stalled on last pkt */
-		if (((priv->tx_head == priv->tx_tail) &&
-		((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
-		(((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
-		((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
+		if ((priv->tx_head == priv->tx_tail &&
+		     ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
+		    (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
+		     ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
 			netif_wake_queue(ndev);
 
-		/* Disable RX mailbox interrupts and let NAPI reenable them */
-		if (hecc_read(priv, HECC_CANRMP)) {
-			ack = hecc_read(priv, HECC_CANMIM);
-			ack &= BIT(HECC_MAX_TX_MBOX) - 1;
-			hecc_write(priv, HECC_CANMIM, ack);
-			napi_schedule(&priv->napi);
+		/* offload RX mailboxes and let NAPI deliver them */
+		while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
+			can_rx_offload_irq_offload_timestamp(&priv->offload,
+							     rx_pending);
+			hecc_write(priv, HECC_CANRMP, rx_pending);
 		}
 	}
 
@@ -811,7 +717,7 @@ static int ti_hecc_open(struct net_device *ndev)
 	int err;
 
 	err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
-			ndev->name, ndev);
+			  ndev->name, ndev);
 	if (err) {
 		netdev_err(ndev, "error requesting interrupt\n");
 		return err;
@@ -831,7 +737,7 @@ static int ti_hecc_open(struct net_device *ndev)
 	can_led_event(ndev, CAN_LED_EVENT_OPEN);
 
 	ti_hecc_start(ndev);
-	napi_enable(&priv->napi);
+	can_rx_offload_enable(&priv->offload);
 	netif_start_queue(ndev);
 
 	return 0;
@@ -842,7 +748,7 @@ static int ti_hecc_close(struct net_device *ndev)
 	struct ti_hecc_priv *priv = netdev_priv(ndev);
 
 	netif_stop_queue(ndev);
-	napi_disable(&priv->napi);
+	can_rx_offload_disable(&priv->offload);
 	ti_hecc_stop(ndev);
 	free_irq(ndev->irq, ndev);
 	close_candev(ndev);
@@ -962,8 +868,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
 		goto probe_exit_candev;
 	}
 	priv->can.clock.freq = clk_get_rate(priv->clk);
-	netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
-		HECC_DEF_NAPI_WEIGHT);
 
 	err = clk_prepare_enable(priv->clk);
 	if (err) {
@@ -971,19 +875,30 @@ static int ti_hecc_probe(struct platform_device *pdev)
 		goto probe_exit_clk;
 	}
 
+	priv->offload.mailbox_read = ti_hecc_mailbox_read;
+	priv->offload.mb_first = HECC_RX_FIRST_MBOX;
+	priv->offload.mb_last = HECC_MAX_TX_MBOX;
+	err = can_rx_offload_add_timestamp(ndev, &priv->offload);
+	if (err) {
+		dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
+		goto probe_exit_clk;
+	}
+
 	err = register_candev(ndev);
 	if (err) {
 		dev_err(&pdev->dev, "register_candev() failed\n");
-		goto probe_exit_clk;
+		goto probe_exit_offload;
	}
 
 	devm_can_led_init(ndev);
 
 	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
-		priv->base, (u32) ndev->irq);
+		 priv->base, (u32)ndev->irq);
 
 	return 0;
 
+probe_exit_offload:
+	can_rx_offload_del(&priv->offload);
 probe_exit_clk:
 	clk_put(priv->clk);
 probe_exit_candev:
@@ -1000,6 +915,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
 	unregister_candev(ndev);
 	clk_disable_unprepare(priv->clk);
 	clk_put(priv->clk);
+	can_rx_offload_del(&priv->offload);
 	free_candev(ndev);
 
 	return 0;
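
For readers unfamiliar with the rx-offload helper, the conversion above boils down to three integration points: a mailbox_read callback that reports each frame together with its hardware timestamp, a probe-time can_rx_offload_add_timestamp() registration with mb_first/mb_last bounds, and a call to can_rx_offload_irq_offload_timestamp() from the interrupt handler. The sketch below illustrates that wiring for a hypothetical "foo" driver; the foo_* names and register accessors are invented for illustration, while the can_rx_offload_* calls, the callback signature and the mb_first/mb_last values mirror what this patch itself uses (linux/can/rx-offload.h as of this change). Treat it as a sketch under those assumptions, not a reference implementation.

/* Sketch only: "foo_*" helpers are hypothetical; the can_rx_offload_*
 * calls are the ones used by the ti_hecc conversion above.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct foo_priv {
	struct can_priv can;		/* must stay the first member */
	struct can_rx_offload offload;
};

/* Hypothetical hardware accessors, not part of any real API. */
void foo_read_frame(struct foo_priv *priv, unsigned int mbxno, struct can_frame *cf);
u32 foo_read_timestamp(struct foo_priv *priv, unsigned int mbxno);
u32 foo_read_rx_pending(struct foo_priv *priv);
void foo_ack_rx_pending(struct foo_priv *priv, u32 pending);

/* Called by the rx-offload core for each pending mailbox: fill @cf and
 * report the frame's hardware timestamp so frames can be sorted.
 */
static unsigned int foo_mailbox_read(struct can_rx_offload *offload,
				     struct can_frame *cf,
				     u32 *timestamp, unsigned int mbxno)
{
	struct foo_priv *priv = container_of(offload, struct foo_priv, offload);

	foo_read_frame(priv, mbxno, cf);
	*timestamp = foo_read_timestamp(priv, mbxno);

	return 1;	/* one frame consumed from this mailbox */
}

static int foo_setup_rx_offload(struct net_device *ndev)
{
	struct foo_priv *priv = netdev_priv(ndev);

	/* Mailboxes mb_first down to mb_last are scanned on each interrupt
	 * and handed to the stack in timestamp order by NAPI.  The bounds
	 * are modeled on HECC_RX_FIRST_MBOX / HECC_MAX_TX_MBOX above.
	 */
	priv->offload.mailbox_read = foo_mailbox_read;
	priv->offload.mb_first = 31;	/* highest RX mailbox */
	priv->offload.mb_last = 16;	/* lowest RX mailbox */

	return can_rx_offload_add_timestamp(ndev, &priv->offload);
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct foo_priv *priv = netdev_priv(ndev);
	u32 rx_pending;

	/* Queue every pending RX mailbox by timestamp, then acknowledge it.
	 * The mailboxes stay enabled, which is what avoids the CANME race
	 * mentioned in the comment added by this patch.
	 */
	while ((rx_pending = foo_read_rx_pending(priv))) {
		can_rx_offload_irq_offload_timestamp(&priv->offload,
						     rx_pending);
		foo_ack_rx_pending(priv, rx_pending);
	}

	return IRQ_HANDLED;
}

A usage note on the design: because frames are pulled from the mailboxes in interrupt context and sorted by the hardware timestamp before delivery, in-order reception no longer depends on disabling and re-enabling mailboxes via CANME, which is exactly the buffer/main-mailbox juggling the removed ti_hecc_rx_poll() implemented.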