Diffstat (limited to 'drivers/net/ethernet/cavium/thunder/nicvf_queues.c')
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c  211
1 file changed, 65 insertions(+), 146 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 206b6a71a545..d0d1b5490061 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -18,14 +18,6 @@
#include "q_struct.h"
#include "nicvf_queues.h"
-struct rbuf_info {
- struct page *page;
- void *data;
- u64 offset;
-};
-
-#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
-
/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
u64 reg, int bit_pos, int bits, int val)
@@ -86,8 +78,6 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
u32 buf_len, u64 **rbuf)
{
- u64 data;
- struct rbuf_info *rinfo;
int order = get_order(buf_len);
/* Check if request can be accomodated in previous allocated page */
@@ -113,46 +103,28 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
nic->rb_page_offset = 0;
}
- data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
-
- /* Align buffer addr to cache line i.e 128 bytes */
- rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
- /* Save page address for reference updation */
- rinfo->page = nic->rb_page;
- /* Store start address for later retrieval */
- rinfo->data = (void *)data;
- /* Store alignment offset */
- rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
+ *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
- data += rinfo->offset;
-
- /* Give next aligned address to hw for DMA */
- *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
return 0;
}
-/* Retrieve actual buffer start address and build skb for received packet */
+/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
u64 rb_ptr, int len)
{
+ void *data;
struct sk_buff *skb;
- struct rbuf_info *rinfo;
- rb_ptr = (u64)phys_to_virt(rb_ptr);
- /* Get buffer start address and alignment offset */
- rinfo = GET_RBUF_INFO(rb_ptr);
+ data = phys_to_virt(rb_ptr);
/* Now build an skb to give to stack */
- skb = build_skb(rinfo->data, RCV_FRAG_LEN);
+ skb = build_skb(data, RCV_FRAG_LEN);
if (!skb) {
- put_page(rinfo->page);
+ put_page(virt_to_page(data));
return NULL;
}
- /* Set correct skb->data */
- skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
-
- prefetch((void *)rb_ptr);
+ prefetch(skb->data);
return skb;
}
@@ -196,7 +168,6 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
int head, tail;
u64 buf_addr;
struct rbdr_entry_t *desc;
- struct rbuf_info *rinfo;
if (!rbdr)
return;
@@ -212,16 +183,14 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
while (head != tail) {
desc = GET_RBDR_DESC(rbdr, head);
buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
- rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
- put_page(rinfo->page);
+ put_page(virt_to_page(phys_to_virt(buf_addr)));
head++;
head &= (rbdr->dmem.q_len - 1);
}
/* Free SKB of tail desc */
desc = GET_RBDR_DESC(rbdr, tail);
buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
- rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
- put_page(rinfo->page);
+ put_page(virt_to_page(phys_to_virt(buf_addr)));
/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
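With rbuf_info gone, the receive-buffer lifecycle in the hunks above reduces to plain page arithmetic: a buffer is carved out of a page at an offset, the raw address is handed to hardware, and on the receive and free paths the page is recovered straight from that address with virt_to_page(). A minimal sketch of the same lifecycle, using illustrative example_* names and a stand-in fragment length instead of the driver's RCV_FRAG_LEN; DMA mapping and the per-buffer page references taken by the real driver are omitted:

#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

#define EXAMPLE_FRAG_LEN	2048	/* stand-in for the driver's RCV_FRAG_LEN */

/* Carve the next buffer out of a cached page, opening a new page when
 * the current one is exhausted. */
static void *example_alloc_rcv_buffer(struct page **pagep, u32 *offsetp)
{
	void *buf;

	if (!*pagep || *offsetp + EXAMPLE_FRAG_LEN > PAGE_SIZE) {
		*pagep = alloc_pages(GFP_ATOMIC | __GFP_COMP,
				     get_order(EXAMPLE_FRAG_LEN));
		if (!*pagep)
			return NULL;
		*offsetp = 0;
	}
	buf = page_address(*pagep) + *offsetp;
	*offsetp += EXAMPLE_FRAG_LEN;
	return buf;
}

/* On receive, the kernel virtual address alone is enough both to build
 * the skb and to drop the page reference if build_skb() fails. */
static struct sk_buff *example_rb_to_skb(u64 rb_phys_addr, int len)
{
	void *data = phys_to_virt(rb_phys_addr);
	struct sk_buff *skb = build_skb(data, EXAMPLE_FRAG_LEN);

	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}
	skb_put(skb, len);
	return skb;
}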
@@ -330,7 +299,7 @@ static int nicvf_init_cmp_queue(struct nicvf *nic,
return err;
cq->desc = cq->dmem.base;
- cq->thresh = CMP_QUEUE_CQE_THRESH;
+ cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
return 0;
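The threshold change above is gated on a silicon-revision check. A hedged sketch of the shape of that check, with an example_ prefix because the revision cut-off is an assumption made for illustration and is not taken from this patch:

#include <linux/pci.h>

/* Assumed shape of the pass-1 check: key off the PCI revision ID. */
static inline bool example_pass1_silicon(struct pci_dev *pdev)
{
	return pdev->revision < 8;
}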
@@ -956,7 +925,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
subdesc_cnt = nicvf_tso_count_subdescs(skb);
return subdesc_cnt;
}
@@ -971,7 +940,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
* First subdescriptor for every send descriptor.
*/
static inline void
-nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
int subdesc_cnt, struct sk_buff *skb, int len)
{
int proto;
@@ -1007,6 +976,15 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
break;
}
}
+
+ if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
+ hdr->tso = 1;
+ hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
+ /* For non-tunneled pkts, point this to L2 ethertype */
+ hdr->inner_l3_offset = skb_network_offset(skb) - 2;
+ nic->drv_stats.tx_tso++;
+ }
}
/* SQ GATHER subdescriptor
@@ -1076,7 +1054,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
data_left -= size;
tso_build_data(skb, &tso, size);
}
- nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+ nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
seg_subdescs - 1, skb, seg_len);
sq->skbuff[hdr_qentry] = (u64)NULL;
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1129,11 +1107,12 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
/* Check if its a TSO packet */
- if (skb_shinfo(skb)->gso_size)
+ if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
/* Add SQ header subdesc */
- nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+ nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
+ skb, skb->len);
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
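The hw_tso changes above split the transmit path in two: GSO skbs still go through the software tso_build_hdr()/tso_build_data() segmentation when hardware TSO is absent, while hardware-TSO-capable parts take a single header subdescriptor that carries the segmentation parameters. A hedged sketch of that decision, assuming the header subdescriptor type is struct sq_hdr_subdesc from q_struct.h and that struct nicvf comes from the driver's local nic.h (example_* names are illustrative only):

#include <linux/skbuff.h>
#include <linux/tcp.h>

#include "nic.h"
#include "q_struct.h"

/* True when the skb must still take the legacy software TSO path */
static inline bool example_needs_sw_tso(struct nicvf *nic, struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size && !nic->hw_tso;
}

/* Fill the TSO fields of the header subdescriptor for the hardware path,
 * mirroring the hunk added to nicvf_sq_add_hdr_subdesc() above. */
static void example_fill_hw_tso(struct nicvf *nic, struct sq_hdr_subdesc *hdr,
				struct sk_buff *skb)
{
	hdr->tso = 1;
	/* TSO payload begins right after the TCP header */
	hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
	hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
	/* Non-tunneled packet: point at the L2 ethertype */
	hdr->inner_l3_offset = skb_network_offset(skb) - 2;
	nic->drv_stats.tx_tso++;
}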
@@ -1234,153 +1213,93 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
return skb;
}
-/* Enable interrupt */
-void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
u64 reg_val;
- reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
-
switch (int_type) {
case NICVF_INTR_CQ:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
break;
case NICVF_INTR_SQ:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
break;
case NICVF_INTR_RBDR:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
break;
case NICVF_INTR_PKT_DROP:
- reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
break;
case NICVF_INTR_TCP_TIMER:
- reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
break;
case NICVF_INTR_MBOX:
- reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+ reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
break;
case NICVF_INTR_QS_ERR:
- reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
break;
default:
- netdev_err(nic->netdev,
- "Failed to enable interrupt: unknown type\n");
- break;
+ reg_val = 0;
}
- nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
+ return reg_val;
+}
+
+/* Enable interrupt */
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
+
+ if (!mask) {
+ netdev_dbg(nic->netdev,
+ "Failed to enable interrupt: unknown type\n");
+ return;
+ }
+ nicvf_reg_write(nic, NIC_VF_ENA_W1S,
+ nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}
/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
- u64 reg_val = 0;
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
- switch (int_type) {
- case NICVF_INTR_CQ:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
- break;
- case NICVF_INTR_SQ:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
- break;
- case NICVF_INTR_RBDR:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
- break;
- case NICVF_INTR_PKT_DROP:
- reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
- break;
- case NICVF_INTR_TCP_TIMER:
- reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
- break;
- case NICVF_INTR_MBOX:
- reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
- break;
- case NICVF_INTR_QS_ERR:
- reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
- break;
- default:
- netdev_err(nic->netdev,
+ if (!mask) {
+ netdev_dbg(nic->netdev,
"Failed to disable interrupt: unknown type\n");
- break;
+ return;
}
- nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
+ nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}
/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
- u64 reg_val = 0;
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
- switch (int_type) {
- case NICVF_INTR_CQ:
- reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
- break;
- case NICVF_INTR_SQ:
- reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
- break;
- case NICVF_INTR_RBDR:
- reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
- break;
- case NICVF_INTR_PKT_DROP:
- reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
- break;
- case NICVF_INTR_TCP_TIMER:
- reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
- break;
- case NICVF_INTR_MBOX:
- reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
- break;
- case NICVF_INTR_QS_ERR:
- reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
- break;
- default:
- netdev_err(nic->netdev,
+ if (!mask) {
+ netdev_dbg(nic->netdev,
"Failed to clear interrupt: unknown type\n");
- break;
+ return;
}
- nicvf_reg_write(nic, NIC_VF_INT, reg_val);
+ nicvf_reg_write(nic, NIC_VF_INT, mask);
}
/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
- u64 reg_val;
- u64 mask = 0xff;
-
- reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
-
- switch (int_type) {
- case NICVF_INTR_CQ:
- mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
- break;
- case NICVF_INTR_SQ:
- mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
- break;
- case NICVF_INTR_RBDR:
- mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
- break;
- case NICVF_INTR_PKT_DROP:
- mask = NICVF_INTR_PKT_DROP_MASK;
- break;
- case NICVF_INTR_TCP_TIMER:
- mask = NICVF_INTR_TCP_TIMER_MASK;
- break;
- case NICVF_INTR_MBOX:
- mask = NICVF_INTR_MBOX_MASK;
- break;
- case NICVF_INTR_QS_ERR:
- mask = NICVF_INTR_QS_ERR_MASK;
- break;
- default:
- netdev_err(nic->netdev,
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
+ /* If interrupt type is unknown, we treat it disabled. */
+ if (!mask) {
+ netdev_dbg(nic->netdev,
"Failed to check interrupt enable: unknown type\n");
- break;
+ return 0;
}
- return (reg_val & mask);
+ return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
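With the per-type switch folded into nicvf_int_type_to_mask(), the four public helpers keep their signatures, so existing callers are unchanged. A hedged usage sketch of a typical completion-queue interrupt cycle; the surrounding function is illustrative, only the nicvf_*_intr() helpers and NICVF_INTR_CQ come from the driver:

#include "nic.h"
#include "nicvf_queues.h"

/* Illustrative only: mask, drain, acknowledge, then re-arm a CQ interrupt */
static void example_cq_intr_cycle(struct nicvf *nic, int cq_idx)
{
	/* Mask further CQ interrupts while completions are processed */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, cq_idx);

	/* ... process completion-queue entries for cq_idx ... */

	/* Clear the pending bit, then re-arm the interrupt if it is masked */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq_idx);
	if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, cq_idx))
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq_idx);
}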