diff options
author | Raghu Vatsavayi <rvatsavayi@caviumnetworks.com> | 2016-07-03 22:56:49 +0200 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-07-05 01:15:31 +0200 |
commit | 14866ccd8fbb9b1ace953a7fad719b050763426a (patch) | |
tree | 92a013f24ff04b505839bcae7aa205be48aa317a /drivers | |
parent | liquidio: Macro replacements (diff) | |
download | linux-14866ccd8fbb9b1ace953a7fad719b050763426a.tar.xz linux-14866ccd8fbb9b1ace953a7fad719b050763426a.zip |
liquidio: IQ synchronization
This patch protects against bh preemption when accessing
sc_buf_pool. It also modifies the synchronization primitives
during input queue processing.
Signed-off-by: Derek Chickles <derek.chickles@caviumnetworks.com>
Signed-off-by: Satanand Burla <satananda.burla@caviumnetworks.com>
Signed-off-by: Felix Manlunas <felix.manlunas@caviumnetworks.com>
Signed-off-by: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/ethernet/cavium/liquidio/request_manager.c | 27 |
1 file changed, 16 insertions, 11 deletions
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 5e2211fdb4bc..ef0bdd869688 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -360,6 +360,7 @@ lio_process_iq_request_list(struct octeon_device *oct, unsigned int pkts_compl = 0, bytes_compl = 0; struct octeon_soft_command *sc; struct octeon_instr_irh *irh; + unsigned long flags; while (old != iq->octeon_read_index) { reqtype = iq->request_list[old].reqtype; @@ -389,15 +390,19 @@ lio_process_iq_request_list(struct octeon_device *oct, * command response list because we expect * a response from Octeon. */ - spin_lock_bh(&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock); + spin_lock_irqsave + (&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, + flags); atomic_inc(&oct->response_list [OCTEON_ORDERED_SC_LIST]. pending_req_count); list_add_tail(&sc->node, &oct->response_list [OCTEON_ORDERED_SC_LIST].head); - spin_unlock_bh(&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock); + spin_unlock_irqrestore + (&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, + flags); } else { if (sc->callback) { sc->callback(oct, OCTEON_REQUEST_DONE, @@ -674,7 +679,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct) struct list_head *tmp, *tmp2; struct octeon_soft_command *sc; - spin_lock(&oct->sc_buf_pool.lock); + spin_lock_bh(&oct->sc_buf_pool.lock); list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) { list_del(tmp); @@ -686,7 +691,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct) INIT_LIST_HEAD(&oct->sc_buf_pool.head); - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); return 0; } @@ -705,10 +710,10 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, WARN_ON((offset + datasize + rdatasize + ctxsize) > SOFT_COMMAND_BUFFER_SIZE); - spin_lock(&oct->sc_buf_pool.lock); + 
spin_lock_bh(&oct->sc_buf_pool.lock); if (list_empty(&oct->sc_buf_pool.head)) { - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); return NULL; } @@ -719,7 +724,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, atomic_inc(&oct->sc_buf_pool.alloc_buf_count); - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); sc = (struct octeon_soft_command *)tmp; @@ -762,11 +767,11 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, void octeon_free_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc) { - spin_lock(&oct->sc_buf_pool.lock); + spin_lock_bh(&oct->sc_buf_pool.lock); list_add_tail(&sc->node, &oct->sc_buf_pool.head); atomic_dec(&oct->sc_buf_pool.alloc_buf_count); - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); } |