author | K. Y. Srinivasan <kys@microsoft.com> | 2016-01-28 07:29:45 +0100
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2016-02-08 06:34:12 +0100
commit | fe760e4d64fe5c17c39e86c410d41f6587ee88bc (patch)
tree | e25710bde0e1e4da9a127a3e6c449c08696c1940 /drivers/hv/channel.c
parent | Drivers: hv: vmbus: Eliminate the spin lock on the read path (diff)
download | linux-fe760e4d64fe5c17c39e86c410d41f6587ee88bc.tar.xz linux-fe760e4d64fe5c17c39e86c410d41f6587ee88bc.zip
Drivers: hv: vmbus: Give control over how the ring access is serialized
On the channel send side, many of the VMBUS
device drivers explicitly serialize access to the
outgoing ring buffer. Give the VMBUS device
drivers more control over how access to the
outgoing ring buffer is serialized.
The default behavior is to acquire the ring
lock, which preserves the current behavior.
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
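For illustration, a driver that already guarantees single-threaded access to its outgoing ring buffer could opt out of the default per-send ring-lock acquisition by clearing the new acquire_ring_lock flag before opening the channel. The sketch below is not part of this patch: example_probe(), example_onchannelcallback() and the ring sizes are hypothetical; only the acquire_ring_lock field comes from this series.

#include <linux/hyperv.h>

/*
 * Sketch only, not part of this patch: a VMBUS device driver that
 * serializes its own sends can skip the default per-send ring lock
 * by clearing acquire_ring_lock before the channel is opened.
 */
static void example_onchannelcallback(void *context)
{
	/* hypothetical receive handler */
}

static int example_probe(struct hv_device *dev)
{
	struct vmbus_channel *channel = dev->channel;

	/* This driver does its own serialization of sends. */
	channel->acquire_ring_lock = false;

	return vmbus_open(channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
			  NULL, 0, example_onchannelcallback, dev);
}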
Diffstat (limited to 'drivers/hv/channel.c')
-rw-r--r-- | drivers/hv/channel.c | 15
1 file changed, 11 insertions, 4 deletions
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index fcab234796ef..56dd261f7142 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -639,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	u64 aligned_data = 0;
 	int ret;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 	int num_vecs = ((bufferlen != 0) ? 3 : 1);
@@ -658,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
-				  &signal);
+				  &signal, lock);
 	/*
 	 * Signalling the host is conditional on many factors:
@@ -738,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
@@ -774,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);
 	/*
 	 * Signalling the host is conditional on many factors:
@@ -837,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 	packetlen = desc_size + bufferlen;
 	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -856,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);
 	if (ret == 0 && signal)
 		vmbus_setevent(channel);
@@ -881,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
@@ -919,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);
 	if (ret == 0 && signal)
 		vmbus_setevent(channel);
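For context, the new lock argument is meant to make the ring-lock acquisition inside hv_ringbuffer_write() conditional. The writer itself lives in drivers/hv/ring_buffer.c and is not part of this diff; the fragment below is only a sketch of the intended shape, with the actual ring copy elided and field names assumed.

/*
 * Sketch of the writer side (drivers/hv/ring_buffer.c is not shown in
 * this diff); the body is elided and the exact field names may differ.
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count,
			bool *signal, bool lock)
{
	unsigned long flags = 0;
	int ret;

	/* Only take the ring lock when the channel asked for it. */
	if (lock)
		spin_lock_irqsave(&outring_info->ring_lock, flags);

	/*
	 * ... check free space, copy kv_list into the ring, advance the
	 * write index, and set *signal if the host must be notified ...
	 */
	ret = 0;

	if (lock)
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	return ret;
}

With this shape, a channel that leaves acquire_ring_lock at its default keeps the existing spin-lock behavior, while a driver that serializes sends itself avoids taking the lock on every packet.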