author    K. Y. Srinivasan <kys@microsoft.com>    2016-11-06 22:14:17 +0100
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2016-11-07 10:01:17 +0100
commit    1f6ee4e7d83586c8b10bd4f2f4346353d04ce884
tree      0a81141b2e8dde8f97e099eab3ef3663d333657d
parent    Drivers: hv: vmbus: Base host signaling strictly on the ring state
Drivers: hv: vmbus: On write cleanup the logic to interrupt the host
Signal the host when we determine the host is to be signaled. The current code determines the need to signal in the ring buffer code but actually issues the signal elsewhere. This can result in the host viewing the interrupt as spurious, since the host may also poll the channel. Make the necessary adjustments.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
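For context, a hedged sketch of what this change means for a sender follows. The wrapper functions below are illustrative only; their names and the lock/kick_q values passed are assumptions, not taken from this patch. Before the change, hv_ringbuffer_write() merely reported the need to signal through a bool and the caller raised the interrupt afterwards; by then the host may already have polled and drained the ring, so the interrupt looks spurious. After the change, hv_signal_on_write() issues vmbus_setevent() inside the write path, at the moment the empty-to-non-empty transition is observed.

/* Illustrative callers only -- not part of this diff. */

/* Old flow: the decision to signal and the signal itself are split. */
static int sketch_send_old(struct vmbus_channel *channel,
			   struct kvec *kv_list, u32 kv_count,
			   enum hv_signal_policy policy)
{
	bool signal = false;
	int ret;

	ret = hv_ringbuffer_write(&channel->outbound, kv_list, kv_count,
				  &signal, true, policy);
	/*
	 * Window of trouble: the host may poll and empty the ring between
	 * the write above and the interrupt below, so the host can treat
	 * the interrupt as spurious.
	 */
	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;
}

/*
 * New flow: hv_ringbuffer_write() -> hv_signal_on_write() calls
 * vmbus_setevent() itself, exactly when old_write == read_index,
 * i.e. when the ring went from empty to non-empty.
 */
static int sketch_send_new(struct vmbus_channel *channel,
			   struct kvec *kv_list, u32 kv_count)
{
	return hv_ringbuffer_write(channel, kv_list, kv_count,
				   true /* lock */, true /* kick_q */);
}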
Diffstat (limited to 'drivers/hv/ring_buffer.c')
-rw-r--r--  drivers/hv/ring_buffer.c  30
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 5d11d93eedf4..4af71306d0ff 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -66,14 +66,25 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
* once the ring buffer is empty, it will clear the
* interrupt_mask and re-check to see if new data has
* arrived.
+ *
+ * KYS: Oct. 30, 2016:
+ * It looks like Windows hosts have logic to deal with DOS attacks that
+ * can be triggered if it receives interrupts when it is not expecting
+ * the interrupt. The host expects interrupts only when the ring
+ * transitions from empty to non-empty (or full to non full on the guest
+ * to host ring).
+ * So, base the signaling decision solely on the ring state until the
+ * host logic is fixed.
*/
-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
- enum hv_signal_policy policy)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
+ bool kick_q)
{
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+
virt_mb();
if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
- return false;
+ return;
/* check interrupt_mask before read_index */
virt_rmb();
@@ -82,9 +93,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
* ring transitions from being empty to non-empty.
*/
if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
- return true;
+ vmbus_setevent(channel);
- return false;
+ return;
}
/* Get the next write location for the specified ring buffer. */
@@ -273,9 +284,9 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
}
/* Write to the ring buffer. */
-int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
- enum hv_signal_policy policy)
+int hv_ringbuffer_write(struct vmbus_channel *channel,
+ struct kvec *kv_list, u32 kv_count, bool lock,
+ bool kick_q)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -285,6 +296,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
u32 old_write;
u64 prev_indices = 0;
unsigned long flags = 0;
+ struct hv_ring_buffer_info *outring_info = &channel->outbound;
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
@@ -337,7 +349,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
if (lock)
spin_unlock_irqrestore(&outring_info->ring_lock, flags);
- *signal = hv_need_to_signal(old_write, outring_info, policy);
+ hv_signal_on_write(old_write, channel, kick_q);
return 0;
}