author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-16 18:10:59 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-16 18:10:59 +0100
commit     2bf16b7a73caf3435f782e4170cfe563675e10f9 (patch)
tree       7f4c5b28a02f08c4d6fd69dd43db5872b07c20c4 /drivers/hv
parent     Merge tag 'driver-core-4.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel... (diff)
parent     VME: Return -EBUSY when DMA list in use (diff)
Merge tag 'char-misc-4.15-rc1' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc updates from Greg KH:
 "Here is the big set of char/misc and other driver subsystem patches
  for 4.15-rc1.

  There are small changes all over here, hyperv driver updates, pcmcia
  driver updates, w1 driver updates, vme driver updates, nvmem driver
  updates, and lots of other little one-off driver updates as well. The
  shortlog has the full details.

  All of these have been in linux-next for quite a while with no
  reported issues"

* tag 'char-misc-4.15-rc1' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (90 commits)
  VME: Return -EBUSY when DMA list in use
  w1: keep balance of mutex locks and refcnts
  MAINTAINERS: Update VME subsystem tree.
  nvmem: sunxi-sid: add support for A64/H5's SID controller
  nvmem: imx-ocotp: Update module description
  nvmem: imx-ocotp: Enable i.MX7D OTP write support
  nvmem: imx-ocotp: Add i.MX7D timing write clock setup support
  nvmem: imx-ocotp: Move i.MX6 write clock setup to dedicated function
  nvmem: imx-ocotp: Add support for banked OTP addressing
  nvmem: imx-ocotp: Pass parameters via a struct
  nvmem: imx-ocotp: Restrict OTP write to IMX6 processors
  nvmem: uniphier: add UniPhier eFuse driver
  dt-bindings: nvmem: add description for UniPhier eFuse
  nvmem: set nvmem->owner to nvmem->dev->driver->owner if unset
  nvmem: qfprom: fix different address space warnings of sparse
  nvmem: mtk-efuse: fix different address space warnings of sparse
  nvmem: mtk-efuse: use stack for nvmem_config instead of malloc'ing it
  nvmem: imx-iim: use stack for nvmem_config instead of malloc'ing it
  thunderbolt: tb: fix use after free in tb_activate_pcie_devices
  MAINTAINERS: Add git tree for Thunderbolt development
  ...
Diffstat (limited to 'drivers/hv')
-rw-r--r--  drivers/hv/Makefile          4
-rw-r--r--  drivers/hv/channel.c        23
-rw-r--r--  drivers/hv/channel_mgmt.c   36
-rw-r--r--  drivers/hv/connection.c      7
-rw-r--r--  drivers/hv/hv_trace.c        4
-rw-r--r--  drivers/hv/hv_trace.h      327
-rw-r--r--  drivers/hv/hyperv_vmbus.h    4
-rw-r--r--  drivers/hv/vmbus_drv.c     209
8 files changed, 594 insertions, 20 deletions
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index e7b1d796ba2e..14c22786b519 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -3,7 +3,9 @@ obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
+CFLAGS_hv_trace.o = -I$(src)
+
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
- channel_mgmt.o ring_buffer.o
+ channel_mgmt.o ring_buffer.o hv_trace.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 894b67ac2cae..19f0cf37e0ed 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -43,6 +43,8 @@ void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
+ trace_vmbus_setevent(channel);
+
/*
* For channels marked as in "low latency" mode
* bypass the monitor page mechanism.
@@ -185,6 +187,8 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
ret = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);
+ trace_vmbus_open(open_msg, ret);
+
if (ret != 0) {
err = ret;
goto error_clean_msglist;
@@ -234,13 +238,18 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
const uuid_le *shv_host_servie_id)
{
struct vmbus_channel_tl_connect_request conn_msg;
+ int ret;
memset(&conn_msg, 0, sizeof(conn_msg));
conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
conn_msg.guest_endpoint_id = *shv_guest_servie_id;
conn_msg.host_service_id = *shv_host_servie_id;
- return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+ ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+
+ trace_vmbus_send_tl_connect_request(&conn_msg, ret);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
@@ -433,6 +442,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo), true);
+
+ trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
+
if (ret != 0)
goto cleanup;
@@ -448,6 +460,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadl_body,
submsginfo->msgsize - sizeof(*submsginfo),
true);
+
+ trace_vmbus_establish_gpadl_body(gpadl_body, ret);
+
if (ret != 0)
goto cleanup;
@@ -511,6 +526,8 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
true);
+ trace_vmbus_teardown_gpadl(msg, ret);
+
if (ret)
goto post_msg_err;
@@ -589,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
true);
+ trace_vmbus_close_internal(msg, ret);
+
if (ret) {
pr_err("Close failed: close post msg return is %d\n", ret);
/*
@@ -745,6 +764,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = requestid;
+ desc.reserved = 0;
desc.rangecount = pagecount;
for (i = 0; i < pagecount; i++) {
@@ -788,6 +808,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = requestid;
+ desc->reserved = 0;
desc->rangecount = 1;
bufferlist[0].iov_base = desc;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 379b0df123be..ec5454f3f4a6 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -350,7 +350,7 @@ static void free_channel(struct vmbus_channel *channel)
{
tasklet_kill(&channel->callback_event);
- kfree_rcu(channel, rcu);
+ kobject_put(&channel->kobj);
}
static void percpu_channel_enq(void *arg)
@@ -373,12 +373,15 @@ static void percpu_channel_deq(void *arg)
static void vmbus_release_relid(u32 relid)
{
struct vmbus_channel_relid_released msg;
+ int ret;
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
msg.child_relid = relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
- vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
- true);
+ ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
+ true);
+
+ trace_vmbus_release_relid(&msg, ret);
}
void hv_process_channel_removal(u32 relid)
@@ -520,6 +523,14 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
newchannel->state = CHANNEL_OPEN_STATE;
if (!fnew) {
+ struct hv_device *dev
+ = newchannel->primary_channel->device_obj;
+
+ if (vmbus_add_channel_kobj(dev, newchannel)) {
+ atomic_dec(&vmbus_connection.offer_in_progress);
+ goto err_free_chan;
+ }
+
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
newchannel->probe_done = true;
@@ -805,6 +816,8 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
offer = (struct vmbus_channel_offer_channel *)hdr;
+ trace_vmbus_onoffer(offer);
+
/* Allocate the channel object and save this offer. */
newchannel = alloc_channel();
if (!newchannel) {
@@ -846,6 +859,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
rescind = (struct vmbus_channel_rescind_offer *)hdr;
+ trace_vmbus_onoffer_rescind(rescind);
+
/*
* The offer msg and the corresponding rescind msg
* from the host are guaranteed to be ordered -
@@ -974,6 +989,8 @@ static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
result = (struct vmbus_channel_open_result *)hdr;
+ trace_vmbus_onopen_result(result);
+
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
@@ -1018,6 +1035,8 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
+ trace_vmbus_ongpadl_created(gpadlcreated);
+
/*
* Find the establish msg, copy the result and signal/unblock the wait
* event
@@ -1066,6 +1085,8 @@ static void vmbus_ongpadl_torndown(
gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
+ trace_vmbus_ongpadl_torndown(gpadl_torndown);
+
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
@@ -1109,6 +1130,9 @@ static void vmbus_onversion_response(
unsigned long flags;
version_response = (struct vmbus_channel_version_response *)hdr;
+
+ trace_vmbus_onversion_response(version_response);
+
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
@@ -1168,6 +1192,8 @@ void vmbus_onmessage(void *context)
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
size = msg->header.payload_size;
+ trace_vmbus_on_message(hdr);
+
if (hdr->msgtype >= CHANNELMSG_COUNT) {
pr_err("Received invalid channel message type %d size %d\n",
hdr->msgtype, size);
@@ -1201,9 +1227,11 @@ int vmbus_request_offers(void)
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
-
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
true);
+
+ trace_vmbus_request_offers(ret);
+
if (ret != 0) {
pr_err("Unable to request offers - %d\n", ret);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index f41901f80b64..447371f4de56 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -117,6 +117,9 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
ret = vmbus_post_msg(msg,
sizeof(struct vmbus_channel_initiate_contact),
true);
+
+ trace_vmbus_negotiate_version(msg, ret);
+
if (ret != 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
@@ -319,6 +322,8 @@ void vmbus_on_event(unsigned long data)
struct vmbus_channel *channel = (void *) data;
unsigned long time_limit = jiffies + 2;
+ trace_vmbus_on_event(channel);
+
do {
void (*callback_fn)(void *);
@@ -409,6 +414,8 @@ void vmbus_set_event(struct vmbus_channel *channel)
if (!channel->is_dedicated_interrupt)
vmbus_send_interrupt(child_relid);
+ ++channel->sig_events;
+
hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv_trace.c b/drivers/hv/hv_trace.c
new file mode 100644
index 000000000000..df47acd01a81
--- /dev/null
+++ b/drivers/hv/hv_trace.c
@@ -0,0 +1,4 @@
+#include "hyperv_vmbus.h"
+
+#define CREATE_TRACE_POINTS
+#include "hv_trace.h"
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
new file mode 100644
index 000000000000..d635ee95b20d
--- /dev/null
+++ b/drivers/hv/hv_trace.h
@@ -0,0 +1,327 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hyperv
+
+#if !defined(_HV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _HV_TRACE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(vmbus_hdr_msg,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr),
+ TP_STRUCT__entry(__field(unsigned int, msgtype)),
+ TP_fast_assign(__entry->msgtype = hdr->msgtype;),
+ TP_printk("msgtype=%u", __entry->msgtype)
+);
+
+DEFINE_EVENT(vmbus_hdr_msg, vmbus_on_msg_dpc,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr)
+);
+
+DEFINE_EVENT(vmbus_hdr_msg, vmbus_on_message,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr)
+);
+
+TRACE_EVENT(vmbus_onoffer,
+ TP_PROTO(const struct vmbus_channel_offer_channel *offer),
+ TP_ARGS(offer),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u8, monitorid)
+ __field(u16, is_ddc_int)
+ __field(u32, connection_id)
+ __array(char, if_type, 16)
+ __array(char, if_instance, 16)
+ __field(u16, chn_flags)
+ __field(u16, mmio_mb)
+ __field(u16, sub_idx)
+ ),
+ TP_fast_assign(__entry->child_relid = offer->child_relid;
+ __entry->monitorid = offer->monitorid;
+ __entry->is_ddc_int = offer->is_dedicated_interrupt;
+ __entry->connection_id = offer->connection_id;
+ memcpy(__entry->if_type,
+ &offer->offer.if_type.b, 16);
+ memcpy(__entry->if_instance,
+ &offer->offer.if_instance.b, 16);
+ __entry->chn_flags = offer->offer.chn_flags;
+ __entry->mmio_mb = offer->offer.mmio_megabytes;
+ __entry->sub_idx = offer->offer.sub_channel_index;
+ ),
+ TP_printk("child_relid 0x%x, monitorid 0x%x, is_dedicated %d, "
+ "connection_id 0x%x, if_type %pUl, if_instance %pUl, "
+ "chn_flags 0x%x, mmio_megabytes %d, sub_channel_index %d",
+ __entry->child_relid, __entry->monitorid,
+ __entry->is_ddc_int, __entry->connection_id,
+ __entry->if_type, __entry->if_instance,
+ __entry->chn_flags, __entry->mmio_mb,
+ __entry->sub_idx
+ )
+ );
+
+TRACE_EVENT(vmbus_onoffer_rescind,
+ TP_PROTO(const struct vmbus_channel_rescind_offer *offer),
+ TP_ARGS(offer),
+ TP_STRUCT__entry(__field(u32, child_relid)),
+ TP_fast_assign(__entry->child_relid = offer->child_relid),
+ TP_printk("child_relid 0x%x", __entry->child_relid)
+ );
+
+TRACE_EVENT(vmbus_onopen_result,
+ TP_PROTO(const struct vmbus_channel_open_result *result),
+ TP_ARGS(result),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, openid)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = result->child_relid;
+ __entry->openid = result->openid;
+ __entry->status = result->status;
+ ),
+ TP_printk("child_relid 0x%x, openid %d, status %d",
+ __entry->child_relid, __entry->openid, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_ongpadl_created,
+ TP_PROTO(const struct vmbus_channel_gpadl_created *gpadlcreated),
+ TP_ARGS(gpadlcreated),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = gpadlcreated->child_relid;
+ __entry->gpadl = gpadlcreated->gpadl;
+ __entry->status = gpadlcreated->creation_status;
+ ),
+ TP_printk("child_relid 0x%x, gpadl 0x%x, creation_status %d",
+ __entry->child_relid, __entry->gpadl, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_ongpadl_torndown,
+ TP_PROTO(const struct vmbus_channel_gpadl_torndown *gpadltorndown),
+ TP_ARGS(gpadltorndown),
+ TP_STRUCT__entry(__field(u32, gpadl)),
+ TP_fast_assign(__entry->gpadl = gpadltorndown->gpadl),
+ TP_printk("gpadl 0x%x", __entry->gpadl)
+ );
+
+TRACE_EVENT(vmbus_onversion_response,
+ TP_PROTO(const struct vmbus_channel_version_response *response),
+ TP_ARGS(response),
+ TP_STRUCT__entry(
+ __field(u8, ver)
+ ),
+ TP_fast_assign(__entry->ver = response->version_supported;
+ ),
+ TP_printk("version_supported %d", __entry->ver)
+ );
+
+TRACE_EVENT(vmbus_request_offers,
+ TP_PROTO(int ret),
+ TP_ARGS(ret),
+ TP_STRUCT__entry(__field(int, ret)),
+ TP_fast_assign(__entry->ret = ret),
+ TP_printk("sending ret %d", __entry->ret)
+ );
+
+TRACE_EVENT(vmbus_open,
+ TP_PROTO(const struct vmbus_channel_open_channel *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, openid)
+ __field(u32, gpadlhandle)
+ __field(u32, target_vp)
+ __field(u32, offset)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->openid = msg->openid;
+ __entry->gpadlhandle = msg->ringbuffer_gpadlhandle;
+ __entry->target_vp = msg->target_vp;
+ __entry->offset = msg->downstream_ringbuffer_pageoffset;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, openid %d, "
+ "gpadlhandle 0x%x, target_vp 0x%x, offset 0x%x, ret %d",
+ __entry->child_relid, __entry->openid,
+ __entry->gpadlhandle, __entry->target_vp,
+ __entry->offset, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_close_internal,
+ TP_PROTO(const struct vmbus_channel_close_channel *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, ret %d", __entry->child_relid,
+ __entry->ret)
+ );
+
+TRACE_EVENT(vmbus_establish_gpadl_header,
+ TP_PROTO(const struct vmbus_channel_gpadl_header *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(u16, range_buflen)
+ __field(u16, rangecount)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->gpadl = msg->gpadl;
+ __entry->range_buflen = msg->range_buflen;
+ __entry->rangecount = msg->rangecount;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, gpadl 0x%x, range_buflen %d "
+ "rangecount %d, ret %d",
+ __entry->child_relid, __entry->gpadl,
+ __entry->range_buflen, __entry->rangecount, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_establish_gpadl_body,
+ TP_PROTO(const struct vmbus_channel_gpadl_body *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, msgnumber)
+ __field(u32, gpadl)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->msgnumber = msg->msgnumber;
+ __entry->gpadl = msg->gpadl;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending msgnumber %d, gpadl 0x%x, ret %d",
+ __entry->msgnumber, __entry->gpadl, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_teardown_gpadl,
+ TP_PROTO(const struct vmbus_channel_gpadl_teardown *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->gpadl = msg->gpadl;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, gpadl 0x%x, ret %d",
+ __entry->child_relid, __entry->gpadl, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_negotiate_version,
+ TP_PROTO(const struct vmbus_channel_initiate_contact *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, ver)
+ __field(u32, target_vcpu)
+ __field(int, ret)
+ __field(u64, int_page)
+ __field(u64, mon_page1)
+ __field(u64, mon_page2)
+ ),
+ TP_fast_assign(
+ __entry->ver = msg->vmbus_version_requested;
+ __entry->target_vcpu = msg->target_vcpu;
+ __entry->int_page = msg->interrupt_page;
+ __entry->mon_page1 = msg->monitor_page1;
+ __entry->mon_page2 = msg->monitor_page2;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending vmbus_version_requested %d, target_vcpu 0x%x, "
+ "pages %llx:%llx:%llx, ret %d",
+ __entry->ver, __entry->target_vcpu, __entry->int_page,
+ __entry->mon_page1, __entry->mon_page2, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_release_relid,
+ TP_PROTO(const struct vmbus_channel_relid_released *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, ret %d",
+ __entry->child_relid, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_send_tl_connect_request,
+ TP_PROTO(const struct vmbus_channel_tl_connect_request *msg,
+ int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __array(char, guest_id, 16)
+ __array(char, host_id, 16)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16);
+ memcpy(__entry->host_id, &msg->host_service_id.b, 16);
+ __entry->ret = ret;
+ ),
+ TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "
+ "ret %d",
+ __entry->guest_id, __entry->host_id, __entry->ret
+ )
+ );
+
+DECLARE_EVENT_CLASS(vmbus_channel,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel),
+ TP_STRUCT__entry(__field(u32, relid)),
+ TP_fast_assign(__entry->relid = channel->offermsg.child_relid),
+ TP_printk("relid 0x%x", __entry->relid)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_chan_sched,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_setevent,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_on_event,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hv_trace
+#endif /* _HV_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 49569f8fe038..22300ec7b556 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -31,6 +31,8 @@
#include <linux/hyperv.h>
#include <linux/interrupt.h>
+#include "hv_trace.h"
+
/*
* Timeout for services such as KVP and fcopy.
*/
@@ -373,6 +375,8 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
+int vmbus_add_channel_kobj(struct hv_device *device_obj,
+ struct vmbus_channel *channel);
struct vmbus_channel *relid2channel(u32 relid);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 2cd134dd94d2..76ed9a216f10 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -65,7 +65,7 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
regs = current_pt_regs();
- hyperv_report_panic(regs);
+ hyperv_report_panic(regs, val);
return NOTIFY_DONE;
}
@@ -75,7 +75,7 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
struct die_args *die = (struct die_args *)args;
struct pt_regs *regs = die->regs;
- hyperv_report_panic(regs);
+ hyperv_report_panic(regs, val);
return NOTIFY_DONE;
}
@@ -107,28 +107,30 @@ static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
-static u8 channel_monitor_group(struct vmbus_channel *channel)
+static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid / 32;
}
-static u8 channel_monitor_offset(struct vmbus_channel *channel)
+static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid % 32;
}
-static u32 channel_pending(struct vmbus_channel *channel,
- struct hv_monitor_page *monitor_page)
+static u32 channel_pending(const struct vmbus_channel *channel,
+ const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
+
return monitor_page->trigger_group[monitor_group].pending;
}
-static u32 channel_latency(struct vmbus_channel *channel,
- struct hv_monitor_page *monitor_page)
+static u32 channel_latency(const struct vmbus_channel *channel,
+ const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
u8 monitor_offset = channel_monitor_offset(channel);
+
return monitor_page->latency[monitor_group][monitor_offset];
}
@@ -833,6 +835,8 @@ void vmbus_on_msg_dpc(unsigned long data)
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ trace_vmbus_on_msg_dpc(hdr);
+
if (hdr->msgtype >= CHANNELMSG_COUNT) {
WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
goto msg_handled;
@@ -942,6 +946,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (channel->rescind)
continue;
+ trace_vmbus_chan_sched(channel);
+
+ ++channel->interrupts;
+
switch (channel->callback_mode) {
case HV_CALL_ISR:
vmbus_channel_isr(channel);
@@ -1133,6 +1141,159 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
+
+/*
+ * Called when last reference to channel is gone.
+ */
+static void vmbus_chan_release(struct kobject *kobj)
+{
+ struct vmbus_channel *channel
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ kfree_rcu(channel, rcu);
+}
+
+struct vmbus_chan_attribute {
+ struct attribute attr;
+ ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
+ ssize_t (*store)(struct vmbus_channel *chan,
+ const char *buf, size_t count);
+};
+#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
+ struct vmbus_chan_attribute chan_attr_##_name \
+ = __ATTR(_name, _mode, _show, _store)
+#define VMBUS_CHAN_ATTR_RW(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
+#define VMBUS_CHAN_ATTR_RO(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
+#define VMBUS_CHAN_ATTR_WO(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
+
+static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ const struct vmbus_chan_attribute *attribute
+ = container_of(attr, struct vmbus_chan_attribute, attr);
+ const struct vmbus_channel *chan
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(chan, buf);
+}
+
+static const struct sysfs_ops vmbus_chan_sysfs_ops = {
+ .show = vmbus_chan_attr_show,
+};
+
+static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->outbound;
+
+ return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+}
+VMBUS_CHAN_ATTR_RO(out_mask);
+
+static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+}
+VMBUS_CHAN_ATTR_RO(in_mask);
+
+static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
+}
+VMBUS_CHAN_ATTR_RO(read_avail);
+
+static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->outbound;
+
+ return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
+}
+VMBUS_CHAN_ATTR_RO(write_avail);
+
+static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%u\n", channel->target_cpu);
+}
+VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
+
+static ssize_t channel_pending_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%d\n",
+ channel_pending(channel,
+ vmbus_connection.monitor_pages[1]));
+}
+VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
+
+static ssize_t channel_latency_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%d\n",
+ channel_latency(channel,
+ vmbus_connection.monitor_pages[1]));
+}
+VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
+
+static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%llu\n", channel->interrupts);
+}
+VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
+
+static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%llu\n", channel->sig_events);
+}
+VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
+
+static struct attribute *vmbus_chan_attrs[] = {
+ &chan_attr_out_mask.attr,
+ &chan_attr_in_mask.attr,
+ &chan_attr_read_avail.attr,
+ &chan_attr_write_avail.attr,
+ &chan_attr_cpu.attr,
+ &chan_attr_pending.attr,
+ &chan_attr_latency.attr,
+ &chan_attr_interrupts.attr,
+ &chan_attr_events.attr,
+ NULL
+};
+
+static struct kobj_type vmbus_chan_ktype = {
+ .sysfs_ops = &vmbus_chan_sysfs_ops,
+ .release = vmbus_chan_release,
+ .default_attrs = vmbus_chan_attrs,
+};
+
+/*
+ * vmbus_add_channel_kobj - setup a sub-directory under device/channels
+ */
+int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
+{
+ struct kobject *kobj = &channel->kobj;
+ u32 relid = channel->offermsg.child_relid;
+ int ret;
+
+ kobj->kset = dev->channels_kset;
+ ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
+ "%u", relid);
+ if (ret)
+ return ret;
+
+ kobject_uevent(kobj, KOBJ_ADD);
+
+ return 0;
+}
+
/*
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
@@ -1164,7 +1325,8 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
*/
int vmbus_device_register(struct hv_device *child_device_obj)
{
- int ret = 0;
+ struct kobject *kobj = &child_device_obj->device.kobj;
+ int ret;
dev_set_name(&child_device_obj->device, "%pUl",
child_device_obj->channel->offermsg.offer.if_instance.b);
@@ -1178,13 +1340,32 @@ int vmbus_device_register(struct hv_device *child_device_obj)
* binding...which will eventually call vmbus_match() and vmbus_probe()
*/
ret = device_register(&child_device_obj->device);
-
- if (ret)
+ if (ret) {
pr_err("Unable to register child device\n");
- else
- pr_debug("child device %s registered\n",
- dev_name(&child_device_obj->device));
+ return ret;
+ }
+
+ child_device_obj->channels_kset = kset_create_and_add("channels",
+ NULL, kobj);
+ if (!child_device_obj->channels_kset) {
+ ret = -ENOMEM;
+ goto err_dev_unregister;
+ }
+
+ ret = vmbus_add_channel_kobj(child_device_obj,
+ child_device_obj->channel);
+ if (ret) {
+ pr_err("Unable to register primary channeln");
+ goto err_kset_unregister;
+ }
+
+ return 0;
+
+err_kset_unregister:
+ kset_unregister(child_device_obj->channels_kset);
+err_dev_unregister:
+ device_unregister(&child_device_obj->device);
return ret;
}