From 2b2154f9391f3ca03b4f5ba599b2ee7a3a80f781 Mon Sep 17 00:00:00 2001
From: Vincent Legoll
Date: Sun, 21 May 2017 11:04:41 +0200
Subject: x86/PCI: Fix whitespace in set_bios_x() printk

Remove the space from "PCI :" to make the message consistent with other
PCI messages.

Signed-off-by: Vincent Legoll
Signed-off-by: Bjorn Helgaas
---
 arch/x86/pci/pcbios.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86')

diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index c1bdb9edcae7..76595408ff53 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -46,7 +46,7 @@ static inline void set_bios_x(void)
 	pcibios_enabled = 1;
 	set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT);
 	if (__supported_pte_mask & _PAGE_NX)
-		printk(KERN_INFO "PCI : PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
+		printk(KERN_INFO "PCI: PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
 }
 
 /*
--
cgit v1.2.3


From f5ab3b70a6063b340d7ea4fcea20313f5607c71e Mon Sep 17 00:00:00 2001
From: Jean Delvare
Date: Fri, 2 Jun 2017 16:13:11 +0200
Subject: x86/PCI: Simplify Dell DMI B1 quirk

No need for such convoluted code, when all we need is to call one
function in one specific case.

Tested-by: Narendra K # DellEMC PowerEdge 1950, R730XD
Signed-off-by: Jean Delvare
Signed-off-by: Bjorn Helgaas
---
 arch/x86/pci/common.c | 27 +++++----------------------
 1 file changed, 5 insertions(+), 22 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 190e718694b1..1f9f2ee7c421 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -24,7 +24,6 @@ unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
 
 unsigned int pci_early_dump_regs;
 static int pci_bf_sort;
-static int smbios_type_b1_flag;
 int pci_routeirq;
 int noioapicquirk;
 #ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -197,34 +196,18 @@ static int __init set_bf_sort(const struct dmi_system_id *d)
 static void __init read_dmi_type_b1(const struct dmi_header *dm,
 				    void *private_data)
 {
-	u8 *d = (u8 *)dm + 4;
+	u8 *data = (u8 *)dm + 4;
 
 	if (dm->type != 0xB1)
 		return;
-	switch (((*(u32 *)d) >> 9) & 0x03) {
-	case 0x00:
-		printk(KERN_INFO "dmi type 0xB1 record - unknown flag\n");
-		break;
-	case 0x01: /* set pci=bfsort */
-		smbios_type_b1_flag = 1;
-		break;
-	case 0x02: /* do not set pci=bfsort */
-		smbios_type_b1_flag = 2;
-		break;
-	default:
-		break;
-	}
+	if ((((*(u32 *)data) >> 9) & 0x03) == 0x01)
+		set_bf_sort((const struct dmi_system_id *)private_data);
 }
 
 static int __init find_sort_method(const struct dmi_system_id *d)
 {
-	dmi_walk(read_dmi_type_b1, NULL);
-
-	if (smbios_type_b1_flag == 1) {
-		set_bf_sort(d);
-		return 0;
-	}
-	return -1;
+	dmi_walk(read_dmi_type_b1, (void *)d);
+	return 0;
 }
 
 /*
--
cgit v1.2.3


From 13cfc732160f7bc7e596128ce34cda361c556966 Mon Sep 17 00:00:00 2001
From: Bjorn Helgaas
Date: Fri, 19 Aug 2016 16:30:25 +0800
Subject: PCI: Work around poweroff & suspend-to-RAM issue on Macbook Pro 11

Neither soft poweroff (transition to ACPI power state S5) nor
suspend-to-RAM (transition to state S3) works on the Macbook Pro 11,4 and
11,5.

The problem is related to the [mem 0x7fa00000-0x7fbfffff] space. When we
use that space, e.g., by assigning it to the 00:1c.0 Root Port, the ACPI
Power Management 1 Control Register (PM1_CNT) at [io 0x1804] doesn't work
anymore.

Linux does a soft poweroff (transition to S5) by writing to PM1_CNT.
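For illustration, that write boils down to putting the S5 SLP_TYP value and
the SLP_EN bit into the PM1a control register. The sketch below is not the
actual ACPICA code path (it skips the read-modify-write and PM1b handling the
real code does); the port is the 0x1804 from the FADT on this machine, and
the SLP_TYP value, which really comes from the \_S5 package, is only an
assumed example:

#include <linux/io.h>

#define PM1A_CNT_PORT	0x1804		/* FADT PM1a_CNT_BLK on this MacBook */
#define SLP_TYP_S5	0x7		/* assumed example; real value comes from \_S5 */
#define SLP_EN		(1 << 13)	/* PM1_CNT sleep-enable bit */

static void soft_off(void)
{
	/*
	 * SLP_TYP occupies bits 12:10 of PM1_CNT; writing it together with
	 * SLP_EN requests the transition to S5.
	 */
	outw((SLP_TYP_S5 << 10) | SLP_EN, PM1A_CNT_PORT);
}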
The theory about why this doesn't work is:

  - The write to PM1_CNT causes an SMI
  - The BIOS SMI handler depends on something in
    [mem 0x7fa00000-0x7fbfffff]
  - When Linux assigns [mem 0x7fa00000-0x7fbfffff] to the 00:1c.0 Port, it
    covers up whatever the SMI handler uses, so the SMI handler no longer
    works correctly

Reserve the [mem 0x7fa00000-0x7fbfffff] space so we don't assign it to
anything. This is voodoo programming, since we don't know what the real
conflict is, but we've failed to find the root cause.

Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=103211
Tested-by: thejoe@gmail.com
Signed-off-by: Bjorn Helgaas
Cc: stable@vger.kernel.org
Cc: Rafael J. Wysocki
Cc: Lukas Wunner
Cc: Chen Yu
---
 arch/x86/pci/fixup.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

(limited to 'arch/x86')

diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 6d52b94f4bb9..20fa7c84109d 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -571,3 +571,35 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
+
+/*
+ * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
+ *
+ * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
+ * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
+ * for soft poweroff and suspend-to-RAM.
+ *
+ * As far as we know, this is related to the address space, not to the Root
+ * Port itself. Attaching the quirk to the Root Port is a convenience, but
+ * it could probably also be a standalone DMI quirk.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=103211
+ */
+static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+
+	if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
+	     !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
+	    pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
+		return;
+
+	res = request_mem_region(0x7fa00000, 0x200000,
+				 "MacBook Pro poweroff workaround");
+	if (res)
+		dev_info(dev, "claimed %s %pR\n", res->name, res);
+	else
+		dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
--
cgit v1.2.3


From 0bf3730bbc41f155543fa63becb4ff436d6a00a9 Mon Sep 17 00:00:00 2001
From: Kai-Heng Feng
Date: Fri, 16 Jun 2017 17:40:54 +0800
Subject: x86/PCI: Avoid AMD SB7xx EHCI USB wakeup defect

On an AMD Carrizo laptop, when EHCI runtime PM is enabled, EHCI ports do
not assert PME# for device plug/unplug events while in D3.

As Alan Stern points out [1], the PME signal is not enabled when controller
is in D3, therefore it's not being woken up when new devices get plugged in.

Testing shows PME signal works when the EHCI power state is D2.

Clear the PCI_PM_CAP_PME_D3 and PCI_PM_CAP_PME_D3cold bits in
dev->pme_support to indicate the device will not assert PME# from those
states.
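For context, dev->pme_support holds one bit per power state: it is the
PME_Support field of the PM capability shifted right by PCI_PM_CAP_PME_SHIFT
(11), so the PCI_PM_CAP_PME_D3 and PCI_PM_CAP_PME_D3cold bits shift down to
exactly the PCI_D3hot (3) and PCI_D3cold (4) positions. The PCI core consults
that mask roughly as in the sketch below, which mirrors pci_pme_capable() but
is not a verbatim copy; clearing those two bits therefore keeps the core from
ever arming PME# for D3hot/D3cold on this controller:

#include <linux/pci.h>

/*
 * Sketch of how the core decides whether PME# can be used from a given
 * power state. Bit N of dev->pme_support corresponds to PCI_DN.
 */
static bool ehci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;		/* no PM capability at all */

	return !!(dev->pme_support & (1 << state));
}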
[1] http://lkml.kernel.org/r/Pine.LNX.4.44L0.1706121010010.2092-100000@iolanthe.rowland.org

Link: https://bugzilla.kernel.org/show_bug.cgi?id=196091
Link: https://support.amd.com/TechDocs/46837.pdf (Section 23)
Link: https://support.amd.com/TechDocs/42413.pdf (Appendix A2)
Signed-off-by: Kai-Heng Feng
[bhelgaas: changelog, add parens in quirk]
Signed-off-by: Bjorn Helgaas
---
 arch/x86/pci/fixup.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

(limited to 'arch/x86')

diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 6d52b94f4bb9..2259acdcede5 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -571,3 +571,18 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
+
+/*
+ * Device [1022:7808]
+ * 23. USB Wake on Connect/Disconnect with Low Speed Devices
+ * https://support.amd.com/TechDocs/46837.pdf
+ * Appendix A2
+ * https://support.amd.com/TechDocs/42413.pdf
+ */
+static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
+{
+	dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
+	dev->pme_support &= ~((PCI_PM_CAP_PME_D3 | PCI_PM_CAP_PME_D3cold)
+		>> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
--
cgit v1.2.3


From 7dcf90e9e032432e91ce77dd872d2227e9d5b741 Mon Sep 17 00:00:00 2001
From: Jork Loeser
Date: Wed, 24 May 2017 13:41:28 -0700
Subject: PCI: hv: Use vPCI protocol version 1.2

Update the Hyper-V vPCI driver to use the Server-2016 version of the vPCI
protocol, fixing MSI creation and retargeting issues.

Signed-off-by: Jork Loeser
Signed-off-by: Bjorn Helgaas
Reviewed-by: K. Y. Srinivasan
Acked-by: K. Y. Srinivasan
---
 arch/x86/include/uapi/asm/hyperv.h |   6 +
 drivers/pci/host/pci-hyperv.c      | 300 ++++++++++++++++++++++++++++++-------
 2 files changed, 252 insertions(+), 54 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 432df4b1baec..237ec6cda206 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -152,6 +152,12 @@
  */
 #define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
 
+/*
+ * HV_VP_SET available
+ */
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11)
+
+
 /*
  * Crash notification flag.
  */
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 4a8a21e37b8e..415dcc69a502 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -70,6 +70,7 @@ enum pci_protocol_version_t {
 	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
+	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
 };
 
 #define CPU_AFFINITY_ALL -1ULL
 
@@ -79,6 +80,7 @@ enum pci_protocol_version_t {
  * first.
  */
 static enum pci_protocol_version_t pci_protocol_versions[] = {
+	PCI_PROTOCOL_VERSION_1_2,
 	PCI_PROTOCOL_VERSION_1_1,
 };
 
@@ -124,6 +126,9 @@ enum pci_message_type {
 	PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13,
 	PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14,
 	PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15,
+	PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16,
+	PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
+	PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
 	PCI_MESSAGE_MAXIMUM
 };
 
@@ -193,6 +198,30 @@ struct hv_msi_desc {
 	u64 cpu_mask;
 } __packed;
 
+/**
+ * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
+ * @vector:		IDT entry
+ * @delivery_mode:	As defined in Intel's Programmer's
+ *			Reference Manual, Volume 3, Chapter 8.
+ * @vector_count:	Number of contiguous entries in the
+ *			Interrupt Descriptor Table that are
+ *			occupied by this Message-Signaled
+ *			Interrupt. For "MSI", as first defined
+ *			in PCI 2.2, this can be between 1 and
+ *			32. For "MSI-X," as first defined in PCI
+ *			3.0, this must be 1, as each MSI-X table
+ *			entry would have its own descriptor.
+ * @processor_count:	number of bits enabled in array.
+ * @processor_array:	All the target virtual processors.
+ */
+struct hv_msi_desc2 {
+	u8	vector;
+	u8	delivery_mode;
+	u16	vector_count;
+	u16	processor_count;
+	u16	processor_array[32];
+} __packed;
+
 /**
  * struct tran_int_desc
  * @reserved:		unused, padding
@@ -309,6 +338,14 @@ struct pci_resources_assigned {
 	u32 reserved[4];
 } __packed;
 
+struct pci_resources_assigned2 {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	u8 memory_range[0x14][6];	/* not used here */
+	u32 msi_descriptor_count;
+	u8 reserved[70];
+} __packed;
+
 struct pci_create_interrupt {
 	struct pci_message message_type;
 	union win_slot_encoding wslot;
@@ -321,6 +358,12 @@ struct pci_create_int_response {
 	struct tran_int_desc int_desc;
 } __packed;
 
+struct pci_create_interrupt2 {
+	struct pci_message message_type;
+	union win_slot_encoding wslot;
+	struct hv_msi_desc2 int_desc;
+} __packed;
+
 struct pci_delete_interrupt {
 	struct pci_message message_type;
 	union win_slot_encoding wslot;
@@ -346,17 +389,42 @@ static int pci_ring_size = (4 * PAGE_SIZE);
 #define HV_PARTITION_ID_SELF		((u64)-1)
 #define HVCALL_RETARGET_INTERRUPT	0x7e
 
-struct retarget_msi_interrupt {
-	u64 partition_id;		/* use "self" */
-	u64 device_id;
+struct hv_interrupt_entry {
 	u32 source;			/* 1 for MSI(-X) */
 	u32 reserved1;
 	u32 address;
 	u32 data;
-	u64 reserved2;
+};
+
+#define HV_VP_SET_BANK_COUNT_MAX	5 /* current implementation limit */
+
+struct hv_vp_set {
+	u64 format;			/* 0 (HvGenericSetSparse4k) */
+	u64 valid_banks;
+	u64 masks[HV_VP_SET_BANK_COUNT_MAX];
+};
+
+/*
+ * flags for hv_device_interrupt_target.flags
+ */
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST		1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET	2
+
+struct hv_device_interrupt_target {
 	u32 vector;
 	u32 flags;
-	u64 vp_mask;
+	union {
+		u64 vp_mask;
+		struct hv_vp_set vp_set;
+	};
+};
+
+struct retarget_msi_interrupt {
+	u64 partition_id;		/* use "self" */
+	u64 device_id;
+	struct hv_interrupt_entry int_entry;
+	u64 reserved2;
+	struct hv_device_interrupt_target int_target;
 } __packed;
 
 /*
@@ -850,8 +918,11 @@ static void hv_irq_unmask(struct irq_data *data)
 	struct cpumask *dest;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
-	int cpu;
 	unsigned long flags;
+	u32 var_size = 0;
+	int cpu_vmbus;
+	int cpu;
+	u64 res;
 
 	dest = irq_data_get_affinity_mask(data);
 	pdev = msi_desc_to_pci_dev(msi_desc);
@@ -863,23 +934,74 @@ static void hv_irq_unmask(struct irq_data *data)
 	params = &hbus->retarget_msi_interrupt_params;
 	memset(params, 0, sizeof(*params));
 	params->partition_id = HV_PARTITION_ID_SELF;
-	params->source = 1; /* MSI(-X) */
-	params->address = msi_desc->msg.address_lo;
-	params->data = msi_desc->msg.data;
+	params->int_entry.source = 1; /* MSI(-X) */
+	params->int_entry.address = msi_desc->msg.address_lo;
+	params->int_entry.data = msi_desc->msg.data;
 	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
 			   (hbus->hdev->dev_instance.b[4] << 16) |
 			   (hbus->hdev->dev_instance.b[7] << 8) |
 			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
 			   PCI_FUNC(pdev->devfn);
-	params->vector = cfg->vector;
+	params->int_target.vector = cfg->vector;
+
+	/*
+	 * Honoring apic->irq_delivery_mode set to dest_Fixed by
+	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
+	 * spurious interrupt storm. Not doing so does not seem to have a
+	 * negative effect (yet?).
+	 */
+
+	if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+		/*
+		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
+		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
+		 * with >64 VP support.
+		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
+		 * is not sufficient for this hypercall.
+		 */
+		params->int_target.flags |=
+			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+		params->int_target.vp_set.valid_banks =
+			(1ull << HV_VP_SET_BANK_COUNT_MAX) - 1;
+
+		/*
+		 * var-sized hypercall, var-size starts after vp_mask (thus
+		 * vp_set.format does not count, but vp_set.valid_banks does).
+		 */
+		var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
+
+		for_each_cpu_and(cpu, dest, cpu_online_mask) {
+			cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu);
+
+			if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
+				dev_err(&hbus->hdev->device,
					"too high CPU %d", cpu_vmbus);
+				res = 1;
+				goto exit_unlock;
+			}
 
-	for_each_cpu_and(cpu, dest, cpu_online_mask)
-		params->vp_mask |= (1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu));
+			params->int_target.vp_set.masks[cpu_vmbus / 64] |=
+				(1ULL << (cpu_vmbus & 63));
+		}
+	} else {
+		for_each_cpu_and(cpu, dest, cpu_online_mask) {
+			params->int_target.vp_mask |=
+				(1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu));
+		}
+	}
 
-	hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL);
+	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
+			      params, NULL);
 
+exit_unlock:
 	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
 
+	if (res) {
+		dev_err(&hbus->hdev->device,
+			"%s() failed: %#llx", __func__, res);
+		return;
+	}
+
 	pci_msi_unmask_irq(data);
 }
 
@@ -900,6 +1022,53 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
 	complete(&comp_pkt->comp_pkt.host_event);
 }
 
+static u32 hv_compose_msi_req_v1(
+	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
+	u32 slot, u8 vector)
+{
+	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
+	int_pkt->wslot.slot = slot;
+	int_pkt->int_desc.vector = vector;
+	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.delivery_mode =
+		(apic->irq_delivery_mode == dest_LowestPrio) ?
+			dest_LowestPrio : dest_Fixed;
+
+	/*
+	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
+	 * hv_irq_unmask().
+	 */
+	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+
+	return sizeof(*int_pkt);
+}
+
+static u32 hv_compose_msi_req_v2(
+	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
+	u32 slot, u8 vector)
+{
+	int cpu;
+
+	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
+	int_pkt->wslot.slot = slot;
+	int_pkt->int_desc.vector = vector;
+	int_pkt->int_desc.vector_count = 1;
+	int_pkt->int_desc.delivery_mode =
+		(apic->irq_delivery_mode == dest_LowestPrio) ?
+			dest_LowestPrio : dest_Fixed;
+
+	/*
+	 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
+	 * by subsequent retarget in hv_irq_unmask().
+	 */
+	cpu = cpumask_first_and(affinity, cpu_online_mask);
+	int_pkt->int_desc.processor_array[0] =
+		hv_tmp_cpu_nr_to_vp_nr(cpu);
+	int_pkt->int_desc.processor_count = 1;
+
+	return sizeof(*int_pkt);
+}
+
 /**
  * hv_compose_msi_msg() - Supplies a valid MSI address/data
  * @data: Everything about this MSI
@@ -918,15 +1087,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct hv_pci_dev *hpdev;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
-	struct pci_create_interrupt *int_pkt;
 	struct compose_comp_ctxt comp;
 	struct tran_int_desc *int_desc;
-	struct cpumask *affinity;
 	struct {
-		struct pci_packet pkt;
-		u8 buffer[sizeof(struct pci_create_interrupt)];
-	} ctxt;
-	int cpu;
+		struct pci_packet pci_pkt;
+		union {
+			struct pci_create_interrupt v1;
+			struct pci_create_interrupt2 v2;
+		} int_pkts;
+	} __packed ctxt;
+
+	u32 size;
 	int ret;
 
 	pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
@@ -949,36 +1120,44 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	memset(&ctxt, 0, sizeof(ctxt));
 	init_completion(&comp.comp_pkt.host_event);
 
-	ctxt.pkt.completion_func = hv_pci_compose_compl;
-	ctxt.pkt.compl_ctxt = &comp;
-	int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message;
-	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
-	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
-	int_pkt->int_desc.vector = cfg->vector;
-	int_pkt->int_desc.vector_count = 1;
-	int_pkt->int_desc.delivery_mode =
-		(apic->irq_delivery_mode == dest_LowestPrio) ? 1 : 0;
+	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
+	ctxt.pci_pkt.compl_ctxt = &comp;
+
+	switch (pci_protocol_version) {
+	case PCI_PROTOCOL_VERSION_1_1:
+		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
+					irq_data_get_affinity_mask(data),
+					hpdev->desc.win_slot.slot,
+					cfg->vector);
+		break;
 
-	/*
-	 * This bit doesn't have to work on machines with more than 64
-	 * processors because Hyper-V only supports 64 in a guest.
-	 */
-	affinity = irq_data_get_affinity_mask(data);
-	if (cpumask_weight(affinity) >= 32) {
-		int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
-	} else {
-		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
-			int_pkt->int_desc.cpu_mask |=
-				(1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu));
-		}
+	case PCI_PROTOCOL_VERSION_1_2:
+		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
+					irq_data_get_affinity_mask(data),
+					hpdev->desc.win_slot.slot,
+					cfg->vector);
+		break;
+
+	default:
+		/* As we only negotiate protocol versions known to this driver,
+		 * this path should never hit. However, this is it not a hot
+		 * path so we print a message to aid future updates.
+		 */
+		dev_err(&hbus->hdev->device,
+			"Unexpected vPCI protocol, update driver.");
+		goto free_int_desc;
 	}
 
-	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
-			       sizeof(*int_pkt), (unsigned long)&ctxt.pkt,
+	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
+			       size, (unsigned long)&ctxt.pci_pkt,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-	if (ret)
+	if (ret) {
+		dev_err(&hbus->hdev->device,
			"Sending request for interrupt failed: 0x%x",
+			comp.comp_pkt.completion_status);
 		goto free_int_desc;
+	}
 
 	wait_for_completion(&comp.comp_pkt.host_event);
 
@@ -2177,13 +2356,18 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
 {
 	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
 	struct pci_resources_assigned *res_assigned;
+	struct pci_resources_assigned2 *res_assigned2;
 	struct hv_pci_compl comp_pkt;
 	struct hv_pci_dev *hpdev;
 	struct pci_packet *pkt;
+	size_t size_res;
 	u32 wslot;
 	int ret;
 
-	pkt = kmalloc(sizeof(*pkt) + sizeof(*res_assigned), GFP_KERNEL);
+	size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
+			? sizeof(*res_assigned) : sizeof(*res_assigned2);
+
+	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
 	if (!pkt)
 		return -ENOMEM;
 
@@ -2194,22 +2378,30 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
 		if (!hpdev)
 			continue;
 
-		memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned));
+		memset(pkt, 0, sizeof(*pkt) + size_res);
 		init_completion(&comp_pkt.host_event);
 		pkt->completion_func = hv_pci_generic_compl;
 		pkt->compl_ctxt = &comp_pkt;
 
-		res_assigned = (struct pci_resources_assigned *)&pkt->message;
-		res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED;
-		res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
+		if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+			res_assigned =
+				(struct pci_resources_assigned *)&pkt->message;
+			res_assigned->message_type.type =
+				PCI_RESOURCES_ASSIGNED;
+			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
+		} else {
+			res_assigned2 =
+				(struct pci_resources_assigned2 *)&pkt->message;
+			res_assigned2->message_type.type =
+				PCI_RESOURCES_ASSIGNED2;
+			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
+		}
 
 		put_pcichild(hpdev, hv_pcidev_ref_by_slot);
 
-		ret = vmbus_sendpacket(
-			hdev->channel, &pkt->message,
-			sizeof(*res_assigned),
-			(unsigned long)pkt,
-			VM_PKT_DATA_INBAND,
-			VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+				size_res, (unsigned long)pkt,
+				VM_PKT_DATA_INBAND,
+				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 		if (ret)
 			break;
 
--
cgit v1.2.3
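A note on how the two protocol versions above come into play: the
pci_protocol_versions[] table is ordered newest-first, negotiation keeps the
first revision the host accepts, and the negotiated pci_protocol_version is
what hv_compose_msi_msg(), hv_irq_unmask() and hv_send_resources_allocated()
switch on in the hunks above. The following is a simplified sketch of that
pattern, not the driver's actual negotiation function; host_accepts_version()
is a stand-in for the PCI_QUERY_PROTOCOL_VERSION request/response exchange the
driver runs over the device's VMBus channel, and pci_protocol_version is
re-declared here only so the sketch is self-contained:

#include <linux/kernel.h>
#include <linux/errno.h>

/* Negotiated version, as used throughout the patch above. */
static enum pci_protocol_version_t pci_protocol_version;

/*
 * Placeholder for the real query: the driver sends a
 * PCI_QUERY_PROTOCOL_VERSION packet and checks the completion status.
 * Here we simply pretend the host tops out at protocol 1.2.
 */
static bool host_accepts_version(enum pci_protocol_version_t ver)
{
	return ver <= PCI_PROTOCOL_VERSION_1_2;
}

static int negotiate_vpci_protocol(void)
{
	int i;

	/* Newest first: an RS1 host picks 1.2, older hosts fall back to 1.1. */
	for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
		if (host_accepts_version(pci_protocol_versions[i])) {
			pci_protocol_version = pci_protocol_versions[i];
			return 0;
		}
	}

	return -EPROTO;
}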