author		Marc Zyngier <maz@kernel.org>	2022-03-17 10:49:02 +0100
committer	Marc Zyngier <maz@kernel.org>	2022-04-05 17:33:13 +0200
commit		af27e41612ec7e5b4783f589b753a7c31a37aac8 (patch)
tree		c8faa04942b674ff973e59afb75c0b37378972a4 /drivers/irqchip
parent		irqchip/irq-qcom-mpm: fix return value check in qcom_mpm_init() (diff)
download	linux-af27e41612ec7e5b4783f589b753a7c31a37aac8.tar.xz
linux-af27e41612ec7e5b4783f589b753a7c31a37aac8.zip
irqchip/gic-v4: Wait for GICR_VPENDBASER.Dirty to clear before descheduling
The way KVM drives GICv4.{0,1} is as follows:

- vcpu_load() makes the VPE resident, instructing the RD to start
  scanning for interrupts

- just before entering the guest, we check that the RD has finished
  scanning and that we can start running the vcpu

- on preemption, we deschedule the VPE by making it invalid on
  the RD

However, we are preemptible between the first two steps. If it so
happens *and* that the RD was still scanning, we nonetheless write
to the GICR_VPENDBASER register while Dirty is set, and bad things
happen (we're in UNPRED land).

This affects both the 4.0 and 4.1 implementations.

Make sure Dirty is cleared before performing the deschedule,
meaning that its_clear_vpend_valid() becomes a sort of full VPE
residency barrier.

Reported-by: Jingyi Wang <wangjingyi11@huawei.com>
Tested-by: Nianyao Tang <tangnianyao@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Fixes: 57e3cebd022f ("KVM: arm64: Delay the polling of the GICR_VPENDBASER.Dirty bit")
Link: https://lore.kernel.org/r/4aae10ba-b39a-5f84-754b-69c2eb0a2c03@huawei.com
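To make the race easier to see, here is a minimal, self-contained C sketch of the flow the message describes. The function names (make_vpe_resident(), poll_vpendbaser_dirty(), deschedule_vpe()) and the vcpu type are illustrative stand-ins, not the actual KVM/VGIC entry points:

/*
 * gicv4_residency_sketch.c -- illustrative only, not kernel code.
 * Hypothetical stand-ins for the KVM/VGIC entry points described in the
 * commit message; the point is the preemptible window between step 1 and
 * step 2, during which GICR_VPENDBASER.Dirty may still be set.
 */

struct vcpu;					/* opaque, for illustration */

static void make_vpe_resident(struct vcpu *v)     { (void)v; /* stub */ }
static void poll_vpendbaser_dirty(struct vcpu *v) { (void)v; /* stub */ }
static void deschedule_vpe(struct vcpu *v)        { (void)v; /* stub */ }

static void vcpu_load_sketch(struct vcpu *v)
{
	/* Step 1: make the VPE resident; the RD starts scanning the VPT */
	make_vpe_resident(v);
	/*
	 * Preemption can land here. Before this fix, a deschedule taken in
	 * this window could write GICR_VPENDBASER while Dirty was still set,
	 * which is UNPREDICTABLE behaviour.
	 */
}

static void vcpu_enter_guest_sketch(struct vcpu *v)
{
	/* Step 2: wait for the RD to finish scanning before running the vcpu */
	poll_vpendbaser_dirty(v);
}

static void vcpu_put_sketch(struct vcpu *v)
{
	/*
	 * Step 3: deschedule the VPE. With the fix, this path also polls
	 * Dirty first, making its_clear_vpend_valid() a residency barrier.
	 */
	poll_vpendbaser_dirty(v);
	deschedule_vpe(v);
}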
Diffstat (limited to 'drivers/irqchip')
-rw-r--r--	drivers/irqchip/irq-gic-v3-its.c	| 28 +++++++++++++++++++++---------
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index cd772973114a..a0fc764ec9dc 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3011,18 +3011,12 @@ static int __init allocate_lpi_tables(void)
 	return 0;
 }
 
-static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
 {
 	u32 count = 1000000;	/* 1s! */
 	bool clean;
 	u64 val;
 
-	val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-	val &= ~GICR_VPENDBASER_Valid;
-	val &= ~clr;
-	val |= set;
-	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
 	do {
 		val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
 		clean = !(val & GICR_VPENDBASER_Dirty);
@@ -3033,10 +3027,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
 		}
 	} while (!clean && count);
 
-	if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+	if (unlikely(!clean))
 		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+
+	return val;
+}
+
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+{
+	u64 val;
+
+	/* Make sure we wait until the RD is done with the initial scan */
+	val = read_vpend_dirty_clear(vlpi_base);
+	val &= ~GICR_VPENDBASER_Valid;
+	val &= ~clr;
+	val |= set;
+	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+	val = read_vpend_dirty_clear(vlpi_base);
+	if (unlikely(val & GICR_VPENDBASER_Dirty))
 		val |= GICR_VPENDBASER_PendingLast;
-	}
 
 	return val;
 }
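For reference, a hedged sketch of how a caller uses the barrier-like its_clear_vpend_valid() after this change. It paraphrases the GICv4.0 deschedule path found elsewhere in irq-gic-v3-its.c; it is not part of this patch and the details may differ:

/*
 * Sketch of the GICv4.0 deschedule path (paraphrased, not part of this
 * patch): the deschedule now goes through its_clear_vpend_valid(), which
 * first waits for GICR_VPENDBASER.Dirty to clear before making the VPE
 * invalid, then re-checks Dirty after the write.
 */
static void its_vpe_deschedule_sketch(struct its_vpe *vpe)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u64 val;

	/* Waits for Dirty, clears Valid, then reports the final state */
	val = its_clear_vpend_valid(vlpi_base, 0, 0);

	vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
	vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
}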