Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/net/bpf_jit_32.c        | 57
-rw-r--r--  arch/arm64/kernel/entry.S        |  5
-rw-r--r--  arch/arm64/kernel/irq.c          |  4
-rw-r--r--  arch/avr32/kernel/time.c         | 65
-rw-r--r--  arch/s390/kernel/asm-offsets.c   | 15
-rw-r--r--  arch/s390/kernel/entry.S         | 13
-rw-r--r--  arch/s390/kernel/traps.c         |  4
-rw-r--r--  arch/x86/include/uapi/asm/kvm.h  |  4
-rw-r--r--  arch/x86/kvm/lapic.c             |  2
-rw-r--r--  arch/x86/kvm/mtrr.c              | 40
-rw-r--r--  arch/x86/kvm/svm.c               |  2
-rw-r--r--  arch/x86/kvm/vmx.c               |  5
-rw-r--r--  arch/x86/kvm/x86.h               |  5
13 files changed, 143 insertions(+), 78 deletions(-)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 4550d247e308..c011e2296cb1 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -74,32 +74,52 @@ struct jit_ctx {
int bpf_jit_enable __read_mostly;
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+ unsigned int size)
+{
+ void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+ if (!ptr)
+ return -EFAULT;
+ memcpy(ret, ptr, size);
+ return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
u8 ret;
int err;
- err = skb_copy_bits(skb, offset, &ret, 1);
+ if (offset < 0)
+ err = call_neg_helper(skb, offset, &ret, 1);
+ else
+ err = skb_copy_bits(skb, offset, &ret, 1);
return (u64)err << 32 | ret;
}
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{
u16 ret;
int err;
- err = skb_copy_bits(skb, offset, &ret, 2);
+ if (offset < 0)
+ err = call_neg_helper(skb, offset, &ret, 2);
+ else
+ err = skb_copy_bits(skb, offset, &ret, 2);
return (u64)err << 32 | ntohs(ret);
}
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
u32 ret;
int err;
- err = skb_copy_bits(skb, offset, &ret, 4);
+ if (offset < 0)
+ err = call_neg_helper(skb, offset, &ret, 4);
+ else
+ err = skb_copy_bits(skb, offset, &ret, 4);
return (u64)err << 32 | ntohl(ret);
}
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
case BPF_LD | BPF_B | BPF_ABS:
load_order = 0;
load:
- /* the interpreter will deal with the negative K */
- if ((int)k < 0)
- return -ENOTSUPP;
emit_mov_i(r_off, k, ctx);
load_common:
ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ load_common:
emit(ARM_SUB_I(r_scratch, r_skb_hl,
1 << load_order), ctx);
emit(ARM_CMP_R(r_scratch, r_off), ctx);
- condt = ARM_COND_HS;
+ condt = ARM_COND_GE;
} else {
emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
condt = ARM_COND_HI;
}
+ /*
+ * test for negative offset, only if we are
+ * currently scheduled to take the fast
+ * path. this will update the flags so that
+ * the slowpath instructions are ignored if the
+ * offset is negative.
+ *
+ * for load_order == 0 the HI condition will
+ * make loads at offset 0 take the slow path too.
+ */
+ _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+
_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
ctx);
@@ -860,9 +889,11 @@ b_epilogue:
off = offsetof(struct sk_buff, vlan_tci);
emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
- OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
- else
- OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+ OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
+ else {
+ OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
+ OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
+ }
break;
case BPF_ANC | SKF_AD_QUEUE:
ctx->seen |= SEEN_SKB;
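The three jit_get_skb_* helpers in the hunk above share one pattern: negative (ancillary) offsets are resolved through bpf_internal_load_pointer_neg_helper() via call_neg_helper(), while non-negative offsets keep using skb_copy_bits(). A minimal sketch of that shared dispatch, assuming the same kernel helpers as in the hunk (jit_get_skb_generic is a hypothetical name, and the ntohs/ntohl conversion done by the h/w variants is omitted):

static u64 jit_get_skb_generic(struct sk_buff *skb, int offset,
                               unsigned int size)
{
        u32 ret = 0;
        int err;

        if (offset < 0)
                /* negative offsets go through the interpreter helper */
                err = call_neg_helper(skb, offset, &ret, size);
        else
                err = skb_copy_bits(skb, offset, &ret, size);

        /* error code in the upper 32 bits, loaded value in the lower 32 bits */
        return (u64)err << 32 | ret;
}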
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f860bfda454a..e16351819fed 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -585,7 +585,8 @@ ENDPROC(el0_irq)
*
*/
ENTRY(cpu_switch_to)
- add x8, x0, #THREAD_CPU_CONTEXT
+ mov x10, #THREAD_CPU_CONTEXT
+ add x8, x0, x10
mov x9, sp
stp x19, x20, [x8], #16 // store callee-saved registers
stp x21, x22, [x8], #16
@@ -594,7 +595,7 @@ ENTRY(cpu_switch_to)
stp x27, x28, [x8], #16
stp x29, x9, [x8], #16
str lr, [x8]
- add x8, x1, #THREAD_CPU_CONTEXT
+ add x8, x1, x10
ldp x19, x20, [x8], #16 // restore callee-saved registers
ldp x21, x22, [x8], #16
ldp x23, x24, [x8], #16
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 240b75c0e94f..463fa2e7e34c 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
static bool migrate_one_irq(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
- const struct cpumask *affinity = d->affinity;
+ const struct cpumask *affinity = irq_data_get_affinity_mask(d);
struct irq_chip *c;
bool ret = false;
@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
- cpumask_copy(d->affinity, affinity);
+ cpumask_copy(irq_data_get_affinity_mask(d), affinity);
return ret;
}
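The irq.c hunk swaps direct d->affinity dereferences for the irq_data_get_affinity_mask() accessor. A minimal sketch of the accessor pattern (irq_has_online_target is a hypothetical helper, not from the patch):

static bool irq_has_online_target(struct irq_data *d)
{
        /* read the affinity mask through the accessor, not d->affinity */
        const struct cpumask *affinity = irq_data_get_affinity_mask(d);

        return cpumask_any_and(affinity, cpu_online_mask) < nr_cpu_ids;
}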
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index d0f771be9e96..a124c55733db 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -18,6 +18,7 @@
#include <mach/pm.h>
+static bool disable_cpu_idle_poll;
static cycle_t read_cycle_count(struct clocksource *cs)
{
@@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta,
return 0;
}
-static void comparator_mode(enum clock_event_mode mode,
- struct clock_event_device *evdev)
+static int comparator_shutdown(struct clock_event_device *evdev)
{
- switch (mode) {
- case CLOCK_EVT_MODE_ONESHOT:
- pr_debug("%s: start\n", evdev->name);
- /* FALLTHROUGH */
- case CLOCK_EVT_MODE_RESUME:
+ pr_debug("%s: %s\n", __func__, evdev->name);
+ sysreg_write(COMPARE, 0);
+
+ if (disable_cpu_idle_poll) {
+ disable_cpu_idle_poll = false;
/*
- * If we're using the COUNT and COMPARE registers we
- * need to force idle poll.
+ * Only disable idle poll if we have forced that
+ * in a previous call.
*/
- cpu_idle_poll_ctrl(true);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- sysreg_write(COMPARE, 0);
- pr_debug("%s: stop\n", evdev->name);
- if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
- evdev->mode == CLOCK_EVT_MODE_RESUME) {
- /*
- * Only disable idle poll if we have forced that
- * in a previous call.
- */
- cpu_idle_poll_ctrl(false);
- }
- break;
- default:
- BUG();
+ cpu_idle_poll_ctrl(false);
}
+ return 0;
+}
+
+static int comparator_set_oneshot(struct clock_event_device *evdev)
+{
+ pr_debug("%s: %s\n", __func__, evdev->name);
+
+ disable_cpu_idle_poll = true;
+ /*
+ * If we're using the COUNT and COMPARE registers we
+ * need to force idle poll.
+ */
+ cpu_idle_poll_ctrl(true);
+
+ return 0;
}
static struct clock_event_device comparator = {
- .name = "avr32_comparator",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .shift = 16,
- .rating = 50,
- .set_next_event = comparator_next_event,
- .set_mode = comparator_mode,
+ .name = "avr32_comparator",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 16,
+ .rating = 50,
+ .set_next_event = comparator_next_event,
+ .set_state_shutdown = comparator_shutdown,
+ .set_state_oneshot = comparator_set_oneshot,
+ .tick_resume = comparator_set_oneshot,
};
void read_persistent_clock(struct timespec *ts)
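The avr32 conversion above replaces the single set_mode callback with per-state clockevents callbacks that return int. A hedged sketch of the same registration pattern for a hypothetical driver (all foo_* names are placeholders, not from the patch):

static int foo_timer_shutdown(struct clock_event_device *evdev)
{
        /* stop the hardware timer */
        return 0;
}

static int foo_timer_set_oneshot(struct clock_event_device *evdev)
{
        /* put the hardware timer into one-shot mode */
        return 0;
}

static int foo_timer_next_event(unsigned long delta,
                                struct clock_event_device *evdev)
{
        /* program the timer to expire after 'delta' cycles */
        return 0;
}

static struct clock_event_device foo_timer_clkevt = {
        .name               = "foo_timer",
        .features           = CLOCK_EVT_FEAT_ONESHOT,
        .set_next_event     = foo_timer_next_event,
        .set_state_shutdown = foo_timer_shutdown,
        .set_state_oneshot  = foo_timer_set_oneshot,
        .tick_resume        = foo_timer_set_oneshot,
};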
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d09011..a2da259d9327 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,15 +23,15 @@
int main(void)
{
- DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
- DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
- DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
- BLANK();
+ DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
+ DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
BLANK();
- DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
- DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
- DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
+ DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
+ DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
+ DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
+ DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
+ DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
BLANK();
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
- DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
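With the reworked constants, assembly code reaches thread_struct fields in two hops (task pointer + __TASK_thread, then + __THREAD_xxx) instead of one task-relative offset. A minimal C sketch of the identity this relies on, written as a C11 static assertion and assuming the usual struct task_struct/thread_struct definitions from <linux/sched.h>:

/* Sketch: a thread_struct member offset composes from the two new constants. */
_Static_assert(offsetof(struct task_struct, thread.ksp) ==
               offsetof(struct task_struct, thread) +
               offsetof(struct thread_struct, ksp),
               "__TASK_thread + __THREAD_ksp must equal the old task-relative offset");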
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3238893c9d4f..84062e7a77da 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
*/
ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
- stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
- lg %r4,__THREAD_info(%r2) # get thread_info of prev
- lg %r5,__THREAD_info(%r3) # get thread_info of next
+ lgr %r1,%r2
+ aghi %r1,__TASK_thread # thread_struct of prev task
+ lg %r4,__TASK_thread_info(%r2) # get thread_info of prev
+ lg %r5,__TASK_thread_info(%r3) # get thread_info of next
+ stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
+ lgr %r1,%r3
+ aghi %r1,__TASK_thread # thread_struct of next task
lgr %r15,%r5
aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
+ lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
- lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
br %r14
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
LAST_BREAK %r14
lg %r15,__LC_KERNEL_STACK
lg %r14,__TI_task(%r12)
+ aghi %r14,__TASK_thread # pointer to thread_struct
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 2f
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f53455..7bea81d8a363 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
}
/* get vector interrupt code from fpc */
- asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+ asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
switch (vic) {
case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
location = get_trap_ip(regs);
- asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+ asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
/* Check for vector register enablement */
if (MACHINE_HAS_VX && !current->thread.vxrs &&
(current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
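On s390, the "Q" constraint restricts the operand to a memory reference with a base register and short displacement and no index register, which is what STFPC can encode; the broader "m" constraint may legally produce an address form the instruction cannot take. A minimal sketch of the pattern (read_fpc is a hypothetical helper name):

static inline unsigned int read_fpc(void)
{
        unsigned int fpc;

        /* store the FP control register into a "Q"-addressable slot */
        asm volatile("stfpc %0" : "=Q" (fpc));
        return fpc;
}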
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a4ae82eb82aa..cd54147cb365 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -354,7 +354,7 @@ struct kvm_xcrs {
struct kvm_sync_regs {
};
-#define KVM_QUIRK_LINT0_REENABLED (1 << 0)
-#define KVM_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e3..2a5ca97c263b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
for (i = 0; i < APIC_LVT_NUM; i++)
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
apic_update_lvtt(apic);
- if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED))
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
apic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index de1d2d8062e2..dc0a84a6f309 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}
+static u8 mtrr_disabled_type(void)
+{
+ /*
+ * Intel SDM 11.11.2.2: all MTRRs are disabled when
+ * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
+ * memory type is applied to all of physical memory.
+ */
+ return MTRR_TYPE_UNCACHABLE;
+}
+
/*
* Three terms are used in the following code:
* - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
/* output fields. */
int mem_type;
+ /* mtrr is completely disabled? */
+ bool mtrr_disabled;
/* [start, end) is not fully covered in MTRRs? */
bool partial_map;
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
static void mtrr_lookup_start(struct mtrr_iter *iter)
{
if (!mtrr_is_enabled(iter->mtrr_state)) {
- iter->partial_map = true;
+ iter->mtrr_disabled = true;
return;
}
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
iter->mtrr_state = mtrr_state;
iter->start = start;
iter->end = end;
+ iter->mtrr_disabled = false;
iter->partial_map = false;
iter->fixed = false;
iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
return MTRR_TYPE_WRBACK;
}
- /* It is not covered by MTRRs. */
- if (iter.partial_map) {
- /*
- * We just check one page, partially covered by MTRRs is
- * impossible.
- */
- WARN_ON(type != -1);
- type = mtrr_default_type(mtrr_state);
- }
+ if (iter.mtrr_disabled)
+ return mtrr_disabled_type();
+
+ /*
+ * We just check one page, partially covered by MTRRs is
+ * impossible.
+ */
+ WARN_ON(iter.partial_map);
+
+ /* not contained in any MTRRs. */
+ if (type == -1)
+ return mtrr_default_type(mtrr_state);
+
return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
return false;
}
+ if (iter.mtrr_disabled)
+ return true;
+
if (!iter.partial_map)
return true;
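After this change, the guest memory-type lookup distinguishes three cases instead of folding "MTRRs disabled" into "not covered". A minimal sketch of the resulting decision order (resolve_guest_type is a hypothetical helper, mirroring kvm_mtrr_get_guest_memory_type above):

/*
 * 1. MTRRs disabled entirely  -> UC (Intel SDM 11.11.2.2)
 * 2. page matched by no MTRR  -> IA32_MTRR_DEF_TYPE default type
 * 3. otherwise                -> type accumulated from the matching MTRRs
 */
static u8 resolve_guest_type(bool mtrr_disabled, int matched_type, u8 default_type)
{
        if (mtrr_disabled)
                return MTRR_TYPE_UNCACHABLE;
        if (matched_type == -1)
                return default_type;
        return matched_type;
}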
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index bbc678a66b18..8e0c0844c6b9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
* does not do it - this results in some delay at
* reboot
*/
- if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED))
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
mark_dirty(svm->vmcb, VMCB_CR);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5b4e9384717a..83b7b5cd75d5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
ipat = VMX_EPT_IPAT_BIT;
- cache = MTRR_TYPE_UNCACHABLE;
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+ cache = MTRR_TYPE_WRBACK;
+ else
+ cache = MTRR_TYPE_UNCACHABLE;
goto exit;
}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index edc8cdcd786b..0ca2f3e4803c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
return kvm_register_write(vcpu, reg, val);
}
+static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
+{
+ return !(kvm->arch.disabled_quirks & quirk);
+}
+
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
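kvm_check_has_quirk() reads as "is this quirk still active?": a bit set in kvm->arch.disabled_quirks (for example via the KVM_CAP_DISABLE_QUIRKS capability) turns the corresponding legacy behaviour off and makes the helper return false. A minimal usage sketch, modelled on the svm.c hunk above:

/* keep the legacy CD/NW clearing only while the quirk is still enabled */
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);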