author     Adam Buchbinder <adam.buchbinder@gmail.com>    2016-02-24 00:34:30 +0100
committer  Ingo Molnar <mingo@kernel.org>                 2016-02-24 08:44:58 +0100
commit     6a6256f9e0ebaabf7ded1fef8977a4352dbe7784
tree       3aa9ed17e0de8deb0ba6689d443962a56dd9bd12 /arch/x86/kvm
parent     x86/cpu: Convert printk(KERN_<LEVEL> ...) to pr_<level>(...)
x86: Fix misspellings in comments
Signed-off-by: Adam Buchbinder <adam.buchbinder@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: trivial@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kvm')
 arch/x86/kvm/mmu.c | 4 ++--
 arch/x86/kvm/vmx.c | 4 ++--
 arch/x86/kvm/x86.c | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 95a955de5964..e1bb320dd5b2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -478,7 +478,7 @@ static bool spte_is_locklessly_modifiable(u64 spte)
 static bool spte_has_volatile_bits(u64 spte)
 {
 	/*
-	 * Always atomicly update spte if it can be updated
+	 * Always atomically update spte if it can be updated
 	 * out of mmu-lock, it can ensure dirty bit is not lost,
 	 * also, it can help us to get a stable is_writable_pte()
 	 * to ensure tlb flush is not missed.
@@ -549,7 +549,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 
 	/*
 	 * For the spte updated out of mmu-lock is safe, since
-	 * we always atomicly update it, see the comments in
+	 * we always atomically update it, see the comments in
 	 * spte_has_volatile_bits().
 	 */
 	if (spte_is_locklessly_modifiable(old_spte) &&
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e2951b6edbbc..34208bf57c35 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5475,7 +5475,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
 	return kvm_set_cr4(vcpu, val);
 }
 
-/* called to set cr0 as approriate for clts instruction exit. */
+/* called to set cr0 as appropriate for clts instruction exit. */
 static void handle_clts(struct kvm_vcpu *vcpu)
 {
 	if (is_guest_mode(vcpu)) {
@@ -7223,7 +7223,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	/* The value to write might be 32 or 64 bits, depending on L1's long
 	 * mode, and eventually we need to write that into a field of several
 	 * possible lengths. The code below first zero-extends the value to 64
-	 * bit (field_value), and then copies only the approriate number of
+	 * bit (field_value), and then copies only the appropriate number of
 	 * bits into the vmcs12 field.
 	 */
 	u64 field_value = 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4244c2baf57d..429c3f5fc618 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1562,7 +1562,7 @@ static cycle_t read_tsc(void)
 
 	/*
 	 * GCC likes to generate cmov here, but this branch is extremely
-	 * predictable (it's just a funciton of time and the likely is
+	 * predictable (it's just a function of time and the likely is
 	 * very likely) and there's a data dependence, so force GCC
 	 * to generate a branch instead. I don't barrier() because
 	 * we don't actually need a barrier, and if this function
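The mmu.c comment being fixed describes why SPTEs that can change outside mmu-lock must be updated atomically: a plain read-modify-write could lose a dirty bit that hardware sets between the read and the write. A minimal sketch of the idea in portable C11 (the bit position and names here are illustrative, not KVM's actual SPTE layout or helpers):

#include <stdatomic.h>
#include <stdint.h>

#define SPTE_DIRTY (1ull << 9)	/* illustrative bit position only */

/*
 * atomic_exchange returns the value that was actually replaced, so a
 * dirty bit set by hardware after our last read is still visible in
 * the returned old value instead of being silently overwritten.
 */
static uint64_t update_spte_atomic(_Atomic uint64_t *sptep, uint64_t new_spte)
{
	uint64_t old_spte = atomic_exchange(sptep, new_spte);

	if (old_spte & SPTE_DIRTY) {
		/* propagate the dirty state before it is lost */
	}
	return old_spte;
}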
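The handle_vmwrite() comment explains a width-handling trick: the guest-supplied value is always zero-extended into a 64-bit field_value, and the store then writes only as many low-order bits as the destination vmcs12 field holds. A hypothetical sketch of such a truncating store (the function and its width parameter are invented for illustration):

#include <stdint.h>
#include <string.h>

/*
 * Copy only the low 'width' bytes of the zero-extended value into the
 * destination field. Copying the first bytes of a u64 yields the
 * low-order bits on a little-endian host such as x86.
 */
static void field_store(void *field, size_t width, uint64_t field_value)
{
	memcpy(field, &field_value, width);	/* width is 2, 4, or 8 */
}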
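The read_tsc() comment concerns code generation: a cmov would make the result data-dependent on the rarely-needed fallback value, whereas an almost-always-taken branch is predicted essentially for free. A simplified illustration of how an empty asm statement can keep GCC from if-converting a branch into a cmov (a sketch of the pattern, not the kernel's actual read_tsc()):

#include <stdint.h>

static uint64_t clamp_to_last(uint64_t now, uint64_t last)
{
	if (__builtin_expect(now >= last, 1))
		return now;

	/*
	 * The empty asm has no runtime effect, but GCC will not
	 * if-convert across it, so the well-predicted branch above
	 * stays a real branch rather than becoming a cmov.
	 */
	asm volatile ("");
	return last;
}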