/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
*
* Generic TLB shootdown code
*
* Copyright 2001 Red Hat, Inc.
* Based on code from mm/memory.c Copyright Linus Torvalds and others.
*
* Copyright 2011 Red Hat, Inc., Peter Zijlstra
*/
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
/*
* Blindly accessing user memory from NMI context can be dangerous
* if we're in the middle of switching the current user task or switching
* the loaded mm.
*/
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif
#ifdef CONFIG_MMU
/*
* Generic MMU-gather implementation.
*
* The mmu_gather data structure is used by the mm code to implement the
* correct and efficient ordering of freeing pages and TLB invalidations.
*
* This correct ordering is:
*
* 1) unhook page
* 2) TLB invalidate page
* 3) free page
*
* That is, we must never free a page before we have ensured there are no live
* translations left to it. Otherwise it might be possible to observe (or
* worse, change) the page content after it has been reused.
*
* The mmu_gather API (a rough usage sketch follows this comment) consists of:
*
* - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
*
* start and finish a mmu_gather
*
* Finish in particular will issue a (final) TLB invalidate and free
* all (remaining) queued pages.
*
* - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
*
* Defaults to flushing at tlb_end_vma() to reset the range; helps when
* there are large holes between the VMAs.
*
* - tlb_remove_table()
*
* tlb_remove_table() is the basic primitive to free page-table directories
* (__p*_free_tlb()). In it's most primitive form it is an alias for
* tlb_remove_page() below, for when page directories are pages and have no
* additional constraints.
*
* See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
*
* - tlb_remove_page() / __tlb_remove_page()
* - tlb_remove_page_size() / __tlb_remove_page_size()
*
* __tlb_remove_page_size() is the basic primitive that queues a page for
* freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
* boolean indicating if the queue is (now) full and a call to
* tlb_flush_mmu() is required.
*
* tlb_remove_page() and tlb_remove_page_size() imply the call to
* tlb_flush_mmu() when required and have no return value.
*
* - tlb_change_page_size()
*
* call before __tlb_remove_page*() to set the current page-size; implies a
* possible tlb_flush_mmu() call.
*
* - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
*
* tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
* related state, like the range)
*
* tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
* whatever pages are still batched.
*
* - mmu_gather::fullmm
*
* A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
* the entire mm; this allows a number of optimizations.
*
* - We can ignore tlb_{start,end}_vma(); because we don't
* care about ranges. Everything will be shot down.
*
* - (RISC) architectures that use ASIDs can cycle to a new ASID
* and delay the invalidation until ASID space runs out.
*
* - mmu_gather::need_flush_all
*
* A flag that can be set by the arch code if it wants to force
* flush the entire TLB irrespective of the range. For instance
* x86-PAE needs this when changing top-level entries.
*
* Finally, the architecture may provide its own tlb_flush() implementation (an
* illustrative sketch appears further down, after tlb_get_unmap_size()):
*
* tlb_flush() may, in addition to the above-mentioned mmu_gather fields, make
* use of:
*
* - mmu_gather::start / mmu_gather::end
*
* which provides the range that needs to be flushed to cover the pages to
* be freed.
*
* - mmu_gather::freed_tables
*
* set when we freed page table pages
*
* - tlb_get_unmap_shift() / tlb_get_unmap_size()
*
* returns the smallest TLB entry size unmapped in this range.
*
* If an architecture does not provide tlb_flush() a default implementation
* based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
* specified, in which case we'll default to flush_tlb_mm().
*
* Additionally there are a few opt-in features:
*
* MMU_GATHER_PAGE_SIZE
*
* This ensures we call tlb_flush() every time tlb_change_page_size() actually
* changes the size and provides mmu_gather::page_size to tlb_flush().
*
* This might be useful if your architecture has size specific TLB
* invalidation instructions.
*
* MMU_GATHER_TABLE_FREE
*
* This provides tlb_remove_table(), to be used instead of tlb_remove_page()
* for page directories (__p*_free_tlb()).
*
* Useful if your architecture has non-page page directories.
*
* When used, an architecture is expected to provide __tlb_remove_table()
* which does the actual freeing of these pages.
*
* MMU_GATHER_RCU_TABLE_FREE
*
* Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
* comment below).
*
* Useful if your architecture doesn't use IPIs for remote TLB invalidates
* and therefore doesn't naturally serialize with software page-table walkers.
*
* MMU_GATHER_NO_FLUSH_CACHE
*
* Indicates the architecture has flush_cache_range() but it need *NOT* be called
* before unmapping a VMA.
*
* NOTE: strictly speaking we shouldn't have this knob and instead rely on
* flush_cache_range() being a NOP, except Sparc64 seems to be
* different here.
*
* MMU_GATHER_MERGE_VMAS
*
* Indicates the architecture wants to merge ranges over VMAs; typical when
* multiple range invalidates are more expensive than a full invalidate.
*
* MMU_GATHER_NO_RANGE
*
* Use this if your architecture lacks an efficient flush_tlb_range(). This
* option implies MMU_GATHER_MERGE_VMAS above.
*
* MMU_GATHER_NO_GATHER
*
* If the option is set the mmu_gather will not track individual pages for
* delayed page free anymore. A platform that enables the option needs to
* provide its own implementation of the __tlb_remove_page_size() function to
* free pages.
*
* This is useful if your architecture already flushes TLB entries in the
* various ptep_get_and_clear() functions.
*/
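/*
 * As a rough illustration (not lifted from any particular caller), the unmap
 * path is expected to drive this API along the following lines, where mm,
 * vma, ptep, page and addr stand in for whatever the caller is tearing down:
 *
 *    struct mmu_gather tlb;
 *
 *    tlb_gather_mmu(&tlb, mm);
 *    tlb_start_vma(&tlb, vma);
 *    for each present pte in the range {
 *        tlb_remove_tlb_entry(&tlb, ptep, addr);
 *        tlb_remove_page(&tlb, page);
 *    }
 *    tlb_end_vma(&tlb, vma);
 *    tlb_finish_mmu(&tlb);
 *
 * tlb_finish_mmu() issues the final TLB invalidate and frees whatever pages
 * are still queued.
 */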
#ifdef CONFIG_MMU_GATHER_TABLE_FREE
struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
struct rcu_head rcu;
#endif
unsigned int nr;
void *tables[];
};
#define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
/*
* Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page-based
* page directories and we can use the normal page batching to free them.
*/
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
* This allows an architecture whose hardware does not walk the Linux page
* tables to skip the TLB invalidate when freeing page tables (see the
* illustrative example below).
*/
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif
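/*
 * Illustrative only: an architecture whose hardware never walks the Linux
 * page tables could, in its asm/tlb.h (before including this file), define
 *
 *    #define tlb_needs_table_invalidate()    (false)
 *
 * and thereby skip the invalidate when page tables are freed.
 */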
void tlb_remove_table_sync_one(void);
#else
#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif
static inline void tlb_remove_table_sync_one(void) { }
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
*/
#define MMU_GATHER_BUNDLE 8
struct mmu_gather_batch {
struct mmu_gather_batch *next;
unsigned int nr;
unsigned int max;
struct encoded_page *encoded_pages[];
};
#define MAX_GATHER_BATCH \
((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
/*
* Limit the maximum number of mmu_gather batches to reduce a risk of soft
* lockups for non-preemptible kernels on huge machines when a lot of memory
* is zapped during unmapping.
* 10K pages freed at once should be safe even without a preemption point.
*/
#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
struct encoded_page *page,
int page_size);
#endif
/*
* struct mmu_gather is an opaque type used by the mm code for passing around
* any data needed by arch-specific code for tlb_remove_page.
*/
struct mmu_gather {
struct mm_struct *mm;
#ifdef CONFIG_MMU_GATHER_TABLE_FREE
struct mmu_table_batch *batch;
#endif
unsigned long start;
unsigned long end;
/*
* we are in the middle of an operation to clear
* a full mm and can make some optimizations
*/
unsigned int fullmm : 1;
/*
* we have performed an operation which
* requires a complete flush of the tlb
*/
unsigned int need_flush_all : 1;
/*
* we have removed page directories
*/
unsigned int freed_tables : 1;
/*
* at which levels have we cleared entries?
*/
unsigned int cleared_ptes : 1;
unsigned int cleared_pmds : 1;
unsigned int cleared_puds : 1;
unsigned int cleared_p4ds : 1;
/*
* tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
*/
unsigned int vma_exec : 1;
unsigned int vma_huge : 1;
unsigned int vma_pfn : 1;
unsigned int batch_count;
#ifndef CONFIG_MMU_GATHER_NO_GATHER
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
unsigned int page_size;
#endif
#endif
};
void tlb_flush_mmu(struct mmu_gather *tlb);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address,
unsigned int range_size)
{
tlb->start = min(tlb->start, address);
tlb->end = max(tlb->end, address + range_size);
}
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
if (tlb->fullmm) {
tlb->start = tlb->end = ~0;
} else {
tlb->start = TASK_SIZE;
tlb->end = 0;
}
tlb->freed_tables = 0;
tlb->cleared_ptes = 0;
tlb->cleared_pmds = 0;
tlb->cleared_puds = 0;
tlb->cleared_p4ds = 0;
/*
* Do not reset mmu_gather::vma_* fields here, we do not
* call into tlb_start_vma() again to set them if there is an
* intermediate flush.
*/
}
#ifdef CONFIG_MMU_GATHER_NO_RANGE
#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif
/*
* When an architecture does not have an efficient way to flush a range of TLB
* entries there is no point in doing intermediate flushes on tlb_end_vma() to
* keep the range small. We equally don't have to worry about page granularity
* or other things.
*
* All we need to do is issue a full flush for any !0 range.
*/
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->end)
flush_tlb_mm(tlb->mm);
}
#else /* CONFIG_MMU_GATHER_NO_RANGE */
#ifndef tlb_flush
/*
* When an architecture does not provide its own tlb_flush() implementation
* but does have a reasonably efficient flush_tlb_range() implementation,
* use that.
*/
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->fullmm || tlb->need_flush_all) {
flush_tlb_mm(tlb->mm);
} else if (tlb->end) {
struct vm_area_struct vma = {
.vm_mm = tlb->mm,
.vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
(tlb->vma_huge ? VM_HUGETLB : 0),
};
flush_tlb_range(&vma, tlb->start, tlb->end);
}
}
#endif
#endif /* CONFIG_MMU_GATHER_NO_RANGE */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
/*
* flush_tlb_range() implementations that look at VM_HUGETLB (tile,
* mips-4k) flush only large pages.
*
* flush_tlb_range() implementations that flush I-TLB also flush D-TLB
* (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
* range.
*
* We rely on tlb_end_vma() to issue a flush, such that when we reset
* these values the batch is empty.
*/
tlb->vma_huge = is_vm_hugetlb_page(vma);
tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
/*
* Anything calling __tlb_adjust_range() also sets at least one of
* these bits.
*/
if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
tlb->cleared_puds || tlb->cleared_p4ds))
return;
tlb_flush(tlb);
mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
__tlb_reset_range(tlb);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
tlb_flush_mmu(tlb);
}
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
return __tlb_remove_page_size(tlb, encode_page(page, 0), PAGE_SIZE);
}
/* tlb_remove_page
* Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
* required.
*/
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
if (tlb->page_size && tlb->page_size != page_size) {
if (!tlb->fullmm && !tlb->need_flush_all)
tlb_flush_mmu(tlb);
}
tlb->page_size = page_size;
#endif
}
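/*
 * Callers tearing down huge mappings are expected to announce the size first;
 * e.g. the THP zap path calls tlb_change_page_size(tlb, HPAGE_PMD_SIZE) before
 * queueing the huge page, so a MMU_GATHER_PAGE_SIZE architecture gets an
 * intermediate flush whenever the batched page size actually changes.
 */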
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
if (tlb->cleared_ptes)
return PAGE_SHIFT;
if (tlb->cleared_pmds)
return PMD_SHIFT;
if (tlb->cleared_puds)
return PUD_SHIFT;
if (tlb->cleared_p4ds)
return P4D_SHIFT;
return PAGE_SHIFT;
}
static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
return 1UL << tlb_get_unmap_shift(tlb);
}
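/*
 * Purely illustrative sketch, not an existing hook: an architecture that
 * overrides tlb_flush() (in its asm/tlb.h, before this file is included)
 * would typically consume the state above along these lines, using the
 * unmap granule as the invalidation stride; my_arch_flush_range() is a
 * hypothetical per-arch helper:
 *
 *    static inline void tlb_flush(struct mmu_gather *tlb)
 *    {
 *        if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
 *            flush_tlb_mm(tlb->mm);
 *        else if (tlb->end)
 *            my_arch_flush_range(tlb->mm, tlb->start, tlb->end,
 *                                tlb_get_unmap_size(tlb));
 *    }
 *    #define tlb_flush tlb_flush
 */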
/*
* For TLB VMA handling, we can optimise these away when doing a full MM
* flush. When we're doing a munmap, the VMAs are adjusted to only cover
* the region to be torn down.
*/
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
/*
* VM_PFNMAP is more fragile because the core mm will not track the
* page mapcount -- there might not be page-frames for these PFNs after
* all. Force flush TLBs for such ranges to avoid munmap() vs
* unmap_mapping_range() races.
*/
if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
/*
* Do a TLB flush and reset the range at VMA boundaries; this avoids
* the ranges growing with the unused space between consecutive VMAs.
*/
tlb_flush_mmu_tlbonly(tlb);
}
}
/*
* tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
* and set corresponding cleared_*.
*/
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
unsigned long address, unsigned long size)
{
__tlb_adjust_range(tlb, address, size);
tlb->cleared_ptes = 1;
}
static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
unsigned long address, unsigned long size)
{
__tlb_adjust_range(tlb, address, size);
tlb->cleared_pmds = 1;
}
static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
unsigned long address, unsigned long size)
{
__tlb_adjust_range(tlb, address, size);
tlb->cleared_puds = 1;
}
static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
unsigned long address, unsigned long size)
{
__tlb_adjust_range(tlb, address, size);
tlb->cleared_p4ds = 1;
}
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif
/**
* tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
*
* Record the fact that PTEs were really unmapped by updating the range,
* so we can later optimise away the tlb invalidate. This helps when
* userspace is unmapping already-unmapped pages, which happens quite a lot.
*/
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
do { \
unsigned long _sz = huge_page_size(h); \
if (_sz >= P4D_SIZE) \
tlb_flush_p4d_range(tlb, address, _sz); \
else if (_sz >= PUD_SIZE) \
tlb_flush_pud_range(tlb, address, _sz); \
else if (_sz >= PMD_SIZE) \
tlb_flush_pmd_range(tlb, address, _sz); \
else \
tlb_flush_pte_range(tlb, address, _sz); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
/**
* tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
* This is a nop so far, because only x86 needs it.
*/
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
/**
* tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
* invalidation. This is a nop so far, because only x86 needs it.
*/
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
do { \
tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
} while (0)
/*
* For things like page table caches (i.e. caching addresses "inside" the
* page tables, like x86 does), for legacy reasons, flushing an
* individual page had better flush the page table caches behind it. This
* is definitely how x86 works, for example. And if you have an
* architected non-legacy page table cache (which I'm not aware of
* anybody actually doing), you're going to have some architecturally
* explicit flushing for that, likely *separate* from a regular TLB entry
* flush, and thus you'd need more than just some range expansion..
*
* So if we ever find an architecture
* that would want something that odd, I think it is up to that
* architecture to do its own odd thing, not cause pain for others
* http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
*
* For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE
*/
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address) \
do { \
tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#endif
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
#endif
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
do { \
tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
__p4d_free_tlb(tlb, pudp, address); \
} while (0)
#endif
#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
return true;
}
#endif
#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
return true;
}
#endif
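/*
 * Architectures that can tell harmless PTE/PMD changes apart (changes that
 * cannot leave a problematic stale translation behind) may override these in
 * their asm/tlb.h so that such updates skip the flush entirely. A purely
 * hypothetical sketch; my_arch_flags_need_flush() is an assumed helper, not
 * an existing interface:
 *
 *    #define pte_needs_flush pte_needs_flush
 *    static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
 *    {
 *        return my_arch_flags_need_flush(pte_val(oldpte),
 *                                        pte_val(newpte));
 *    }
 *
 * x86 provides real implementations of both hooks along these lines.
 */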
#endif /* CONFIG_MMU */
#endif /* _ASM_GENERIC__TLB_H */