/*
 * my_tlb.h - private re-declarations of kernel TLB-shootdown internals
 * (struct mmu_gather and friends) plus function-pointer hooks for
 * unexported kernel symbols, resolved at module load time.
 */
#ifndef _MY_TLB_H
#define _MY_TLB_H
#include <linux/version.h>
/*
 * Function-pointer hooks for unexported kernel symbols.  All pointers
 * start out NULL and are presumably assigned at module init (e.g. via
 * kallsyms lookup) before any helper in this header runs -- verify
 * against the module's init code.
 *
 * NOTE(review): the non-static globals below are *defined* in a header;
 * including this header from more than one translation unit would cause
 * multiple-definition link errors -- confirm it is included only once.
 */
/* Signature of the kernel's unmap_page_range() (form taking zap_work). */
typedef unsigned long (* unmap_page_range_t)(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
long *zap_work, struct zap_details *details);
static unmap_page_range_t unmap_page_range = NULL;
/* Kernel free_pages_and_swap_cache(): release a batch of gathered pages. */
typedef void (* free_pages_and_swap_cache_t )(struct page **, int);
free_pages_and_swap_cache_t kern_free_pages_and_swap_cachep = NULL;
/* Kernel flush_tlb_mm(): flush all TLB entries for one mm. */
typedef void (* flush_tlb_mm_t) (struct mm_struct *mm);
flush_tlb_mm_t kern_flush_tlb_mm = NULL;
/* Kernel free_pgtables(): tear down page tables for a VMA range. */
typedef void (*free_pgtables_t)(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long floor, unsigned long ceiling);
free_pgtables_t kern_free_pgtables = NULL;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE 8
/* One batch of pages queued for deferred freeing, chained via ->next.
 * NOTE(review): this must byte-match the kernel's own
 * struct mmu_gather_batch for the target kernel version -- re-verify
 * whenever a new kernel is supported.
 */
struct mmu_gather_batch {
struct mmu_gather_batch *next;
unsigned int nr;
unsigned int max;
struct page *pages[0];
};
/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 * NOTE(review): layout mirrors the >= 3.2 kernel definition and must stay
 * in sync with it; do not reorder or resize fields.
 */
struct mmu_gather {
struct mm_struct *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
struct mmu_table_batch *batch;
#endif
unsigned int need_flush : 1, /* Did free PTEs */
fast_mode : 1; /* No batching */
unsigned int fullmm;
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
};
/* On >= 3.2 the kernel's own tlb_* entry points are used through these
 * hooks instead of the local reimplementations in the #else branch below.
 * Presumably resolved at module init alongside the other hooks. */
typedef void (*tlb_gather_mmu_t)(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
tlb_gather_mmu_t my_tlb_gather_mmu = NULL;
typedef void (*tlb_flush_mmu_t)(struct mmu_gather *tlb);
tlb_flush_mmu_t my_tlb_flush_mmu = NULL;
typedef void (*tlb_finish_mmu_t)(struct mmu_gather *tlb, unsigned long start, unsigned long end);
tlb_finish_mmu_t my_tlb_finish_mmu = NULL;
#else
#ifdef CONFIG_X86
#ifdef CONFIG_SMP
/*
 * FREE_PTE_NR is how many pages a gather can batch before the list
 * must be flushed.  An architecture may override the default via
 * ARCH_FREE_PTR_NR.
 *
 * Fix: the override branch previously defined FREE_PTR_NR (typo),
 * leaving FREE_PTE_NR undefined -- struct mmu_gather below would then
 * fail to compile whenever ARCH_FREE_PTR_NR was set.
 */
#ifdef ARCH_FREE_PTR_NR
#define FREE_PTE_NR ARCH_FREE_PTR_NR
#else
#define FREE_PTE_NR 506
#endif
/* Fast mode (no page batching) is encoded as nr == ~0U. */
#define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
/* UP: no remote TLBs to shoot down, so always run in fast mode. */
#define FREE_PTE_NR 1
#define tlb_fast_mode(tlb) 1
#endif
/* Flush every TLB entry belonging to the gathered mm. */
#define tlb_flush(tlb) kern_flush_tlb_mm((tlb)->mm)
/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 * NOTE(review): layout mirrors the pre-3.2 kernel definition and must
 * stay in sync with the running kernel; do not reorder or resize fields.
 */
struct mmu_gather {
	struct mm_struct *mm;
	unsigned int nr;	/* set to ~0U means fast mode */
	unsigned int need_flush;/* Really unmapped some ptes? */
	unsigned int fullmm;	/* non-zero means full mm flush */
	struct page *pages[FREE_PTE_NR];
};
#else
#error Need mmu_gather def
#endif
/* Pointer to the kernel's per-cpu mmu_gathers array; presumably resolved
 * at module init (kallsyms) -- accessed via get_cpu_var()/put_cpu_var()
 * in the helpers below. */
struct mmu_gather *pmmu_gathers = NULL;
/* my_tlb_gather_mmu
 * Grab this CPU's per-cpu mmu_gather and prime it for a shootdown of
 * @mm.  Returns a pointer to the initialized structure; the caller must
 * balance with my_tlb_finish_mmu() to drop the per-cpu reference.
 */
static inline struct mmu_gather *
my_tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(*pmmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;
	/* nr == ~0U selects fast mode; only valid with a single CPU online */
	tlb->nr = (num_online_cpus() > 1) ? 0U : ~0U;

	return tlb;
}
/* my_tlb_flush_mmu
 * Perform the deferred TLB flush and release any batched pages.
 * Does nothing unless PTEs were actually unmapped (need_flush set).
 */
static inline void
my_tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->need_flush) {
		tlb->need_flush = 0;
		tlb_flush(tlb);
		/* In batching mode, hand the gathered pages back and reset. */
		if (!tlb_fast_mode(tlb)) {
			kern_free_pages_and_swap_cachep(tlb->pages, tlb->nr);
			tlb->nr = 0;
		}
	}
}
/* tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required.
* Flushes any pending work, trims the page-table cache, and drops the
* per-cpu reference taken by my_tlb_gather_mmu(); the tlb pointer must
* not be used after this returns.
*/
static inline void
my_tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
/* Complete the deferred flush / page release first. */
my_tlb_flush_mmu(tlb, start, end);
/* keep the page table cache within bounds */
check_pgt_cache();
/* Pairs with get_cpu_var() in my_tlb_gather_mmu(); re-enables preemption. */
put_cpu_var(*pmmu_gathers);
}
#endif // Pre 3.2 kernel
#endif