/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

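/*
 * With no MMU there are no TLB entries to invalidate, so tlb_flush()
 * only needs to consume its argument.
 */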
#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

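/*
 * Number of page pointers we can gather in the on-stack bundle
 * (tlb->local) before __tlb_alloc_page() has provided a full page
 * of pointers.
 */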
#define MMU_GATHER_BUNDLE	8

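/*
 * With CONFIG_HAVE_RCU_TABLE_FREE, page table pages are queued in an
 * mmu_table_batch and handed to tlb_remove_table(), which defers the
 * actual free via RCU; otherwise they are freed like ordinary pages.
 */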
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
#else
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
	unsigned int		need_flush;
#endif
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		start, end;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

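/* Grow the pending flush range so that it also covers the page at @addr. */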
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}

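/*
 * Try to upgrade from the on-stack bundle to a full page worth of page
 * pointers.  If the allocation fails, tlb->pages keeps pointing at
 * tlb->local and batches simply stay small.
 */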
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

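/* Flush the gathered TLB range (and any pending table batch) without freeing pages. */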
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
}

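/*
 * Free the batched pages together with their swap cache entries, then
 * retry the full-page allocation if we are still using the on-stack
 * bundle.
 */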
static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

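/*
 * Start a gather operation.  A full-mm teardown is recognised by
 * start == 0 and end == ~0UL, i.e. !(start | (end + 1)).
 */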
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}

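/*
 * Finish a gather operation: flush whatever is outstanding (covering the
 * whole start..end range if @force is set), trim the page table cache and
 * release the page pointer array if one was allocated.
 */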
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
			unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->range_start = start;
		tlb->range_end = end;
	}

	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

/*
 * For tlb vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

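/* Flush the range gathered for this vma before moving on to the next one. */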
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

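/*
 * Queue a page for freeing.  Returns true once the batch is full, in
 * which case the caller must flush before queueing any more pages.
 */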
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

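/*
 * ARM does not batch by page size; the *_page_size() variants simply
 * forward to the plain helpers above.
 */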
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_entry(tlb, pte);
}

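/*
 * Only LPAE has a separate pmd level to free; with the classic 2-level
 * page tables the pmd is folded into the pgd, so there is nothing to do.
 */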
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
#endif
}

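/*
 * As with single ptes, clearing a pmd-level entry only needs the address
 * recorded so that the final flush covers it.
 */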
static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

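/*
 * The gathered range is tracked purely by address, so a change of page
 * size needs no special handling here.
 */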
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#endif /* CONFIG_MMU */
#endif