// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

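/* Flush every address queued in this CPU's tlb_batch: drop the
 * matching TSB entries first, then issue one TLB flush covering the
 * whole batch (a cross-call on SMP, a local flush otherwise).
 */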
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

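/* Lazy MMU mode brackets a region in which tlb_batch_add_one() may
 * defer flushes into the per-cpu batch instead of issuing them
 * immediately; leaving the mode drains anything still pending.
 */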
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

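/* Queue a single (mm, vaddr) pair for a deferred TLB flush.  The low
 * bit of the stored address records whether the mapping was executable.
 * Switching mm or hugepage size flushes the existing batch first, and
 * outside lazy MMU mode the page is flushed immediately instead of
 * being queued.
 */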
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

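/* Handle a PTE change for (mm, vaddr) whose previous value was @orig.
 * On non-hypervisor chips a dirty page that may alias in the D-cache
 * is flushed first; the old translation is then queued for a TLB
 * flush unless the whole mm is being torn down (fullmm).
 */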
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
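/* Walk the base-page PTEs under @pmd and queue a TLB flush for every
 * valid entry in the HPAGE_SIZE region starting at @vaddr.
 */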
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}


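/* Keep the per-mm THP and huge-zero-page counters in sync with a PMD
 * update and, if the old PMD was present, queue TLB flushes for the
 * range it mapped.
 */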
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

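/* Install @pmd and fix up accounting/flushes against the old value. */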
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}

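/* Atomically swap in @pmd with cmpxchg64(), retrying until the update
 * wins, then perform the same accounting as set_pmd_at().  Returns the
 * old PMD value.
 */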
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t old, entry;

	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}

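/* Stash a preallocated page-table page against @pmdp so it can be
 * reused later (e.g. when the huge mapping is split).  Deposited pages
 * are kept on a list headed by pmd_huge_pte() under page_table_lock.
 */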
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

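/* Retrieve a previously deposited page table, unlink it from the list
 * and clear its first two entries before returning it.
 */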
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */