/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
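
/*
 * Example (illustrative): with IO_TLB_SHIFT == 11 each slab is 2 KB, so
 * SLABS_PER_PAGE is 2 with 4 KB pages and IO_TLB_MIN_SLABS works out to
 * 512 slabs, i.e. the 1 MB floor mentioned above.
 */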

int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static phys_addr_t io_tlb_start, io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

static phys_addr_t io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int late_alloc;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
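
/*
 * Example (illustrative): booting with "swiotlb=65536" asks for 65536
 * slabs; at the usual 2 KB per slab (IO_TLB_SHIFT == 11) that is a 128 MB
 * bounce pool, and "swiotlb=65536,force" additionally forces all DMA
 * mappings through it.
 */
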
/* make io_tlb_overflow tunable too? */

unsigned long swiotlb_nr_tbl(void)
{
	return io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
	unsigned long size;

	size = io_tlb_nslabs << IO_TLB_SHIFT;

	return size ? size : (IO_TLB_DEFAULT_SIZE);
}
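
/*
 * Example (illustrative): the 64 MB default corresponds to 32768 slabs of
 * 2 KB each; booting with "swiotlb=131072" would make this function report
 * 256 MB instead.
 */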

/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}

static bool no_iotlb_memory;

void swiotlb_print_info(void)
{
	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	unsigned char *vstart, *vend;

	if (no_iotlb_memory) {
		pr_warn("software IO TLB: No low mem\n");
		return;
	}

	vstart = phys_to_virt(io_tlb_start);
	vend = phys_to_virt(io_tlb_end);

	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
	       (unsigned long long)io_tlb_start,
	       (unsigned long long)io_tlb_end,
	       bytes >> 20, vstart, vend - 1);
}

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	void *v_overflow_buffer;
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = __pa(tlb);
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = alloc_bootmem_low_pages_nopanic(
						PAGE_ALIGN(io_tlb_overflow));
	if (!v_overflow_buffer)
		return -ENOMEM;

	io_tlb_overflow_buffer = __pa(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));

	if (verbose)
		swiotlb_print_info();

	return 0;
}
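
/*
 * Example (illustrative): with the usual IO_TLB_SEGSIZE of 128, the
 * initialization loop above seeds io_tlb_list with the repeating pattern
 * 128, 127, ..., 2, 1, so io_tlb_list[i] counts the free slabs from i up
 * to the next segment boundary.
 */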

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
	size_t default_size = IO_TLB_DEFAULT_SIZE;
	unsigned char *vstart;
	unsigned long bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/* Get IO TLB memory from the low pages */
	vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
		return;

	if (io_tlb_start)
		free_bootmem(io_tlb_start,
				 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	pr_warn("Cannot allocate SWIOTLB buffer");
	no_iotlb_memory = true;
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long bytes, req_nslabs = io_tlb_nslabs;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart) {
		io_tlb_nslabs = req_nslabs;
		return -ENOMEM;
	}
	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);
	return rc;
}
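
/*
 * Example (illustrative, assuming 4 KB pages): a 64 MB request is an
 * order-14 allocation; if only an order-12 (16 MB) block is free, the
 * loop above retries with decreasing order until it succeeds or drops
 * below IO_TLB_MIN_SLABS, and the warning reports the smaller pool.
 */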

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	unsigned long i, bytes;
	unsigned char *v_overflow_buffer;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = virt_to_phys(tlb);
	io_tlb_end = io_tlb_start + bytes;

	memset(tlb, 0, bytes);

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
						     get_order(io_tlb_overflow));
	if (!v_overflow_buffer)
		goto cleanup2;

	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup3;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup4;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	swiotlb_print_info();

	late_alloc = 1;

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup3:
	free_pages((unsigned long)v_overflow_buffer,
		   get_order(io_tlb_overflow));
	io_tlb_overflow_buffer = 0;
cleanup2:
	io_tlb_end = 0;
	io_tlb_start = 0;
	io_tlb_nslabs = 0;
	return -ENOMEM;
}

void __init swiotlb_free(void)
{
	if (!io_tlb_orig_addr)
		return;

	if (late_alloc) {
		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
			   get_order(io_tlb_overflow));
		free_pages((unsigned long)io_tlb_orig_addr,
			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
								 sizeof(int)));
		free_pages((unsigned long)phys_to_virt(io_tlb_start),
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		free_bootmem_late(io_tlb_overflow_buffer,
				  PAGE_ALIGN(io_tlb_overflow));
		free_bootmem_late(__pa(io_tlb_orig_addr),
				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_bootmem_late(__pa(io_tlb_list),
				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
		free_bootmem_late(io_tlb_start,
				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	}
	io_tlb_nslabs = 0;
}

static int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
			   size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}
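
/*
 * Example (illustrative, assuming 4 KB pages): bouncing 5000 bytes whose
 * original buffer starts at offset 0x800 into a highmem page copies
 * PAGE_SIZE - 0x800 = 2048 bytes from the first page, then continues from
 * offset 0 of the following pages, mapping each one with kmap_atomic()
 * because highmem pages have no permanent kernel mapping.
 */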

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
				   dma_addr_t tbl_dma_addr,
				   phys_addr_t orig_addr, size_t size,
				   enum dma_data_direction dir)
{
	unsigned long flags;
	phys_addr_t tlb_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	if (no_iotlb_memory)
		panic("Cannot allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	mask = dma_get_seg_boundary(hwdev);

	tbl_dma_addr &= mask;

	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find a suitable number of IO TLB entries that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
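	/*
	 * io_tlb_list[i] holds the number of contiguous free slabs starting
	 * at slab i (never counting past an IO_TLB_SEGSIZE boundary), so a
	 * candidate window can be tested with a single comparison below.
	 */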
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return SWIOTLB_MAP_ERROR;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);

	return tlb_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);

/*
 * Allocates bounce buffer and returns its physical address.
 */
phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
		       enum dma_data_direction dir)
{
	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);

	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
			      size_t size, enum dma_data_direction dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
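
/*
 * Example (illustrative): freeing 3 slabs at index 5 while io_tlb_list[8]
 * is 4 and slab 4 is still allocated leaves io_tlb_list[5..7] = 7, 6, 5,
 * i.e. the freed run is merged with the 4 free slabs that follow it.
 */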

void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
			     size_t size, enum dma_data_direction dir,
			     enum dma_sync_target target)
{
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret) {
		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
		if (dev_addr + size - 1 > dma_mask) {
			/*
			 * The allocated memory isn't reachable by the device.
			 */
			free_pages((unsigned long) ret, order);
			ret = NULL;
		}
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA to
		 * GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (paddr == SWIOTLB_MAP_ERROR)
			return NULL;

		ret = phys_to_virt(paddr);
		dev_addr = phys_to_dma(hwdev, paddr);

		/* Confirm address can be DMA'd by device */
		if (dev_addr + size - 1 > dma_mask) {
			printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
			       (unsigned long long)dma_mask,
			       (unsigned long long)dev_addr);

			/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
			swiotlb_tbl_unmap_single(hwdev, paddr,
						 size, DMA_TO_DEVICE);
			return NULL;
		}
	}

	*dma_handle = dev_addr;
	memset(ret, 0, size);

	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);
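
/*
 * Example (illustrative; names other than the swiotlb_* calls are the
 * caller's own):
 *
 *	dma_addr_t dma_handle;
 *	void *buf = swiotlb_alloc_coherent(dev, 4096, &dma_handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	swiotlb_free_coherent(dev, 4096, buf, dma_handle);
 */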

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
	     int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or one of the swiotlb_sync_single_* calls is
 * performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	/* Oh well, have to allocate and map a bounce buffer. */
	map = map_single(dev, phys, size, dir);
	if (map == SWIOTLB_MAP_ERROR) {
		swiotlb_full(dev, size, dir, 1);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
	}

	dev_addr = phys_to_dma(dev, map);

	/* Ensure that the address returned is DMA'ble */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
	}

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
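
/*
 * Example (illustrative; names other than the swiotlb_* calls are the
 * caller's own):
 *
 *	dma_addr_t dma = swiotlb_map_page(dev, page, 0, len,
 *					  DMA_TO_DEVICE, NULL);
 *	if (dma_mapping_error(dev, dma))
 *		return -EIO;
 *	... device DMAs from "dma" ...
 *	swiotlb_unmap_page(dev, dma, len, DMA_TO_DEVICE, NULL);
 */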

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
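
/*
 * Example (illustrative): a driver holding a long-lived DMA_FROM_DEVICE
 * mapping can inspect the data between transfers without remapping:
 *
 *	swiotlb_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... read the buffer on the CPU ...
 *	swiotlb_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */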

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			phys_addr_t map = map_single(hwdev, sg_phys(sg),
						     sg->length, dir);
			if (map == SWIOTLB_MAP_ERROR) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = phys_to_dma(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       enum dma_data_direction dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
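
/*
 * Example (illustrative): map_sg returns 0 rather than panicking when the
 * swiotlb pool is exhausted, so callers should check for a zero return:
 *
 *	int n = swiotlb_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 */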

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void