diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00600b43da38ba4fda7fa59aa61da7d60ab..feb988a7ec37dcf2336d7a908e5561b0cc3ee7d5 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
 
 #define kmap_prot		PAGE_KERNEL
 
-#define flush_cache_kmaps()	flush_cache_all()
+#define flush_cache_kmaps() \
+	do { \
+		if (cache_is_vivt()) \
+			flush_cache_all(); \
+	} while (0)
 
 extern pte_t *pkmap_page_table;
 
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
 
 #endif
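
Why the VIVT-only guard is safe: flush_cache_kmaps() is called by the generic
pkmap code just before it recycles unused kmap virtual slots.  On a VIVT cache,
lines are indexed and tagged by virtual address, so dirty lines belonging to
the old mappings must be written back before those addresses are reused.  A
VIPT cache is tagged by physical address, so recycled slots need no cache
maintenance here; any maintenance a page still needs is performed explicitly,
at the point of use, by the helpers this patch adds.  A paraphrased sketch of
the caller in mm/highmem.c (not part of this patch; details elided):

	static void flush_all_zero_pkmaps(void)
	{
		int i;

		flush_cache_kmaps();		/* now a no-op on VIPT ARM */

		for (i = 0; i < LAST_PKMAP; i++) {
			if (pkmap_count[i] != 1)	/* 1 == mapped, no users */
				continue;
			pkmap_count[i] = 0;
			pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
			/* ... clear page->virtual and flush the TLB range ... */
		}
	}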
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a5189dc725a937006eb8445c18d0ad2ef..c4b2ea3fbe4249c886fad25593553502e63f5796 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L1_CACHE,
 	KM_L2_CACHE,
 	KM_TYPE_NR
 };
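
Each km_type value reserves one fixmap slot per CPU, so the new KM_L1_CACHE
entry shifts KM_L2_CACHE and grows KM_TYPE_NR by one.  The slot-to-address
computation, exactly as used by kmap_high_l1_vipt() in the highmem.c hunk
below:

	unsigned int idx = KM_L1_CACHE + KM_TYPE_NR * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	pte_t *ptep = TOP_PTE(vaddr);	/* PTE backing that fixmap address */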
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa234bbcf0c343a70f87f751de3b66..f55fa1044f72b829d0c307683f9ab12d768a4893 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (page_address(to) != NULL)
-#endif
-		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
+	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
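
With the highmem.c change below, kunmap_atomic() no longer flushes the D-cache
on VIPT, so this copy path must write back the destination page itself in all
configurations, and the #ifdef CONFIG_HIGHMEM special case can go.  For
reference, the function as it reads with the hunk applied (reassembled from
the context lines above; the parameter list and local declarations are
assumed):

	static void v6_copy_user_highpage_nonaliasing(struct page *to,
		struct page *from, unsigned long vaddr)
	{
		void *kto, *kfrom;

		kfrom = kmap_atomic(from, KM_USER0);
		kto = kmap_atomic(to, KM_USER1);
		copy_page(kto, kfrom);
		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
		kunmap_atomic(kto, KM_USER1);
		kunmap_atomic(kfrom, KM_USER0);
	}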
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc0b26feba5330e3bf045b74702ffe88683..13fa536d82e695ff69b6e74f8e95853cf7199ede 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 				vaddr += offset;
 				op(vaddr, len, dir);
 				kunmap_high(page);
+			} else if (cache_is_vipt()) {
+				pte_t saved_pte;
+				vaddr = kmap_high_l1_vipt(page, &saved_pte);
+				op(vaddr + offset, len, dir);
+				kunmap_high_l1_vipt(page, saved_pte);
 			}
 		} else {
 			vaddr = page_address(page) + offset;
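
The PageHighMem() branch now tries three cases in order of preference (a
paraphrased sketch of the surrounding logic; only the else-if arm is new):

	vaddr = kmap_high_get(page);	/* reuse and pin an existing kmap, if any */
	if (vaddr) {
		op(vaddr + offset, len, dir);
		kunmap_high(page);
	} else if (cache_is_vipt()) {
		/* no kernel mapping exists: borrow the per-CPU KM_L1_CACHE slot */
		pte_t saved_pte;
		vaddr = kmap_high_l1_vipt(page, &saved_pte);
		op(vaddr + offset, len, dir);
		kunmap_high_l1_vipt(page, saved_pte);
	}
	/* else VIVT with no mapping: the page cannot be cached, nothing to do */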
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090517b8f968f4af6703e60dd371c8b..c6844cb9b508dde69c49af40bb0d2956b126b8d3 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-	void *addr = page_address(page);
-
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
 	 * coherent with the kernel's mapping.
 	 */
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (addr)
-#endif
-		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+	if (!PageHighMem(page)) {
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+	} else {
+		void *addr = kmap_high_get(page);
+		if (addr) {
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			addr = kmap_high_l1_vipt(page, &saved_pte);
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high_l1_vipt(page, saved_pte);
+		}
+	}
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
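
kmap_high_get() is the one kmap variant that is safe in atomic context: it
never blocks, returning NULL when the page has no pkmap mapping and otherwise
raising the mapping's refcount so the address stays valid until the paired
kunmap_high().  Hence the pattern used in this hunk and in dma-mapping.c:

	void *addr = kmap_high_get(page);	/* never sleeps; NULL if unmapped */
	if (addr) {
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);	/* mapping is pinned */
		kunmap_high(page);				/* drop the pin */
	}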
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41acea66987a3ef532e96020b71c5b..77b030f5ec09fa2dfbbcc562f345eb0ec04e3092 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+		if (cache_is_vivt())
+			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However, unmapped pages may still be cached with a VIPT cache, and
+ * unfortunately it is not possible to perform cache maintenance on them
+ * using their physical addresses.  We therefore have no choice but to
+ * set up a temporary virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context.  As we
+ * don't want to keep interrupts disabled for the whole duration of such
+ * maintenance, we allow for some reentrancy by preserving and restoring
+ * the previous fixmap entry before the interrupted context is resumed.
+ * If the reentrancy depth is 0 then there is no need to restore the
+ * previous fixmap, and leaving the current one in place allows it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	raw_local_irq_save(flags);
+	(*depth)++;
+	if (pte_val(*ptep) == pte_val(pte)) {
+		*saved_pte = pte;
+	} else {
+		*saved_pte = *ptep;
+		set_pte_ext(ptep, pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	BUG_ON(pte_val(*ptep) != pte_val(pte));
+	BUG_ON(*depth <= 0);
+
+	raw_local_irq_save(flags);
+	(*depth)--;
+	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+		set_pte_ext(ptep, saved_pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	if (!in_interrupt())
+		preempt_enable();
+}
+
+#endif  /* CONFIG_CPU_CACHE_VIPT */
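
Usage pattern for the new pair, as in the dma-mapping.c and flush.c hunks
above (sketch):

	pte_t saved_pte;
	void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

	__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);	/* or any cache op on vaddr */
	kunmap_high_l1_vipt(page, saved_pte);

The saved PTE is what makes the pair interrupt-safe: an interrupt handler
doing DMA maintenance can nest inside a task doing the same on the same CPU,
and the depth counter plus the saved-PTE restore puts the fixmap slot back
exactly as the interrupted context left it.  At depth 0 the restore is
deliberately skipped, leaving the mapping in place so that back-to-back
operations on the same page (common with DMA) avoid a TLB flush.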