diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 706e8a63ced93218334a227b62f5303595c27790..a33583f3b0e7d0e2c7f07b5ccc3bec039bf3f14e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -601,7 +601,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	/* Handle hugepage regions */
 	if (unlikely(in_hugepage_area(mm->context, ea))) {
 		DBG_LOW(" -> huge page !\n");
-		return hash_huge_page(mm, access, ea, vsid, local);
+		return hash_huge_page(mm, access, ea, vsid, local, trap);
 	}
 
 	/* Get PTE and page size from page tables */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8bce515dc320d085938b1bbfdaacd946c94ee0af..97512b89e7b027ecb30d52703f45dc24023a32ae 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -639,8 +639,41 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
+/*
+ * Called by hash_huge_page() for doing lazy icache flush
+ */
+static unsigned long hash_huge_page_do_lazy_icache(unsigned long rflags,
+						   pte_t pte, int trap)
+{
+	struct page *page;
+	int i;
+
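+	/* pfn has no struct page (e.g. I/O memory), so we can't flush it */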
+	if (!pfn_valid(pte_pfn(pte)))
+		return rflags;
+
+	page = pte_page(pte);
+
+	/* page is dirty, i.e. its icache contents may be stale */
+	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+		if (trap == 0x400) {
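+			/* Instruction fault: flush each base page of the
+			 * hugepage, then mark the whole page clean */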
+			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
+				__flush_dcache_icache(page_address(page+i));
+			set_bit(PG_arch_1, &page->flags);
+		} else {
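+			/* Data fault: map the page no-execute so a later
+			 * instruction fetch will fault and do the flush */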
+			rflags |= HPTE_R_N;
+		}
+	}
+	return rflags;
+}
+
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
-		   unsigned long ea, unsigned long vsid, int local)
+		   unsigned long ea, unsigned long vsid, int local,
+		   unsigned long trap)
 {
 	pte_t *ptep;
 	unsigned long old_pte, new_pte;
@@ -691,6 +724,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	rflags = 0x2 | (!(new_pte & _PAGE_RW));
  	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
 	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		/* No CPU supports hugepages but lacks no-execute, so
+		 * we don't need to worry about that case */
+		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
+						       trap);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(old_pte & _PAGE_HASHPTE)) {
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index c1b4bbabbe97f4816c26261ba9488c0acfac0ccb..29b0bb0086d3fc5f40e8848d82662c345d4a4a44 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -220,7 +220,8 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
 			   unsigned int local);
 struct mm_struct;
 extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
-			  unsigned long ea, unsigned long vsid, int local);
+			  unsigned long ea, unsigned long vsid, int local,
+			  unsigned long trap);
 
 extern void htab_finish_init(void);
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,