diff --git a/mm/memory.c b/mm/memory.c
index 5e0e91cc6b67a895a2ca2c9101113f2a89877cc2..99e8d5c7b3126a455d664942d0f90621ce6c8825 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -767,7 +767,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			else {
 				if (pte_dirty(ptent))
 					set_page_dirty(page);
-				if (pte_young(ptent))
+				if (pte_young(ptent) &&
+				    likely(!VM_SequentialReadHint(vma)))
 					mark_page_accessed(page);
 				file_rss--;
 			}
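
The hunk above stops zap_pte_range() from promoting pages of a sequentially read mapping at unmap time: the hardware accessed bit is ignored whenever the VMA carries the sequential-read hint. VM_SequentialReadHint(vma) tests VM_SEQ_READ in vma->vm_flags, a flag userspace requests with madvise(MADV_SEQUENTIAL). Below is a hedged userspace sketch of how that hint gets set; the file path and the checksum loop are illustrative only, not part of the patch:

/* Userspace side of VM_SequentialReadHint: madvise(MADV_SEQUENTIAL)
 * flags the mapping this patch later tests.  Minimal error handling;
 * the path is a placeholder. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example.dat", O_RDONLY);	/* placeholder path */
	struct stat st;

	if (fd < 0 || fstat(fd, &st) < 0) {
		perror("open/fstat");
		return EXIT_FAILURE;
	}

	char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Sets VM_SEQ_READ on the VMA; with this patch applied, the
	 * accessed bits left behind by the pass below no longer count
	 * as references, so a one-shot scan of a large file does not
	 * push the real working set off the LRU. */
	if (madvise(p, st.st_size, MADV_SEQUENTIAL) < 0)
		perror("madvise");

	unsigned long sum = 0;
	for (off_t i = 0; i < st.st_size; i++)
		sum += (unsigned char)p[i];	/* single sequential pass */
	printf("checksum: %lu\n", sum);

	munmap(p, st.st_size);
	close(fd);
	return EXIT_SUCCESS;
}
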
diff --git a/mm/rmap.c b/mm/rmap.c
index 53c56dacd725ee2427d3c5526b3396d98d647695..f01e92244c532b96c0214f316f973369eed6ee50 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -360,8 +360,17 @@ static int page_referenced_one(struct page *page,
 		goto out_unmap;
 	}
 
-	if (ptep_clear_flush_young_notify(vma, address, pte))
-		referenced++;
+	if (ptep_clear_flush_young_notify(vma, address, pte)) {
+		/*
+		 * Don't treat a reference through a sequentially read
+		 * mapping as such.  If the page has been used in
+		 * another mapping, we will catch it; if this other
+		 * mapping is already gone, the unmap path will have
+		 * set PG_referenced or activated the page.
+		 */
+		if (likely(!VM_SequentialReadHint(vma)))
+			referenced++;
+	}
 
 	/* Pretend the page is referenced if the task has the
 	   swap token and is in the middle of a page fault. */
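
Taken together, the two hunks mean a young PTE in a sequentially read mapping no longer contributes to the count that page_referenced() reports to reclaim, while a reference through any other mapping of the same page still does. A minimal, self-contained sketch of that decision follows; the stub types and names are illustrative stand-ins, not kernel code:

/* Sketch (not kernel code) of the patched page_referenced_one()
 * test: the young bit is still cleared either way, but it only
 * counts as a reference when the mapping is not sequential. */
#include <stdbool.h>
#include <stdio.h>

struct vma_stub {
	bool seq_read_hint;	/* stands in for VM_SEQ_READ in vm_flags */
};

struct pte_stub {
	bool young;		/* stands in for the hardware accessed bit */
};

static int count_reference(struct vma_stub *vma, struct pte_stub *pte)
{
	int referenced = 0;

	if (pte->young) {
		pte->young = false;	/* ptep_clear_flush_young_notify() analogue */
		if (!vma->seq_read_hint)
			referenced++;
	}
	return referenced;
}

int main(void)
{
	struct vma_stub seq = { .seq_read_hint = true };
	struct vma_stub normal = { .seq_read_hint = false };
	struct pte_stub pte = { .young = true };

	printf("sequential mapping: %d\n", count_reference(&seq, &pte));
	pte.young = true;
	printf("normal mapping:     %d\n", count_reference(&normal, &pte));
	return 0;
}

Run against both stubs, the sequential mapping reports 0 and the normal one reports 1, which is exactly the asymmetry the patch introduces: streaming pages stay eligible for eviction, and, as the comment in the hunk notes, a genuinely reused page is still caught through its other mappings or via PG_referenced set on the unmap path.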