diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 96a8889afbe180932429cba721d6cf65b280e567..2cd1e427828338b6674593df01f829349515b028 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -150,6 +150,9 @@ struct sw_rx_page {
 
 #define PAGES_PER_SGE_SHIFT		0
 #define PAGES_PER_SGE			(1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE			PAGE_SIZE
+#define SGE_PAGE_SHIFT			PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr)		PAGE_ALIGN(addr)
 
 #define BCM_RX_ETH_PAYLOAD_ALIGN	64
 
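Note on the new macros: they decouple the SGE ring's page accounting from the chip's fixed page notion. BCM_PAGE_SIZE is hard-wired to a 4KB chip page elsewhere in bnx2x.h, while the SGE_* aliases track the host's PAGE_SIZE. A minimal sketch of the distinction, using a 64KB-page host (e.g. some ppc64 configurations) as an illustrative assumption:

	/* Already in bnx2x.h: the chip always works in 4KB pages. */
	#define BCM_PAGE_SHIFT		12
	#define BCM_PAGE_SIZE		(1 << BCM_PAGE_SHIFT)

	/* After this patch, on a hypothetical 64KB-page host:
	 *   BCM_PAGE_SIZE == 4096   (chip-facing structures, unchanged)
	 *   SGE_PAGE_SIZE == 65536  (host pages backing the SGE ring)
	 * so the DMA map/unmap lengths below cover the whole host page. */
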
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index f0b2e73b87f72db764f18beec6da6787d727116b..75b2624cd60bcdc6c501da428a8a9389051fd2b0 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -974,7 +974,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 		return;
 
 	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
-		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
 	sw_buf->page = NULL;
@@ -1002,7 +1002,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	if (unlikely(page == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
+	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
 			       PCI_DMA_FROMDEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
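The map length here must stay in lockstep with the unmap lengths in bnx2x_free_rx_sge() and bnx2x_fill_frag_skb(); all call sites now use the same SGE_PAGE_SIZE*PAGES_PER_SGE expression. A sketch naming that shared invariant (the helper is hypothetical, not part of the patch; u32 is the kernel type):

	/* Hypothetical helper: the one DMA length every SGE map/unmap
	 * site shares.  With PAGES_PER_SGE_SHIFT == 0 it is exactly
	 * one host page. */
	static inline u32 bnx2x_sge_map_len(void)
	{
		return SGE_PAGE_SIZE * PAGES_PER_SGE;
	}
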
@@ -1098,9 +1098,9 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 				  struct eth_fast_path_rx_cqe *fp_cqe)
 {
 	struct bnx2x *bp = fp->bp;
-	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
 				     le16_to_cpu(fp_cqe->len_on_bd)) >>
-		      BCM_PAGE_SHIFT;
+		      SGE_PAGE_SHIFT;
 	u16 last_max, last_elem, first_elem;
 	u16 delta = 0;
 	u16 i;
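sge_len counts the whole host pages consumed by the part of the packet that did not fit on the BD, so it must use the same page size that mapped the SGEs. A worked example with hypothetical packet values, assuming a 4KB host page:

	/* pkt_len = 9000, len_on_bd = 1500  ->  7500 bytes spill into SGEs.
	 * SGE_PAGE_ALIGN(7500) = 8192;  8192 >> SGE_PAGE_SHIFT(12) = 2 pages.
	 * On a 64KB-page host the same packet consumes a single SGE page. */
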
@@ -1205,22 +1205,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			       u16 cqe_idx)
 {
 	struct sw_rx_page *rx_pg, old_rx_pg;
-	struct page *sge;
 	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
 	u32 i, frag_len, frag_size, pages;
 	int err;
 	int j;
 
 	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
-	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
+	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
 
 	/* This is needed in order to enable forwarding support */
 	if (frag_size)
-		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
+		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
 					       max(frag_size, (u32)len_on_bd));
 
 #ifdef BNX2X_STOP_ON_ERROR
-	if (pages > 8*PAGES_PER_SGE) {
+	if (pages >
+	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 			  pages, cqe_idx);
 		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
@@ -1236,9 +1236,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		/* FW gives the indices of the SGE as if the ring is an array
 		   (meaning that "next" element will consume 2 indices) */
-		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
+		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
 		rx_pg = &fp->rx_page_ring[sge_idx];
-		sge = rx_pg->page;
 		old_rx_pg = *rx_pg;
 
 		/* If we fail to allocate a substitute page, we simply stop
@@ -1251,7 +1250,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		/* Unmap the page as we are going to pass it to the stack */
 		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			      BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
 
 		/* Add one frag and update the appropriate fields in the skb */
 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
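Each loop iteration hands at most one SGE page to the skb, which is why the unmap length and the frag_len clamp use the same SGE_PAGE_SIZE*PAGES_PER_SGE expression, and why the page is unmapped before the stack takes ownership of it. Continuing the hypothetical 4KB-page example:

	/* frag_size = 7500:
	 *   j = 0: frag_len = min(7500, 4096) = 4096, 3404 bytes remain;
	 *   j = 1: frag_len = min(3404, 4096) = 3404, done.
	 * Two page frags are attached via skb_fill_page_desc(). */
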
@@ -4544,7 +4543,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
 
 	if (bp->flags & TPA_ENABLE_FLAG) {
 		tstorm_client.max_sges_for_packet =
-			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
+			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
 		tstorm_client.max_sges_for_packet =
 			((tstorm_client.max_sges_for_packet +
 			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
@@ -4727,10 +4726,11 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 			 bp->e1hov);
 	}
 
-	/* Init CQ ring mapping and aggregation size */
-	max_agg_size = min((u32)(bp->rx_buf_size +
-				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
-			   (u32)0xffff);
+	/* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
+	max_agg_size =
+		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
+			  SGE_PAGE_SIZE * PAGES_PER_SGE),
+		    (u32)0xffff);
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
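The old expression sized the aggregation limit from a fixed 8 chip pages on top of the buffer; the new one caps it at what the skb can actually hold, min(8, MAX_SKB_FRAGS) frags of one SGE page each, clamped to the 16-bit FW field. A worked example, assuming MAX_SKB_FRAGS = 65536/PAGE_SIZE + 2 as defined in kernels of this era:

	/* 4KB-page host:  MAX_SKB_FRAGS = 18, min(8, 18) = 8
	 *   max_agg_size = min(8 * 4096 * 1, 0xffff)  = 32768
	 * 64KB-page host: MAX_SKB_FRAGS = 3,  min(8, 3) = 3
	 *   max_agg_size = min(3 * 65536 * 1, 0xffff) = 0xffff (65535) */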