Commit b0eb57cb, authored by Andy King, committed by David S. Miller

VMXNET3: Add support for virtual IOMMU

This patch adds support for virtual IOMMU to the vmxnet3 module.  We
switch to DMA consistent mappings for anything we pass to the device.
There were a few places where we already did this, but using pci_blah();
these have been fixed to use dma_blah(), along with all new occurrences
where we've replaced kmalloc() and friends.

Also fix two small bugs:
1) use after free of rq->buf_info in vmxnet3_rq_destroy()
2) a cpu_to_le32() that should have been a cpu_to_le64()
Acked-by: George Zhang <georgezhang@vmware.com>
Acked-by: Aditya Sarwade <asarwade@vmware.com>
Signed-off-by: Andy King <acking@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 68d7bdcb
...@@ -313,10 +313,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, ...@@ -313,10 +313,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
struct pci_dev *pdev) struct pci_dev *pdev)
{ {
if (tbi->map_type == VMXNET3_MAP_SINGLE) if (tbi->map_type == VMXNET3_MAP_SINGLE)
pci_unmap_single(pdev, tbi->dma_addr, tbi->len, dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
PCI_DMA_TODEVICE); PCI_DMA_TODEVICE);
else if (tbi->map_type == VMXNET3_MAP_PAGE) else if (tbi->map_type == VMXNET3_MAP_PAGE)
pci_unmap_page(pdev, tbi->dma_addr, tbi->len, dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
PCI_DMA_TODEVICE); PCI_DMA_TODEVICE);
else else
BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
...@@ -429,25 +429,29 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, ...@@ -429,25 +429,29 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter) struct vmxnet3_adapter *adapter)
{ {
if (tq->tx_ring.base) { if (tq->tx_ring.base) {
pci_free_consistent(adapter->pdev, tq->tx_ring.size * dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
sizeof(struct Vmxnet3_TxDesc), sizeof(struct Vmxnet3_TxDesc),
tq->tx_ring.base, tq->tx_ring.basePA); tq->tx_ring.base, tq->tx_ring.basePA);
tq->tx_ring.base = NULL; tq->tx_ring.base = NULL;
} }
if (tq->data_ring.base) { if (tq->data_ring.base) {
pci_free_consistent(adapter->pdev, tq->data_ring.size * dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
sizeof(struct Vmxnet3_TxDataDesc), sizeof(struct Vmxnet3_TxDataDesc),
tq->data_ring.base, tq->data_ring.basePA); tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL; tq->data_ring.base = NULL;
} }
if (tq->comp_ring.base) { if (tq->comp_ring.base) {
pci_free_consistent(adapter->pdev, tq->comp_ring.size * dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc), sizeof(struct Vmxnet3_TxCompDesc),
tq->comp_ring.base, tq->comp_ring.basePA); tq->comp_ring.base, tq->comp_ring.basePA);
tq->comp_ring.base = NULL; tq->comp_ring.base = NULL;
} }
kfree(tq->buf_info); if (tq->buf_info) {
tq->buf_info = NULL; dma_free_coherent(&adapter->pdev->dev,
tq->tx_ring.size * sizeof(tq->buf_info[0]),
tq->buf_info, tq->buf_info_pa);
tq->buf_info = NULL;
}
} }
...@@ -496,37 +500,38 @@ static int ...@@ -496,37 +500,38 @@ static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter) struct vmxnet3_adapter *adapter)
{ {
size_t sz;
BUG_ON(tq->tx_ring.base || tq->data_ring.base || BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
tq->comp_ring.base || tq->buf_info); tq->comp_ring.base || tq->buf_info);
tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
* sizeof(struct Vmxnet3_TxDesc), tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
&tq->tx_ring.basePA); &tq->tx_ring.basePA, GFP_KERNEL);
if (!tq->tx_ring.base) { if (!tq->tx_ring.base) {
netdev_err(adapter->netdev, "failed to allocate tx ring\n"); netdev_err(adapter->netdev, "failed to allocate tx ring\n");
goto err; goto err;
} }
tq->data_ring.base = pci_alloc_consistent(adapter->pdev, tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
tq->data_ring.size * tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
sizeof(struct Vmxnet3_TxDataDesc), &tq->data_ring.basePA, GFP_KERNEL);
&tq->data_ring.basePA);
if (!tq->data_ring.base) { if (!tq->data_ring.base) {
netdev_err(adapter->netdev, "failed to allocate data ring\n"); netdev_err(adapter->netdev, "failed to allocate data ring\n");
goto err; goto err;
} }
tq->comp_ring.base = pci_alloc_consistent(adapter->pdev, tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
tq->comp_ring.size * tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
sizeof(struct Vmxnet3_TxCompDesc), &tq->comp_ring.basePA, GFP_KERNEL);
&tq->comp_ring.basePA);
if (!tq->comp_ring.base) { if (!tq->comp_ring.base) {
netdev_err(adapter->netdev, "failed to allocate tx comp ring\n"); netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
goto err; goto err;
} }
tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]), sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
GFP_KERNEL); tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
&tq->buf_info_pa, GFP_KERNEL);
if (!tq->buf_info) if (!tq->buf_info)
goto err; goto err;
...@@ -578,7 +583,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, ...@@ -578,7 +583,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
break; break;
} }
rbi->dma_addr = pci_map_single(adapter->pdev, rbi->dma_addr = dma_map_single(
&adapter->pdev->dev,
rbi->skb->data, rbi->len, rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
} else { } else {
...@@ -595,7 +601,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, ...@@ -595,7 +601,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
rq->stats.rx_buf_alloc_failure++; rq->stats.rx_buf_alloc_failure++;
break; break;
} }
rbi->dma_addr = pci_map_page(adapter->pdev, rbi->dma_addr = dma_map_page(
&adapter->pdev->dev,
rbi->page, 0, PAGE_SIZE, rbi->page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
} else { } else {
...@@ -705,7 +712,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, ...@@ -705,7 +712,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
tbi = tq->buf_info + tq->tx_ring.next2fill; tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_SINGLE; tbi->map_type = VMXNET3_MAP_SINGLE;
tbi->dma_addr = pci_map_single(adapter->pdev, tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
skb->data + buf_offset, buf_size, skb->data + buf_offset, buf_size,
PCI_DMA_TODEVICE); PCI_DMA_TODEVICE);
...@@ -1221,7 +1228,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, ...@@ -1221,7 +1228,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
goto rcd_done; goto rcd_done;
} }
pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
rbi->len,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
#ifdef VMXNET3_RSS #ifdef VMXNET3_RSS
...@@ -1233,7 +1241,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, ...@@ -1233,7 +1241,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
/* Immediate refill */ /* Immediate refill */
rbi->skb = new_skb; rbi->skb = new_skb;
rbi->dma_addr = pci_map_single(adapter->pdev, rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
rbi->skb->data, rbi->len, rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr); rxd->addr = cpu_to_le64(rbi->dma_addr);
...@@ -1267,7 +1275,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, ...@@ -1267,7 +1275,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
} }
if (rcd->len) { if (rcd->len) {
pci_unmap_page(adapter->pdev, dma_unmap_page(&adapter->pdev->dev,
rbi->dma_addr, rbi->len, rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
...@@ -1276,7 +1284,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, ...@@ -1276,7 +1284,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
/* Immediate refill */ /* Immediate refill */
rbi->page = new_page; rbi->page = new_page;
rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page, rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
rbi->page,
0, PAGE_SIZE, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr); rxd->addr = cpu_to_le64(rbi->dma_addr);
...@@ -1352,13 +1361,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, ...@@ -1352,13 +1361,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
rq->buf_info[ring_idx][i].skb) { rq->buf_info[ring_idx][i].skb) {
pci_unmap_single(adapter->pdev, rxd->addr, dma_unmap_single(&adapter->pdev->dev, rxd->addr,
rxd->len, PCI_DMA_FROMDEVICE); rxd->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb(rq->buf_info[ring_idx][i].skb); dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
rq->buf_info[ring_idx][i].skb = NULL; rq->buf_info[ring_idx][i].skb = NULL;
} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
rq->buf_info[ring_idx][i].page) { rq->buf_info[ring_idx][i].page) {
pci_unmap_page(adapter->pdev, rxd->addr, dma_unmap_page(&adapter->pdev->dev, rxd->addr,
rxd->len, PCI_DMA_FROMDEVICE); rxd->len, PCI_DMA_FROMDEVICE);
put_page(rq->buf_info[ring_idx][i].page); put_page(rq->buf_info[ring_idx][i].page);
rq->buf_info[ring_idx][i].page = NULL; rq->buf_info[ring_idx][i].page = NULL;
...@@ -1400,25 +1409,31 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, ...@@ -1400,25 +1409,31 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
} }
kfree(rq->buf_info[0]);
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
if (rq->rx_ring[i].base) { if (rq->rx_ring[i].base) {
pci_free_consistent(adapter->pdev, rq->rx_ring[i].size dma_free_coherent(&adapter->pdev->dev,
* sizeof(struct Vmxnet3_RxDesc), rq->rx_ring[i].size
rq->rx_ring[i].base, * sizeof(struct Vmxnet3_RxDesc),
rq->rx_ring[i].basePA); rq->rx_ring[i].base,
rq->rx_ring[i].basePA);
rq->rx_ring[i].base = NULL; rq->rx_ring[i].base = NULL;
} }
rq->buf_info[i] = NULL; rq->buf_info[i] = NULL;
} }
if (rq->comp_ring.base) { if (rq->comp_ring.base) {
pci_free_consistent(adapter->pdev, rq->comp_ring.size * dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
sizeof(struct Vmxnet3_RxCompDesc), * sizeof(struct Vmxnet3_RxCompDesc),
rq->comp_ring.base, rq->comp_ring.basePA); rq->comp_ring.base, rq->comp_ring.basePA);
rq->comp_ring.base = NULL; rq->comp_ring.base = NULL;
} }
if (rq->buf_info[0]) {
size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
(rq->rx_ring[0].size + rq->rx_ring[1].size);
dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
rq->buf_info_pa);
}
} }
...@@ -1503,8 +1518,10 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) ...@@ -1503,8 +1518,10 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz, rq->rx_ring[i].base = dma_alloc_coherent(
&rq->rx_ring[i].basePA); &adapter->pdev->dev, sz,
&rq->rx_ring[i].basePA,
GFP_KERNEL);
if (!rq->rx_ring[i].base) { if (!rq->rx_ring[i].base) {
netdev_err(adapter->netdev, netdev_err(adapter->netdev,
"failed to allocate rx ring %d\n", i); "failed to allocate rx ring %d\n", i);
...@@ -1513,8 +1530,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) ...@@ -1513,8 +1530,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
} }
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz, rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
&rq->comp_ring.basePA); &rq->comp_ring.basePA,
GFP_KERNEL);
if (!rq->comp_ring.base) { if (!rq->comp_ring.base) {
netdev_err(adapter->netdev, "failed to allocate rx comp ring\n"); netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
goto err; goto err;
...@@ -1522,7 +1540,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) ...@@ -1522,7 +1540,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size); rq->rx_ring[1].size);
bi = kzalloc(sz, GFP_KERNEL); bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
GFP_KERNEL);
if (!bi) if (!bi)
goto err; goto err;
...@@ -2005,6 +2024,7 @@ vmxnet3_set_mc(struct net_device *netdev) ...@@ -2005,6 +2024,7 @@ vmxnet3_set_mc(struct net_device *netdev)
struct Vmxnet3_RxFilterConf *rxConf = struct Vmxnet3_RxFilterConf *rxConf =
&adapter->shared->devRead.rxFilterConf; &adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL; u8 *new_table = NULL;
dma_addr_t new_table_pa = 0;
u32 new_mode = VMXNET3_RXM_UCAST; u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) { if (netdev->flags & IFF_PROMISC) {
...@@ -2028,8 +2048,12 @@ vmxnet3_set_mc(struct net_device *netdev) ...@@ -2028,8 +2048,12 @@ vmxnet3_set_mc(struct net_device *netdev)
new_mode |= VMXNET3_RXM_MCAST; new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16( rxConf->mfTableLen = cpu_to_le16(
netdev_mc_count(netdev) * ETH_ALEN); netdev_mc_count(netdev) * ETH_ALEN);
rxConf->mfTablePA = cpu_to_le64(virt_to_phys( new_table_pa = dma_map_single(
new_table)); &adapter->pdev->dev,
new_table,
rxConf->mfTableLen,
PCI_DMA_TODEVICE);
rxConf->mfTablePA = cpu_to_le64(new_table_pa);
} else { } else {
netdev_info(netdev, "failed to copy mcast list" netdev_info(netdev, "failed to copy mcast list"
", setting ALL_MULTI\n"); ", setting ALL_MULTI\n");
...@@ -2056,7 +2080,11 @@ vmxnet3_set_mc(struct net_device *netdev) ...@@ -2056,7 +2080,11 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_CMD_UPDATE_MAC_FILTERS); VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags); spin_unlock_irqrestore(&adapter->cmd_lock, flags);
kfree(new_table); if (new_table) {
dma_unmap_single(&adapter->pdev->dev, new_table_pa,
rxConf->mfTableLen, PCI_DMA_TODEVICE);
kfree(new_table);
}
} }
void void
...@@ -2096,7 +2124,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) ...@@ -2096,7 +2124,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter)); devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
/* set up feature flags */ /* set up feature flags */
...@@ -2125,7 +2153,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) ...@@ -2125,7 +2153,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info)); tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
...@@ -2143,8 +2171,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) ...@@ -2143,8 +2171,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
rqc->ddPA = cpu_to_le64(virt_to_phys( rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
rq->buf_info));
rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
...@@ -2184,8 +2211,9 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) ...@@ -2184,8 +2211,9 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
i, adapter->num_rx_queues); i, adapter->num_rx_queues);
devRead->rssConfDesc.confVer = 1; devRead->rssConfDesc.confVer = 1;
devRead->rssConfDesc.confLen = sizeof(*rssConf); devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
devRead->rssConfDesc.confPA = virt_to_phys(rssConf); devRead->rssConfDesc.confPA =
cpu_to_le64(adapter->rss_conf_pa);
} }
#endif /* VMXNET3_RSS */ #endif /* VMXNET3_RSS */
...@@ -2948,9 +2976,13 @@ vmxnet3_probe_device(struct pci_dev *pdev, ...@@ -2948,9 +2976,13 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->pdev = pdev; adapter->pdev = pdev;
spin_lock_init(&adapter->cmd_lock); spin_lock_init(&adapter->cmd_lock);
adapter->shared = pci_alloc_consistent(adapter->pdev, adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
sizeof(struct Vmxnet3_DriverShared), sizeof(struct vmxnet3_adapter),
&adapter->shared_pa); PCI_DMA_TODEVICE);
adapter->shared = dma_alloc_coherent(
&adapter->pdev->dev,
sizeof(struct Vmxnet3_DriverShared),
&adapter->shared_pa, GFP_KERNEL);
if (!adapter->shared) { if (!adapter->shared) {
dev_err(&pdev->dev, "Failed to allocate memory\n"); dev_err(&pdev->dev, "Failed to allocate memory\n");
err = -ENOMEM; err = -ENOMEM;
...@@ -2963,8 +2995,9 @@ vmxnet3_probe_device(struct pci_dev *pdev, ...@@ -2963,8 +2995,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size, adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
&adapter->queue_desc_pa); &adapter->queue_desc_pa,
GFP_KERNEL);
if (!adapter->tqd_start) { if (!adapter->tqd_start) {
dev_err(&pdev->dev, "Failed to allocate memory\n"); dev_err(&pdev->dev, "Failed to allocate memory\n");
...@@ -2974,7 +3007,10 @@ vmxnet3_probe_device(struct pci_dev *pdev, ...@@ -2974,7 +3007,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
adapter->num_tx_queues); adapter->num_tx_queues);
adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_PMConf),
&adapter->pm_conf_pa,
GFP_KERNEL);
if (adapter->pm_conf == NULL) { if (adapter->pm_conf == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto err_alloc_pm; goto err_alloc_pm;
...@@ -2982,7 +3018,10 @@ vmxnet3_probe_device(struct pci_dev *pdev, ...@@ -2982,7 +3018,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
#ifdef VMXNET3_RSS #ifdef VMXNET3_RSS
adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL); adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
sizeof(struct UPT1_RSSConf),
&adapter->rss_conf_pa,
GFP_KERNEL);
if (adapter->rss_conf == NULL) { if (adapter->rss_conf == NULL) {
err = -ENOMEM; err = -ENOMEM;
goto err_alloc_rss; goto err_alloc_rss;
...@@ -3077,17 +3116,22 @@ vmxnet3_probe_device(struct pci_dev *pdev, ...@@ -3077,17 +3116,22 @@ vmxnet3_probe_device(struct pci_dev *pdev,
vmxnet3_free_pci_resources(adapter); vmxnet3_free_pci_resources(adapter);
err_alloc_pci: err_alloc_pci:
#ifdef VMXNET3_RSS #ifdef VMXNET3_RSS
kfree(adapter->rss_conf); dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
adapter->rss_conf, adapter->rss_conf_pa);
err_alloc_rss: err_alloc_rss:
#endif #endif
kfree(adapter->pm_conf); dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
adapter->pm_conf, adapter->pm_conf_pa);
err_alloc_pm: err_alloc_pm:
pci_free_consistent(adapter->pdev, size, adapter->tqd_start, dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
adapter->queue_desc_pa); adapter->queue_desc_pa);
err_alloc_queue_desc: err_alloc_queue_desc:
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), dma_free_coherent(&adapter->pdev->dev,
adapter->shared, adapter->shared_pa); sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
err_alloc_shared: err_alloc_shared:
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
free_netdev(netdev); free_netdev(netdev);
return err; return err;
...@@ -3118,16 +3162,21 @@ vmxnet3_remove_device(struct pci_dev *pdev) ...@@ -3118,16 +3162,21 @@ vmxnet3_remove_device(struct pci_dev *pdev)
vmxnet3_free_intr_resources(adapter); vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter); vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS #ifdef VMXNET3_RSS
kfree(adapter->rss_conf); dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
adapter->rss_conf, adapter->rss_conf_pa);
#endif #endif
kfree(adapter->pm_conf); dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
adapter->pm_conf, adapter->pm_conf_pa);
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues; size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
pci_free_consistent(adapter->pdev, size, adapter->tqd_start, dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
adapter->queue_desc_pa); adapter->queue_desc_pa);
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), dma_free_coherent(&adapter->pdev->dev,
adapter->shared, adapter->shared_pa); sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
free_netdev(netdev); free_netdev(netdev);
} }
...@@ -3227,8 +3276,8 @@ vmxnet3_suspend(struct device *device) ...@@ -3227,8 +3276,8 @@ vmxnet3_suspend(struct device *device)
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf)); *pmConf));
adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( adapter->shared->devRead.pmConfDesc.confPA =
pmConf)); cpu_to_le64(adapter->pm_conf_pa);
spin_lock_irqsave(&adapter->cmd_lock, flags);