diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 6d8352c0f3547a9f63fa7db614e1466f2b822616..f7aca562f267859da09aa075dfaf53d072741684 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -34,6 +34,7 @@ available subsections can be seen below.
    edac
    scsi
    libata
+   target
    mtdnand
    miscellaneous
    w1
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
index 31ad0fed6763bcdac1df622ae8b66433c6e2a5a5..64b231d125e0fc0f5396e18022898086b2d5be40 100644
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -334,5 +334,5 @@ todo
 ~~~~
 
 Parallel (fast/wide/ultra) SCSI, USB, SATA, SAS, Fibre Channel,
-FireWire, ATAPI devices, Infiniband, I2O, iSCSI, Parallel ports,
+FireWire, ATAPI devices, Infiniband, I2O, Parallel ports,
 netlink...
diff --git a/Documentation/driver-api/target.rst b/Documentation/driver-api/target.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4363611dd86d1dba4696c2574d2b83cdf8419321
--- /dev/null
+++ b/Documentation/driver-api/target.rst
@@ -0,0 +1,64 @@
+=================================
+Target and iSCSI Interfaces Guide
+=================================
+
+Introduction and Overview
+=========================
+
+TBD
+
+Target core device interfaces
+=============================
+
+.. kernel-doc:: drivers/target/target_core_device.c
+   :export:
+
+Target core transport interfaces
+================================
+
+.. kernel-doc:: drivers/target/target_core_transport.c
+   :export:
+
+Target-supported userspace I/O
+==============================
+
+.. kernel-doc:: drivers/target/target_core_user.c
+   :doc: Userspace I/O
+
+.. kernel-doc:: include/uapi/linux/target_core_user.h
+   :doc: Ring Design
+
+iSCSI helper functions
+======================
+
+.. kernel-doc:: drivers/scsi/libiscsi.c
+   :export:
+
+
+iSCSI boot information
+======================
+
+.. kernel-doc:: drivers/scsi/iscsi_boot_sysfs.c
+   :export:
+
+
+iSCSI transport class
+=====================
+
+The file drivers/scsi/scsi_transport_iscsi.c defines transport
+attributes for the iSCSI class, which transports SCSI commands and
+data over TCP/IP connections.
+
+.. kernel-doc:: drivers/scsi/scsi_transport_iscsi.c
+   :export:
+
+
+iSCSI TCP interfaces
+====================
+
+.. kernel-doc:: drivers/scsi/iscsi_tcp.c
+   :internal:
+
+.. kernel-doc:: drivers/scsi/libiscsi_tcp.c
+   :export:
+
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 89a9d4a2efc8a56a76407611a30d4079363fdfe9..1c9f80fbc51c03ec16a0f58bccb479ef70ed24e9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -5051,6 +5051,18 @@ int ata_sas_port_init(struct ata_port *ap)
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_init);
 
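+/*
+ * Thin exported wrappers around the libata transport helpers, so that
+ * SAS LLDDs can add and delete ATA transport ports without reaching
+ * into libata internals.
+ */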
+int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
+{
+	return ata_tport_add(parent, ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_add);
+
+void ata_sas_tport_delete(struct ata_port *ap)
+{
+	ata_tport_delete(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
+
 /**
  *	ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
  *	@ap: SATA port to destroy
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 8699bb969e7e31dd42a249018f1bb4db78b520c8..3c836c099a8f35e865feae4207a5f8c6d13888bc 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -227,6 +227,8 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 	ring_info->ring_buffer->feature_bits.value = 1;
 
 	ring_info->ring_size = page_cnt << PAGE_SHIFT;
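+	/*
+	 * Cache the reciprocal of ring_size / 10 so that the
+	 * hv_get_avail_to_write_percent() helper can derive the free
+	 * percentage with reciprocal_divide() (a multiply and a shift)
+	 * instead of a hardware division.
+	 */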
+	ring_info->ring_size_div10_reciprocal =
+		reciprocal_value(ring_info->ring_size / 10);
 	ring_info->ring_datasize = ring_info->ring_size -
 		sizeof(struct hv_ring_buffer);
 
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 4e9c0ce94f2713f2b4cb88e6bfb774bbef47985f..059997f8ebceeb4f6d8c3216f8a50846cbcbc7c9 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -1802,13 +1802,13 @@ typedef struct _CONFIG_PAGE_FC_PORT_0
 #define MPI_FCPORTPAGE0_SUPPORT_CLASS_2                 (0x00000002)
 #define MPI_FCPORTPAGE0_SUPPORT_CLASS_3                 (0x00000004)
 
-#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN            (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0   Unknown - transceiver incapable of reporting */
+#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UNKNOWN           (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0   Unknown - transceiver incapable of reporting */
 #define MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED             (0x00000001) /* (SNIA)HBA_PORTSPEED_1GBIT   1   1 GBit/sec */
 #define MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED             (0x00000002) /* (SNIA)HBA_PORTSPEED_2GBIT   2   2 GBit/sec */
 #define MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED            (0x00000004) /* (SNIA)HBA_PORTSPEED_10GBIT  4  10 GBit/sec */
 #define MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED             (0x00000008) /* (SNIA)HBA_PORTSPEED_4GBIT   8   4 GBit/sec */
 
-#define MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN            MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_UNKNOWN           MPI_FCPORTPAGE0_SUPPORT_SPEED_UNKNOWN
 #define MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT             MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED
 #define MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT             MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED
 #define MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT            MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 6d461ca97150553cba7ddcd880571247af1a50c0..06b175420be94c55029999ba12bd9a9b5feb4e58 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -693,7 +693,7 @@ mptfc_display_port_link_speed(MPT_ADAPTER *ioc, int portnum, FCPortPage0_t *pp0d
 	state = pp0dest->PortState;
 
 	if (state != MPI_FCPORTPAGE0_PORTSTATE_OFFLINE &&
-	    new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN) {
+	    new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UNKNOWN) {
 
 		old = old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" :
 		       old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" :
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 6ebe39a3dde66b9b5af230944c618e2bb542dd90..14170c25eef2067ddfb3900383b94fc0102ce626 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -189,7 +189,6 @@ struct netvsc_device;
 struct net_device_context;
 
 extern u32 netvsc_ring_bytes;
-extern struct reciprocal_value netvsc_ring_reciprocal;
 
 struct netvsc_device *netvsc_device_add(struct hv_device *device,
 					const struct netvsc_device_info *info);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d2ee66c259a70fe305fd235a11892fea7834400e..5d5bd513847fff4ff353e7c58d9967a354d06955 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -31,7 +31,6 @@
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 #include <linux/prefetch.h>
-#include <linux/reciprocal_div.h>
 
 #include <asm/sync_bitops.h>
 
@@ -635,17 +634,6 @@ void netvsc_device_remove(struct hv_device *device)
 #define RING_AVAIL_PERCENT_HIWATER 20
 #define RING_AVAIL_PERCENT_LOWATER 10
 
-/*
- * Get the percentage of available bytes to write in the ring.
- * The return value is in range from 0 to 100.
- */
-static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
-{
-	u32 avail_write = hv_get_bytes_to_write(ring_info);
-
-	return reciprocal_divide(avail_write  * 100, netvsc_ring_reciprocal);
-}
-
 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
 					 u32 index)
 {
@@ -694,8 +682,8 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
 		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
 		if (netif_tx_queue_stopped(txq) &&
-		    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
-		     queue_sends < 1)) {
+		    (hv_get_avail_to_write_percent(&channel->outbound) >
+		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
 			netif_tx_wake_queue(txq);
 			ndev_ctx->eth_stats.wake_queue++;
 		}
@@ -802,7 +790,7 @@ static inline int netvsc_send_pkt(
 	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
 	u64 req_id;
 	int ret;
-	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
+	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
 
 	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
 	if (skb)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ecc84954c511053fe6c493e0a1131f544467f178..895a54f96c6c88b52d7cc07c42854a885033073d 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -35,7 +35,6 @@
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/netpoll.h>
-#include <linux/reciprocal_div.h>
 
 #include <net/arp.h>
 #include <net/route.h>
@@ -58,7 +57,6 @@ static unsigned int ring_size __ro_after_init = 128;
 module_param(ring_size, uint, 0444);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 unsigned int netvsc_ring_bytes __ro_after_init;
-struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
 
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 				NETIF_MSG_LINK | NETIF_MSG_IFUP |
@@ -2218,7 +2216,6 @@ static int __init netvsc_drv_init(void)
 			ring_size);
 	}
 	netvsc_ring_bytes = ring_size * PAGE_SIZE;
-	netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
 
 	ret = vmbus_driver_register(&netvsc_drv);
 	if (ret)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 11e89e56b8658543ba00c5e4a6dfda3bda5a6bda..35c909bbf8bac18251296c71581f6002a6fbaf00 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1351,6 +1351,20 @@ config SCSI_ZORRO7XX
 	      accelerator card for the Amiga 1200,
 	    - the SCSI controller on the GVP Turbo 040/060 accelerator.
 
+config SCSI_ZORRO_ESP
+	tristate "Zorro ESP SCSI support"
+	depends on ZORRO && SCSI
+	select SCSI_SPI_ATTRS
+	help
+	  Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
+	  expansion boards for the Amiga.
+	  This includes:
+	    - the Phase5 Blizzard 1230 II and IV SCSI controllers,
+	    - the Phase5 Blizzard 2060 SCSI controller,
+	    - the Phase5 Blizzard Cyberstorm and Cyberstorm II SCSI
+	      controllers,
+	    - the Fastlane Zorro III SCSI controller.
+
 config ATARI_SCSI
 	tristate "Atari native SCSI support"
 	depends on ATARI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e29f9b8fd66db1b21167fd7d15eaf0723b59b826..eb30e558fc360dd2fbe257589b4641828313b0a5 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_INFINIBAND_ISER) 	+= libiscsi.o
 obj-$(CONFIG_ISCSI_BOOT_SYSFS)	+= iscsi_boot_sysfs.o
 obj-$(CONFIG_SCSI_A4000T)	+= 53c700.o	a4000t.o
 obj-$(CONFIG_SCSI_ZORRO7XX)	+= 53c700.o	zorro7xx.o
+obj-$(CONFIG_SCSI_ZORRO_ESP)	+= esp_scsi.o	zorro_esp.o
 obj-$(CONFIG_A3000_SCSI)	+= a3000.o	wd33c93.o
 obj-$(CONFIG_A2091_SCSI)	+= a2091.o	wd33c93.o
 obj-$(CONFIG_GVP11_SCSI)	+= gvp11.o	wd33c93.o
@@ -189,7 +190,7 @@ $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
 $(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
 
 quiet_cmd_bflags = GEN     $@
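+# Match only the #define lines so that other mentions of BLIST_ in the
+# header do not end up as bogus entries in the generated table.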
-	cmd_bflags = sed -n 's/.*BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
+	cmd_bflags = sed -n 's/.*define *BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
 
 $(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
 	$(call if_changed,bflags)
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
index a011c5dbf214055315c6bbdd73c814fd752c2bbe..e2a3a1ba26fad12eb9c90bd090a2f5411496335f 100644
--- a/drivers/scsi/cxlflash/Kconfig
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -4,7 +4,7 @@
 
 config CXLFLASH
 	tristate "Support for IBM CAPI Flash"
-	depends on PCI && SCSI && CXL && EEH
+	depends on PCI && SCSI && CXL && OCXL && EEH
 	select IRQ_POLL
 	default m
 	help
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
index 7ec3f6b55dde96d1ed5ac413359f92167791cf4d..5124c68f8d88078de612c347f07223e515f1a145 100644
--- a/drivers/scsi/cxlflash/Makefile
+++ b/drivers/scsi/cxlflash/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
+cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o ocxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
index 339e42b03c49d22eff1596a020749d3b35f30385..bcd8a6c588d377331826d8185718b43b7bce6ef9 100644
--- a/drivers/scsi/cxlflash/backend.h
+++ b/drivers/scsi/cxlflash/backend.h
@@ -13,29 +13,35 @@
  */
 
 extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
+extern const struct cxlflash_backend_ops cxlflash_ocxl_ops;
 
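+/*
+ * Each backend operates on opaque cookies: a ctx_cookie identifies an
+ * adapter context and an afu_cookie identifies the AFU, leaving each
+ * transport (CXL or OCXL) free to decide what the cookies reference.
+ */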
 struct cxlflash_backend_ops {
 	struct module *module;
-	void __iomem * (*psa_map)(void *);
-	void (*psa_unmap)(void __iomem *);
-	int (*process_element)(void *);
-	int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
-	void (*unmap_afu_irq)(void *, int, void *);
-	int (*start_context)(void *);
-	int (*stop_context)(void *);
-	int (*afu_reset)(void *);
-	void (*set_master)(void *);
-	void * (*get_context)(struct pci_dev *, void *);
-	void * (*dev_context_init)(struct pci_dev *, void *);
-	int (*release_context)(void *);
-	void (*perst_reloads_same_image)(void *, bool);
-	ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
-	int (*allocate_afu_irqs)(void *, int);
-	void (*free_afu_irqs)(void *);
-	void * (*create_afu)(struct pci_dev *);
-	struct file * (*get_fd)(void *, struct file_operations *, int *);
-	void * (*fops_get_context)(struct file *);
-	int (*start_work)(void *, u64);
-	int (*fd_mmap)(struct file *, struct vm_area_struct *);
-	int (*fd_release)(struct inode *, struct file *);
+	void __iomem * (*psa_map)(void *ctx_cookie);
+	void (*psa_unmap)(void __iomem *addr);
+	int (*process_element)(void *ctx_cookie);
+	int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler,
+			   void *cookie, char *name);
+	void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie);
+	u64 (*get_irq_objhndl)(void *ctx_cookie, int irq);
+	int (*start_context)(void *ctx_cookie);
+	int (*stop_context)(void *ctx_cookie);
+	int (*afu_reset)(void *ctx_cookie);
+	void (*set_master)(void *ctx_cookie);
+	void * (*get_context)(struct pci_dev *dev, void *afu_cookie);
+	void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie);
+	int (*release_context)(void *ctx_cookie);
+	void (*perst_reloads_same_image)(void *afu_cookie, bool image);
+	ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf,
+				    size_t count);
+	int (*allocate_afu_irqs)(void *ctx_cookie, int num);
+	void (*free_afu_irqs)(void *ctx_cookie);
+	void * (*create_afu)(struct pci_dev *dev);
+	void (*destroy_afu)(void *afu_cookie);
+	struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops,
+				int *fd);
+	void * (*fops_get_context)(struct file *file);
+	int (*start_work)(void *ctx_cookie, u64 irqs);
+	int (*fd_mmap)(struct file *file, struct vm_area_struct *vm);
+	int (*fd_release)(struct inode *inode, struct file *file);
 };
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 102fd26ca886ecf0947c14d7c4e94eea6f6d883f..89240b84745c84ebe14e201efc28abf832bfcd3e 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -211,6 +211,7 @@ struct hwq {
 	struct sisl_ctrl_map __iomem *ctrl_map;		/* MC control map */
 	ctx_hndl_t ctx_hndl;	/* master's context handle */
 	u32 index;		/* Index of this hwq */
+	int num_irqs;		/* Number of interrupts requested for context */
 	struct list_head pending_cmds;	/* Commands pending completion */
 
 	atomic_t hsq_credits;
@@ -223,6 +224,7 @@ struct hwq {
 	u64 *hrrq_end;
 	u64 *hrrq_curr;
 	bool toggle;
+	bool hrrq_online;
 
 	s64 room;
 
@@ -231,8 +233,8 @@ struct hwq {
 
 struct afu {
 	struct hwq hwqs[CXLFLASH_MAX_HWQS];
-	int (*send_cmd)(struct afu *, struct afu_cmd *);
-	int (*context_reset)(struct hwq *);
+	int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd);
+	int (*context_reset)(struct hwq *hwq);
 
 	/* AFU HW */
 	struct cxlflash_afu_map __iomem *afu_map;	/* entire MMIO map */
@@ -272,6 +274,11 @@ static inline bool afu_has_cap(struct afu *afu, u64 cap)
 	return afu_cap & cap;
 }
 
+static inline bool afu_is_ocxl_lisn(struct afu *afu)
+{
+	return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN);
+}
+
 static inline bool afu_is_afu_debug(struct afu *afu)
 {
 	return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
index db1cadad5c5d34a0177f76af00e2e5771b7eaeda..b42da88386bddf9260bd778b90b0a19638a3b29d 100644
--- a/drivers/scsi/cxlflash/cxl_hw.c
+++ b/drivers/scsi/cxlflash/cxl_hw.c
@@ -49,6 +49,12 @@ static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
 	cxl_unmap_afu_irq(ctx_cookie, num, cookie);
 }
 
+static u64 cxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
+{
+	/* Dummy fop for cxl */
+	return 0;
+}
+
 static int cxlflash_start_context(void *ctx_cookie)
 {
 	return cxl_start_context(ctx_cookie, 0, NULL);
@@ -110,6 +116,11 @@ static void *cxlflash_create_afu(struct pci_dev *dev)
 	return cxl_pci_to_afu(dev);
 }
 
+static void cxlflash_destroy_afu(void *afu)
+{
+	/* Dummy fop for cxl */
+}
+
 static struct file *cxlflash_get_fd(void *ctx_cookie,
 				    struct file_operations *fops, int *fd)
 {
@@ -148,6 +159,7 @@ const struct cxlflash_backend_ops cxlflash_cxl_ops = {
 	.process_element	= cxlflash_process_element,
 	.map_afu_irq		= cxlflash_map_afu_irq,
 	.unmap_afu_irq		= cxlflash_unmap_afu_irq,
+	.get_irq_objhndl	= cxlflash_get_irq_objhndl,
 	.start_context		= cxlflash_start_context,
 	.stop_context		= cxlflash_stop_context,
 	.afu_reset		= cxlflash_afu_reset,
@@ -160,6 +172,7 @@ const struct cxlflash_backend_ops cxlflash_cxl_ops = {
 	.allocate_afu_irqs	= cxlflash_allocate_afu_irqs,
 	.free_afu_irqs		= cxlflash_free_afu_irqs,
 	.create_afu		= cxlflash_create_afu,
+	.destroy_afu		= cxlflash_destroy_afu,
 	.get_fd			= cxlflash_get_fd,
 	.fops_get_context	= cxlflash_fops_get_context,
 	.start_work		= cxlflash_start_work,
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index d8fe7ab870b8660474f7d7af24a5441500579b86..a24d7e6e51c10b45779f09fc6e6e5782ffe215f5 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -473,6 +473,7 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
 	struct afu_cmd *cmd = NULL;
 	struct device *dev = &cfg->dev->dev;
 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
+	bool needs_deletion = false;
 	char *buf = NULL;
 	ulong lock_flags;
 	int rc = 0;
@@ -527,6 +528,7 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
 	if (!to) {
 		dev_err(dev, "%s: TMF timed out\n", __func__);
 		rc = -ETIMEDOUT;
+		needs_deletion = true;
 	} else if (cmd->cmd_aborted) {
 		dev_err(dev, "%s: TMF aborted\n", __func__);
 		rc = -EAGAIN;
@@ -537,6 +539,12 @@ static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
 	}
 	cfg->tmf_active = false;
 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
+
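+	/*
+	 * A timed-out TMF was never completed by the AFU, so it must be
+	 * deleted from the hardware queue's pending list before returning.
+	 */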
+	if (needs_deletion) {
+		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+		list_del(&cmd->list);
+		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
+	}
 out:
 	kfree(buf);
 	return rc;
@@ -793,6 +801,10 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
 		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
 	hwq->ctx_cookie = NULL;
 
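+	/*
+	 * Mark the RRQ offline so that any interrupt arriving after the
+	 * context is torn down is silently dropped by the handler.
+	 */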
+	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
+	hwq->hrrq_online = false;
+	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
+
 	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
 	flush_pending_cmds(hwq);
 	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
@@ -946,9 +958,9 @@ static void cxlflash_remove(struct pci_dev *pdev)
 		return;
 	}
 
-	/* If a Task Management Function is active, wait for it to complete
-	 * before continuing with remove.
-	 */
+	/* Yield to running recovery threads before continuing with remove */
+	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
+				     cfg->state != STATE_PROBING);
 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 	if (cfg->tmf_active)
 		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
@@ -971,6 +983,7 @@ static void cxlflash_remove(struct pci_dev *pdev)
 	case INIT_STATE_AFU:
 		term_afu(cfg);
 	case INIT_STATE_PCI:
+		cfg->ops->destroy_afu(cfg->afu_cookie);
 		pci_disable_device(pdev);
 	case INIT_STATE_NONE:
 		free_mem(cfg);
@@ -1303,7 +1316,10 @@ static void afu_err_intr_init(struct afu *afu)
 	for (i = 0; i < afu->num_hwqs; i++) {
 		hwq = get_hwq(afu, i);
 
-		writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
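+		/*
+		 * Program the sync error LISN with a read-modify-write so
+		 * that the remaining bits of ctx_ctrl are preserved; no
+		 * LISN should have been set at this point.
+		 */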
+		reg = readq_be(&hwq->host_map->ctx_ctrl);
+		WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
+		reg |= SISL_MSI_SYNC_ERROR;
+		writeq_be(reg, &hwq->host_map->ctx_ctrl);
 		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
 	}
 }
@@ -1463,6 +1479,12 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
 
 	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
 
+	/* Silently drop spurious interrupts when queue is not online */
+	if (!hwq->hrrq_online) {
+		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
+		return IRQ_HANDLED;
+	}
+
 	if (afu_is_irqpoll_enabled(afu)) {
 		irq_poll_sched(&hwq->irqpoll);
 		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
@@ -1752,6 +1774,8 @@ static int init_global(struct cxlflash_cfg *cfg)
 	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
 	int i = 0, num_ports = 0;
 	int rc = 0;
+	int j;
+	void *ctx;
 	u64 reg;
 
 	rc = read_vpd(cfg, &wwpn[0]);
@@ -1767,6 +1791,7 @@ static int init_global(struct cxlflash_cfg *cfg)
 
 		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
 		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
+		hwq->hrrq_online = true;
 
 		if (afu_is_sq_cmd_mode(afu)) {
 			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
@@ -1812,6 +1837,25 @@ static int init_global(struct cxlflash_cfg *cfg)
 		msleep(100);
 	}
 
+	if (afu_is_ocxl_lisn(afu)) {
+		/* Set up the LISN effective address for each master */
+		for (i = 0; i < afu->num_hwqs; i++) {
+			hwq = get_hwq(afu, i);
+			ctx = hwq->ctx_cookie;
+
+			for (j = 0; j < hwq->num_irqs; j++) {
+				reg = cfg->ops->get_irq_objhndl(ctx, j);
+				writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
+			}
+
+			reg = hwq->ctx_hndl;
+			writeq_be(SISL_LISN_PASID(reg, reg),
+				  &hwq->ctrl_map->lisn_pasid[0]);
+			writeq_be(SISL_LISN_PASID(0UL, reg),
+				  &hwq->ctrl_map->lisn_pasid[1]);
+		}
+	}
+
 	/* Set up master's own CTX_CAP to allow real mode, host translation */
 	/* tables, afu cmds and read/write GSCSI cmds. */
 	/* First, unlock ctx_cap write by reading mbox */
@@ -1911,7 +1955,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
 	int rc = 0;
 	enum undo_level level = UNDO_NOOP;
 	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
-	int num_irqs = is_primary_hwq ? 3 : 2;
+	int num_irqs = hwq->num_irqs;
 
 	rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
 	if (unlikely(rc)) {
@@ -1965,16 +2009,20 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
 	struct device *dev = &cfg->dev->dev;
 	struct hwq *hwq = get_hwq(cfg->afu, index);
 	int rc = 0;
+	int num_irqs;
 	enum undo_level level;
 
 	hwq->afu = cfg->afu;
 	hwq->index = index;
 	INIT_LIST_HEAD(&hwq->pending_cmds);
 
-	if (index == PRIMARY_HWQ)
+	if (index == PRIMARY_HWQ) {
 		ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
-	else
+		num_irqs = 3;
+	} else {
 		ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
+		num_irqs = 2;
+	}
 	if (IS_ERR_OR_NULL(ctx)) {
 		rc = -ENOMEM;
 		goto err1;
@@ -1982,6 +2030,7 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
 
 	WARN_ON(hwq->ctx_cookie);
 	hwq->ctx_cookie = ctx;
+	hwq->num_irqs = num_irqs;
 
 	/* Set it up as a master with the CXL */
 	cfg->ops->set_master(ctx);
@@ -2254,6 +2303,7 @@ static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
 	struct device *dev = &cfg->dev->dev;
 	struct afu_cmd *cmd = NULL;
 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
+	ulong lock_flags;
 	char *buf = NULL;
 	int rc = 0;
 	int nretry = 0;
@@ -2299,6 +2349,11 @@ static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
 	case -ETIMEDOUT:
 		rc = afu->context_reset(hwq);
 		if (rc) {
+			/* Delete the command from pending_cmds list */
+			spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+			list_del(&cmd->list);
+			spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
+
 			cxlflash_schedule_async_reset(cfg);
 			break;
 		}
@@ -3138,7 +3193,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
 					CXLFLASH_NOTIFY_SHUTDOWN };
 static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
-					CXLFLASH_NOTIFY_SHUTDOWN };
+					(CXLFLASH_NOTIFY_SHUTDOWN |
+					CXLFLASH_OCXL_DEV) };
 
 /*
  * PCI device binding table
@@ -3649,9 +3705,13 @@ static int cxlflash_probe(struct pci_dev *pdev,
 
 	cfg->init_state = INIT_STATE_NONE;
 	cfg->dev = pdev;
-	cfg->ops = &cxlflash_cxl_ops;
 	cfg->cxl_fops = cxlflash_cxl_fops;
 
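+	/* Select the backend that matches the transport of this device */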
+	if (ddv->flags & CXLFLASH_OCXL_DEV)
+		cfg->ops = &cxlflash_ocxl_ops;
+	else
+		cfg->ops = &cxlflash_cxl_ops;
+
 	/*
 	 * Promoted LUNs move to the top of the LUN table. The rest stay on
 	 * the bottom half. The bottom half grows from the end (index = 255),
@@ -3681,8 +3741,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, cfg);
 
-	cfg->afu_cookie = cfg->ops->create_afu(pdev);
-
 	rc = init_pci(cfg);
 	if (rc) {
 		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
@@ -3690,6 +3748,12 @@ static int cxlflash_probe(struct pci_dev *pdev,
 	}
 	cfg->init_state = INIT_STATE_PCI;
 
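+	/* Create the AFU now that PCI is initialized; creation can fail */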
+	cfg->afu_cookie = cfg->ops->create_afu(pdev);
+	if (unlikely(!cfg->afu_cookie)) {
+		dev_err(dev, "%s: create_afu failed\n", __func__);
+		goto out_remove;
+	}
+
 	rc = init_afu(cfg);
 	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
 		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index ba0108a7a9c23abae6fc48432868a1f3329b680c..6f1be621e473b54ad36436039506f80557cb616a 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -97,6 +97,7 @@ struct dev_dependent_vals {
 	u64 flags;
 #define CXLFLASH_NOTIFY_SHUTDOWN	0x0000000000000001ULL
 #define CXLFLASH_WWPN_VPD_REQUIRED	0x0000000000000002ULL
+#define CXLFLASH_OCXL_DEV		0x0000000000000004ULL
 };
 
 struct asyc_intr_info {
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
new file mode 100644
index 0000000000000000000000000000000000000000..0a95b5f253807888a8fa680397e5e344236d81d6
--- /dev/null
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -0,0 +1,1436 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/file.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/poll.h>
+#include <linux/sched/signal.h>
+
+#include <misc/ocxl.h>
+
+#include <uapi/misc/cxl.h>
+
+#include "backend.h"
+#include "ocxl_hw.h"
+
+/*
+ * Pseudo-filesystem to allocate inodes. Each adapter context is exposed
+ * to userspace as a file and needs a distinct inode (and address_space
+ * mapping) of its own, so the shared anon_inode cannot be used.
+ */
+
+#define OCXLFLASH_FS_MAGIC      0x1697698f
+
+static int ocxlflash_fs_cnt;
+static struct vfsmount *ocxlflash_vfs_mount;
+
+static const struct dentry_operations ocxlflash_fs_dops = {
+	.d_dname	= simple_dname,
+};
+
+/*
+ * ocxlflash_fs_mount() - mount the pseudo-filesystem
+ * @fs_type:	File system type.
+ * @flags:	Flags for the filesystem.
+ * @dev_name:	Device name associated with the filesystem.
+ * @data:	Data pointer.
+ *
+ * Return: pointer to the directory entry structure
+ */
+static struct dentry *ocxlflash_fs_mount(struct file_system_type *fs_type,
+					 int flags, const char *dev_name,
+					 void *data)
+{
+	return mount_pseudo(fs_type, "ocxlflash:", NULL, &ocxlflash_fs_dops,
+			    OCXLFLASH_FS_MAGIC);
+}
+
+static struct file_system_type ocxlflash_fs_type = {
+	.name		= "ocxlflash",
+	.owner		= THIS_MODULE,
+	.mount		= ocxlflash_fs_mount,
+	.kill_sb	= kill_anon_super,
+};
+
+/*
+ * ocxlflash_release_mapping() - release the memory mapping
+ * @ctx:	Context whose mapping is to be released.
+ */
+static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
+{
+	if (ctx->mapping)
+		simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
+	ctx->mapping = NULL;
+}
+
+/*
+ * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
+ * @dev:	Generic device of the host.
+ * @name:	Name of the pseudo filesystem.
+ * @fops:	File operations.
+ * @priv:	Private data.
+ * @flags:	Flags for the file.
+ *
+ * Return: pointer to the file on success, ERR_PTR on failure
+ */
+static struct file *ocxlflash_getfile(struct device *dev, const char *name,
+				      const struct file_operations *fops,
+				      void *priv, int flags)
+{
+	struct qstr this;
+	struct path path;
+	struct file *file;
+	struct inode *inode = NULL;
+	int rc;
+
+	if (fops->owner && !try_module_get(fops->owner)) {
+		dev_err(dev, "%s: Owner does not exist\n", __func__);
+		rc = -ENOENT;
+		goto err1;
+	}
+
+	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
+			   &ocxlflash_fs_cnt);
+	if (unlikely(rc < 0)) {
+		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
+			__func__, rc);
+		goto err2;
+	}
+
+	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
+	if (IS_ERR(inode)) {
+		rc = PTR_ERR(inode);
+		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
+			__func__, rc);
+		goto err3;
+	}
+
+	this.name = name;
+	this.len = strlen(name);
+	this.hash = 0;
+	path.dentry = d_alloc_pseudo(ocxlflash_vfs_mount->mnt_sb, &this);
+	if (!path.dentry) {
+		dev_err(dev, "%s: d_alloc_pseudo failed\n", __func__);
+		rc = -ENOMEM;
+		goto err4;
+	}
+
+	path.mnt = mntget(ocxlflash_vfs_mount);
+	d_instantiate(path.dentry, inode);
+
+	file = alloc_file(&path, OPEN_FMODE(flags), fops);
+	if (IS_ERR(file)) {
+		rc = PTR_ERR(file);
+		dev_err(dev, "%s: alloc_file failed rc=%d\n",
+			__func__, rc);
+		goto err5;
+	}
+
+	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
+	file->private_data = priv;
+out:
+	return file;
+err5:
+	path_put(&path);
+err4:
+	iput(inode);
+err3:
+	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
+err2:
+	module_put(fops->owner);
+err1:
+	file = ERR_PTR(rc);
+	goto out;
+}
+
+/**
+ * ocxlflash_psa_map() - map the process specific MMIO space
+ * @ctx_cookie:	Adapter context for which the mapping needs to be done.
+ *
+ * Return: MMIO pointer of the mapped region
+ */
+static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
+{
+	struct ocxlflash_context *ctx = ctx_cookie;
+	struct device *dev = ctx->hw_afu->dev;
+
+	mutex_lock(&ctx->state_mutex);
+	if (ctx->state != STARTED) {
+		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
+			ctx->state);
+		mutex_unlock(&ctx->state_mutex);
+		return NULL;
+	}
+	mutex_unlock(&ctx->state_mutex);
+
+	return ioremap(ctx->psn_phys, ctx->psn_size);
+}
+
+/**
+ * ocxlflash_psa_unmap() - unmap the process specific MMIO space
+ * @addr:	MMIO pointer to unmap.
+ */
+static void ocxlflash_psa_unmap(void __iomem *addr)
+{
+	iounmap(addr);
+}
+
+/**
+ * ocxlflash_process_element() - get process element of the adapter context
+ * @ctx_cookie:	Adapter context associated with the process element.
+ *
+ * Return: process element of the adapter context
+ */
+static int ocxlflash_process_element(void *ctx_cookie)
+{
+	struct ocxlflash_context *ctx = ctx_cookie;
+
+	return ctx->pe;
+}
+
+/**
+ * afu_map_irq() - map the interrupt of the adapter context
+ * @flags:	Flags.
+ * @ctx:	Adapter context.
+ * @num:	Per-context AFU interrupt number.
+ * @handler:	Interrupt handler to register.
+ * @cookie:	Interrupt handler private data.
+ * @name:	Name of the interrupt.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
+		       irq_handler_t handler, void *cookie, char *name)
+{
+	struct ocxl_hw_afu *afu = ctx->hw_afu;
+	struct device *dev = afu->dev;
+	struct ocxlflash_irqs *irq;
+	void __iomem *vtrig;
+	u32 virq;
+	int rc = 0;
+
+	if (num < 0 || num >= ctx->num_irqs) {
+		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
+		rc = -ENOENT;
+		goto out;
+	}
+
+	irq = &ctx->irqs[num];
+	virq = irq_create_mapping(NULL, irq->hwirq);
+	if (unlikely(!virq)) {
+		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = request_irq(virq, handler, 0, name, cookie);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
+		goto err1;
+	}
+
+	vtrig = ioremap(irq->ptrig, PAGE_SIZE);
+	if (unlikely(!vtrig)) {
+		dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
+		rc = -ENOMEM;
+		goto err2;
+	}
+
+	irq->virq = virq;
+	irq->vtrig = vtrig;
+out:
+	return rc;
+err2:
+	free_irq(virq, cookie);
+err1:
+	irq_dispose_mapping(virq);
+	goto out;
+}
+
+/**
+ * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
+ * @ctx_cookie:	Adapter context.
+ * @num:	Per-context AFU interrupt number.
+ * @handler:	Interrupt handler to register.
+ * @cookie:	Interrupt handler private data.
+ * @name:	Name of the interrupt.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
+				 irq_handler_t handler, void *cookie,
+				 char *name)
+{
+	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
+}
+
+/**
+ * afu_unmap_irq() - unmap the interrupt
+ * @flags:	Flags.
+ * @ctx:	Adapter context.
+ * @num:	Per-context AFU interrupt number.
+ * @cookie:	Interrupt handler private data.
+ */
+static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
+			  void *cookie)
+{
+	struct ocxl_hw_afu *afu = ctx->hw_afu;
+	struct device *dev = afu->dev;
+	struct ocxlflash_irqs *irq;
+
+	if (num < 0 || num >= ctx->num_irqs) {
+		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
+		return;
+	}
+
+	irq = &ctx->irqs[num];
+	if (irq->vtrig)
+		iounmap(irq->vtrig);
+
+	if (irq_find_mapping(NULL, irq->hwirq)) {
+		free_irq(irq->virq, cookie);
+		irq_dispose_mapping(irq->virq);
+	}
+
+	memset(irq, 0, sizeof(*irq));
+}
+
+/**
+ * ocxlflash_unmap_afu_irq() - unmap the interrupt
+ * @ctx_cookie:	Adapter context.
+ * @num:	Per-context AFU interrupt number.
+ * @cookie:	Interrupt handler private data.
+ */
+static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
+{
+	return afu_unmap_irq(0, ctx_cookie, num, cookie);
+}
+
+/**
+ * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
+ * @ctx_cookie:	Context associated with the interrupt.
+ * @irq:	Interrupt number.
+ *
+ * Return: effective address of the mapped region
+ */
+static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
+{
+	struct ocxlflash_context *ctx = ctx_cookie;
+
+	if (irq < 0 || irq >= ctx->num_irqs)
+		return 0;
+
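+	/* The mapped trigger page address doubles as the handle */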
+	return (__force u64)ctx->irqs[irq].vtrig;
+}
+
+/**
+ * ocxlflash_xsl_fault() - callback when translation error is triggered
+ * @data:	Private data provided at callback registration, the context.
+ * @addr:	Address that triggered the error.
+ * @dsisr:	Value of dsisr register.
+ */
+static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
+{
+	struct ocxlflash_context *ctx = data;
+
+	spin_lock(&ctx->slock);
+	ctx->fault_addr = addr;
+	ctx->fault_dsisr = dsisr;
+	ctx->pending_fault = true;
+	spin_unlock(&ctx->slock);
+
+	wake_up_all(&ctx->wq);
+}
+
+/**
+ * start_context() - local routine to start a context
+ * @ctx:	Adapter context to be started.
+ *
+ * Assign the context specific MMIO space, add and enable the PE.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int start_context(struct ocxlflash_context *ctx)
+{
+	struct ocxl_hw_afu *afu = ctx->hw_afu;
+	struct ocxl_afu_config *acfg = &afu->acfg;
+	void *link_token = afu->link_token;
+	struct device *dev = afu->dev;
+	bool master = ctx->master;
+	struct mm_struct *mm;
+	int rc = 0;
+	u32 pid;
+
+	mutex_lock(&ctx->state_mutex);
+	if (ctx->state != OPENED) {
+		dev_err(dev, "%s: Context state invalid, state=%d\n",
+			__func__, ctx->state);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (master) {
+		ctx->psn_size = acfg->global_mmio_size;
+		ctx->psn_phys = afu->gmmio_phys;
+	} else {
+		ctx->psn_size = acfg->pp_mmio_stride;
+		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
+	}
+
+	/* pid and mm not set for master contexts */
+	if (master) {
+		pid = 0;
+		mm = NULL;
+	} else {
+		pid = current->mm->context.id;
+		mm = current->mm;
+	}
+
+	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
+			      ocxlflash_xsl_fault, ctx);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+	ctx->state = STARTED;
+out:
+	mutex_unlock(&ctx->state_mutex);
+	return rc;
+}
+
+/**
+ * ocxlflash_start_context() - start a kernel context
+ * @ctx_cookie:	Adapter context to be started.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_start_context(void *ctx_cookie)
+{
+	struct ocxlflash_context *ctx = ctx_cookie;
+
+	return start_context(ctx);
+}
+
+/**
+ * ocxlflash_stop_context() - stop a context
+ * @ctx_cookie:	Adapter context to be stopped.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_stop_context(void *ctx_cookie)
+{
+	struct ocxlflash_context *ctx = ctx_cookie;
+	struct ocxl_hw_afu *afu = ctx->hw_afu;
+	struct ocxl_afu_config *acfg = &afu->acfg;
+	struct pci_dev *pdev = afu->pdev;
+	struct device *dev = afu->dev;
+	enum ocxlflash_ctx_state state;
+	int rc = 0;
+
+	mutex_lock(&ctx->state_mutex);
+	state = ctx->state;
+	ctx->state = CLOSED;
+	mutex_unlock(&ctx->state_mutex);
+	if (state != STARTED)
+		goto out;
+
+	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
+					 ctx->pe);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
+			__func__, rc);
+		/* If EBUSY, PE could be referenced in future by the AFU */
+		if (rc == -EBUSY)
+			goto out;
+	}
+
+	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+out:
+	return rc;
+}
+
+/**
+ * ocxlflash_afu_reset() - reset the AFU
+ * @ctx_cookie:	Adapter context.
+ *
+ * Return: 0, always, as AFU reset is not yet supported by OCXL
+ */
+static int ocxlflash_afu_reset(void *ctx_cookie)
+{
+	struct ocxlflash_context *ctx = ctx_cookie;
+	struct device *dev = ctx->hw_afu->dev;
+
+	/* Pending implementation from OCXL transport services */
+	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);
+
+	/* Silently return success until it is implemented */
+	return 0;
+}
+
+/**
+ * ocxlflash_set_master() - sets the context as master
+ * @ctx_cookie:	Adapter context to set as master.
+ */
+static void ocxlflash_set_master(void *ctx_cookie)
+{
+	struct ocxlflash_context *ctx = ctx_cookie;
+
+	ctx->master = true;
+}
+
+/**
+ * ocxlflash_get_context() - obtains the context associated with the host
+ * @pdev:	PCI device associated with the host.
+ * @afu_cookie:	Hardware AFU associated with the host.
+ *
+ * Return: pointer to the host adapter context
+ */
+static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
+{
+	struct ocxl_hw_afu *afu = afu_cookie;
+
+	return afu->ocxl_ctx;
+}
+
+/**
+ * ocxlflash_dev_context_init() - allocate and initialize an adapter context
+ * @pdev:	PCI device associated with the host.
+ * @afu_cookie:	Hardware AFU associated with the host.
+ *
+ * Return: adapter context on success, ERR_PTR on failure
+ */
+static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
+{
+	struct ocxl_hw_afu *afu = afu_cookie;
+	struct device *dev = afu->dev;
+	struct ocxlflash_context *ctx;
+	int rc;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (unlikely(!ctx)) {
+		dev_err(dev, "%s: Context allocation failed\n", __func__);
+		rc = -ENOMEM;
+		goto err1;
+	}
+
+	idr_preload(GFP_KERNEL);
+	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
+	idr_preload_end();
+	if (unlikely(rc < 0)) {
+		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
+		goto err2;
+	}
+
+	spin_lock_init(&ctx->slock);
+	init_waitqueue_head(&ctx->wq);
+	mutex_init(&ctx->state_mutex);
+
+	ctx->state = OPENED;
+	ctx->pe = rc;
+	ctx->master = false;
+	ctx->mapping = NULL;
+	ctx->hw_afu = afu;
+	ctx->irq_bitmap = 0;
+	ctx->pending_irq = false;
+	ctx->pending_fault = false;
+out:
+	return ctx;
+err2:
+	kfree(ctx);
+err1:
+	ctx = ERR_PTR(rc);
+	goto out;
+}
+
+/**
+ * ocxlflash_release_context() - releases an adapter context
+ * @ctx_cookie:	Adapter context to be released.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_release_context(void *ctx_cookie)
+{
+	struct ocxlflash_context *ctx = ctx_cookie;
+	struct device *dev;
+	int rc = 0;
+
+	if (!ctx)
+		goto out;
+
+	dev = ctx->hw_afu->dev;
+	mutex_lock(&ctx->state_mutex);
+	if (ctx->state >= STARTED) {
+		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
+			ctx->state);
+		mutex_unlock(&ctx->state_mutex);
+		rc = -EBUSY;
+		goto out;
+	}
+	mutex_unlock(&ctx->state_mutex);
+
+	idr_remove(&ctx->hw_afu->idr, ctx->pe);
+	ocxlflash_release_mapping(ctx);
+	kfree(ctx);
+out:
+	return rc;
+}
+
+/**
+ * ocxlflash_perst_reloads_same_image() - sets the image reload policy
+ * @afu_cookie:	Hardware AFU associated with the host.
+ * @image:	Whether to load the same image on PERST.
+ */
+static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
+{
+	struct ocxl_hw_afu *afu = afu_cookie;
+
+	afu->perst_same_image = image;
+}
+
+/**
+ * ocxlflash_read_adapter_vpd() - reads the adapter VPD
+ * @pdev:	PCI device associated with the host.
+ * @buf:	Buffer to get the VPD data.
+ * @count:	Size of buffer (maximum bytes that can be read).
+ *
+ * Return: size of VPD on success, -errno on failure
+ */
+static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
+					  size_t count)
+{
+	return pci_read_vpd(pdev, 0, count, buf);
+}
+
+/**
+ * free_afu_irqs() - internal service to free interrupts
+ * @ctx:	Adapter context.
+ */
+static void free_afu_irqs(struct ocxlflash_context *ctx)
+{
+	struct ocxl_hw_afu *afu = ctx->hw_afu;
+	struct device *dev = afu->dev;
+	int i;
+
+	if (!ctx->irqs) {
+		dev_err(dev, "%s: Interrupts not allocated\n", __func__);
+		return;
+	}
+
+	for (i = ctx->num_irqs - 1; i >= 0; i--)
+		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);
+
+	kfree(ctx->irqs);
+	ctx->irqs = NULL;
+}
+
+/**
+ * alloc_afu_irqs() - internal service to allocate interrupts
+ * @ctx:	Context associated with the request.
+ * @num:	Number of interrupts requested.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
+{
+	struct ocxl_hw_afu *afu = ctx->hw_afu;
+	struct device *dev = afu->dev;
+	struct ocxlflash_irqs *irqs;
+	u64 addr;
+	int rc = 0;
+	int hwirq;
+	int i;
+
+	if (ctx->irqs) {
+		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
+		rc = -EEXIST;
+		goto out;
+	}
+
+	if (num > OCXL_MAX_IRQS) {
+		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
+	if (unlikely(!irqs)) {
+		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+
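+	/*
+	 * Allocate each interrupt from the link, recording the hardware
+	 * IRQ number and the address of its trigger page.
+	 */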
+	for (i = 0; i < num; i++) {
+		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
+		if (unlikely(rc)) {
+			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
+				__func__, rc);
+			goto err;
+		}
+
+		irqs[i].hwirq = hwirq;
+		irqs[i].ptrig = addr;
+	}
+
+	ctx->irqs = irqs;
+	ctx->num_irqs = num;
+out:
+	return rc;
+err:
+	for (i = i - 1; i >= 0; i--)
+		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
+	kfree(irqs);
+	goto out;
+}
+
+/**
+ * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
+ * @ctx_cookie:	Context associated with the request.
+ * @num:	Number of interrupts requested.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
+{
+	return alloc_afu_irqs(ctx_cookie, num);
+}
+
+/**
+ * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
+ * @ctx_cookie:	Adapter context.
+ */
+static void ocxlflash_free_afu_irqs(void *ctx_cookie)
+{
+	free_afu_irqs(ctx_cookie);
+}
+
+/**
+ * ocxlflash_unconfig_afu() - unconfigure the AFU
+ * @afu: AFU associated with the host.
+ */
+static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
+{
+	if (afu->gmmio_virt) {
+		iounmap(afu->gmmio_virt);
+		afu->gmmio_virt = NULL;
+	}
+}
+
+/**
+ * ocxlflash_destroy_afu() - destroy the AFU structure
+ * @afu_cookie:	AFU to be freed.
+ */
+static void ocxlflash_destroy_afu(void *afu_cookie)
+{
+	struct ocxl_hw_afu *afu = afu_cookie;
+	int pos;
+
+	if (!afu)
+		return;
+
+	ocxlflash_release_context(afu->ocxl_ctx);
+	idr_destroy(&afu->idr);
+
+	/* Disable the AFU */
+	pos = afu->acfg.dvsec_afu_control_pos;
+	ocxl_config_set_afu_state(afu->pdev, pos, 0);
+
+	ocxlflash_unconfig_afu(afu);
+	kfree(afu);
+}
+
+/**
+ * ocxlflash_config_fn() - configure the host function
+ * @pdev:	PCI device associated with the host.
+ * @afu:	AFU associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
+{
+	struct ocxl_fn_config *fcfg = &afu->fcfg;
+	struct device *dev = &pdev->dev;
+	u16 base, enabled, supported;
+	int rc = 0;
+
+	/* Read DVSEC config of the function */
+	rc = ocxl_config_read_function(pdev, fcfg);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+	/* Check if function has AFUs defined, only 1 per function supported */
+	if (fcfg->max_afu_index >= 0) {
+		afu->is_present = true;
+		if (fcfg->max_afu_index != 0)
+			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
+				 __func__, fcfg->max_afu_index);
+	}
+
+	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+	afu->fn_actag_base = base;
+	afu->fn_actag_enabled = enabled;
+
+	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
+	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
+		__func__, base, enabled);
+
+	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
+			__func__, rc);
+		goto err;
+	}
+out:
+	return rc;
+err:
+	ocxl_link_release(pdev, afu->link_token);
+	goto out;
+}
+
+/**
+ * ocxlflash_unconfig_fn() - unconfigure the host function
+ * @pdev:	PCI device associated with the host.
+ * @afu:	AFU associated with the host.
+ */
+static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
+{
+	ocxl_link_release(pdev, afu->link_token);
+}
+
+/**
+ * ocxlflash_map_mmio() - map the AFU MMIO space
+ * @afu: AFU associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
+{
+	struct ocxl_afu_config *acfg = &afu->acfg;
+	struct pci_dev *pdev = afu->pdev;
+	struct device *dev = afu->dev;
+	phys_addr_t gmmio, ppmmio;
+	int rc = 0;
+
+	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
+	gmmio += acfg->global_mmio_offset;
+
+	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
+			__func__, rc);
+		goto err1;
+	}
+	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
+	ppmmio += acfg->pp_mmio_offset;
+
+	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
+	if (unlikely(!afu->gmmio_virt)) {
+		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
+		rc = -ENOMEM;
+		goto err2;
+	}
+
+	afu->gmmio_phys = gmmio;
+	afu->ppmmio_phys = ppmmio;
+out:
+	return rc;
+err2:
+	pci_release_region(pdev, acfg->pp_mmio_bar);
+err1:
+	pci_release_region(pdev, acfg->global_mmio_bar);
+	goto out;
+}
+
+/**
+ * ocxlflash_config_afu() - configure the host AFU
+ * @pdev:	PCI device associated with the host.
+ * @afu:	AFU associated with the host.
+ *
+ * Must be called _after_ host function configuration.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
+{
+	struct ocxl_afu_config *acfg = &afu->acfg;
+	struct ocxl_fn_config *fcfg = &afu->fcfg;
+	struct device *dev = &pdev->dev;
+	int count;
+	int base;
+	int pos;
+	int rc = 0;
+
+	/* This HW AFU function does not have any AFUs defined */
+	if (!afu->is_present)
+		goto out;
+
+	/* Read AFU config at index 0 */
+	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+	/*
+	 * Only one AFU per function is supported, so the AFU uses the
+	 * acTag base of the function.
+	 */
+	base = afu->fn_actag_base;
+	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
+	pos = acfg->dvsec_afu_control_pos;
+
+	ocxl_config_set_afu_actag(pdev, pos, base, count);
+	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
+	afu->afu_actag_base = base;
+	afu->afu_actag_enabled = count;
+	afu->max_pasid = 1 << acfg->pasid_supported_log;
+
+	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);
+
+	rc = ocxlflash_map_mmio(afu);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
+			__func__, rc);
+		goto out;
+	}
+
+	/* Enable the AFU */
+	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
+out:
+	return rc;
+}
+
+/**
+ * ocxlflash_create_afu() - create the AFU for OCXL
+ * @pdev:	PCI device associated with the host.
+ *
+ * Return: AFU on success, NULL on failure
+ */
+static void *ocxlflash_create_afu(struct pci_dev *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ocxlflash_context *ctx;
+	struct ocxl_hw_afu *afu;
+	int rc;
+
+	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
+	if (unlikely(!afu)) {
+		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
+		goto out;
+	}
+
+	afu->pdev = pdev;
+	afu->dev = dev;
+	idr_init(&afu->idr);
+
+	rc = ocxlflash_config_fn(pdev, afu);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: Function configuration failed rc=%d\n",
+			__func__, rc);
+		goto err1;
+	}
+
+	rc = ocxlflash_config_afu(pdev, afu);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
+			__func__, rc);
+		goto err2;
+	}
+
+	ctx = ocxlflash_dev_context_init(pdev, afu);
+	if (IS_ERR(ctx)) {
+		rc = PTR_ERR(ctx);
+		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
+			__func__, rc);
+		goto err3;
+	}
+
+	afu->ocxl_ctx = ctx;
+out:
+	return afu;
+err3:
+	ocxlflash_unconfig_afu(afu);
+err2:
+	ocxlflash_unconfig_fn(pdev, afu);
+err1:
+	idr_destroy(&afu->idr);
+	kfree(afu);
+	afu = NULL;
+	goto out;
+}
+
+/**
+ * ctx_event_pending() - check for any event pending on the context
+ * @ctx:	Context to be checked.
+ *
+ * Return: true if there is an event pending, false if none pending
+ */
+static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
+{
+	if (ctx->pending_irq || ctx->pending_fault)
+		return true;
+
+	return false;
+}
+
+/**
+ * afu_poll() - poll the AFU for events on the context
+ * @file:	File associated with the adapter context.
+ * @poll:	Poll structure from the user.
+ *
+ * Return: poll mask
+ */
+static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
+{
+	struct ocxlflash_context *ctx = file->private_data;
+	struct device *dev = ctx->hw_afu->dev;
+	ulong lock_flags;
+	int mask = 0;
+
+	poll_wait(file, &ctx->wq, poll);
+
+	spin_lock_irqsave(&ctx->slock, lock_flags);
+	if (ctx_event_pending(ctx))
+		mask |= POLLIN | POLLRDNORM;
+	else if (ctx->state == CLOSED)
+		mask |= POLLERR;
+	spin_unlock_irqrestore(&ctx->slock, lock_flags);
+
+	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
+		__func__, ctx->pe, mask);
+
+	return mask;
+}
+
+/**
+ * afu_read() - perform a read on the context for any event
+ * @file:	File associated with the adapter context.
+ * @buf:	Buffer to receive the data.
+ * @count:	Size of buffer (maximum bytes that can be read).
+ * @off:	Offset.
+ *
+ * Return: size of the data read on success, -errno on failure
+ */
+static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
+			loff_t *off)
+{
+	struct ocxlflash_context *ctx = file->private_data;
+	struct device *dev = ctx->hw_afu->dev;
+	struct cxl_event event;
+	ulong lock_flags;
+	ssize_t esize;
+	ssize_t rc;
+	int bit;
+	DEFINE_WAIT(event_wait);
+
+	if (*off != 0) {
+		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
+			__func__, *off);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	spin_lock_irqsave(&ctx->slock, lock_flags);
+
+	for (;;) {
+		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
+
+		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
+			break;
+
+		if (file->f_flags & O_NONBLOCK) {
+			dev_err(dev, "%s: Nonblocking read, no event pending\n",
+				__func__);
+			rc = -EAGAIN;
+			goto err;
+		}
+
+		if (signal_pending(current)) {
+			dev_err(dev, "%s: Signal pending on the process\n",
+				__func__);
+			rc = -ERESTARTSYS;
+			goto err;
+		}
+
+		spin_unlock_irqrestore(&ctx->slock, lock_flags);
+		schedule();
+		spin_lock_irqsave(&ctx->slock, lock_flags);
+	}
+
+	finish_wait(&ctx->wq, &event_wait);
+
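+	/* Events are reported in the cxl uapi format (struct cxl_event) */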
+	memset(&event, 0, sizeof(event));
+	event.header.process_element = ctx->pe;
+	event.header.size = sizeof(struct cxl_event_header);
+	if (ctx->pending_irq) {
+		esize = sizeof(struct cxl_event_afu_interrupt);
+		event.header.size += esize;
+		event.header.type = CXL_EVENT_AFU_INTERRUPT;
+
+		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
+		clear_bit(bit, &ctx->irq_bitmap);
+		event.irq.irq = bit + 1;
+		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
+			ctx->pending_irq = false;
+	} else if (ctx->pending_fault) {
+		event.header.size += sizeof(struct cxl_event_data_storage);
+		event.header.type = CXL_EVENT_DATA_STORAGE;
+		event.fault.addr = ctx->fault_addr;
+		event.fault.dsisr = ctx->fault_dsisr;
+		ctx->pending_fault = false;
+	}
+
+	spin_unlock_irqrestore(&ctx->slock, lock_flags);
+
+	if (copy_to_user(buf, &event, event.header.size)) {
+		dev_err(dev, "%s: copy_to_user failed\n", __func__);
+		rc = -EFAULT;
+		goto out;
+	}
+
+	rc = event.header.size;
+out:
+	return rc;
+err:
+	finish_wait(&ctx->wq, &event_wait);
+	spin_unlock_irqrestore(&ctx->slock, lock_flags);
+	goto out;
+}
+
+/**
+ * afu_release() - release and free the context
+ * @inode:	File inode pointer.
+ * @file:	File associated with the context.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int afu_release(struct inode *inode, struct file *file)
+{
+	struct ocxlflash_context *ctx = file->private_data;
+	int i;
+
+	/* Unmap and free the interrupts associated with the context */
+	for (i = ctx->num_irqs - 1; i >= 0; i--)
+		afu_unmap_irq(0, ctx, i, ctx);
+	free_afu_irqs(ctx);
+
+	return ocxlflash_release_context(ctx);
+}
+
+/**
+ * ocxlflash_mmap_fault() - mmap fault handler
+ * @vmf:	VM fault associated with current fault.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_mmap_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct ocxlflash_context *ctx = vma->vm_file->private_data;
+	struct device *dev = ctx->hw_afu->dev;
+	u64 mmio_area, offset;
+
+	offset = vmf->pgoff << PAGE_SHIFT;
+	if (offset >= ctx->psn_size)
+		return VM_FAULT_SIGBUS;
+
+	mutex_lock(&ctx->state_mutex);
+	if (ctx->state != STARTED) {
+		dev_err(dev, "%s: Context not started, state=%d\n",
+			__func__, ctx->state);
+		mutex_unlock(&ctx->state_mutex);
+		return VM_FAULT_SIGBUS;
+	}
+	mutex_unlock(&ctx->state_mutex);
+
+	mmio_area = ctx->psn_phys;
+	mmio_area += offset;
+
+	vm_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
+	return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct ocxlflash_vmops = {
+	.fault = ocxlflash_mmap_fault,
+};
+
+/**
+ * afu_mmap() - install the fault handler operations for the mapping
+ * @file:	File associated with the context.
+ * @vma:	VM area associated with mapping.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int afu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct ocxlflash_context *ctx = file->private_data;
+
+	if ((vma_pages(vma) + vma->vm_pgoff) >
+	    (ctx->psn_size >> PAGE_SHIFT))
+		return -EINVAL;
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_ops = &ocxlflash_vmops;
+	return 0;
+}
+
+static const struct file_operations ocxl_afu_fops = {
+	.owner		= THIS_MODULE,
+	.poll		= afu_poll,
+	.read		= afu_read,
+	.release	= afu_release,
+	.mmap		= afu_mmap,
+};
+
+#define PATCH_FOPS(NAME)						\
+	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
+
+/**
+ * ocxlflash_get_fd() - get file descriptor for an adapter context
+ * @ctx_cookie:	Adapter context.
+ * @fops:	File operations to be associated.
+ * @fd:		File descriptor to be returned back.
+ *
+ * Return: pointer to the file on success, ERR_PTR on failure
+ */
+static struct file *ocxlflash_get_fd(void *ctx_cookie,
+				     struct file_operations *fops, int *fd)
+{
+	struct ocxlflash_context *ctx = ctx_cookie;
+	struct device *dev = ctx->hw_afu->dev;
+	struct file *file;
+	int flags, fdtmp;
+	int rc = 0;
+	char *name = NULL;
+
+	/* Only allow one fd per context */
+	if (ctx->mapping) {
+		dev_err(dev, "%s: Context is already mapped to an fd\n",
+			__func__);
+		rc = -EEXIST;
+		goto err1;
+	}
+
+	flags = O_RDWR | O_CLOEXEC;
+
+	/* This code is similar to anon_inode_getfd() */
+	rc = get_unused_fd_flags(flags);
+	if (unlikely(rc < 0)) {
+		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
+			__func__, rc);
+		goto err1;
+	}
+	fdtmp = rc;
+
+	/* Patch the file ops that are not defined */
+	if (fops) {
+		PATCH_FOPS(poll);
+		PATCH_FOPS(read);
+		PATCH_FOPS(release);
+		PATCH_FOPS(mmap);
+	} else /* Use default ops */
+		fops = (struct file_operations *)&ocxl_afu_fops;
+
+	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
+	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
+	kfree(name);
+	if (IS_ERR(file)) {
+		rc = PTR_ERR(file);
+		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
+			__func__, rc);
+		goto err2;
+	}
+
+	ctx->mapping = file->f_mapping;
+	*fd = fdtmp;
+out:
+	return file;
+err2:
+	put_unused_fd(fdtmp);
+err1:
+	file = ERR_PTR(rc);
+	goto out;
+}
+
+/**
+ * ocxlflash_fops_get_context() - get the context associated with the file
+ * @file:	File associated with the adapter context.
+ *
+ * Return: pointer to the context
+ */
+static void *ocxlflash_fops_get_context(struct file *file)
+{
+	return file->private_data;
+}
+
+/**
+ * ocxlflash_afu_irq() - interrupt handler for user contexts
+ * @irq:	Interrupt number.
+ * @data:	Private data provided at interrupt registration, the context.
+ *
+ * Return: Always returns IRQ_HANDLED.
+ */
+static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
+{
+	struct ocxlflash_context *ctx = data;
+	struct device *dev = ctx->hw_afu->dev;
+	int i;
+
+	dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
+		__func__, ctx->pe, irq);
+
+	for (i = 0; i < ctx->num_irqs; i++) {
+		if (ctx->irqs[i].virq == irq)
+			break;
+	}
+	if (unlikely(i >= ctx->num_irqs)) {
+		dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
+		goto out;
+	}
+
+	spin_lock(&ctx->slock);
+	set_bit(i - 1, &ctx->irq_bitmap);
+	ctx->pending_irq = true;
+	spin_unlock(&ctx->slock);
+
+	wake_up_all(&ctx->wq);
+out:
+	return IRQ_HANDLED;
+}
+
+/**
+ * ocxlflash_start_work() - start a user context
+ * @ctx_cookie:	Context to be started.
+ * @num_irqs:	Number of interrupts requested.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
+{
+	struct ocxlflash_context *ctx = ctx_cookie;
+	struct ocxl_hw_afu *afu = ctx->hw_afu;
+	struct device *dev = afu->dev;
+	char *name;
+	int rc = 0;
+	int i;
+
+	rc = alloc_afu_irqs(ctx, num_irqs);
+	if (unlikely(rc < 0)) {
+		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
+		goto out;
+	}
+
+	for (i = 0; i < num_irqs; i++) {
+		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
+				 dev_name(dev), ctx->pe, i);
+		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
+		kfree(name);
+		if (unlikely(rc < 0)) {
+			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
+				__func__, rc);
+			goto err;
+		}
+	}
+
+	rc = start_context(ctx);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
+		goto err;
+	}
+out:
+	return rc;
+err:
+	for (i = i - 1; i >= 0; i--)
+		afu_unmap_irq(0, ctx, i, ctx);
+	free_afu_irqs(ctx);
+	goto out;
+}
+
+/**
+ * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
+ * @file:	File installed with adapter file descriptor.
+ * @vma:	VM area associated with mapping.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	return afu_mmap(file, vma);
+}
+
+/**
+ * ocxlflash_fd_release() - release the context associated with the file
+ * @inode:	File inode pointer.
+ * @file:	File associated with the adapter context.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ocxlflash_fd_release(struct inode *inode, struct file *file)
+{
+	return afu_release(inode, file);
+}
+
+/* Backend ops to ocxlflash services */
+const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
+	.module			= THIS_MODULE,
+	.psa_map		= ocxlflash_psa_map,
+	.psa_unmap		= ocxlflash_psa_unmap,
+	.process_element	= ocxlflash_process_element,
+	.map_afu_irq		= ocxlflash_map_afu_irq,
+	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
+	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
+	.start_context		= ocxlflash_start_context,
+	.stop_context		= ocxlflash_stop_context,
+	.afu_reset		= ocxlflash_afu_reset,
+	.set_master		= ocxlflash_set_master,
+	.get_context		= ocxlflash_get_context,
+	.dev_context_init	= ocxlflash_dev_context_init,
+	.release_context	= ocxlflash_release_context,
+	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
+	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
+	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
+	.free_afu_irqs		= ocxlflash_free_afu_irqs,
+	.create_afu		= ocxlflash_create_afu,
+	.destroy_afu		= ocxlflash_destroy_afu,
+	.get_fd			= ocxlflash_get_fd,
+	.fops_get_context	= ocxlflash_fops_get_context,
+	.start_work		= ocxlflash_start_work,
+	.fd_mmap		= ocxlflash_fd_mmap,
+	.fd_release		= ocxlflash_fd_release,
+};
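
The afu_read() path above delivers exactly one event per call: a cxl_event_header followed by either an AFU interrupt or a data-storage (fault) payload, with header.size giving the total length. Below is a minimal sketch of a userspace consumer, assuming the descriptor was returned by the cxlflash attach ioctl (DK_CXLFLASH_ATTACH) and that struct cxl_event and the CXL_EVENT_* constants come from the uapi header <misc/cxl.h>; error handling is elided.

#include <stdio.h>
#include <unistd.h>
#include <misc/cxl.h>

static int drain_one_event(int fd)
{
	struct cxl_event event;
	ssize_t n;

	/* afu_read() returns exactly one event per read(), header first */
	n = read(fd, &event, sizeof(event));
	if (n < (ssize_t)sizeof(struct cxl_event_header))
		return -1;

	switch (event.header.type) {
	case CXL_EVENT_AFU_INTERRUPT:
		/* interrupt numbers are reported 1-based, as in afu_read() */
		printf("AFU interrupt %u\n", (unsigned int)event.irq.irq);
		break;
	case CXL_EVENT_DATA_STORAGE:
		printf("fault addr=0x%llx dsisr=0x%llx\n",
		       (unsigned long long)event.fault.addr,
		       (unsigned long long)event.fault.dsisr);
		break;
	default:
		break;
	}
	return 0;
}

The 1-based interrupt numbering (bit + 1 in afu_read()) mirrors the existing cxl convention that the rest of cxlflash expects.
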
diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h
new file mode 100644
index 0000000000000000000000000000000000000000..9270d35c46209eecdc310d96b411235e4c9a8af1
--- /dev/null
+++ b/drivers/scsi/cxlflash/ocxl_hw.h
@@ -0,0 +1,77 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *	       Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define OCXL_MAX_IRQS	4	/* Max interrupts per process */
+
+struct ocxlflash_irqs {
+	int hwirq;
+	u32 virq;
+	u64 ptrig;
+	void __iomem *vtrig;
+};
+
+/* OCXL hardware AFU associated with the host */
+struct ocxl_hw_afu {
+	struct ocxlflash_context *ocxl_ctx; /* Host context */
+	struct pci_dev *pdev;		/* PCI device */
+	struct device *dev;		/* Generic device */
+	bool perst_same_image;		/* Same image loaded on perst */
+
+	struct ocxl_fn_config fcfg;	/* DVSEC config of the function */
+	struct ocxl_afu_config acfg;	/* AFU configuration data */
+
+	int fn_actag_base;		/* Function acTag base */
+	int fn_actag_enabled;		/* Function acTag number enabled */
+	int afu_actag_base;		/* AFU acTag base */
+	int afu_actag_enabled;		/* AFU acTag number enabled */
+
+	phys_addr_t ppmmio_phys;	/* Per process MMIO space */
+	phys_addr_t gmmio_phys;		/* Global AFU MMIO space */
+	void __iomem *gmmio_virt;	/* Global MMIO map */
+
+	void *link_token;		/* Link token for the SPA */
+	struct idr idr;			/* IDR to manage contexts */
+	int max_pasid;			/* Maximum number of contexts */
+	bool is_present;		/* Function has AFUs defined */
+};
+
+enum ocxlflash_ctx_state {
+	CLOSED,
+	OPENED,
+	STARTED
+};
+
+struct ocxlflash_context {
+	struct ocxl_hw_afu *hw_afu;	/* HW AFU back pointer */
+	struct address_space *mapping;	/* Mapping for pseudo filesystem */
+	bool master;			/* Whether this is a master context */
+	int pe;				/* Process element */
+
+	phys_addr_t psn_phys;		/* Process mapping */
+	u64 psn_size;			/* Process mapping size */
+
+	spinlock_t slock;		/* Protects irq/fault/event updates */
+	wait_queue_head_t wq;		/* Wait queue for poll and interrupts */
+	struct mutex state_mutex;	/* Mutex to update context state */
+	enum ocxlflash_ctx_state state;	/* Context state */
+
+	struct ocxlflash_irqs *irqs;	/* Pointer to array of structures */
+	int num_irqs;			/* Number of interrupts */
+	bool pending_irq;		/* Pending interrupt on the context */
+	ulong irq_bitmap;		/* Bits indicating pending irq num */
+
+	u64 fault_addr;			/* Address that triggered the fault */
+	u64 fault_dsisr;		/* Value of dsisr register at fault */
+	bool pending_fault;		/* Pending translation fault */
+};
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index bedf1ce2f33c468913d3e13891fdc21ff58bfc44..874abce35ab4a7063b84fbaeac805b6f0812d2ec 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -258,23 +258,30 @@ struct sisl_host_map {
 				 * exit since there is no way to tell which
 				 * command caused the error.
 				 */
-#define SISL_ISTATUS_PERM_ERR_CMDROOM    0x0010ULL	/* b59, user error */
-#define SISL_ISTATUS_PERM_ERR_RCB_READ   0x0008ULL	/* b60, user error */
-#define SISL_ISTATUS_PERM_ERR_SA_WRITE   0x0004ULL	/* b61, user error */
-#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE  0x0002ULL	/* b62, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_3_EA		0x0400ULL /* b53, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_2_EA		0x0200ULL /* b54, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_1_EA		0x0100ULL /* b55, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_3_PASID	0x0080ULL /* b56, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_2_PASID	0x0040ULL /* b57, user error */
+#define SISL_ISTATUS_PERM_ERR_LISN_1_PASID	0x0020ULL /* b58, user error */
+#define SISL_ISTATUS_PERM_ERR_CMDROOM		0x0010ULL /* b59, user error */
+#define SISL_ISTATUS_PERM_ERR_RCB_READ		0x0008ULL /* b60, user error */
+#define SISL_ISTATUS_PERM_ERR_SA_WRITE		0x0004ULL /* b61, user error */
+#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE		0x0002ULL /* b62, user error */
 	/* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
 	 * Same error in data/LXT/RHT access is reported via IOASA.
 	 */
-#define SISL_ISTATUS_TEMP_ERR_PAGEIN     0x0001ULL	/* b63, can be generated
-							 * only when AFU auto
-							 * retry is disabled.
-							 * If user can determine
-							 * the command that
-							 * caused the error, it
-							 * can be retried.
-							 */
-#define SISL_ISTATUS_UNMASK  (0x001FULL)	/* 1 means unmasked */
-#define SISL_ISTATUS_MASK    ~(SISL_ISTATUS_UNMASK)	/* 1 means masked */
+#define SISL_ISTATUS_TEMP_ERR_PAGEIN		0x0001ULL /* b63, can only be
+							   * generated when AFU
+							   * auto retry is
+							   * disabled. If user
+							   * can determine the
+							   * command that caused
+							   * the error, it can
+							   * be retried.
+							   */
+#define SISL_ISTATUS_UNMASK	(0x07FFULL)		/* 1 means unmasked */
+#define SISL_ISTATUS_MASK	~(SISL_ISTATUS_UNMASK)	/* 1 means masked */
 
 	__be64 intr_clear;
 	__be64 intr_mask;
@@ -284,6 +291,7 @@ struct sisl_host_map {
 	__be64 cmd_room;
 	__be64 ctx_ctrl;	/* least significant byte or b56:63 is LISN# */
 #define SISL_CTX_CTRL_UNMAP_SECTOR	0x8000000000000000ULL /* b0 */
+#define SISL_CTX_CTRL_LISN_MASK		(0xFFULL)
 	__be64 mbox_w;		/* restricted use */
 	__be64 sq_start;	/* Submission Queue (R/W): write sequence and */
 	__be64 sq_end;		/* inclusion semantics are the same as RRQ    */
@@ -309,6 +317,10 @@ struct sisl_ctrl_map {
 #define SISL_CTX_CAP_WRITE_CMD         0x0000000000000002ULL /* afu_rc 0x21 */
 #define SISL_CTX_CAP_READ_CMD          0x0000000000000001ULL /* afu_rc 0x21 */
 	__be64 mbox_r;
+	__be64 lisn_pasid[2];
+	/* The pasid (_a) argument must be a 64-bit (ULL) value */
+#define SISL_LISN_PASID(_a, _b)	(((_a) << 32) | (_b))
+	__be64 lisn_ea[3];
 };
 
 /* single copy global regs */
@@ -415,6 +427,7 @@ struct sisl_global_regs {
 #define SISL_INTVER_CAP_RESERVED_CMD_MODE_B	0x100000000000ULL
 #define SISL_INTVER_CAP_LUN_PROVISION		0x080000000000ULL
 #define SISL_INTVER_CAP_AFU_DEBUG		0x040000000000ULL
+#define SISL_INTVER_CAP_OCXL_LISN		0x020000000000ULL
 };
 
 #define CXLFLASH_NUM_FC_PORTS_PER_BANK	2	/* fixed # of ports per bank */
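
The unmask constant above grows from 0x001F (b59..b63) to 0x07FF (b53..b63) to cover the six new per-LISN user errors. The comments use IBM bit numbering, where bit b(n) of a 64-bit register is host bit (63 - n); a standalone sketch that recomputes the constant under that convention:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t unmask = 0;
	int n;

	/* IBM bit b(n) of a 64-bit register is host bit (63 - n) */
	for (n = 53; n <= 63; n++)	/* b53..b63: the error bits above */
		unmask |= 1ULL << (63 - n);

	printf("0x%04llx\n", (unsigned long long)unmask);	/* prints 0x07ff */
	return 0;
}
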
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 2fe79df5c73c62bc3ca73f887c127b2ba3be90f5..04a3bf9dc85fd9738e66ddfbab01b6ab1cb4bfac 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -269,6 +269,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
 	int rc = 0;
 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
 	u64 val;
+	int i;
 
 	/* Unlock cap and restrict user to read/write cmds in translated mode */
 	readq_be(&ctrl_map->mbox_r);
@@ -282,6 +283,19 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
 		goto out;
 	}
 
+	if (afu_is_ocxl_lisn(afu)) {
+		/* Set up the LISN effective address for each interrupt */
+		for (i = 0; i < ctxi->irqs; i++) {
+			val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
+			writeq_be(val, &ctrl_map->lisn_ea[i]);
+		}
+
+		/* Use primary HWQ PASID as identifier for all interrupts */
+		val = hwq->ctx_hndl;
+		writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
+		writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
+	}
+
 	/* Set up MMIO registers pointing to the RHT */
 	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
 	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
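
The lisn_pasid writes above pack two 32-bit PASIDs per 64-bit register via SISL_LISN_PASID; per the sislite.h comment, the first argument must be a 64-bit (ULL) value so the shift by 32 does not truncate. A standalone sketch of the packing, with an illustrative handle value:

#include <stdio.h>
#include <stdint.h>

#define SISL_LISN_PASID(_a, _b)	(((_a) << 32) | (_b))

int main(void)
{
	uint64_t pasid = 0x123;		/* illustrative context handle */

	/* lisn_pasid[0]: the same PASID in both 32-bit halves */
	printf("lisn_pasid[0] = 0x%016llx\n",
	       (unsigned long long)SISL_LISN_PASID(pasid, pasid));
	/* lisn_pasid[1]: PASID in the low half only */
	printf("lisn_pasid[1] = 0x%016llx\n",
	       (unsigned long long)SISL_LISN_PASID(0ULL, pasid));
	return 0;
}
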
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 9dffcb28c9b74d0cf0ee89abce9cf3a39f3eefd6..9db645dde35ec355071219362d301f05ef322543 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -1202,8 +1202,6 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
 	case ESAS2R_INIT_MSG_START:
 	case ESAS2R_INIT_MSG_REINIT:
 	{
-		struct timeval now;
-		do_gettimeofday(&now);
 		esas2r_hdebug("CFG init");
 		esas2r_build_cfg_req(a,
 				     rq,
@@ -1212,7 +1210,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
 				     NULL);
 		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
 		ci->sgl_page_size = cpu_to_le32(sgl_page_size);
-		ci->epoch_time = cpu_to_le32(now.tv_sec);
+		/* firmware interface overflows in y2106 */
+		ci->epoch_time = cpu_to_le32(ktime_get_real_seconds());
 		rq->flags |= RF_FAILURE_OK;
 		a->init_msg = ESAS2R_INIT_MSG_INIT;
 		break;
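
The y2106 comment above refers to the 32-bit seconds field in the firmware interface: ktime_get_real_seconds() returns a 64-bit count, and cpu_to_le32() keeps only the low 32 bits, which wrap 2^32 seconds after the epoch. A quick demonstration, assuming a platform with 64-bit time_t:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
	time_t wrap = (time_t)4294967296LL;	/* 2^32 seconds past the epoch */

	printf("32-bit field wraps at: %s", ctime(&wrap));	/* Feb 2106 */
	printf("truncated value then: %u\n", (uint32_t)wrap);	/* 0 */
	return 0;
}
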
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index d1153e8e846bda304b8a071d4bfa170553d1d49f..d413d05fda2629a12763c9888edf473f714fcbcc 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -137,7 +137,6 @@ struct hisi_sas_phy {
 	struct asd_sas_phy	sas_phy;
 	struct sas_identify	identify;
 	u64		port_id; /* from hw */
-	u64		dev_sas_addr;
 	u64		frame_rcvd_size;
 	u8		frame_rcvd[32];
 	u8		phy_attached;
@@ -174,7 +173,6 @@ struct hisi_sas_device {
 	struct completion *completion;
 	struct hisi_sas_dq	*dq;
 	struct list_head	list;
-	u64 attached_phy;
 	enum sas_device_type	dev_type;
 	int device_id;
 	int sata_idx;
@@ -440,7 +438,6 @@ extern struct scsi_transport_template *hisi_sas_stt;
 extern struct scsi_host_template *hisi_sas_sht;
 
 extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
-extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
 extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
 extern void hisi_sas_free(struct hisi_hba *hisi_hba);
 extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 49c1fa6438030bd4f0bba526b1a8bcc2c27c50b4..d1a61b1e591b06d82566b87177a10583c005a5ac 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -78,22 +78,23 @@ u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
 	case ATA_CMD_STANDBYNOW1:
 	case ATA_CMD_ZAC_MGMT_OUT:
 		return HISI_SAS_SATA_PROTOCOL_NONDATA;
+
+	case ATA_CMD_SET_MAX:
+		switch (fis->features) {
+		case ATA_SET_MAX_PASSWD:
+		case ATA_SET_MAX_LOCK:
+			return HISI_SAS_SATA_PROTOCOL_PIO;
+
+		case ATA_SET_MAX_PASSWD_DMA:
+		case ATA_SET_MAX_UNLOCK_DMA:
+			return HISI_SAS_SATA_PROTOCOL_DMA;
+
+		default:
+			return HISI_SAS_SATA_PROTOCOL_NONDATA;
+		}
+
 	default:
 	{
-		if (fis->command == ATA_CMD_SET_MAX) {
-			switch (fis->features) {
-			case ATA_SET_MAX_PASSWD:
-			case ATA_SET_MAX_LOCK:
-				return HISI_SAS_SATA_PROTOCOL_PIO;
-
-			case ATA_SET_MAX_PASSWD_DMA:
-			case ATA_SET_MAX_UNLOCK_DMA:
-				return HISI_SAS_SATA_PROTOCOL_DMA;
-
-			default:
-				return HISI_SAS_SATA_PROTOCOL_NONDATA;
-			}
-		}
 		if (direction == DMA_NONE)
 			return HISI_SAS_SATA_PROTOCOL_NONDATA;
 		return HISI_SAS_SATA_PROTOCOL_PIO;
@@ -576,10 +577,8 @@ static int hisi_sas_dev_found(struct domain_device *device)
 		for (phy_no = 0; phy_no < phy_num; phy_no++) {
 			phy = &parent_dev->ex_dev.ex_phy[phy_no];
 			if (SAS_ADDR(phy->attached_sas_addr) ==
-				SAS_ADDR(device->sas_addr)) {
-				sas_dev->attached_phy = phy_no;
+				SAS_ADDR(device->sas_addr))
 				break;
-			}
 		}
 
 		if (phy_no == phy_num) {
@@ -1822,13 +1821,11 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
 		goto err_out;
 
 	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
-	hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
+	hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
 					    GFP_KERNEL);
 	if (!hisi_hba->itct)
 		goto err_out;
 
-	memset(hisi_hba->itct, 0, s);
-
 	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
 					   sizeof(struct hisi_sas_slot),
 					   GFP_KERNEL);
@@ -2080,17 +2077,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 	return NULL;
 }
 
-void hisi_sas_init_add(struct hisi_hba *hisi_hba)
-{
-	int i;
-
-	for (i = 0; i < hisi_hba->n_phy; i++)
-		memcpy(&hisi_hba->phy[i].dev_sas_addr,
-		       hisi_hba->sas_addr,
-		       SAS_ADDR_SIZE);
-}
-EXPORT_SYMBOL_GPL(hisi_sas_init_add);
-
 int hisi_sas_probe(struct platform_device *pdev,
 			 const struct hisi_sas_hw *hw)
 {
@@ -2144,8 +2130,6 @@ int hisi_sas_probe(struct platform_device *pdev,
 		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
 	}
 
-	hisi_sas_init_add(hisi_hba);
-
 	rc = scsi_add_host(shost, &pdev->dev);
 	if (rc)
 		goto err_out_ha;
@@ -2177,6 +2161,9 @@ int hisi_sas_remove(struct platform_device *pdev)
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
 	struct Scsi_Host *shost = sha->core.shost;
 
+	if (timer_pending(&hisi_hba->timer))
+		del_timer(&hisi_hba->timer);
+
 	sas_unregister_ha(sha);
 	sas_remove_host(sha->core.shost);
 
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index f89fb9a49ea972c21d33db91f77b049b87484355..a5abde855cb2575a0cebbfd1950ac06e9cbdb595 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2459,10 +2459,10 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 			slot_err_v2_hw(hisi_hba, task, slot, 2);
 
 		if (ts->stat != SAS_DATA_UNDERRUN)
-			dev_info(dev, "erroneous completion iptt=%d task=%p "
+			dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
 				"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
 				"Error info: 0x%x 0x%x 0x%x 0x%x\n",
-				slot->idx, task,
+				slot->idx, task, sas_dev->device_id,
 				complete_hdr->dw0, complete_hdr->dw1,
 				complete_hdr->act, complete_hdr->dw3,
 				error_info[0], error_info[1],
@@ -3295,6 +3295,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
 	sas_phy->oob_mode = SATA_OOB_MODE;
 	/* Make up some unique SAS address */
 	attached_sas_addr[0] = 0x50;
+	attached_sas_addr[6] = hisi_hba->shost->host_no;
 	attached_sas_addr[7] = phy_no;
 	memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
 	memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
@@ -3598,9 +3599,6 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
 	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
 
-	if (timer_pending(&hisi_hba->timer))
-		del_timer(&hisi_hba->timer);
-
 	hisi_sas_kill_tasklets(hisi_hba);
 
 	return hisi_sas_remove(pdev);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 6f3e5ba6b472f25c14f192e0bae9fa2ded493b22..33735a7082b6dfab094c6a734517f68f0a820ec2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -216,6 +216,9 @@
 #define SAS_RAS_INTR1			(RAS_BASE + 0x04)
 #define SAS_RAS_INTR0_MASK		(RAS_BASE + 0x08)
 #define SAS_RAS_INTR1_MASK		(RAS_BASE + 0x0c)
+#define CFG_SAS_RAS_INTR_MASK		(RAS_BASE + 0x1c)
+#define SAS_RAS_INTR2			(RAS_BASE + 0x20)
+#define SAS_RAS_INTR2_MASK		(RAS_BASE + 0x24)
 
 /* HW dma structures */
 /* Delivery queue header */
@@ -392,6 +395,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
 
 static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 {
+	struct pci_dev *pdev = hisi_hba->pci_dev;
 	int i;
 
 	/* Global registers init */
@@ -409,7 +413,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
-	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
+	if (pdev->revision >= 0x21)
+		hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff);
+	else
+		hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
 	hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
 	hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
@@ -428,7 +435,12 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
-		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
+		if (pdev->revision >= 0x21)
+			hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
+					0xffffffff);
+		else
+			hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK,
+					0xff87ffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
 		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -503,6 +515,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	/* RAS registers init */
 	hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
 	hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
+	hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
+	hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);
 }
 
 static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1319,6 +1333,13 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
 						     CHL_INT1);
 		u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
 						     CHL_INT2);
+		u32 irq_msk1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+							CHL_INT1_MSK);
+		u32 irq_msk2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+							CHL_INT2_MSK);
+
+		irq_value1 &= ~irq_msk1;
+		irq_value2 &= ~irq_msk2;
 
 		if ((irq_msk & (4 << (phy_no * 4))) &&
 						irq_value1) {
@@ -1448,6 +1469,7 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);
 
 	irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+	irq_value &= ~irq_msk;
 
 	for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
 		const struct hisi_sas_hw_error *error = &fatal_axi_error[i];
@@ -1619,10 +1641,10 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
 
 		slot_err_v3_hw(hisi_hba, task, slot);
 		if (ts->stat != SAS_DATA_UNDERRUN)
-			dev_info(dev, "erroneous completion iptt=%d task=%p "
+			dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d "
 				"CQ hdr: 0x%x 0x%x 0x%x 0x%x "
 				"Error info: 0x%x 0x%x 0x%x 0x%x\n",
-				slot->idx, task,
+				slot->idx, task, sas_dev->device_id,
 				complete_hdr->dw0, complete_hdr->dw1,
 				complete_hdr->act, complete_hdr->dw3,
 				error_info[0], error_info[1],
@@ -1709,15 +1731,20 @@ static void cq_tasklet_v3_hw(unsigned long val)
 
 	while (rd_point != wr_point) {
 		struct hisi_sas_complete_v3_hdr *complete_hdr;
+		struct device *dev = hisi_hba->dev;
 		int iptt;
 
 		complete_hdr = &complete_queue[rd_point];
 
 		iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
-		slot = &hisi_hba->slot_info[iptt];
-		slot->cmplt_queue_slot = rd_point;
-		slot->cmplt_queue = queue;
-		slot_complete_v3_hw(hisi_hba, slot);
+		if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
+			slot = &hisi_hba->slot_info[iptt];
+			slot->cmplt_queue_slot = rd_point;
+			slot->cmplt_queue = queue;
+			slot_complete_v3_hw(hisi_hba, slot);
+		} else {
+			dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);
+		}
 
 		if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
 			rd_point = 0;
@@ -2108,8 +2134,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
 	}
 
-	hisi_sas_init_add(hisi_hba);
-
 	rc = scsi_add_host(shost, dev);
 	if (rc)
 		goto err_out_ha;
@@ -2161,6 +2185,9 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
 	struct Scsi_Host *shost = sha->core.shost;
 
+	if (timer_pending(&hisi_hba->timer))
+		del_timer(&hisi_hba->timer);
+
 	sas_unregister_ha(sha);
 	sas_remove_host(sha->core.shost);
 
@@ -2222,6 +2249,29 @@ static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
 	{ .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
 };
 
+static const struct hisi_sas_hw_error sas_ras_intr2_nfe[] = {
+	{ .irq_msk = BIT(0), .msg = "DMAC0_AXI_BUS_ERR" },
+	{ .irq_msk = BIT(1), .msg = "DMAC1_AXI_BUS_ERR" },
+	{ .irq_msk = BIT(2), .msg = "DMAC2_AXI_BUS_ERR" },
+	{ .irq_msk = BIT(3), .msg = "DMAC3_AXI_BUS_ERR" },
+	{ .irq_msk = BIT(4), .msg = "DMAC4_AXI_BUS_ERR" },
+	{ .irq_msk = BIT(5), .msg = "DMAC5_AXI_BUS_ERR" },
+	{ .irq_msk = BIT(6), .msg = "DMAC6_AXI_BUS_ERR" },
+	{ .irq_msk = BIT(7), .msg = "DMAC7_AXI_BUS_ERR" },
+	{ .irq_msk = BIT(8), .msg = "DMAC0_FIFO_OMIT_ERR" },
+	{ .irq_msk = BIT(9), .msg = "DMAC1_FIFO_OMIT_ERR" },
+	{ .irq_msk = BIT(10), .msg = "DMAC2_FIFO_OMIT_ERR" },
+	{ .irq_msk = BIT(11), .msg = "DMAC3_FIFO_OMIT_ERR" },
+	{ .irq_msk = BIT(12), .msg = "DMAC4_FIFO_OMIT_ERR" },
+	{ .irq_msk = BIT(13), .msg = "DMAC5_FIFO_OMIT_ERR" },
+	{ .irq_msk = BIT(14), .msg = "DMAC6_FIFO_OMIT_ERR" },
+	{ .irq_msk = BIT(15), .msg = "DMAC7_FIFO_OMIT_ERR" },
+	{ .irq_msk = BIT(16), .msg = "HGC_RLSE_SLOT_UNMATCH" },
+	{ .irq_msk = BIT(17), .msg = "HGC_LM_ADD_FCH_LIST_ERR" },
+	{ .irq_msk = BIT(18), .msg = "HGC_AXI_BUS_ERR" },
+	{ .irq_msk = BIT(19), .msg = "HGC_FIFO_OMIT_ERR" },
+};
+
 static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
 {
 	struct device *dev = hisi_hba->dev;
@@ -2252,6 +2302,17 @@ static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
 	}
 	hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
 
+	irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR2);
+	for (i = 0; i < ARRAY_SIZE(sas_ras_intr2_nfe); i++) {
+		ras_error = &sas_ras_intr2_nfe[i];
+		if (ras_error->irq_msk & irq_value) {
+			dev_warn(dev, "SAS_RAS_INTR2: %s(irq_value=0x%x) found.\n",
+					ras_error->msg, irq_value);
+			need_reset = true;
+		}
+	}
+	hisi_sas_write32(hisi_hba, SAS_RAS_INTR2, irq_value);
+
 	return need_reset;
 }
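
The SAS_RAS_INTR2 handling added above follows the same table-driven decode as the existing INTR0/INTR1 paths: read the status register, walk a mask/message table, warn on each hit, and request a reset if anything matched. A minimal standalone sketch of that pattern, with an illustrative register value and a trimmed table:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n)	(1U << (n))

struct hw_error {
	uint32_t irq_msk;
	const char *msg;
};

static const struct hw_error intr2_nfe[] = {
	{ .irq_msk = BIT(0),  .msg = "DMAC0_AXI_BUS_ERR" },
	{ .irq_msk = BIT(18), .msg = "HGC_AXI_BUS_ERR" },
};

int main(void)
{
	uint32_t irq_value = BIT(18);	/* stand-in for a SAS_RAS_INTR2 read */
	bool need_reset = false;
	size_t i;

	for (i = 0; i < sizeof(intr2_nfe) / sizeof(intr2_nfe[0]); i++) {
		if (intr2_nfe[i].irq_msk & irq_value) {
			printf("SAS_RAS_INTR2: %s found\n", intr2_nfe[i].msg);
			need_reset = true;
		}
	}
	return need_reset ? 1 : 0;
}
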
 
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index e3c8857741a13aa2ff3d84d1f5812b28ca46c85a..bd6ac6b5980a1128af2b0d4234e51f9a97c52a5b 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -291,7 +291,7 @@ static void ips_freescb(ips_ha_t *, ips_scb_t *);
 static void ips_setup_funclist(ips_ha_t *);
 static void ips_statinit(ips_ha_t *);
 static void ips_statinit_memio(ips_ha_t *);
-static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time_t);
+static void ips_fix_ffdc_time(ips_ha_t *, ips_scb_t *, time64_t);
 static void ips_ffdc_reset(ips_ha_t *, int);
 static void ips_ffdc_time(ips_ha_t *);
 static uint32_t ips_statupd_copperhead(ips_ha_t *);
@@ -985,10 +985,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
 
 	/* FFDC */
 	if (le32_to_cpu(ha->subsys->param[3]) & 0x300000) {
-		struct timeval tv;
-
-		do_gettimeofday(&tv);
-		ha->last_ffdc = tv.tv_sec;
+		ha->last_ffdc = ktime_get_real_seconds();
 		ha->reset_count++;
 		ips_ffdc_reset(ha, IPS_INTR_IORL);
 	}
@@ -2392,7 +2389,6 @@ static int
 ips_hainit(ips_ha_t * ha)
 {
 	int i;
-	struct timeval tv;
 
 	METHOD_TRACE("ips_hainit", 1);
 
@@ -2407,8 +2403,7 @@ ips_hainit(ips_ha_t * ha)
 
 	/* Send FFDC */
 	ha->reset_count = 1;
-	do_gettimeofday(&tv);
-	ha->last_ffdc = tv.tv_sec;
+	ha->last_ffdc = ktime_get_real_seconds();
 	ips_ffdc_reset(ha, IPS_INTR_IORL);
 
 	if (!ips_read_config(ha, IPS_INTR_IORL)) {
@@ -2548,12 +2543,10 @@ ips_next(ips_ha_t * ha, int intr)
 
 	if ((ha->subsys->param[3] & 0x300000)
 	    && (ha->scb_activelist.count == 0)) {
-		struct timeval tv;
-
-		do_gettimeofday(&tv);
-
-		if (tv.tv_sec - ha->last_ffdc > IPS_SECS_8HOURS) {
-			ha->last_ffdc = tv.tv_sec;
+		time64_t now = ktime_get_real_seconds();
+
+		if (now - ha->last_ffdc > IPS_SECS_8HOURS) {
+			ha->last_ffdc = now;
 			ips_ffdc_time(ha);
 		}
 	}
@@ -5988,59 +5980,21 @@ ips_ffdc_time(ips_ha_t * ha)
 /*                                                                          */
 /****************************************************************************/
 static void
-ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time_t current_time)
+ips_fix_ffdc_time(ips_ha_t * ha, ips_scb_t * scb, time64_t current_time)
 {
-	long days;
-	long rem;
-	int i;
-	int year;
-	int yleap;
-	int year_lengths[2] = { IPS_DAYS_NORMAL_YEAR, IPS_DAYS_LEAP_YEAR };
-	int month_lengths[12][2] = { {31, 31},
-	{28, 29},
-	{31, 31},
-	{30, 30},
-	{31, 31},
-	{30, 30},
-	{31, 31},
-	{31, 31},
-	{30, 30},
-	{31, 31},
-	{30, 30},
-	{31, 31}
-	};
+	struct tm tm;
 
 	METHOD_TRACE("ips_fix_ffdc_time", 1);
 
-	days = current_time / IPS_SECS_DAY;
-	rem = current_time % IPS_SECS_DAY;
-
-	scb->cmd.ffdc.hour = (rem / IPS_SECS_HOUR);
-	rem = rem % IPS_SECS_HOUR;
-	scb->cmd.ffdc.minute = (rem / IPS_SECS_MIN);
-	scb->cmd.ffdc.second = (rem % IPS_SECS_MIN);
-
-	year = IPS_EPOCH_YEAR;
-	while (days < 0 || days >= year_lengths[yleap = IPS_IS_LEAP_YEAR(year)]) {
-		int newy;
-
-		newy = year + (days / IPS_DAYS_NORMAL_YEAR);
-		if (days < 0)
-			--newy;
-		days -= (newy - year) * IPS_DAYS_NORMAL_YEAR +
-		    IPS_NUM_LEAP_YEARS_THROUGH(newy - 1) -
-		    IPS_NUM_LEAP_YEARS_THROUGH(year - 1);
-		year = newy;
-	}
-
-	scb->cmd.ffdc.yearH = year / 100;
-	scb->cmd.ffdc.yearL = year % 100;
-
-	for (i = 0; days >= month_lengths[i][yleap]; ++i)
-		days -= month_lengths[i][yleap];
+	time64_to_tm(current_time, 0, &tm);
 
-	scb->cmd.ffdc.month = i + 1;
-	scb->cmd.ffdc.day = days + 1;
+	scb->cmd.ffdc.hour   = tm.tm_hour;
+	scb->cmd.ffdc.minute = tm.tm_min;
+	scb->cmd.ffdc.second = tm.tm_sec;
+	scb->cmd.ffdc.yearH  = (tm.tm_year + 1900) / 100;
+	scb->cmd.ffdc.yearL  = tm.tm_year % 100;
+	scb->cmd.ffdc.month  = tm.tm_mon + 1;
+	scb->cmd.ffdc.day    = tm.tm_mday;
 }
 
 /****************************************************************************
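
The rewritten ips_fix_ffdc_time() leans on time64_to_tm() for the calendar split; note the struct tm conventions it relies on (tm_year counts from 1900, tm_mon from 0). A userspace analogue using gmtime_r(), which follows the same conventions:

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	struct tm tm;

	gmtime_r(&now, &tm);
	printf("FFDC: %02d%02d-%02d-%02d %02d:%02d:%02d\n",
	       (tm.tm_year + 1900) / 100,	/* yearH */
	       tm.tm_year % 100,		/* yearL */
	       tm.tm_mon + 1,			/* month (tm_mon is 0-based) */
	       tm.tm_mday,			/* day */
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	return 0;
}
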
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 366be3b2f9b49a8d42b95eb5b612f912e329dba4..db546171e97fbdfc37f65d9f530a51ca0b18dab6 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -402,16 +402,7 @@
    #define IPS_BIOS_HEADER             0xC0
 
    /* time oriented stuff */
-   #define IPS_IS_LEAP_YEAR(y)           (((y % 4 == 0) && ((y % 100 != 0) || (y % 400 == 0))) ? 1 : 0)
-   #define IPS_NUM_LEAP_YEARS_THROUGH(y) ((y) / 4 - (y) / 100 + (y) / 400)
-
-   #define IPS_SECS_MIN                 60
-   #define IPS_SECS_HOUR                3600
    #define IPS_SECS_8HOURS              28800
-   #define IPS_SECS_DAY                 86400
-   #define IPS_DAYS_NORMAL_YEAR         365
-   #define IPS_DAYS_LEAP_YEAR           366
-   #define IPS_EPOCH_YEAR               1970
 
    /*
     * Scsi_Host Template
@@ -1054,7 +1045,7 @@ typedef struct ips_ha {
    uint8_t            active;
    int                ioctl_reset;        /* IOCTL Requested Reset Flag */
    uint16_t           reset_count;        /* number of resets           */
-   time_t             last_ffdc;          /* last time we sent ffdc info*/
+   time64_t           last_ffdc;          /* last time we sent ffdc info*/
    uint8_t            slot_num;           /* PCI Slot Number            */
    int                ioctl_len;          /* size of ioctl buffer       */
    dma_addr_t         ioctl_busaddr;      /* dma address of ioctl buffer*/
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index edb7be786c6501602195b25285dda78ceb9ea537..9e8de1462593aa7371104354e0ac6b9f540ae57a 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -291,7 +291,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
 		 * Note: We have not moved the current phy_index so we will actually
 		 *       compare the starting phy with itself.
 		 *       This is expected and required to add the phy to the port. */
-		while (phy_index < SCI_MAX_PHYS) {
+		for (; phy_index < SCI_MAX_PHYS; phy_index++) {
 			if ((phy_mask & (1 << phy_index)) == 0)
 				continue;
 			sci_phy_get_sas_address(&ihost->phys[phy_index],
@@ -311,7 +311,6 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
 					      &ihost->phys[phy_index]);
 
 			assigned_phy_mask |= (1 << phy_index);
-			phy_index++;
 		}
 
 	}
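
The while-to-for conversion above closes a subtle hazard: inside a while loop, continue jumps straight back to the condition, so a phy_index whose mask bit is clear would never be incremented and the loop would spin forever; a for loop runs its increment on every path, continue included. A minimal illustration of the corrected shape, with illustrative values:

#include <stdio.h>

#define MAX_PHYS 4	/* stands in for SCI_MAX_PHYS */

int main(void)
{
	unsigned int phy_mask = 0x5;	/* illustrative: phys 0 and 2 */
	int phy_index;

	for (phy_index = 0; phy_index < MAX_PHYS; phy_index++) {
		if ((phy_mask & (1 << phy_index)) == 0)
			continue;	/* safe: the for loop still increments */
		printf("assign phy %d to the port\n", phy_index);
	}
	return 0;
}
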
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 2ba4b68fdb73f44840509708a170e763e5502be2..b025a0b7434174cea0096b23fad34aa18a22de20 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -962,7 +962,6 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
 	if (conn->datadgst_en)
 		sdev->request_queue->backing_dev_info->capabilities
 			|= BDI_CAP_STABLE_WRITES;
-	blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
 	blk_queue_dma_alignment(sdev->request_queue, 0);
 	return 0;
 }
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 0cc1567eacc19eabdeefe3b35c5b79c9c4c25239..ff1d612f6fb97a2d95cd3e27569af89968d63c8c 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -577,6 +577,11 @@ int sas_ata_init(struct domain_device *found_dev)
 		ata_sas_port_destroy(ap);
 		return rc;
 	}
+	rc = ata_sas_tport_add(found_dev->sata_dev.ata_host.dev, ap);
+	if (rc) {
+		ata_sas_port_destroy(ap);
+		return rc;
+	}
 	found_dev->sata_dev.ap = ap;
 
 	return 0;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index a0fa7ef3a071ceb7df0fd845e8b9a4838e1fa1ca..1ffca28fe6a864f7523a993a1c32cd32140b930b 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -314,6 +314,7 @@ void sas_free_device(struct kref *kref)
 		kfree(dev->ex_dev.ex_phy);
 
 	if (dev_is_sata(dev) && dev->sata_dev.ap) {
+		ata_sas_tport_delete(dev->sata_dev.ap);
 		ata_sas_port_destroy(dev->sata_dev.ap);
 		dev->sata_dev.ap = NULL;
 	}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 6c0d351c0d0dbbb8511ea44fb6a02bd7f991c997..20b249a649dd67626303b3be9370222a572df6d0 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -64,8 +64,6 @@ struct lpfc_sli2_slim;
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
 #define LPFC_VNAME_LEN		100	/* vport symbolic name length */
-#define LPFC_TGTQ_INTERVAL	40000	/* Min amount of time between tgt
-					   queue depth change in millisecs */
 #define LPFC_TGTQ_RAMPUP_PCENT	5	/* Target queue rampup in percentage */
 #define LPFC_MIN_TGT_QDEPTH	10
 #define LPFC_MAX_TGT_QDEPTH	0xFFFF
@@ -784,6 +782,7 @@ struct lpfc_hba {
 	uint32_t cfg_nvme_oas;
 	uint32_t cfg_nvme_embed_cmd;
 	uint32_t cfg_nvme_io_channel;
+	uint32_t cfg_nvmet_mrq_post;
 	uint32_t cfg_nvmet_mrq;
 	uint32_t cfg_enable_nvmet;
 	uint32_t cfg_nvme_enable_fb;
@@ -922,12 +921,6 @@ struct lpfc_hba {
 	atomic_t fc4ScsiOutputRequests;
 	atomic_t fc4ScsiControlRequests;
 	atomic_t fc4ScsiIoCmpls;
-	atomic_t fc4NvmeInputRequests;
-	atomic_t fc4NvmeOutputRequests;
-	atomic_t fc4NvmeControlRequests;
-	atomic_t fc4NvmeIoCmpls;
-	atomic_t fc4NvmeLsRequests;
-	atomic_t fc4NvmeLsCmpls;
 
 	uint64_t bg_guard_err_cnt;
 	uint64_t bg_apptag_err_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2ac1d21c553f14e0853964507d2336fcf1319b3a..fd3b253178870cd98ccb487d4fb752d731121418 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -149,10 +149,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
+	struct lpfc_nvme_rport *rport;
 	struct lpfc_nodelist *ndlp;
 	struct nvme_fc_remote_port *nrport;
-	uint64_t data1, data2, data3, tot;
+	struct lpfc_nvme_ctrl_stat *cstat;
+	uint64_t data1, data2, data3;
+	uint64_t totin, totout, tot;
 	char *statep;
+	int i;
 	int len = 0;
 
 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
@@ -309,11 +313,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			localport->port_id, statep);
 
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
-		if (!ndlp->nrport)
+		rport = lpfc_ndlp_get_nrport(ndlp);
+		if (!rport)
 			continue;
 
 		/* local short-hand pointer. */
-		nrport = ndlp->nrport->remoteport;
+		nrport = rport->remoteport;
+		if (!nrport)
+			continue;
 
 		/* Port state is only one of two values for now. */
 		switch (nrport->port_state) {
@@ -364,11 +371,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	}
 	spin_unlock_irq(shost->host_lock);
 
+	if (!lport)
+		return len;
+
 	len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
 	len += snprintf(buf+len, PAGE_SIZE-len,
 			"LS: Xmt %010x Cmpl %010x Abort %08x\n",
-			atomic_read(&phba->fc4NvmeLsRequests),
-			atomic_read(&phba->fc4NvmeLsCmpls),
+			atomic_read(&lport->fc4NvmeLsRequests),
+			atomic_read(&lport->fc4NvmeLsCmpls),
 			atomic_read(&lport->xmt_ls_abort));
 
 	len += snprintf(buf + len, PAGE_SIZE - len,
@@ -377,27 +387,31 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			atomic_read(&lport->cmpl_ls_xb),
 			atomic_read(&lport->cmpl_ls_err));
 
-	tot = atomic_read(&phba->fc4NvmeIoCmpls);
-	data1 = atomic_read(&phba->fc4NvmeInputRequests);
-	data2 = atomic_read(&phba->fc4NvmeOutputRequests);
-	data3 = atomic_read(&phba->fc4NvmeControlRequests);
+	totin = 0;
+	totout = 0;
+	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+		cstat = &lport->cstat[i];
+		tot = atomic_read(&cstat->fc4NvmeIoCmpls);
+		totin += tot;
+		data1 = atomic_read(&cstat->fc4NvmeInputRequests);
+		data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
+		data3 = atomic_read(&cstat->fc4NvmeControlRequests);
+		totout += (data1 + data2 + data3);
+	}
 	len += snprintf(buf+len, PAGE_SIZE-len,
-			"FCP: Rd %016llx Wr %016llx IO %016llx\n",
-			data1, data2, data3);
+			"Total FCP Cmpl %016llx Issue %016llx "
+			"OutIO %016llx\n",
+			totin, totout, totout - totin);
 
 	len += snprintf(buf+len, PAGE_SIZE-len,
-			"    noxri %08x nondlp %08x qdepth %08x "
+			"      abort %08x noxri %08x nondlp %08x qdepth %08x "
 			"wqerr %08x\n",
+			atomic_read(&lport->xmt_fcp_abort),
 			atomic_read(&lport->xmt_fcp_noxri),
 			atomic_read(&lport->xmt_fcp_bad_ndlp),
 			atomic_read(&lport->xmt_fcp_qdepth),
 			atomic_read(&lport->xmt_fcp_wqerr));
 
-	len += snprintf(buf + len, PAGE_SIZE - len,
-			"    Cmpl %016llx Outstanding %016llx Abort %08x\n",
-			tot, ((data1 + data2 + data3) - tot),
-			atomic_read(&lport->xmt_fcp_abort));
-
 	len += snprintf(buf + len, PAGE_SIZE - len,
 			"FCP CMPL: xb %08x Err %08x\n",
 			atomic_read(&lport->cmpl_fcp_xb),
@@ -3280,6 +3294,9 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
 {
 	struct Scsi_Host  *shost;
 	struct lpfc_nodelist  *ndlp;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct lpfc_nvme_rport *rport;
+#endif
 
 	shost = lpfc_shost_from_vport(vport);
 	spin_lock_irq(shost->host_lock);
@@ -3289,8 +3306,9 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
 		if (ndlp->rport)
 			ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
 #if (IS_ENABLED(CONFIG_NVME_FC))
-		if (ndlp->nrport)
-			nvme_fc_set_remoteport_devloss(ndlp->nrport->remoteport,
+		rport = lpfc_ndlp_get_nrport(ndlp);
+		if (rport)
+			nvme_fc_set_remoteport_devloss(rport->remoteport,
 						       vport->cfg_devloss_tmo);
 #endif
 	}
@@ -3413,6 +3431,15 @@ LPFC_ATTR_R(nvmet_mrq,
 	    LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
 	    "Specify number of RQ pairs for processing NVMET cmds");
 
+/*
+ * lpfc_nvmet_mrq_post: Specify number of RQ buffers to initially post
+ * to each NVMET RQ. Range 64 to 2048, default is 512.
+ */
+LPFC_ATTR_R(nvmet_mrq_post,
+	    LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
+	    LPFC_NVMET_RQE_DEF_COUNT,
+	    "Specify number of RQ buffers to initially post");
+
 /*
  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
  * Supported Values:  1 - register just FCP
@@ -3469,8 +3496,49 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
 # tgt_queue_depth:  This parameter is used to limit the number of outstanding
 # commands per target port. Value range is [10,65535]. Default value is 65535.
 */
-LPFC_VPORT_ATTR_RW(tgt_queue_depth, 65535, 10, 65535,
-		   "Max number of FCP commands we can queue to a specific target port");
+static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
+module_param(lpfc_tgt_queue_depth, uint, 0444);
+MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
+lpfc_vport_param_show(tgt_queue_depth);
+lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
+		      LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
+
+/**
+ * lpfc_tgt_queue_depth_set: Sets an attribute value.
+ * @vport: pointer to the vport structure.
+ * @val: integer attribute value.
+ *
+ * Description: Sets the parameter to the new value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ */
+static int
+lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+
+	if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
+		return -EINVAL;
+
+	if (val == vport->cfg_tgt_queue_depth)
+		return 0;
+
+	spin_lock_irq(shost->host_lock);
+	vport->cfg_tgt_queue_depth = val;
+
+	/* Next loop thru nodelist and change cmd_qdepth */
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
+		ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+
+	spin_unlock_irq(shost->host_lock);
+	return 0;
+}
+
+lpfc_vport_param_store(tgt_queue_depth);
+static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
 
 /*
 # hba_queue_depth:  This parameter is used to limit the number of outstanding
@@ -5302,6 +5370,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_suppress_rsp,
 	&dev_attr_lpfc_nvme_io_channel,
 	&dev_attr_lpfc_nvmet_mrq,
+	&dev_attr_lpfc_nvmet_mrq_post,
 	&dev_attr_lpfc_nvme_enable_fb,
 	&dev_attr_lpfc_nvmet_fb_size,
 	&dev_attr_lpfc_enable_bg,
@@ -6352,6 +6421,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 
 	lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
 	lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
+	lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
 
 	/* Initialize first burst. Target vs Initiator are different. */
 	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
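
With the LS and FCP counters moved from the hba into per-channel lpfc_nvme_ctrl_stat structures on the lport, the show routines now roll the channels up: completions sum into totin, the three request types into totout, and outstanding I/O is the difference. A standalone sketch of that aggregation, with illustrative names and seed values:

#include <stdio.h>
#include <stdatomic.h>

#define NUM_CHANNELS 4			/* stands in for cfg_nvme_io_channel */

struct ctrl_stat {			/* stands in for lpfc_nvme_ctrl_stat */
	atomic_ulong in, out, ctrl, cmpl;
};

static struct ctrl_stat cstat[NUM_CHANNELS];

int main(void)
{
	unsigned long totin = 0, totout = 0;
	int i;

	/* seed illustrative per-channel counts */
	for (i = 0; i < NUM_CHANNELS; i++) {
		atomic_store(&cstat[i].in, 100 + i);
		atomic_store(&cstat[i].cmpl, 90 + i);
	}

	/* the same roll-up lpfc_nvme_info_show() performs per channel */
	for (i = 0; i < NUM_CHANNELS; i++) {
		totin += atomic_load(&cstat[i].cmpl);
		totout += atomic_load(&cstat[i].in) +
			  atomic_load(&cstat[i].out) +
			  atomic_load(&cstat[i].ctrl);
	}

	printf("Total FCP Cmpl %lx Issue %lx OutIO %lx\n",
	       totin, totout, totout - totin);
	return 0;
}
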
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 0617c8ea88c6fd582a56e14bbbb62b8df76ed4e6..d4a200ae5a6fcaa08fd0d7a99e5860cea8672854 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -471,6 +471,11 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
 				"Parse GID_FTrsp: did:x%x flg:x%x x%x",
 				Did, ndlp->nlp_flag, vport->fc_flag);
 
+			/* Don't assume the rport is always the previous
+			 * FC4 type.
+			 */
+			ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
+
 			/* By default, the driver expects to support FCP FC4 */
 			if (fc4_type == FC_TYPE_FCP)
 				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -691,6 +696,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		vport->fc_flag &= ~FC_RSCN_DEFERRED;
 		spin_unlock_irq(shost->host_lock);
 
+		/* This is a GID_FT completing so the gidft_inp counter was
+		 * incremented before the GID_FT was issued to the wire.
+		 */
+		vport->gidft_inp--;
+
 		/*
 		 * Skip processing the NS response
 		 * Re-issue the NS cmd
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index fb0dc2aeed91263c20b4ec49df5b6d9f42322332..afe7883c988aaec940a1a108e9576e59295fdf2b 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -544,7 +544,7 @@ static int
 lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 {
 	int len = 0;
-	int cnt;
+	int i, iocnt, outio, cnt;
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_nodelist *ndlp;
@@ -552,12 +552,15 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvme_fc_remote_port *nrport;
+	struct lpfc_nvme_rport *rport;
 
 	cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
+	outio = 0;
 
 	len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
 	spin_lock_irq(shost->host_lock);
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		iocnt = 0;
 		if (!cnt) {
 			len +=  snprintf(buf+len, size-len,
 				"Missing Nodelist Entries\n");
@@ -585,9 +588,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 			break;
 		case NLP_STE_UNMAPPED_NODE:
 			statep = "UNMAP ";
+			iocnt = 1;
 			break;
 		case NLP_STE_MAPPED_NODE:
 			statep = "MAPPED";
+			iocnt = 1;
 			break;
 		case NLP_STE_NPR_NODE:
 			statep = "NPR   ";
@@ -614,8 +619,10 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 			len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
 		if (ndlp->nlp_type & NLP_FC_NODE)
 			len += snprintf(buf+len, size-len, "FC_NODE ");
-		if (ndlp->nlp_type & NLP_FABRIC)
+		if (ndlp->nlp_type & NLP_FABRIC) {
 			len += snprintf(buf+len, size-len, "FABRIC ");
+			iocnt = 0;
+		}
 		if (ndlp->nlp_type & NLP_FCP_TARGET)
 			len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
 				ndlp->nlp_sid);
@@ -632,10 +639,20 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 			ndlp->nlp_usg_map);
 		len += snprintf(buf+len, size-len, "refcnt:%x",
 			kref_read(&ndlp->kref));
+		if (iocnt) {
+			i = atomic_read(&ndlp->cmd_pending);
+			len += snprintf(buf + len, size - len,
+					" OutIO:x%x Qdepth x%x",
+					i, ndlp->cmd_qdepth);
+			outio += i;
+		}
 		len +=  snprintf(buf+len, size-len, "\n");
 	}
 	spin_unlock_irq(shost->host_lock);
 
+	len += snprintf(buf + len, size - len,
+			"\nOutstanding IO x%x\n",  outio);
+
 	if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 		len += snprintf(buf + len, size - len,
@@ -679,10 +696,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	len += snprintf(buf + len, size - len, "\tRport List:\n");
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		/* local short-hand pointer. */
-		if (!ndlp->nrport)
+		rport = lpfc_ndlp_get_nrport(ndlp);
+		if (!rport)
 			continue;
 
-		nrport = ndlp->nrport->remoteport;
+		nrport = rport->remoteport;
+		if (!nrport)
+			continue;
 
 		/* Port state is only one of two values for now. */
 		switch (nrport->port_state) {
@@ -751,10 +771,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
 	struct nvme_fc_local_port *localport;
+	struct lpfc_nvme_ctrl_stat *cstat;
 	struct lpfc_nvme_lport *lport;
-	uint64_t tot, data1, data2, data3;
+	uint64_t data1, data2, data3;
+	uint64_t tot, totin, totout;
+	int cnt, i, maxch;
 	int len = 0;
-	int cnt;
 
 	if (phba->nvmet_support) {
 		if (!phba->targetport)
@@ -880,33 +902,52 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
 			return len;
 
+		localport = vport->localport;
+		if (!localport)
+			return len;
+		lport = (struct lpfc_nvme_lport *)localport->private;
+		if (!lport)
+			return len;
+
 		len += snprintf(buf + len, size - len,
 				"\nNVME Lport Statistics\n");
 
 		len += snprintf(buf + len, size - len,
 				"LS: Xmt %016x Cmpl %016x\n",
-				atomic_read(&phba->fc4NvmeLsRequests),
-				atomic_read(&phba->fc4NvmeLsCmpls));
-
-		tot = atomic_read(&phba->fc4NvmeIoCmpls);
-		data1 = atomic_read(&phba->fc4NvmeInputRequests);
-		data2 = atomic_read(&phba->fc4NvmeOutputRequests);
-		data3 = atomic_read(&phba->fc4NvmeControlRequests);
+				atomic_read(&lport->fc4NvmeLsRequests),
+				atomic_read(&lport->fc4NvmeLsCmpls));
 
-		len += snprintf(buf + len, size - len,
-				"FCP: Rd %016llx Wr %016llx IO %016llx\n",
-				data1, data2, data3);
-
-		len += snprintf(buf + len, size - len,
-				"   Cmpl %016llx Outstanding %016llx\n",
-				tot, (data1 + data2 + data3) - tot);
+		if (phba->cfg_nvme_io_channel < 32)
+			maxch = phba->cfg_nvme_io_channel;
+		else
+			maxch = 32;
+		totin = 0;
+		totout = 0;
+		for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+			cstat = &lport->cstat[i];
+			tot = atomic_read(&cstat->fc4NvmeIoCmpls);
+			totin += tot;
+			data1 = atomic_read(&cstat->fc4NvmeInputRequests);
+			data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
+			data3 = atomic_read(&cstat->fc4NvmeControlRequests);
+			totout += (data1 + data2 + data3);
+
+			/* Limit to 32, debugfs display buffer limitation */
+			if (i >= 32)
+				continue;
 
-		localport = vport->localport;
-		if (!localport)
-			return len;
-		lport = (struct lpfc_nvme_lport *)localport->private;
-		if (!lport)
-			return len;
+			len += snprintf(buf + len, size - len,
+					"FCP (%d): Rd %016llx Wr %016llx "
+					"IO %016llx ",
+					i, data1, data2, data3);
+			len += snprintf(buf + len, size - len,
+					"Cmpl %016llx OutIO %016llx\n",
+					tot, ((data1 + data2 + data3) - tot));
+		}
+		len += snprintf(buf + len, size - len,
+				"Total FCP Cmpl %016llx Issue %016llx "
+				"OutIO %016llx\n",
+				totin, totout, totout - totin);
 
 		len += snprintf(buf + len, size - len,
 				"LS Xmt Err: Abrt %08x Err %08x  "
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 74895e62aaeaab6ae30d4d0b4cf206911a9c2739..6d84a10fef0791b8d70750192ffd63c8e9442de4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -6268,7 +6268,6 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
 		 * flush the RSCN.  Otherwise, the outstanding requests
 		 * need to complete.
 		 */
-		vport->gidft_inp = 0;
 		if (lpfc_issue_gidft(vport) > 0)
 			return 1;
 	} else {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3e7712cd6c9ac525705d7e8226b33881b2312f0a..cf2cbaa241b9e442f180a65d0f5c03d0567ef1ff 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -3876,10 +3876,6 @@ int
 lpfc_issue_gidft(struct lpfc_vport *vport)
 {
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_nodelist *ndlp;
-
-	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
-		ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
 
 	/* Good status, issue CT Request to NameServer */
 	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 98b80559c2158456c78420b0eeff0829ef30f9ea..9df1c8da6f52e8bb2d10e8ec0912c7401e9a99c3 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -104,6 +104,17 @@ struct lpfc_sli_intf {
 #define LPFC_SLI_INTF_IF_TYPE_VIRT	1
 };
 
+struct lpfc_sli_asic_rev {
+	u32 word0;
+#define LPFC_SLI_ASIC_VER_A	0x0
+#define LPFC_SLI_ASIC_VER_B	0x1
+#define LPFC_SLI_ASIC_VER_C	0x2
+#define LPFC_SLI_ASIC_VER_D	0x3
+#define lpfc_sli_asic_ver_SHIFT		4
+#define lpfc_sli_asic_ver_MASK		0x0000000F
+#define lpfc_sli_asic_ver_WORD		word0
+};
+
 #define LPFC_SLI4_MBX_EMBED	true
 #define LPFC_SLI4_MBX_NEMBED	false
 
@@ -566,6 +577,7 @@ struct lpfc_register {
 
 /* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
 #define LPFC_SLI_INTF			0x0058
+#define LPFC_SLI_ASIC_VER		0x009C
 
 #define LPFC_CTL_PORT_SEM_OFFSET	0x400
 #define lpfc_port_smphr_perr_SHIFT	31
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7887468c71b4db440f288879115533219d34ae23..060f0e2f6ff57ee2fbd4c63a9c2f26adc2f80b96 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1266,6 +1266,9 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	uint64_t tot, data1, data2, data3;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_register reg_data;
+	struct nvme_fc_local_port *localport;
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_nvme_ctrl_stat *cstat;
 	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
 
 	vports = lpfc_create_vport_work_array(phba);
@@ -1299,14 +1302,25 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 				tot += atomic_read(&tgtp->xmt_fcp_release);
 				tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
 			} else {
-				tot = atomic_read(&phba->fc4NvmeIoCmpls);
-				data1 = atomic_read(
-					&phba->fc4NvmeInputRequests);
-				data2 = atomic_read(
-					&phba->fc4NvmeOutputRequests);
-				data3 = atomic_read(
-					&phba->fc4NvmeControlRequests);
-				tot =  (data1 + data2 + data3) - tot;
+				localport = phba->pport->localport;
+				if (!localport || !localport->private)
+					goto skip_eqdelay;
+				lport = (struct lpfc_nvme_lport *)
+					localport->private;
+				tot = 0;
+				for (i = 0;
+					i < phba->cfg_nvme_io_channel; i++) {
+					cstat = &lport->cstat[i];
+					data1 = atomic_read(
+						&cstat->fc4NvmeInputRequests);
+					data2 = atomic_read(
+						&cstat->fc4NvmeOutputRequests);
+					data3 = atomic_read(
+						&cstat->fc4NvmeControlRequests);
+					tot += (data1 + data2 + data3);
+					tot -= atomic_read(
+						&cstat->fc4NvmeIoCmpls);
+				}
 			}
 		}
 
@@ -6406,8 +6420,11 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
 		return error;
 	}
 
-	/* workqueue for deferred irq use */
-	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+	/* The lpfc_wq workqueue for deferred irq use is only used for SLI4 */
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+	else
+		phba->wq = NULL;
 
 	return 0;
 }
@@ -6430,7 +6447,8 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
 	}
 
 	/* Stop kernel worker thread */
-	kthread_stop(phba->worker_thread);
+	if (phba->worker_thread)
+		kthread_stop(phba->worker_thread);
 }
 
 /**
@@ -6895,12 +6913,6 @@ lpfc_create_shost(struct lpfc_hba *phba)
 	atomic_set(&phba->fc4ScsiOutputRequests, 0);
 	atomic_set(&phba->fc4ScsiControlRequests, 0);
 	atomic_set(&phba->fc4ScsiIoCmpls, 0);
-	atomic_set(&phba->fc4NvmeInputRequests, 0);
-	atomic_set(&phba->fc4NvmeOutputRequests, 0);
-	atomic_set(&phba->fc4NvmeControlRequests, 0);
-	atomic_set(&phba->fc4NvmeIoCmpls, 0);
-	atomic_set(&phba->fc4NvmeLsRequests, 0);
-	atomic_set(&phba->fc4NvmeLsCmpls, 0);
 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 	if (!vport)
 		return -ENODEV;
@@ -9502,6 +9514,11 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 		return error;
 	}
 
+	if (pci_read_config_dword(pdev, LPFC_SLI_ASIC_VER,
+				  &phba->sli4_hba.sli_asic_ver.word0)) {
+		return error;
+	}
+
 	/* There is no SLI3 failback for SLI4 devices. */
 	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
 	    LPFC_SLI_INTF_VALID) {
@@ -10533,6 +10550,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	struct lpfc_pc_sli4_params *sli4_params;
 	uint32_t mbox_tmo;
 	int length;
+	bool exp_wqcq_pages = true;
 	struct lpfc_sli4_parameters *mbx_sli4_parameters;
 
 	/*
@@ -10659,8 +10677,17 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 			phba->nvme_support, phba->nvme_embed_pbde,
 			phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
 
+	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_2) &&
+	    (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
+		 LPFC_SLI_INTF_FAMILY_LNCR_A0) &&
+	    (bf_get(lpfc_sli_asic_ver, &phba->sli4_hba.sli_asic_ver) ==
+	    LPFC_SLI_ASIC_VER_A))
+		exp_wqcq_pages = false;
+
 	if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
 	    (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
+	    exp_wqcq_pages &&
 	    (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
 		phba->enab_exp_wqcq_pages = 1;
 	else
@@ -11719,6 +11746,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
 	lpfc_nvme_free(phba);
 	lpfc_free_iocb_list(phba);
 
+	lpfc_unset_driver_resource_phase2(phba);
 	lpfc_sli4_driver_resource_unset(phba);
 
 	/* Unmap adapter Control and Doorbell registers */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 022060636ae1f663ba263a3df7dd99e6f721ebda..e790c0bc64fc34af7e51a84b267489c41ce14016 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1936,31 +1936,14 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			goto out;
 		}
 
-		/* When the rport rejected the FCP PRLI as unsupported.
-		 * This should only happen in Pt2Pt so an NVME PRLI
-		 * should be outstanding still.
-		 */
-		if (npr && ndlp->nlp_flag & NLP_FCP_PRLI_RJT) {
+		/* Adjust the nlp_type accordingly if the PRLI failed */
+		if (npr)
 			ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
-			goto out_err;
-		}
-
-		/* The LS Req had some error.  Don't let this be a
-		 * target.
-		 */
-		if ((ndlp->fc4_prli_sent == 1) &&
-		    (ndlp->nlp_state == NLP_STE_PRLI_ISSUE) &&
-		    (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_FCP_INITIATOR)))
-			/* The FCP PRLI completed successfully but
-			 * the NVME PRLI failed.  Since they are sent in
-			 * succession, allow the FCP to complete.
-			 */
-			goto out_err;
+		if (nvpr)
+			ndlp->nlp_fc4_type &= ~NLP_FC4_NVME;
 
-		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
-		ndlp->nlp_type |= NLP_FCP_INITIATOR;
-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
-		return ndlp->nlp_state;
+		/* We can't set the DSM state till BOTH PRLIs complete */
+		goto out_err;
 	}
 
 	if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
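The rewritten failure path treats a rejected PRLI as merely stripping that FC4 type from the node, deferring the discovery-state transition until both PRLIs have come back. A rough sketch of the gate this relies on, assuming fc4_prli_sent counts PRLIs still in flight (a simplification of the real out_err handling):

    if (npr)                          /* FCP PRLI failed */
            ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
    if (nvpr)                         /* NVME PRLI failed */
            ndlp->nlp_fc4_type &= ~NLP_FC4_NVME;
    if (ndlp->fc4_prli_sent)          /* companion PRLI still outstanding */
            return ndlp->nlp_state;
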
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 378dca40ca204b8d1dee652684c4e2045e6eb241..9e0345697e1b0c51992e0e3e61d86f0ee357a3db 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -334,7 +334,14 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 			"6146 remoteport delete of remoteport %p\n",
 			remoteport);
 	spin_lock_irq(&vport->phba->hbalock);
-	ndlp->nrport = NULL;
+
+	/* The register rebind might have occurred before the delete
+	 * downcall.  Guard against this race.
+	 */
+	if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
+		ndlp->nrport = NULL;
+		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
+	}
 	spin_unlock_irq(&vport->phba->hbalock);
 
 	/* Remove original register reference. The host transport
@@ -357,15 +364,19 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	struct lpfc_dmabuf *buf_ptr;
 	struct lpfc_nodelist *ndlp;
 
-	atomic_inc(&vport->phba->fc4NvmeLsCmpls);
-
 	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
 	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
-	if (status) {
+
+	if (vport->localport) {
 		lport = (struct lpfc_nvme_lport *)vport->localport->private;
-		if (bf_get(lpfc_wcqe_c_xb, wcqe))
-			atomic_inc(&lport->cmpl_ls_xb);
-		atomic_inc(&lport->cmpl_ls_err);
+		if (lport) {
+			atomic_inc(&lport->fc4NvmeLsCmpls);
+			if (status) {
+				if (bf_get(lpfc_wcqe_c_xb, wcqe))
+					atomic_inc(&lport->cmpl_ls_xb);
+				atomic_inc(&lport->cmpl_ls_err);
+			}
+		}
 	}
 
 	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
@@ -570,6 +581,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 
 	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
 	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+	if (unlikely(!lport) || unlikely(!rport))
+		return -EINVAL;
+
 	vport = lport->vport;
 
 	if (vport->load_flag & FC_UNLOADING)
@@ -639,7 +653,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
 			 &pnvme_lsreq->rspdma);
 
-	atomic_inc(&vport->phba->fc4NvmeLsRequests);
+	atomic_inc(&lport->fc4NvmeLsRequests);
 
 	/* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
 	 * This code allows it all to work.
@@ -690,6 +704,8 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
 	struct lpfc_iocbq *wqe, *next_wqe;
 
 	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+	if (unlikely(!lport))
+		return;
 	vport = lport->vport;
 	phba = vport->phba;
 
@@ -949,8 +965,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
 	struct lpfc_nvme_lport *lport;
+	struct lpfc_nvme_ctrl_stat *cstat;
 	unsigned long flags;
-	uint32_t code, status;
+	uint32_t code, status, idx;
 	uint16_t cid, sqhd, data;
 	uint32_t *ptr;
 
@@ -961,16 +978,22 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 				 wcqe);
 		return;
 	}
-	atomic_inc(&phba->fc4NvmeIoCmpls);
-
 	nCmd = lpfc_ncmd->nvmeCmd;
 	rport = lpfc_ncmd->nrport;
 	status = bf_get(lpfc_wcqe_c_status, wcqe);
-	if (status) {
+
+	if (vport->localport) {
 		lport = (struct lpfc_nvme_lport *)vport->localport->private;
-		if (bf_get(lpfc_wcqe_c_xb, wcqe))
-			atomic_inc(&lport->cmpl_fcp_xb);
-		atomic_inc(&lport->cmpl_fcp_err);
+		if (lport) {
+			idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
+			cstat = &lport->cstat[idx];
+			atomic_inc(&cstat->fc4NvmeIoCmpls);
+			if (status) {
+				if (bf_get(lpfc_wcqe_c_xb, wcqe))
+					atomic_inc(&lport->cmpl_fcp_xb);
+				atomic_inc(&lport->cmpl_fcp_err);
+			}
+		}
 	}
 
 	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
@@ -1163,7 +1186,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 static int
 lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 		      struct lpfc_nvme_buf *lpfc_ncmd,
-		      struct lpfc_nodelist *pnode)
+		      struct lpfc_nodelist *pnode,
+		      struct lpfc_nvme_ctrl_stat *cstat)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
@@ -1201,7 +1225,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 			} else {
 				wqe->fcp_iwrite.initial_xfer_len = 0;
 			}
-			atomic_inc(&phba->fc4NvmeOutputRequests);
+			atomic_inc(&cstat->fc4NvmeOutputRequests);
 		} else {
 			/* From the iread template, initialize words 7 - 11 */
 			memcpy(&wqe->words[7],
@@ -1214,13 +1238,13 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 			/* Word 5 */
 			wqe->fcp_iread.rsrvd5 = 0;
 
-			atomic_inc(&phba->fc4NvmeInputRequests);
+			atomic_inc(&cstat->fc4NvmeInputRequests);
 		}
 	} else {
 		/* From the icmnd template, initialize words 4 - 11 */
 		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
 		       sizeof(uint32_t) * 8);
-		atomic_inc(&phba->fc4NvmeControlRequests);
+		atomic_inc(&cstat->fc4NvmeControlRequests);
 	}
 	/*
 	 * Finish initializing those WQE fields that are independent
@@ -1400,7 +1424,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 {
 	int ret = 0;
 	int expedite = 0;
+	int idx;
 	struct lpfc_nvme_lport *lport;
+	struct lpfc_nvme_ctrl_stat *cstat;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
 	struct lpfc_nodelist *ndlp;
@@ -1543,15 +1569,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	lpfc_ncmd->ndlp = ndlp;
 	lpfc_ncmd->start_time = jiffies;
 
-	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
-	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_free_nvme_buf;
-	}
-
-	atomic_inc(&ndlp->cmd_pending);
-
 	/*
 	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
 	 * This identifier was created in our hardware queue create callback
@@ -1560,7 +1577,18 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	 * index to use and that they have affinitized a CPU to this hardware
 	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
 	 */
-	lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index;
+	idx = lpfc_queue_info->index;
+	lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
+	cstat = &lport->cstat[idx];
+
+	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
+	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_free_nvme_buf;
+	}
+
+	atomic_inc(&ndlp->cmd_pending);
 
 	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
 			 lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -1605,11 +1633,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
  out_free_nvme_buf:
 	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
 		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
-			atomic_dec(&phba->fc4NvmeOutputRequests);
+			atomic_dec(&cstat->fc4NvmeOutputRequests);
 		else
-			atomic_dec(&phba->fc4NvmeInputRequests);
+			atomic_dec(&cstat->fc4NvmeInputRequests);
 	} else
-		atomic_dec(&phba->fc4NvmeControlRequests);
+		atomic_dec(&cstat->fc4NvmeControlRequests);
 	lpfc_release_nvme_buf(phba, lpfc_ncmd);
  out_fail:
 	return ret;
@@ -2390,7 +2418,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	struct nvme_fc_port_info nfcp_info;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
-	int len;
+	struct lpfc_nvme_ctrl_stat *cstat;
+	int len, i;
 
 	/* Initialize this localport instance.  The vport wwn usage ensures
 	 * that NPIV is accounted for.
@@ -2414,6 +2443,11 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
 	lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
 
+	cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
+			phba->cfg_nvme_io_channel), GFP_KERNEL);
+	if (!cstat)
+		return -ENOMEM;
+
 	/* localport is allocated from the stack, but the registration
 	 * call allocates heap memory as well as the private area.
 	 */
@@ -2436,6 +2470,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 		lport = (struct lpfc_nvme_lport *)localport->private;
 		vport->localport = localport;
 		lport->vport = vport;
+		lport->cstat = cstat;
 		vport->nvmei_support = 1;
 
 		atomic_set(&lport->xmt_fcp_noxri, 0);
@@ -2449,6 +2484,16 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 		atomic_set(&lport->cmpl_fcp_err, 0);
 		atomic_set(&lport->cmpl_ls_xb, 0);
 		atomic_set(&lport->cmpl_ls_err, 0);
+		atomic_set(&lport->fc4NvmeLsRequests, 0);
+		atomic_set(&lport->fc4NvmeLsCmpls, 0);
+
+		for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+			cstat = &lport->cstat[i];
+			atomic_set(&cstat->fc4NvmeInputRequests, 0);
+			atomic_set(&cstat->fc4NvmeOutputRequests, 0);
+			atomic_set(&cstat->fc4NvmeControlRequests, 0);
+			atomic_set(&cstat->fc4NvmeIoCmpls, 0);
+		}
 
 		/* Don't post more new bufs if repost already recovered
 		 * the nvme sgls.
@@ -2458,6 +2503,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 						 phba->sli4_hba.nvme_xri_max);
 			vport->phba->total_nvme_bufs += len;
 		}
+	} else {
+		kfree(cstat);
 	}
 
 	return ret;
@@ -2520,6 +2567,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 #if (IS_ENABLED(CONFIG_NVME_FC))
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
+	struct lpfc_nvme_ctrl_stat *cstat;
 	int ret;
 
 	if (vport->nvmei_support == 0)
@@ -2528,6 +2576,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 	localport = vport->localport;
 	vport->localport = NULL;
 	lport = (struct lpfc_nvme_lport *)localport->private;
+	cstat = lport->cstat;
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 			 "6011 Destroying NVME localport %p\n",
@@ -2543,6 +2592,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 	 * indefinitely or succeeds
 	 */
 	lpfc_nvme_lport_unreg_wait(vport, lport);
+	kfree(cstat);
 
 	/* Regardless of the unregister upcall response, clear
 	 * nvmei_support.  All rports are unregistered and the
@@ -2607,6 +2657,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_nvme_rport *rport;
+	struct lpfc_nvme_rport *oldrport;
 	struct nvme_fc_remote_port *remote_port;
 	struct nvme_fc_port_info rpinfo;
 	struct lpfc_nodelist *prev_ndlp;
@@ -2639,7 +2690,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
 	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
 	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
-	if (!ndlp->nrport)
+
+	oldrport = lpfc_ndlp_get_nrport(ndlp);
+	if (!oldrport)
 		lpfc_nlp_get(ndlp);
 
 	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
@@ -2648,9 +2701,15 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		 * a resume of the existing rport.  Else this is a
 		 * new rport.
 		 */
+		/* Guard against an unregister/reregister
+		 * race that leaves the WAIT flag set.
+		 */
+		spin_lock_irq(&vport->phba->hbalock);
+		ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
+		spin_unlock_irq(&vport->phba->hbalock);
 		rport = remote_port->private;
-		if (ndlp->nrport) {
-			if (ndlp->nrport == remote_port->private) {
+		if (oldrport) {
+			if (oldrport == remote_port->private) {
 				/* Same remoteport.  Just reuse. */
 				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
 						 LOG_NVME_DISC,
@@ -2674,11 +2733,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			 */
 			spin_lock_irq(&vport->phba->hbalock);
 			ndlp->nrport = NULL;
+			ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
 			spin_unlock_irq(&vport->phba->hbalock);
 			rport->ndlp = NULL;
 			rport->remoteport = NULL;
-			if (prev_ndlp)
-				lpfc_nlp_put(ndlp);
+
+			/* The reference is only removed if the previous NDLP
+			 * is no longer active. It might just be a swap, in
+			 * which case removing the reference would cause a
+			 * premature cleanup.
+			 */
+			if (prev_ndlp && prev_ndlp != ndlp) {
+				if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
+				    (!prev_ndlp->nrport))
+					lpfc_nlp_put(prev_ndlp);
+			}
 		}
 
 		/* Clean bind the rport to the ndlp. */
@@ -2746,7 +2814,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	if (!lport)
 		goto input_err;
 
-	rport = ndlp->nrport;
+	rport = lpfc_ndlp_get_nrport(ndlp);
 	if (!rport)
 		goto input_err;
 
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index 9216653e0441a2754900ad91343d09d84e7d944a..53236974f2dd7545a25f6f0713efba0ef461679a 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -30,17 +30,31 @@
 #define LPFC_NVME_FB_SHIFT		9
 #define LPFC_NVME_MAX_FB		(1 << 20)	/* 1M */
 
+#define lpfc_ndlp_get_nrport(ndlp)					\
+	((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG))	\
+	? NULL : ndlp->nrport)
+
 struct lpfc_nvme_qhandle {
 	uint32_t index;		/* WQ index to use */
 	uint32_t qidx;		/* queue index passed to create */
 	uint32_t cpu_id;	/* current cpu id at time of create */
 };
 
+struct lpfc_nvme_ctrl_stat {
+	atomic_t fc4NvmeInputRequests;
+	atomic_t fc4NvmeOutputRequests;
+	atomic_t fc4NvmeControlRequests;
+	atomic_t fc4NvmeIoCmpls;
+};
+
 /* Declare nvme-based local and remote port definitions. */
 struct lpfc_nvme_lport {
 	struct lpfc_vport *vport;
 	struct completion lport_unreg_done;
 	/* Add stats counters here */
+	struct lpfc_nvme_ctrl_stat *cstat;
+	atomic_t fc4NvmeLsRequests;
+	atomic_t fc4NvmeLsCmpls;
 	atomic_t xmt_fcp_noxri;
 	atomic_t xmt_fcp_bad_ndlp;
 	atomic_t xmt_fcp_qdepth;
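The lpfc_ndlp_get_nrport() macro above is what makes the NLP_WAIT_FOR_UNREG guard in lpfc_nvme.c effective: while an unregister upcall is pending, every reader sees NULL instead of a pointer that may be torn down underneath it. A short usage sketch under the driver's hbalock convention:

    struct lpfc_nvme_rport *rport;

    spin_lock_irq(&vport->phba->hbalock);
    rport = lpfc_ndlp_get_nrport(ndlp);   /* NULL while unreg pending */
    spin_unlock_irq(&vport->phba->hbalock);
    if (!rport)
            return;                       /* rport gone or mid-teardown */
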
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index c1bcef3f103c3482fbe3331c9204ac8ceb0279ff..81f520abfd6424b075c77f4a995a82704143889e 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -22,8 +22,10 @@
  ********************************************************************/
 
 #define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
-#define LPFC_NVMET_RQE_DEF_COUNT	512
-#define LPFC_NVMET_SUCCESS_LEN	12
+#define LPFC_NVMET_RQE_MIN_POST		128
+#define LPFC_NVMET_RQE_DEF_POST		512
+#define LPFC_NVMET_RQE_DEF_COUNT	2048
+#define LPFC_NVMET_SUCCESS_LEN		12
 
 #define LPFC_NVMET_MRQ_OFF		0xffff
 #define LPFC_NVMET_MRQ_AUTO		0
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 050f04418f5fb2f4f95c5b5f655a256733dc80a8..7932bf30c8d73c1bd02efd4b8a1f1cdd52ee6e42 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1021,7 +1021,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 		if (lpfc_test_rrq_active(phba, ndlp,
 					 lpfc_cmd->cur_iocbq.sli4_lxritag))
 			continue;
-		list_del(&lpfc_cmd->list);
+		list_del_init(&lpfc_cmd->list);
 		found = 1;
 		break;
 	}
@@ -1036,7 +1036,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 			if (lpfc_test_rrq_active(
 				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
 				continue;
-			list_del(&lpfc_cmd->list);
+			list_del_init(&lpfc_cmd->list);
 			found = 1;
 			break;
 		}
@@ -3983,9 +3983,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	}
 #endif
 
-	if (pnode && NLP_CHK_NODE_ACT(pnode))
-		atomic_dec(&pnode->cmd_pending);
-
 	if (lpfc_cmd->status) {
 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
 		    (lpfc_cmd->result & IOERR_DRVR_MASK))
@@ -4125,6 +4122,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
 		spin_lock_irqsave(shost->host_lock, flags);
 		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+			atomic_dec(&pnode->cmd_pending);
 			if (pnode->cmd_qdepth >
 				atomic_read(&pnode->cmd_pending) &&
 				(atomic_read(&pnode->cmd_pending) >
@@ -4138,16 +4136,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		}
 		spin_unlock_irqrestore(shost->host_lock, flags);
 	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
-		if ((pnode->cmd_qdepth != vport->cfg_tgt_queue_depth) &&
-		    time_after(jiffies, pnode->last_change_time +
-			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
-			spin_lock_irqsave(shost->host_lock, flags);
-			pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
-			pnode->last_change_time = jiffies;
-			spin_unlock_irqrestore(shost->host_lock, flags);
-		}
+		atomic_dec(&pnode->cmd_pending);
 	}
-
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 
 	spin_lock_irqsave(&phba->hbalock, flags);
@@ -4591,6 +4581,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 				 ndlp->nlp_portname.u.wwn[7]);
 		goto out_tgt_busy;
 	}
+	atomic_inc(&ndlp->cmd_pending);
+
 	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
 	if (lpfc_cmd == NULL) {
 		lpfc_rampdown_queue_depth(phba);
@@ -4643,11 +4635,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 
 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
 
-	atomic_inc(&ndlp->cmd_pending);
 	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
 	if (err) {
-		atomic_dec(&ndlp->cmd_pending);
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 				 "3376 FCP could not issue IOCB err %x"
 				 "FCP cmd x%x <%d/%llu> "
@@ -4691,6 +4681,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
  out_host_busy:
+	atomic_dec(&ndlp->cmd_pending);
 	return SCSI_MLQUEUE_HOST_BUSY;
 
  out_tgt_busy:
@@ -4725,7 +4716,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	int ret = SUCCESS, status = 0;
 	struct lpfc_sli_ring *pring_s4;
 	int ret_val;
-	unsigned long flags, iflags;
+	unsigned long flags;
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 
 	status = fc_block_scsi_eh(cmnd);
@@ -4825,16 +4816,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
 	abtsiocb->vport = vport;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		pring_s4 = lpfc_sli4_calc_ring(phba, iocb);
+		pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb);
 		if (pring_s4 == NULL) {
 			ret = FAILED;
 			goto out_unlock;
 		}
 		/* Note: both hbalock and ring_lock must be set here */
-		spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+		spin_lock(&pring_s4->ring_lock);
 		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
 						abtsiocb, 0);
-		spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+		spin_unlock(&pring_s4->ring_lock);
 	} else {
 		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
 						abtsiocb, 0);
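Here and in lpfc_sli_abort_taskmgmt below, the SLI4 ring_lock is now taken with plain spin_lock(): the caller already holds phba->hbalock via spin_lock_irqsave(), so interrupts are off and a second irqsave/irqrestore on the inner lock is redundant. The resulting nesting convention, in sketch form:

    spin_lock_irqsave(&phba->hbalock, flags);   /* outer: disables IRQs */
    spin_lock(&pring_s4->ring_lock);            /* inner: IRQs already off */
    ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, abtsiocb, 0);
    spin_unlock(&pring_s4->ring_lock);
    spin_unlock_irqrestore(&phba->hbalock, flags);
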
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index cb17e2b2be8187458c07d2656709dde8aad43b6e..38993efbe37e7d9ac8da0545e253cf88a87bd5ed 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -7199,7 +7199,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 			lpfc_post_rq_buffer(
 				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
 				phba->sli4_hba.nvmet_mrq_data[i],
-				LPFC_NVMET_RQE_DEF_COUNT, i);
+				phba->cfg_nvmet_mrq_post, i);
 		}
 	}
 
@@ -11300,11 +11300,11 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 	unsigned long iflags;
 	struct lpfc_sli_ring *pring_s4;
 
-	spin_lock_irq(&phba->hbalock);
+	spin_lock_irqsave(&phba->hbalock, iflags);
 
 	/* all I/Os are in process of being flushed */
 	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
-		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		return 0;
 	}
 	sum = 0;
@@ -11366,14 +11366,14 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
 
 		if (phba->sli_rev == LPFC_SLI_REV4) {
-			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
-			if (pring_s4 == NULL)
+			pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
+			if (!pring_s4)
 				continue;
 			/* Note: both hbalock and ring_lock must be set here */
-			spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+			spin_lock(&pring_s4->ring_lock);
 			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
 							abtsiocbq, 0);
-			spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+			spin_unlock(&pring_s4->ring_lock);
 		} else {
 			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
 							abtsiocbq, 0);
@@ -11385,7 +11385,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 		else
 			sum++;
 	}
-	spin_unlock_irq(&phba->hbalock);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
 	return sum;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index cf64aca82bd059dceccd7779da96c15fa35cb87d..179e870a00b4991c7164061ff758d6aacbd797d7 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -592,6 +592,7 @@ struct lpfc_sli4_hba {
 	uint32_t ue_to_sr;
 	uint32_t ue_to_rp;
 	struct lpfc_register sli_intf;
+	struct lpfc_register sli_asic_ver;
 	struct lpfc_pc_sli4_params pc_sli4_params;
 	struct lpfc_bbscn_params bbscn_params;
 	struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e8b089abbfb361835d1416470fbd92b1273e485e..0cd474bb0bdded3c56f5c6809c635df93815377a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "12.0.0.1"
+#define LPFC_DRIVER_VERSION "12.0.0.2"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 27fab8235ea5c27326abfe4356b8629fbc8ca622..75dc25f78336b89a659b91f9ab80b9e7c0a99765 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"07.704.04.00-rc1"
-#define MEGASAS_RELDATE				"December 7, 2017"
+#define MEGASAS_VERSION				"07.705.02.00-rc1"
+#define MEGASAS_RELDATE				"April 4, 2018"
 
 /*
  * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index b89c6e6c0589ade35371e1fb158e9229ad82ca89..026fad818b2ab8a02d4b4c52296e5ea6a07d51c9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2224,9 +2224,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
 			       sizeof(struct MR_LD_VF_AFFILIATION_111));
 	else {
 		new_affiliation_111 =
-			pci_alloc_consistent(instance->pdev,
-					     sizeof(struct MR_LD_VF_AFFILIATION_111),
-					     &new_affiliation_111_h);
+			pci_zalloc_consistent(instance->pdev,
+					      sizeof(struct MR_LD_VF_AFFILIATION_111),
+					      &new_affiliation_111_h);
 		if (!new_affiliation_111) {
 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
 			       "memory for new affiliation for scsi%d\n",
@@ -2234,8 +2234,6 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
 			megasas_return_cmd(instance, cmd);
 			return -ENOMEM;
 		}
-		memset(new_affiliation_111, 0,
-		       sizeof(struct MR_LD_VF_AFFILIATION_111));
 	}
 
 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -2333,10 +2331,10 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
 		       sizeof(struct MR_LD_VF_AFFILIATION));
 	else {
 		new_affiliation =
-			pci_alloc_consistent(instance->pdev,
-					     (MAX_LOGICAL_DRIVES + 1) *
-					     sizeof(struct MR_LD_VF_AFFILIATION),
-					     &new_affiliation_h);
+			pci_zalloc_consistent(instance->pdev,
+					      (MAX_LOGICAL_DRIVES + 1) *
+					      sizeof(struct MR_LD_VF_AFFILIATION),
+					      &new_affiliation_h);
 		if (!new_affiliation) {
 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
 			       "memory for new affiliation for scsi%d\n",
@@ -2344,8 +2342,6 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
 			megasas_return_cmd(instance, cmd);
 			return -ENOMEM;
 		}
-		memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
-		       sizeof(struct MR_LD_VF_AFFILIATION));
 	}
 
 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -5636,16 +5632,15 @@ megasas_get_seq_num(struct megasas_instance *instance,
 	}
 
 	dcmd = &cmd->frame->dcmd;
-	el_info = pci_alloc_consistent(instance->pdev,
-				       sizeof(struct megasas_evt_log_info),
-				       &el_info_h);
+	el_info = pci_zalloc_consistent(instance->pdev,
+					sizeof(struct megasas_evt_log_info),
+					&el_info_h);
 
 	if (!el_info) {
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
 
-	memset(el_info, 0, sizeof(*el_info));
 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 
 	dcmd->cmd = MFI_CMD_DCMD;
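These megaraid conversions lean on pci_zalloc_consistent() zeroing the DMA buffer for the caller; per the compat wrappers in include/linux/pci-dma-compat.h it folds the old two-step pattern into one call, roughly:

    void *buf;
    dma_addr_t dma_handle;

    buf = pci_zalloc_consistent(pdev, sz, &dma_handle);

    /* ...behaves like the code it replaces: */
    buf = pci_alloc_consistent(pdev, sz, &dma_handle);
    if (buf)
            memset(buf, 0, sz);
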
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f4d988dd1e9d1a8389f4486a9b08bdfe9eb31a84..98a7a090b75e92c9d3b94ecc14783449ebd298c0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -684,15 +684,14 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
 	array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
 		     MAX_MSIX_QUEUES_FUSION;
 
-	fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, array_size,
-						 &fusion->rdpq_phys);
+	fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size,
+						  &fusion->rdpq_phys);
 	if (!fusion->rdpq_virt) {
 		dev_err(&instance->pdev->dev,
 			"Failed from %s %d\n",  __func__, __LINE__);
 		return -ENOMEM;
 	}
 
-	memset(fusion->rdpq_virt, 0, array_size);
 	msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
 
 	fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
@@ -2981,6 +2980,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
 		pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
 		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
 	} else {
+		if (os_timeout_value)
+			os_timeout_value++;
+
 		/* system pd Fast Path */
 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
 		timeout_limit = (scmd->device->type == TYPE_DISK) ?
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index fe97401ad1927b9a0edc7de7afbdbe2007d80a4f..2e6fd864723b185cd0f540512c33d0f34af19013 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2693,22 +2693,4 @@ static struct pci_driver mvumi_pci_driver = {
 #endif
 };
 
-/**
- * mvumi_init - Driver load entry point
- */
-static int __init mvumi_init(void)
-{
-	return pci_register_driver(&mvumi_pci_driver);
-}
-
-/**
- * mvumi_exit - Driver unload entry point
- */
-static void __exit mvumi_exit(void)
-{
-
-	pci_unregister_driver(&mvumi_pci_driver);
-}
-
-module_init(mvumi_init);
-module_exit(mvumi_exit);
+module_pci_driver(mvumi_pci_driver);
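module_pci_driver() generates exactly the init/exit boilerplate deleted above; via module_driver() it expands to the equivalent of:

    static int __init mvumi_pci_driver_init(void)
    {
            return pci_register_driver(&mvumi_pci_driver);
    }
    module_init(mvumi_pci_driver_init);

    static void __exit mvumi_pci_driver_exit(void)
    {
            pci_unregister_driver(&mvumi_pci_driver);
    }
    module_exit(mvumi_pci_driver_exit);
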
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index b784002ef0bd00369c10f79aced81290a2366a3c..c5a8756384bcf1b120a54b0d1a1bc391137bc964 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -4,7 +4,7 @@
 #include <scsi/scsi_dbg.h>
 #include "scsi_debugfs.h"
 
-#define SCSI_CMD_FLAG_NAME(name) [ilog2(SCMD_##name)] = #name
+#define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name
 static const char *const scsi_cmd_flags[] = {
 	SCSI_CMD_FLAG_NAME(TAGGED),
 	SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA),
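This table (and its twin in scsi_sysfs.c below) is indexed by bit position, so each designated initializer needs a compile-time log2; const_ilog2() from linux/log2.h guarantees an integer constant expression even for 64-bit flag values. The pattern in isolation, with hypothetical FOO_* flags:

    #define FOO_TAGGED      (1ULL << 0)
    #define FOO_QUIESCED    (1ULL << 1)
    #define FOO_FLAG_NAME(name) [const_ilog2(FOO_##name)] = #name

    static const char *const foo_flag_names[] = {
            FOO_FLAG_NAME(TAGGED),      /* index 0 */
            FOO_FLAG_NAME(QUIESCED),    /* index 1 */
    };
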
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index dd107dc4db0e55430d077110c0a08c693689636f..c4cbfd07b9167f0e29b635b9b24e65a6df3826d9 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -161,15 +161,16 @@ static struct {
 	{"DGC", "RAID", NULL, BLIST_SPARSELUN},	/* EMC CLARiiON, storage on LUN 0 */
 	{"DGC", "DISK", NULL, BLIST_SPARSELUN},	/* EMC CLARiiON, no storage on LUN 0 */
 	{"EMC",  "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
-	{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
+	{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN |
+	 BLIST_REPORTLUN2 | BLIST_RETRY_ITF},
 	{"EMULEX", "MD21/S2     ESDI", NULL, BLIST_SINGLELUN},
 	{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
 	{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
 	{"easyRAID", "F8", NULL, BLIST_NOREPORTLUN},
 	{"FSC", "CentricStor", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+	{"FUJITSU", "ETERNUS_DXM", "*", BLIST_RETRY_ASC_C1},
 	{"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
-	{"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36},
-	{"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36},
+	{"Generic", "USB Storage-SMC", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, /* FW: 0180 and 0207 */
 	{"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
 	{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
 	{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
@@ -361,8 +362,22 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
 	scsi_strcpy_devinfo("model", devinfo->model, sizeof(devinfo->model),
 			    model, compatible);
 
-	if (strflags)
-		flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
+	if (strflags) {
+		unsigned long long val;
+		int ret = kstrtoull(strflags, 0, &val);
+
+		if (ret != 0) {
+			kfree(devinfo);
+			return ret;
+		}
+		flags = (__force blist_flags_t)val;
+	}
+	if (flags & __BLIST_UNUSED_MASK) {
+		pr_err("scsi_devinfo (%s:%s): unsupported flags 0x%llx\n",
+		       vendor, model, flags & __BLIST_UNUSED_MASK);
+		kfree(devinfo);
+		return -EINVAL;
+	}
 	devinfo->flags = flags;
 	devinfo->compatible = compatible;
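The stricter parse above matters in two ways: kstrtoull() rejects trailing junk and overflow where simple_strtoul() silently truncated, and bits with no BLIST_* definition are now refused rather than stored. In isolation:

    unsigned long long val;
    int ret = kstrtoull("0x400", 0, &val);  /* base 0: 0x.., 0.., decimal */

    if (ret)                    /* -EINVAL on junk, -ERANGE on overflow */
            return ret;
    if (val & __BLIST_UNUSED_MASK)
            return -EINVAL;     /* bit not defined in scsi_devinfo.h */
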
 
@@ -615,7 +630,7 @@ static int devinfo_seq_show(struct seq_file *m, void *v)
 	    devinfo_table->name)
 		seq_printf(m, "[%s]:\n", devinfo_table->name);
 
-	seq_printf(m, "'%.8s' '%.16s' 0x%x\n",
+	seq_printf(m, "'%.8s' '%.16s' 0x%llx\n",
 		   devinfo->vendor, devinfo->model, devinfo->flags);
 	return 0;
 }
@@ -734,9 +749,9 @@ MODULE_PARM_DESC(dev_flags,
 	 " list entries for vendor and model with an integer value of flags"
 	 " to the scsi device info list");
 
-module_param_named(default_dev_flags, scsi_default_dev_flags, int, S_IRUGO|S_IWUSR);
+module_param_named(default_dev_flags, scsi_default_dev_flags, ullong, 0644);
 MODULE_PARM_DESC(default_dev_flags,
-		 "scsi default device flag integer value");
+		 "scsi default device flag uint64_t value");
 
 /**
  * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 188f30572aa1f45beebc9beb32d2c99b5fc79141..5a58cbf3a75da9123899ce668934e002933d1416 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -58,7 +58,10 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
 	{"IBM", "3526",			"rdac", },
 	{"IBM", "3542",			"rdac", },
 	{"IBM", "3552",			"rdac", },
-	{"SGI", "TP9",			"rdac", },
+	{"SGI", "TP9300",		"rdac", },
+	{"SGI", "TP9400",		"rdac", },
+	{"SGI", "TP9500",		"rdac", },
+	{"SGI", "TP9700",		"rdac", },
 	{"SGI", "IS",			"rdac", },
 	{"STK", "OPENstorage",		"rdac", },
 	{"STK", "FLEXLINE 380",		"rdac", },
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 946039117bf4f2c655db2bb1129ee75df1f1df21..94d2047e0096aee29eb3238aa69982a1306ef2dc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -38,6 +38,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsi_dh.h>
+#include <scsi/scsi_devinfo.h>
 #include <scsi/sg.h>
 
 #include "scsi_priv.h"
@@ -525,6 +526,12 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
 		if (sshdr.asc == 0x10) /* DIF */
 			return SUCCESS;
 
+		if (sshdr.asc == 0x44 && sdev->sdev_bflags & BLIST_RETRY_ITF)
+			return ADD_TO_MLQUEUE;
+		if (sshdr.asc == 0xc1 && sshdr.ascq == 0x01 &&
+		    sdev->sdev_bflags & BLIST_RETRY_ASC_C1)
+			return ADD_TO_MLQUEUE;
+
 		return NEEDS_RETRY;
 	case NOT_READY:
 	case UNIT_ATTENTION:
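Both new checks sit in the ABORTED COMMAND arm of scsi_check_sense(): a device blacklisted with BLIST_RETRY_ITF gets a requeue on ASC 0x44 (internal target failure), and an ETERNUS array with BLIST_RETRY_ASC_C1 gets the same for the vendor ASC/ASCQ 0xc1/0x01. Reduced to its core, the first check is:

    case ABORTED_COMMAND:
            if (sshdr.asc == 0x44 &&
                (sdev->sdev_bflags & BLIST_RETRY_ITF))
                    return ADD_TO_MLQUEUE;  /* requeue, don't burn a retry */
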
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 1e36c9a9ad17df678529e13bf0dfc0f888a3c35e..7943b762c12de7609d28168316e57b3ac15c91be 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -968,7 +968,7 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
 
 #define BLIST_FLAG_NAME(name)					\
-	[ilog2((__force unsigned int)BLIST_##name)] = #name
+	[const_ilog2((__force __u64)BLIST_##name)] = #name
 static const char *const sdev_bflags_name[] = {
 #include "scsi_devinfo_tbl.c"
 };
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 0d663b5e45bb816c29920e84123096532da749d8..392c7d078ae37eae9f2d966d3b6a3c1a1c531485 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -74,12 +74,12 @@ struct scsi_disk {
 	struct gendisk	*disk;
 	struct opal_dev *opal_dev;
 #ifdef CONFIG_BLK_DEV_ZONED
-	unsigned int	nr_zones;
-	unsigned int	zone_blocks;
-	unsigned int	zone_shift;
-	unsigned int	zones_optimal_open;
-	unsigned int	zones_optimal_nonseq;
-	unsigned int	zones_max_open;
+	u32		nr_zones;
+	u32		zone_blocks;
+	u32		zone_shift;
+	u32		zones_optimal_open;
+	u32		zones_optimal_nonseq;
+	u32		zones_max_open;
 #endif
 	atomic_t	openers;
 	sector_t	capacity;	/* size in logical blocks */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 210407cd2341bf9bb89f5831939d4c0a204af485..323e3dc4bc591622b293a70ceee49b5084f93418 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -299,16 +299,6 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
 	case REQ_OP_WRITE:
 	case REQ_OP_WRITE_ZEROES:
 	case REQ_OP_WRITE_SAME:
-
-		if (result &&
-		    sshdr->sense_key == ILLEGAL_REQUEST &&
-		    sshdr->asc == 0x21)
-			/*
-			 * INVALID ADDRESS FOR WRITE error: It is unlikely that
-			 * retrying write requests failed with any kind of
-			 * alignement error will result in success. So don't.
-			 */
-			cmd->allowed = 0;
 		break;
 
 	case REQ_OP_ZONE_REPORT:
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index c198b96368dd69beba2dcffbe16efe7a3c74ae1d..c2b7d347ede4c065f8b86b0bc31b01c37fa16c87 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1192,7 +1192,7 @@ sg_fasync(int fd, struct file *filp, int mode)
 	return fasync_helper(fd, filp, mode, &sfp->async_qp);
 }
 
-static int
+static vm_fault_t
 sg_vma_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 6c399480783d3051aac5ad65e9b4865ed99eca41..e64489a4a9a610e76109e4b4fa8fa6a3f6c86a51 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3878,7 +3878,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
 {
 	struct st_buffer *tb;
 
-	tb = kzalloc(sizeof(struct st_buffer), GFP_ATOMIC);
+	tb = kzalloc(sizeof(struct st_buffer), GFP_KERNEL);
 	if (!tb) {
 		printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n");
 		return NULL;
@@ -3889,7 +3889,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
 	tb->buffer_size = 0;
 
 	tb->reserved_pages = kzalloc(max_sg * sizeof(struct page *),
-				     GFP_ATOMIC);
+				     GFP_KERNEL);
 	if (!tb->reserved_pages) {
 		kfree(tb);
 		return NULL;
@@ -4290,7 +4290,7 @@ static int st_probe(struct device *dev)
 		goto out_buffer_free;
 	}
 
-	tpnt = kzalloc(sizeof(struct scsi_tape), GFP_ATOMIC);
+	tpnt = kzalloc(sizeof(struct scsi_tape), GFP_KERNEL);
 	if (tpnt == NULL) {
 		sdev_printk(KERN_ERR, SDp,
 			    "st: Can't allocate device descriptor.\n");
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8c51d628b52edfd7e891182919fab16b469b0f3c..7ba99210928138a06e11b49ed274a77b6a1de264 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -395,6 +395,12 @@ MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
 
 module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
 MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
+
+static int ring_avail_percent_lowater = 10;
+module_param(ring_avail_percent_lowater, int, S_IRUGO);
+MODULE_PARM_DESC(ring_avail_percent_lowater,
+		"Select a channel only if its free ring space exceeds this percentage");
+
 /*
  * Timeout in seconds for all devices managed by this driver.
  */
@@ -468,6 +474,13 @@ struct storvsc_device {
 	 * Mask of CPUs bound to subchannels.
 	 */
 	struct cpumask alloced_cpus;
+	/*
+	 * Pre-allocated struct cpumask for each hardware queue.
+	 * struct cpumask is used when selecting out-going channels. It is a
+	 * big structure, defaulting to 1024 bytes when CONFIG_MAXSMP=y.
+	 * Pre-allocate it to avoid allocation on the kernel stack.
+	 */
+	struct cpumask *cpumask_chns;
 	/* Used for vsc/vsp channel reset process */
 	struct storvsc_cmd_request init_request;
 	struct storvsc_cmd_request reset_request;
@@ -872,6 +885,13 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
 	if (stor_device->stor_chns == NULL)
 		return -ENOMEM;
 
+	stor_device->cpumask_chns = kcalloc(num_possible_cpus(),
+			sizeof(struct cpumask), GFP_KERNEL);
+	if (stor_device->cpumask_chns == NULL) {
+		kfree(stor_device->stor_chns);
+		return -ENOMEM;
+	}
+
 	stor_device->stor_chns[device->channel->target_cpu] = device->channel;
 	cpumask_set_cpu(device->channel->target_cpu,
 			&stor_device->alloced_cpus);
@@ -1232,6 +1252,7 @@ static int storvsc_dev_remove(struct hv_device *device)
 	vmbus_close(device->channel);
 
 	kfree(stor_device->stor_chns);
+	kfree(stor_device->cpumask_chns);
 	kfree(stor_device);
 	return 0;
 }
@@ -1241,7 +1262,7 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
 {
 	u16 slot = 0;
 	u16 hash_qnum;
-	struct cpumask alloced_mask;
+	struct cpumask *alloced_mask = &stor_device->cpumask_chns[q_num];
 	int num_channels, tgt_cpu;
 
 	if (stor_device->num_sc == 0)
@@ -1257,10 +1278,10 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
 	 * III. Mapping is persistent.
 	 */
 
-	cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
+	cpumask_and(alloced_mask, &stor_device->alloced_cpus,
 		    cpumask_of_node(cpu_to_node(q_num)));
 
-	num_channels = cpumask_weight(&alloced_mask);
+	num_channels = cpumask_weight(alloced_mask);
 	if (num_channels == 0)
 		return stor_device->device->channel;
 
@@ -1268,7 +1289,7 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
 	while (hash_qnum >= num_channels)
 		hash_qnum -= num_channels;
 
-	for_each_cpu(tgt_cpu, &alloced_mask) {
+	for_each_cpu(tgt_cpu, alloced_mask) {
 		if (slot == hash_qnum)
 			break;
 		slot++;
@@ -1285,9 +1306,9 @@ static int storvsc_do_io(struct hv_device *device,
 {
 	struct storvsc_device *stor_device;
 	struct vstor_packet *vstor_packet;
-	struct vmbus_channel *outgoing_channel;
+	struct vmbus_channel *outgoing_channel, *channel;
 	int ret = 0;
-	struct cpumask alloced_mask;
+	struct cpumask *alloced_mask;
 	int tgt_cpu;
 
 	vstor_packet = &request->vstor_packet;
@@ -1301,22 +1322,53 @@ static int storvsc_do_io(struct hv_device *device,
 	/*
 	 * Select an appropriate channel to send the request out.
 	 */
-
 	if (stor_device->stor_chns[q_num] != NULL) {
 		outgoing_channel = stor_device->stor_chns[q_num];
-		if (outgoing_channel->target_cpu == smp_processor_id()) {
+		if (outgoing_channel->target_cpu == q_num) {
 			/*
 			 * Ideally, we want to pick a different channel if
 			 * available on the same NUMA node.
 			 */
-			cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
+			alloced_mask = &stor_device->cpumask_chns[q_num];
+			cpumask_and(alloced_mask, &stor_device->alloced_cpus,
 				    cpumask_of_node(cpu_to_node(q_num)));
-			for_each_cpu_wrap(tgt_cpu, &alloced_mask,
-					outgoing_channel->target_cpu + 1) {
-				if (tgt_cpu != outgoing_channel->target_cpu) {
-					outgoing_channel =
-					stor_device->stor_chns[tgt_cpu];
-					break;
+
+			for_each_cpu_wrap(tgt_cpu, alloced_mask, q_num + 1) {
+				if (tgt_cpu == q_num)
+					continue;
+				channel = stor_device->stor_chns[tgt_cpu];
+				if (hv_get_avail_to_write_percent(
+							&channel->outbound)
+						> ring_avail_percent_lowater) {
+					outgoing_channel = channel;
+					goto found_channel;
+				}
+			}
+
+			/*
+			 * All the other channels on the same NUMA node are
+			 * busy. Try to use the channel on the current CPU
+			 */
+			if (hv_get_avail_to_write_percent(
+						&outgoing_channel->outbound)
+					> ring_avail_percent_lowater)
+				goto found_channel;
+
+			/*
+			 * If we reach here, all the channels on the current
+			 * NUMA node are busy. Try to find a channel in
+			 * other NUMA nodes
+			 */
+			cpumask_andnot(alloced_mask, &stor_device->alloced_cpus,
+					cpumask_of_node(cpu_to_node(q_num)));
+
+			for_each_cpu(tgt_cpu, alloced_mask) {
+				channel = stor_device->stor_chns[tgt_cpu];
+				if (hv_get_avail_to_write_percent(
+							&channel->outbound)
+						> ring_avail_percent_lowater) {
+					outgoing_channel = channel;
+					goto found_channel;
 				}
 			}
 		}
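The rewritten selection loop prefers a channel whose outbound ring still has headroom: hv_get_avail_to_write_percent() (added to the Hyper-V ring-buffer code earlier in this series) reports free space in percent, and a channel is picked only when that exceeds ring_avail_percent_lowater. The predicate all three passes open-code, as a sketch with a hypothetical helper name:

    static bool storvsc_channel_has_room(struct vmbus_channel *chan)
    {
            return hv_get_avail_to_write_percent(&chan->outbound) >
                   ring_avail_percent_lowater;
    }
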
@@ -1324,7 +1376,7 @@ static int storvsc_do_io(struct hv_device *device,
 		outgoing_channel = get_og_chn(stor_device, q_num);
 	}
 
-
+found_channel:
 	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
 
 	vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
@@ -1382,9 +1434,6 @@ static int storvsc_device_alloc(struct scsi_device *sdevice)
 
 static int storvsc_device_configure(struct scsi_device *sdevice)
 {
-
-	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
-
 	blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
 
 	/* Ensure there are no gaps in presented sgls */
@@ -1722,15 +1771,19 @@ static int storvsc_probe(struct hv_device *device,
 		max_targets = STORVSC_MAX_TARGETS;
 		max_channels = STORVSC_MAX_CHANNELS;
 		/*
-		 * On Windows8 and above, we support sub-channels for storage.
+		 * On Windows8 and above, we support sub-channels for storage
+		 * on SCSI and FC controllers.
 		 * The number of sub-channels offered is based on the number of
 		 * VCPUs in the guest.
 		 */
-		max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
+		if (!dev_is_ide)
+			max_sub_channels =
+				(num_cpus - 1) / storvsc_vcpus_per_sub_channel;
 	}
 
-	scsi_driver.can_queue = (max_outstanding_req_per_channel *
-				 (max_sub_channels + 1));
+	scsi_driver.can_queue = max_outstanding_req_per_channel *
+				(max_sub_channels + 1) *
+				(100 - ring_avail_percent_lowater) / 100;
 
 	host = scsi_host_alloc(&scsi_driver,
 			       sizeof(struct hv_host_device));
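The can_queue derating keeps the queue depth consistent with the low-water check above, so requests that the ring-space test would refuse are never admitted in the first place. A worked example with hypothetical numbers:

    /*
     *   max_outstanding_req_per_channel = 200
     *   max_sub_channels                = 3    (4 channels total)
     *   ring_avail_percent_lowater      = 10
     *
     * can_queue = 200 * (3 + 1) * (100 - 10) / 100 = 720,
     * i.e. 90% of the theoretical 800, matching the 10% of ring
     * space the channel-selection code will not dip into.
     */
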
@@ -1861,6 +1914,7 @@ static int storvsc_probe(struct hv_device *device,
 
 err_out1:
 	kfree(stor_device->stor_chns);
+	kfree(stor_device->cpumask_chns);
 	kfree(stor_device);
 
 err_out0:
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
new file mode 100644
index 0000000000000000000000000000000000000000..bb70882e6b56e0bd4f1d37e367e205bbb16f19ac
--- /dev/null
+++ b/drivers/scsi/zorro_esp.c
@@ -0,0 +1,1172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ESP front-end for Amiga ZORRO SCSI systems.
+ *
+ * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
+ *
+ * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
+ *               migration to ESP SCSI core
+ *
+ * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
+ *               Blizzard 1230 DMA and probe function fixes
+ *
+ * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
+ */
+/*
+ * ZORRO bus code from:
+ */
+/*
+ * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
+ *		Amiga MacroSystemUS WarpEngine SCSI controller.
+ *		Amiga Technologies/DKB A4091 SCSI controller.
+ *
+ * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
+ * plus modifications of the 53c7xx.c driver to support the Amiga.
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#define pr_fmt(fmt)        KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/zorro.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+
+#include "esp_scsi.h"
+
+MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
+MODULE_DESCRIPTION("Amiga Zorro NCR53C9x (ESP) driver");
+MODULE_LICENSE("GPL");
+
+/* per-board register layout definitions */
+
+/* Blizzard 1230 DMA interface */
+
+struct blz1230_dma_registers {
+	unsigned char dma_addr;		/* DMA address      [0x0000] */
+	unsigned char dmapad2[0x7fff];
+	unsigned char dma_latch;	/* DMA latch        [0x8000] */
+};
+
+/* Blizzard 1230II DMA interface */
+
+struct blz1230II_dma_registers {
+	unsigned char dma_addr;		/* DMA address      [0x0000] */
+	unsigned char dmapad2[0xf];
+	unsigned char dma_latch;	/* DMA latch        [0x0010] */
+};
+
+/* Blizzard 2060 DMA interface */
+
+struct blz2060_dma_registers {
+	unsigned char dma_led_ctrl;	/* DMA led control   [0x000] */
+	unsigned char dmapad1[0x0f];
+	unsigned char dma_addr0;	/* DMA address (MSB) [0x010] */
+	unsigned char dmapad2[0x03];
+	unsigned char dma_addr1;	/* DMA address       [0x014] */
+	unsigned char dmapad3[0x03];
+	unsigned char dma_addr2;	/* DMA address       [0x018] */
+	unsigned char dmapad4[0x03];
+	unsigned char dma_addr3;	/* DMA address (LSB) [0x01c] */
+};
+
+/* DMA control bits */
+#define DMA_WRITE 0x80000000
+
+/* Cyberstorm DMA interface */
+
+struct cyber_dma_registers {
+	unsigned char dma_addr0;	/* DMA address (MSB) [0x000] */
+	unsigned char dmapad1[1];
+	unsigned char dma_addr1;	/* DMA address       [0x002] */
+	unsigned char dmapad2[1];
+	unsigned char dma_addr2;	/* DMA address       [0x004] */
+	unsigned char dmapad3[1];
+	unsigned char dma_addr3;	/* DMA address (LSB) [0x006] */
+	unsigned char dmapad4[0x3fb];
+	unsigned char cond_reg;		/* DMA cond    (ro)  [0x402] */
+#define ctrl_reg  cond_reg		/* DMA control (wo)  [0x402] */
+};
+
+/* DMA control bits */
+#define CYBER_DMA_WRITE  0x40	/* DMA direction. 1 = write */
+#define CYBER_DMA_Z3     0x20	/* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */
+
+/* DMA status bits */
+#define CYBER_DMA_HNDL_INTR 0x80	/* DMA IRQ pending? */
+
+/* The CyberStorm II DMA interface */
+struct cyberII_dma_registers {
+	unsigned char cond_reg;		/* DMA cond    (ro)  [0x000] */
+#define ctrl_reg  cond_reg		/* DMA control (wo)  [0x000] */
+	unsigned char dmapad4[0x3f];
+	unsigned char dma_addr0;	/* DMA address (MSB) [0x040] */
+	unsigned char dmapad1[3];
+	unsigned char dma_addr1;	/* DMA address       [0x044] */
+	unsigned char dmapad2[3];
+	unsigned char dma_addr2;	/* DMA address       [0x048] */
+	unsigned char dmapad3[3];
+	unsigned char dma_addr3;	/* DMA address (LSB) [0x04c] */
+};
+
+/* Fastlane DMA interface */
+
+struct fastlane_dma_registers {
+	unsigned char cond_reg;		/* DMA status  (ro) [0x0000] */
+#define ctrl_reg  cond_reg		/* DMA control (wo) [0x0000] */
+	char dmapad1[0x3f];
+	unsigned char clear_strobe;	/* DMA clear   (wo) [0x0040] */
+};
+
+/*
+ * The controller registers can be found in the Z2 config area at these
+ * offsets:
+ */
+#define FASTLANE_ESP_ADDR	0x1000001
+
+/* DMA status bits */
+#define FASTLANE_DMA_MINT	0x80
+#define FASTLANE_DMA_IACT	0x40
+#define FASTLANE_DMA_CREQ	0x20
+
+/* DMA control bits */
+#define FASTLANE_DMA_FCODE	0xa0
+#define FASTLANE_DMA_MASK	0xf3
+#define FASTLANE_DMA_WRITE	0x08	/* 1 = write */
+#define FASTLANE_DMA_ENABLE	0x04	/* Enable DMA */
+#define FASTLANE_DMA_EDI	0x02	/* Enable DMA IRQ ? */
+#define FASTLANE_DMA_ESI	0x01	/* Enable SCSI IRQ */
+
+/*
+ * private data used for driver
+ */
+struct zorro_esp_priv {
+	struct esp *esp;		/* our ESP instance - for Scsi_host* */
+	void __iomem *board_base;	/* virtual address (Zorro III board) */
+	int error;			/* PIO error flag */
+	int zorro3;			/* board is Zorro III */
+	unsigned char ctrl_data;	/* shadow copy of ctrl_reg */
+};
+
+/*
+ * On all implementations except for the Oktagon, padding between ESP
+ * registers is three bytes.
+ * On Oktagon, it is one byte - use a different accessor there.
+ *
+ * Oktagon needs PDMA - currently unsupported!
+ */
+
+static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+	writeb(val, esp->regs + (reg * 4UL));
+}
+
+static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
+{
+	return readb(esp->regs + (reg * 4UL));
+}
+
+static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
+				      size_t sz, int dir)
+{
+	return dma_map_single(esp->dev, buf, sz, dir);
+}
+
+static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+				  int num_sg, int dir)
+{
+	return dma_map_sg(esp->dev, sg, num_sg, dir);
+}
+
+static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+				  size_t sz, int dir)
+{
+	dma_unmap_single(esp->dev, addr, sz, dir);
+}
+
+static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+			      int num_sg, int dir)
+{
+	dma_unmap_sg(esp->dev, sg, num_sg, dir);
+}
+
+static int zorro_esp_irq_pending(struct esp *esp)
+{
+	/* check ESP status register; DMA has no status reg. */
+	if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
+		return 1;
+
+	return 0;
+}
+
+static int cyber_esp_irq_pending(struct esp *esp)
+{
+	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
+	unsigned char dma_status = readb(&dregs->cond_reg);
+
+	/* It's important to check the DMA IRQ bit in the correct way! */
+	return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
+		(dma_status & CYBER_DMA_HNDL_INTR));
+}
+
+static int fastlane_esp_irq_pending(struct esp *esp)
+{
+	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
+	unsigned char dma_status;
+
+	dma_status = readb(&dregs->cond_reg);
+
+	if (dma_status & FASTLANE_DMA_IACT)
+		return 0;	/* not our IRQ */
+
+	/* Return non-zero if ESP requested IRQ */
+	return (
+	   (dma_status & FASTLANE_DMA_CREQ) &&
+	   (!(dma_status & FASTLANE_DMA_MINT)) &&
+	   (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
+}
+
+static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
+					u32 dma_len)
+{
+	return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len;
+}
+
+static void zorro_esp_reset_dma(struct esp *esp)
+{
+	/* nothing to do here */
+}
+
+static void zorro_esp_dma_drain(struct esp *esp)
+{
+	/* nothing to do here */
+}
+
+static void zorro_esp_dma_invalidate(struct esp *esp)
+{
+	/* nothing to do here */
+}
+
+static void fastlane_esp_dma_invalidate(struct esp *esp)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
+	unsigned char *ctrl_data = &zep->ctrl_data;
+
+	*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
+	writeb(0, &dregs->clear_strobe);
+	z_writel(0, zep->board_base);
+}
+
+/*
+ * Programmed IO routines follow.
+ */
+
+static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
+{
+	int i = 500000;
+
+	do {
+		unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
+							& ESP_FF_FBYTES;
+
+		if (fbytes)
+			return fbytes;
+
+		udelay(2);
+	} while (--i);
+
+	pr_err("FIFO is empty (sreg %02x)\n",
+	       zorro_esp_read8(esp, ESP_STATUS));
+	return 0;
+}
+
+static inline int zorro_esp_wait_for_intr(struct esp *esp)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+	int i = 500000;
+
+	do {
+		esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
+		if (esp->sreg & ESP_STAT_INTR)
+			return 0;
+
+		udelay(2);
+	} while (--i);
+
+	pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
+	zep->error = 1;
+	return 1;
+}
+
+/*
+ * PIO macros as used in mac_esp.c.
+ * Note that addr and fifo arguments are local-scope variables declared
+ * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
+ * and addr and fifo are referenced in each use of the macros so there
+ * is no need to pass them as macro parameters.
+ */
+#define ZORRO_ESP_PIO_LOOP(operands, reg1) \
+	asm volatile ( \
+	     "1:     moveb " operands "\n" \
+	     "       subqw #1,%1       \n" \
+	     "       jbne 1b           \n" \
+	     : "+a" (addr), "+r" (reg1) \
+	     : "a" (fifo));
+
+#define ZORRO_ESP_PIO_FILL(operands, reg1) \
+	asm volatile ( \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       moveb " operands "\n" \
+	     "       subqw #8,%1       \n" \
+	     "       subqw #8,%1       \n" \
+	     : "+a" (addr), "+r" (reg1) \
+	     : "a" (fifo));
+
+#define ZORRO_ESP_FIFO_SIZE 16
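For readers not fluent in m68k assembly: the LOOP macro is a tight moveb copy from the post-incremented buffer pointer to the fixed FIFO address, decrementing the count to zero, and FILL is the same transfer unrolled sixteen times with the count reduced by 16. A rough C equivalent of the LOOP case (hypothetical helper, not part of the driver):

    static inline void zorro_esp_pio_loop_c(u8 **addr,
                                            volatile u8 __iomem *fifo, u32 n)
    {
            while (n--)
                    writeb(*(*addr)++, fifo);  /* one byte per FIFO write */
    }
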
+
+static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+				 u32 dma_count, int write, u8 cmd)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+	u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+	cmd &= ~ESP_CMD_DMA;
+
+	if (write) {
+		u8 *dst = (u8 *)addr;
+		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
+
+		scsi_esp_cmd(esp, cmd);
+
+		while (1) {
+			if (!zorro_esp_wait_for_fifo(esp))
+				break;
+
+			*dst++ = zorro_esp_read8(esp, ESP_FDATA);
+			--esp_count;
+
+			if (!esp_count)
+				break;
+
+			if (zorro_esp_wait_for_intr(esp))
+				break;
+
+			if ((esp->sreg & ESP_STAT_PMASK) != phase)
+				break;
+
+			esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
+			if (esp->ireg & mask) {
+				zep->error = 1;
+				break;
+			}
+
+			if (phase == ESP_MIP)
+				scsi_esp_cmd(esp, ESP_CMD_MOK);
+
+			scsi_esp_cmd(esp, ESP_CMD_TI);
+		}
+	} else {	/* unused, as long as we only handle MIP here */
+		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+		if (esp_count >= ZORRO_ESP_FIFO_SIZE)
+			ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
+		else
+			ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)
+
+		scsi_esp_cmd(esp, cmd);
+
+		while (esp_count) {
+			unsigned int n;
+
+			if (zorro_esp_wait_for_intr(esp))
+				break;
+
+			if ((esp->sreg & ESP_STAT_PMASK) != phase)
+				break;
+
+			esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
+			if (esp->ireg & ~ESP_INTR_BSERV) {
+				zep->error = 1;
+				break;
+			}
+
+			n = ZORRO_ESP_FIFO_SIZE -
+			    (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
+			if (n > esp_count)
+				n = esp_count;
+
+			if (n == ZORRO_ESP_FIFO_SIZE)
+				ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
+			else {
+				esp_count -= n;
+				ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
+			}
+
+			scsi_esp_cmd(esp, ESP_CMD_TI);
+		}
+	}
+}
+
+/* Blizzard 1230/60 SCSI-IV DMA */
+
+static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
+			u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+	struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+	zep->error = 0;
+	/*
+	 * Use PIO if transferring message bytes to esp->command_block_dma.
+	 * PIO requires a virtual address, so substitute esp->command_block
+	 * for addr.
+	 */
+	if (phase == ESP_MIP && addr == esp->command_block_dma) {
+		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+					esp_count, dma_count, write, cmd);
+		return;
+	}
+
+	if (write)
+		/* DMA receive */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_FROM_DEVICE);
+	else
+		/* DMA send */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_TO_DEVICE);
+
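+	/*
+	 * The Blizzard DMA engine takes the transfer address shifted
+	 * right by one bit; the transfer direction is encoded in the
+	 * DMA_WRITE bit of the shifted address.
+	 */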
+	addr >>= 1;
+	if (write)
+		addr &= ~(DMA_WRITE);
+	else
+		addr |= DMA_WRITE;
+
+	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
+	writeb((addr >> 24) & 0xff, &dregs->dma_addr);
+	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
+	writeb((addr >>  8) & 0xff, &dregs->dma_addr);
+	writeb(addr & 0xff, &dregs->dma_addr);
+
+	scsi_esp_cmd(esp, ESP_CMD_DMA);
+	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+	scsi_esp_cmd(esp, cmd);
+}
+
+/* Blizzard 1230-II DMA */
+
+static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
+			u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+	struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+	zep->error = 0;
+	/* Use PIO if transferring message bytes to esp->command_block_dma */
+	if (phase == ESP_MIP && addr == esp->command_block_dma) {
+		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+					esp_count, dma_count, write, cmd);
+		return;
+	}
+
+	if (write)
+		/* DMA receive */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_FROM_DEVICE);
+	else
+		/* DMA send */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_TO_DEVICE);
+
+	addr >>= 1;
+	if (write)
+		addr &= ~(DMA_WRITE);
+	else
+		addr |= DMA_WRITE;
+
+	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
+	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
+	writeb((addr >>  8) & 0xff, &dregs->dma_addr);
+	writeb(addr & 0xff, &dregs->dma_addr);
+
+	scsi_esp_cmd(esp, ESP_CMD_DMA);
+	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+	scsi_esp_cmd(esp, cmd);
+}
+
+/* Blizzard 2060 DMA */
+
+static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
+			u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+	struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+	zep->error = 0;
+	/* Use PIO if transferring message bytes to esp->command_block_dma */
+	if (phase == ESP_MIP && addr == esp->command_block_dma) {
+		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+					esp_count, dma_count, write, cmd);
+		return;
+	}
+
+	if (write)
+		/* DMA receive */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_FROM_DEVICE);
+	else
+		/* DMA send */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_TO_DEVICE);
+
+	addr >>= 1;
+	if (write)
+		addr &= ~(DMA_WRITE);
+	else
+		addr |= DMA_WRITE;
+
+	writeb(addr & 0xff, &dregs->dma_addr3);
+	writeb((addr >>  8) & 0xff, &dregs->dma_addr2);
+	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
+	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
+
+	scsi_esp_cmd(esp, ESP_CMD_DMA);
+	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+	scsi_esp_cmd(esp, cmd);
+}
+
+/* Cyberstorm I DMA */
+
+static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
+			u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
+	unsigned char *ctrl_data = &zep->ctrl_data;
+
+	zep->error = 0;
+	/* Use PIO if transferring message bytes to esp->command_block_dma */
+	if (phase == ESP_MIP && addr == esp->command_block_dma) {
+		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+					esp_count, dma_count, write, cmd);
+		return;
+	}
+
+	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
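+	/*
+	 * Bit 0 of the DMA address encodes the direction here: it is
+	 * cleared for a transfer into memory and set for a transfer
+	 * out of memory.
+	 */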
+	if (write) {
+		/* DMA receive */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_FROM_DEVICE);
+		addr &= ~(1);
+	} else {
+		/* DMA send */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_TO_DEVICE);
+		addr |= 1;
+	}
+
+	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
+	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
+	writeb((addr >>  8) & 0xff, &dregs->dma_addr2);
+	writeb(addr & 0xff, &dregs->dma_addr3);
+
+	if (write)
+		*ctrl_data &= ~(CYBER_DMA_WRITE);
+	else
+		*ctrl_data |= CYBER_DMA_WRITE;
+
+	*ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
+
+	writeb(*ctrl_data, &dregs->ctrl_reg);
+
+	scsi_esp_cmd(esp, cmd);
+}
+
+/* Cyberstorm II DMA */
+
+static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
+			u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+	struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+	zep->error = 0;
+	/* Use PIO if transferring message bytes to esp->command_block_dma */
+	if (phase == ESP_MIP && addr == esp->command_block_dma) {
+		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+					esp_count, dma_count, write, cmd);
+		return;
+	}
+
+	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+	if (write) {
+		/* DMA receive */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_FROM_DEVICE);
+		addr &= ~(1);
+	} else {
+		/* DMA send */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_TO_DEVICE);
+		addr |= 1;
+	}
+
+	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
+	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
+	writeb((addr >>  8) & 0xff, &dregs->dma_addr2);
+	writeb(addr & 0xff, &dregs->dma_addr3);
+
+	scsi_esp_cmd(esp, cmd);
+}
+
+/* Fastlane DMA */
+
+static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
+			u32 esp_count, u32 dma_count, int write, u8 cmd)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
+	u8 phase = esp->sreg & ESP_STAT_PMASK;
+	unsigned char *ctrl_data = &zep->ctrl_data;
+
+	zep->error = 0;
+	/* Use PIO if transferring message bytes to esp->command_block_dma */
+	if (phase == ESP_MIP && addr == esp->command_block_dma) {
+		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
+					esp_count, dma_count, write, cmd);
+		return;
+	}
+
+	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
+
+	if (write) {
+		/* DMA receive */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_FROM_DEVICE);
+		addr &= ~(1);
+	} else {
+		/* DMA send */
+		dma_sync_single_for_device(esp->dev, addr, esp_count,
+				DMA_TO_DEVICE);
+		addr |= 1;
+	}
+
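+	/*
+	 * Writing the transfer address to board_base plus its low
+	 * 24 bits presents the address on the Zorro bus; the Fastlane
+	 * presumably latches its DMA address from this access.
+	 */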
+	writeb(0, &dregs->clear_strobe);
+	z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));
+
+	if (write) {
+		*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
+				FASTLANE_DMA_ENABLE;
+	} else {
+		*ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
+				FASTLANE_DMA_ENABLE |
+				FASTLANE_DMA_WRITE);
+	}
+
+	writeb(*ctrl_data, &dregs->ctrl_reg);
+
+	scsi_esp_cmd(esp, cmd);
+}
+
+static int zorro_esp_dma_error(struct esp *esp)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
+
+	/* check for error in case we've been doing PIO */
+	if (zep->error == 1)
+		return 1;
+
+	/* do nothing - there seems to be no way to check for DMA errors */
+	return 0;
+}
+
+/* per-board ESP driver ops */
+
+static const struct esp_driver_ops blz1230_esp_ops = {
+	.esp_write8		= zorro_esp_write8,
+	.esp_read8		= zorro_esp_read8,
+	.map_single		= zorro_esp_map_single,
+	.map_sg			= zorro_esp_map_sg,
+	.unmap_single		= zorro_esp_unmap_single,
+	.unmap_sg		= zorro_esp_unmap_sg,
+	.irq_pending		= zorro_esp_irq_pending,
+	.dma_length_limit	= zorro_esp_dma_length_limit,
+	.reset_dma		= zorro_esp_reset_dma,
+	.dma_drain		= zorro_esp_dma_drain,
+	.dma_invalidate		= zorro_esp_dma_invalidate,
+	.send_dma_cmd		= zorro_esp_send_blz1230_dma_cmd,
+	.dma_error		= zorro_esp_dma_error,
+};
+
+static const struct esp_driver_ops blz1230II_esp_ops = {
+	.esp_write8		= zorro_esp_write8,
+	.esp_read8		= zorro_esp_read8,
+	.map_single		= zorro_esp_map_single,
+	.map_sg			= zorro_esp_map_sg,
+	.unmap_single		= zorro_esp_unmap_single,
+	.unmap_sg		= zorro_esp_unmap_sg,
+	.irq_pending		= zorro_esp_irq_pending,
+	.dma_length_limit	= zorro_esp_dma_length_limit,
+	.reset_dma		= zorro_esp_reset_dma,
+	.dma_drain		= zorro_esp_dma_drain,
+	.dma_invalidate		= zorro_esp_dma_invalidate,
+	.send_dma_cmd		= zorro_esp_send_blz1230II_dma_cmd,
+	.dma_error		= zorro_esp_dma_error,
+};
+
+static const struct esp_driver_ops blz2060_esp_ops = {
+	.esp_write8		= zorro_esp_write8,
+	.esp_read8		= zorro_esp_read8,
+	.map_single		= zorro_esp_map_single,
+	.map_sg			= zorro_esp_map_sg,
+	.unmap_single		= zorro_esp_unmap_single,
+	.unmap_sg		= zorro_esp_unmap_sg,
+	.irq_pending		= zorro_esp_irq_pending,
+	.dma_length_limit	= zorro_esp_dma_length_limit,
+	.reset_dma		= zorro_esp_reset_dma,
+	.dma_drain		= zorro_esp_dma_drain,
+	.dma_invalidate		= zorro_esp_dma_invalidate,
+	.send_dma_cmd		= zorro_esp_send_blz2060_dma_cmd,
+	.dma_error		= zorro_esp_dma_error,
+};
+
+static const struct esp_driver_ops cyber_esp_ops = {
+	.esp_write8		= zorro_esp_write8,
+	.esp_read8		= zorro_esp_read8,
+	.map_single		= zorro_esp_map_single,
+	.map_sg			= zorro_esp_map_sg,
+	.unmap_single		= zorro_esp_unmap_single,
+	.unmap_sg		= zorro_esp_unmap_sg,
+	.irq_pending		= cyber_esp_irq_pending,
+	.dma_length_limit	= zorro_esp_dma_length_limit,
+	.reset_dma		= zorro_esp_reset_dma,
+	.dma_drain		= zorro_esp_dma_drain,
+	.dma_invalidate		= zorro_esp_dma_invalidate,
+	.send_dma_cmd		= zorro_esp_send_cyber_dma_cmd,
+	.dma_error		= zorro_esp_dma_error,
+};
+
+static const struct esp_driver_ops cyberII_esp_ops = {
+	.esp_write8		= zorro_esp_write8,
+	.esp_read8		= zorro_esp_read8,
+	.map_single		= zorro_esp_map_single,
+	.map_sg			= zorro_esp_map_sg,
+	.unmap_single		= zorro_esp_unmap_single,
+	.unmap_sg		= zorro_esp_unmap_sg,
+	.irq_pending		= zorro_esp_irq_pending,
+	.dma_length_limit	= zorro_esp_dma_length_limit,
+	.reset_dma		= zorro_esp_reset_dma,
+	.dma_drain		= zorro_esp_dma_drain,
+	.dma_invalidate		= zorro_esp_dma_invalidate,
+	.send_dma_cmd		= zorro_esp_send_cyberII_dma_cmd,
+	.dma_error		= zorro_esp_dma_error,
+};
+
+static const struct esp_driver_ops fastlane_esp_ops = {
+	.esp_write8		= zorro_esp_write8,
+	.esp_read8		= zorro_esp_read8,
+	.map_single		= zorro_esp_map_single,
+	.map_sg			= zorro_esp_map_sg,
+	.unmap_single		= zorro_esp_unmap_single,
+	.unmap_sg		= zorro_esp_unmap_sg,
+	.irq_pending		= fastlane_esp_irq_pending,
+	.dma_length_limit	= zorro_esp_dma_length_limit,
+	.reset_dma		= zorro_esp_reset_dma,
+	.dma_drain		= zorro_esp_dma_drain,
+	.dma_invalidate		= fastlane_esp_dma_invalidate,
+	.send_dma_cmd		= zorro_esp_send_fastlane_dma_cmd,
+	.dma_error		= zorro_esp_dma_error,
+};
+
+/* Zorro driver config data */
+
+struct zorro_driver_data {
+	const char *name;
+	unsigned long offset;
+	unsigned long dma_offset;
+	int absolute;	/* offset is absolute address */
+	int scsi_option;	/* board may ship without the SCSI option */
+	const struct esp_driver_ops *esp_ops;
+};
+
+/* board types */
+
+enum {
+	ZORRO_BLZ1230,
+	ZORRO_BLZ1230II,
+	ZORRO_BLZ2060,
+	ZORRO_CYBER,
+	ZORRO_CYBERII,
+	ZORRO_FASTLANE,
+};
+
+/* per-board config data */
+
+static const struct zorro_driver_data zorro_esp_boards[] = {
+	[ZORRO_BLZ1230] = {
+				.name		= "Blizzard 1230",
+				.offset		= 0x8000,
+				.dma_offset	= 0x10000,
+				.scsi_option	= 1,
+				.esp_ops	= &blz1230_esp_ops,
+	},
+	[ZORRO_BLZ1230II] = {
+				.name		= "Blizzard 1230II",
+				.offset		= 0x10000,
+				.dma_offset	= 0x10021,
+				.scsi_option	= 1,
+				.esp_ops	= &blz1230II_esp_ops,
+	},
+	[ZORRO_BLZ2060] = {
+				.name		= "Blizzard 2060",
+				.offset		= 0x1ff00,
+				.dma_offset	= 0x1ffe0,
+				.esp_ops	= &blz2060_esp_ops,
+	},
+	[ZORRO_CYBER] = {
+				.name		= "CyberStormI",
+				.offset		= 0xf400,
+				.dma_offset	= 0xf800,
+				.esp_ops	= &cyber_esp_ops,
+	},
+	[ZORRO_CYBERII] = {
+				.name		= "CyberStormII",
+				.offset		= 0x1ff03,
+				.dma_offset	= 0x1ff43,
+				.scsi_option	= 1,
+				.esp_ops	= &cyberII_esp_ops,
+	},
+	[ZORRO_FASTLANE] = {
+				.name		= "Fastlane",
+				.offset		= 0x1000001,
+				.dma_offset	= 0x1000041,
+				.esp_ops	= &fastlane_esp_ops,
+	},
+};
+
+static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
+	{	/* Blizzard 1230 IV */
+		.id = ZORRO_ID(PHASE5, 0x11, 0),
+		.driver_data = ZORRO_BLZ1230,
+	},
+	{	/* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
+		.id = ZORRO_ID(PHASE5, 0x0B, 0),
+		.driver_data = ZORRO_BLZ1230II,
+	},
+	{	/* Blizzard 2060 */
+		.id = ZORRO_ID(PHASE5, 0x18, 0),
+		.driver_data = ZORRO_BLZ2060,
+	},
+	{	/* Cyberstorm */
+		.id = ZORRO_ID(PHASE5, 0x0C, 0),
+		.driver_data = ZORRO_CYBER,
+	},
+	{	/* Cyberstorm II */
+		.id = ZORRO_ID(PHASE5, 0x19, 0),
+		.driver_data = ZORRO_CYBERII,
+	},
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);
+
+static int zorro_esp_probe(struct zorro_dev *z,
+			   const struct zorro_device_id *ent)
+{
+	struct scsi_host_template *tpnt = &scsi_esp_template;
+	struct Scsi_Host *host;
+	struct esp *esp;
+	const struct zorro_driver_data *zdd;
+	struct zorro_esp_priv *zep;
+	unsigned long board, ioaddr, dmaaddr;
+	int err;
+
+	board = zorro_resource_start(z);
+	zdd = &zorro_esp_boards[ent->driver_data];
+
+	pr_info("%s found at address 0x%lx.\n", zdd->name, board);
+
+	zep = kzalloc(sizeof(*zep), GFP_KERNEL);
+	if (!zep) {
+		pr_err("Can't allocate device private data!\n");
+		return -ENOMEM;
+	}
+
+	/* let's figure out whether we have a Zorro II or Zorro III board */
+	if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
+		if (board > 0xffffff)
+			zep->zorro3 = 1;
+	} else {
+		/*
+		 * Even though most of these boards identify as Zorro II,
+		 * they are in fact CPU expansion slot boards and have full
+		 * access to all of memory. Fix up DMA bitmask here.
+		 */
+		z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	}
+
+	/*
+	 * If this is a Zorro III board and the ID matches the Fastlane,
+	 * our device table entry contains data for the Blizzard 1230 II
+	 * board, which shares the same ID. Fix up the device table entry
+	 * here.
+	 * TODO: Some Cyberstorm 060 boards also share this ID but would
+	 * need to use the Cyberstorm I driver data ... we catch this by
+	 * checking for the presence of the ESP chip later, but don't try
+	 * to fix it up yet.
+	 */
+	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
+		pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
+			zdd->name, board);
+		zdd = &zorro_esp_boards[ZORRO_FASTLANE];
+	}
+
+	if (zdd->absolute) {
+		ioaddr  = zdd->offset;
+		dmaaddr = zdd->dma_offset;
+	} else {
+		ioaddr  = board + zdd->offset;
+		dmaaddr = board + zdd->dma_offset;
+	}
+
+	if (!zorro_request_device(z, zdd->name)) {
+		pr_err("cannot reserve region 0x%lx, abort\n",
+		       board);
+		err = -EBUSY;
+		goto fail_free_zep;
+	}
+
+	host = scsi_host_alloc(tpnt, sizeof(struct esp));
+
+	if (!host) {
+		pr_err("No host detected; board configuration problem?\n");
+		err = -ENOMEM;
+		goto fail_release_device;
+	}
+
+	host->base		= ioaddr;
+	host->this_id		= 7;
+
+	esp			= shost_priv(host);
+	esp->host		= host;
+	esp->dev		= &z->dev;
+
+	esp->scsi_id		= host->this_id;
+	esp->scsi_id_mask	= (1 << esp->scsi_id);
+
+	esp->cfreq = 40000000;
+
+	zep->esp = esp;
+
+	dev_set_drvdata(esp->dev, zep);
+
+	/* additional setup required for Fastlane */
+	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
+		/* map full address space up to ESP base for DMA */
+		zep->board_base = ioremap_nocache(board,
+						  FASTLANE_ESP_ADDR - 1);
+		if (!zep->board_base) {
+			pr_err("Cannot allocate board address space\n");
+			err = -ENOMEM;
+			goto fail_free_host;
+		}
+		/* initialize DMA control shadow register */
+		zep->ctrl_data = (FASTLANE_DMA_FCODE |
+				  FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
+	}
+
+	esp->ops = zdd->esp_ops;
+
+	if (ioaddr > 0xffffff)
+		esp->regs = ioremap_nocache(ioaddr, 0x20);
+	else
+		/* ZorroII address space remapped nocache by early startup */
+		esp->regs = ZTWO_VADDR(ioaddr);
+
+	if (!esp->regs) {
+		err = -ENOMEM;
+		goto fail_unmap_fastlane;
+	}
+
+	/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
+	if (zdd->scsi_option) {
+		zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
+		if (zorro_esp_read8(esp, ESP_CFG1) !=
+		    (ESP_CONFIG1_PENABLE | 7)) {
+			err = -ENODEV;
+			goto fail_unmap_regs;
+		}
+	}
+
+	if (zep->zorro3) {
+		/*
+		 * Only Fastlane Z3 for now - add switch for correct struct
+		 * dma_registers size if adding any more
+		 */
+		esp->dma_regs = ioremap_nocache(dmaaddr,
+				sizeof(struct fastlane_dma_registers));
+	} else {
+		/* ZorroII address space remapped nocache by early startup */
+		esp->dma_regs = ZTWO_VADDR(dmaaddr);
+	}
+
+	if (!esp->dma_regs) {
+		err = -ENOMEM;
+		goto fail_unmap_regs;
+	}
+
+	esp->command_block = dma_alloc_coherent(esp->dev, 16,
+						&esp->command_block_dma,
+						GFP_KERNEL);
+
+	if (!esp->command_block) {
+		err = -ENOMEM;
+		goto fail_unmap_dma_regs;
+	}
+
+	host->irq = IRQ_AMIGA_PORTS;
+	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
+			  "Amiga Zorro ESP", esp);
+	if (err < 0) {
+		err = -ENODEV;
+		goto fail_free_command_block;
+	}
+
+	/* register the chip */
+	err = scsi_esp_register(esp, &z->dev);
+
+	if (err) {
+		err = -ENOMEM;
+		goto fail_free_irq;
+	}
+
+	return 0;
+
+fail_free_irq:
+	free_irq(host->irq, esp);
+
+fail_free_command_block:
+	dma_free_coherent(esp->dev, 16,
+			  esp->command_block,
+			  esp->command_block_dma);
+
+fail_unmap_dma_regs:
+	if (zep->zorro3)
+		iounmap(esp->dma_regs);
+
+fail_unmap_regs:
+	if (ioaddr > 0xffffff)
+		iounmap(esp->regs);
+
+fail_unmap_fastlane:
+	if (zep->zorro3)
+		iounmap(zep->board_base);
+
+fail_free_host:
+	scsi_host_put(host);
+
+fail_release_device:
+	zorro_release_device(z);
+
+fail_free_zep:
+	kfree(zep);
+
+	return err;
+}
+
+static void zorro_esp_remove(struct zorro_dev *z)
+{
+	struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
+	struct esp *esp	= zep->esp;
+	struct Scsi_Host *host = esp->host;
+
+	scsi_esp_unregister(esp);
+
+	free_irq(host->irq, esp);
+	dma_free_coherent(esp->dev, 16,
+			  esp->command_block,
+			  esp->command_block_dma);
+
+	if (zep->zorro3) {
+		iounmap(zep->board_base);
+		iounmap(esp->dma_regs);
+	}
+
+	if (host->base > 0xffffff)
+		iounmap(esp->regs);
+
+	scsi_host_put(host);
+
+	zorro_release_device(z);
+
+	kfree(zep);
+}
+
+static struct zorro_driver zorro_esp_driver = {
+	.name	  = KBUILD_MODNAME,
+	.id_table = zorro_esp_zorro_tbl,
+	.probe	  = zorro_esp_probe,
+	.remove	  = zorro_esp_remove,
+};
+
+static int __init zorro_esp_scsi_init(void)
+{
+	return zorro_register_driver(&zorro_esp_driver);
+}
+
+static void __exit zorro_esp_scsi_exit(void)
+{
+	zorro_unregister_driver(&zorro_esp_driver);
+}
+
+module_init(zorro_esp_scsi_init);
+module_exit(zorro_esp_scsi_exit);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 3f4bf126eed066771e72237dd530abf2fe67b45f..5ccef7d597fa34c95cf5ab5e821ce4e95e16c2b7 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -155,6 +155,8 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
 
 	mutex_unlock(&g_tf_lock);
 
+	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
+
 	return read_bytes;
 }
 
@@ -3213,6 +3215,27 @@ void target_setup_backend_cits(struct target_backend *tb)
 	target_core_setup_dev_stat_cit(tb);
 }
 
+static void target_init_dbroot(void)
+{
+	struct file *fp;
+
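+	/*
+	 * Prefer DB_ROOT_PREFERRED over the compiled-in default, but
+	 * only switch db_root if the preferred path can be opened and
+	 * is a directory.
+	 */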
+	snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
+	fp = filp_open(db_root_stage, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		pr_err("db_root: cannot open: %s\n", db_root_stage);
+		return;
+	}
+	if (!S_ISDIR(file_inode(fp)->i_mode)) {
+		filp_close(fp, NULL);
+		pr_err("db_root: not a valid directory: %s\n", db_root_stage);
+		return;
+	}
+	filp_close(fp, NULL);
+
+	strncpy(db_root, db_root_stage, DB_ROOT_LEN);
+	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
+}
+
 static int __init target_core_init_configfs(void)
 {
 	struct configfs_subsystem *subsys = &target_core_fabrics;
@@ -3293,6 +3316,8 @@ static int __init target_core_init_configfs(void)
 	if (ret < 0)
 		goto out;
 
+	target_init_dbroot();
+
 	return 0;
 
 out:
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 07c814c42648faa00d7bca39ad339d7ef916ec99..60429011292a2c4c2fa4104a90414ae948f956a7 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -427,8 +427,8 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct scatterlist *sg = &cmd->t_data_sg[0];
-	unsigned char *buf, zero = 0x00, *p = &zero;
-	int rc, ret;
+	unsigned char *buf, *not_zero;
+	int ret;
 
 	buf = kmap(sg_page(sg)) + sg->offset;
 	if (!buf)
@@ -437,10 +437,10 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
 	 * Fall back to block_execute_write_same() slow-path if
 	 * incoming WRITE_SAME payload does not contain zeros.
 	 */
-	rc = memcmp(buf, p, cmd->data_length);
+	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
 	kunmap(sg_page(sg));
 
-	if (rc)
+	if (not_zero)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
 	ret = blkdev_issue_zeroout(bdev,
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 1d5afc3ae017cfbae8519fcb43a884885b617da1..dead30b1d32c977518684168ca45e93624db84f9 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -166,6 +166,7 @@ extern struct se_portal_group xcopy_pt_tpg;
 /* target_core_configfs.c */
 #define DB_ROOT_LEN		4096
 #define	DB_ROOT_DEFAULT		"/var/target"
+#define	DB_ROOT_PREFERRED	"/etc/target"
 
 extern char db_root[];
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4558f2e1fe1bb5ae3b4907f6ba4b86cb7f1d2e3a..3500aa5927f23a2bdcdf798a3524682cd9e0cab3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1431,7 +1431,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
 	return 0;
 }
 
-/*
+/**
  * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
  * 			 se_cmd + use pre-allocated SGL memory.
  *
@@ -1441,7 +1441,7 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
  * @sense: pointer to SCSI sense buffer
  * @unpacked_lun: unpacked LUN to reference for struct se_lun
  * @data_length: fabric expected data transfer length
- * @task_addr: SAM task attribute
+ * @task_attr: SAM task attribute
  * @data_dir: DMA data direction
  * @flags: flags for command submission from target_sc_flags_tables
  * @sgl: struct scatterlist memory for unidirectional mapping
@@ -1578,7 +1578,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
 }
 EXPORT_SYMBOL(target_submit_cmd_map_sgls);
 
-/*
+/**
  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
  *
  * @se_cmd: command descriptor to submit
@@ -1587,7 +1587,7 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
  * @sense: pointer to SCSI sense buffer
  * @unpacked_lun: unpacked LUN to reference for struct se_lun
  * @data_length: fabric expected data transfer length
- * @task_addr: SAM task attribute
+ * @task_attr: SAM task attribute
  * @data_dir: DMA data direction
  * @flags: flags for command submission from target_sc_flags_tables
  *
@@ -1654,7 +1654,7 @@ static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
  * @se_sess: associated se_sess for endpoint
  * @sense: pointer to SCSI sense buffer
  * @unpacked_lun: unpacked LUN to reference for struct se_lun
- * @fabric_context: fabric context for TMR req
+ * @fabric_tmr_ptr: fabric context for TMR req
  * @tm_type: Type of TM request
  * @gfp: gfp type for caller
  * @tag: referenced task tag for TMR_ABORT_TASK
@@ -2606,7 +2606,8 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 }
 EXPORT_SYMBOL(transport_generic_free_cmd);
 
-/* target_get_sess_cmd - Add command to active ->sess_cmd_list
+/**
+ * target_get_sess_cmd - Add command to active ->sess_cmd_list
  * @se_cmd:	command descriptor to add
  * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
  */
@@ -2800,7 +2801,8 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
 }
 EXPORT_SYMBOL(target_show_cmd);
 
-/* target_sess_cmd_list_set_waiting - Flag all commands in
+/**
+ * target_sess_cmd_list_set_waiting - Flag all commands in
  *         sess_cmd_list to complete cmd_wait_comp.  Set
  *         sess_tearing_down so no more commands are queued.
  * @se_sess:	session to flag
@@ -2835,7 +2837,8 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
 }
 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
 
-/* target_wait_for_sess_cmds - Wait for outstanding descriptors
+/**
+ * target_wait_for_sess_cmds - Wait for outstanding descriptors
  * @se_sess:    session to wait for active I/O
  */
 void target_wait_for_sess_cmds(struct se_session *se_sess)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 4ad89ea71a70118dad2e5d960f89f54431a1ad67..ae0aea9a3aad1b11e616f6a8ddb57551e7c6acb0 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -42,7 +42,11 @@
 
 #include <linux/target_core_user.h>
 
-/*
+/**
+ * DOC: Userspace I/O
+ * Userspace I/O
+ * -------------
+ *
  * Define a shared-memory interface for LIO to pass SCSI commands and
  * data to userspace for processing. This is to allow backends that
  * are too complex for in-kernel support to be possible.
@@ -53,7 +57,7 @@
  * See the .h file for how the ring is laid out. Note that while the
  * command ring is defined, the particulars of the data area are
  * not. Offset values in the command entry point to other locations
- * internal to the mmap()ed area. There is separate space outside the
+ * internal to the mmap-ed area. There is separate space outside the
  * command ring for data buffers. This leaves maximum flexibility for
  * moving buffer allocations, or even page flipping or other
  * allocation techniques, without altering the command ring layout.
@@ -1382,7 +1386,7 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
 	return page;
 }
 
-static int tcmu_vma_fault(struct vm_fault *vmf)
+static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
 {
 	struct tcmu_dev *udev = vmf->vma->vm_private_data;
 	struct uio_info *info = &udev->uio_info;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 192ed8fbc403ce87eef231868dcb8bf3d3a50b0d..9ac954ee577ef50dc97c0bb28f3f2319756cf3f8 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -35,6 +35,7 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/interrupt.h>
+#include <linux/reciprocal_div.h>
 
 #define MAX_PAGE_BUFFER_COUNT				32
 #define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */
@@ -120,6 +121,7 @@ struct hv_ring_buffer {
 struct hv_ring_buffer_info {
 	struct hv_ring_buffer *ring_buffer;
 	u32 ring_size;			/* Include the shared header */
+	struct reciprocal_value ring_size_div10_reciprocal;
 	spinlock_t ring_lock;
 
 	u32 ring_datasize;		/* < ring_size */
@@ -154,6 +156,16 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
 	return write;
 }
 
+static inline u32 hv_get_avail_to_write_percent(
+		const struct hv_ring_buffer_info *rbi)
+{
+	u32 avail_write = hv_get_bytes_to_write(rbi);
+
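+	/*
+	 * (x << 3) + (x << 1) == x * 10, so this divides
+	 * avail_write * 10 by ring_size / 10 (via the precomputed
+	 * reciprocal), i.e. it returns the free space as a percentage
+	 * of the ring size without a slow division.
+	 */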
+	return reciprocal_divide(
+			(avail_write << 3) + (avail_write << 1),
+			rbi->ring_size_div10_reciprocal);
+}
+
 /*
  * VMBUS version is 32 bit entity broken up into
  * two 16 bit quantities: major_number. minor_number.
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 1795fecdea171f5034b8df58eeeff95634955c98..0619ebf4d4758b1520521c0a3e9d89f95e35f110 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1130,6 +1130,8 @@ extern void ata_sas_async_probe(struct ata_port *ap);
 extern int ata_sas_sync_probe(struct ata_port *ap);
 extern int ata_sas_port_init(struct ata_port *);
 extern int ata_sas_port_start(struct ata_port *ap);
+extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_sas_tport_delete(struct ata_port *ap);
 extern void ata_sas_port_stop(struct ata_port *ap);
 extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
 extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 41a1ae01099397dc0e6553bbb60155df33448ef4..2af7f77866d035fd125f363eb432cddfdacf83bf 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -72,16 +72,13 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 }
 
 /**
- * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * const_ilog2 - log base 2 of 32-bit or a 64-bit constant unsigned value
  * @n: parameter
  *
- * constant-capable log of base 2 calculation
- * - this can be used to initialise global variables from constant data, hence
- * the massive ternary operator construction
- *
- * selects the appropriately-sized optimised version depending on sizeof(n)
+ * Use this where sparse expects a true constant expression, e.g. for array
+ * indices.
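+ *
+ * For example, const_ilog2(8) evaluates to the constant 3, while a
+ * non-constant argument evaluates to -1.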
  */
-#define ilog2(n)				\
+#define const_ilog2(n)				\
 (						\
 	__builtin_constant_p(n) ? (		\
 		(n) < 2 ? 0 :			\
@@ -147,10 +144,26 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 		(n) & (1ULL <<  4) ?  4 :	\
 		(n) & (1ULL <<  3) ?  3 :	\
 		(n) & (1ULL <<  2) ?  2 :	\
-		1 ) :				\
-	(sizeof(n) <= 4) ?			\
-	__ilog2_u32(n) :			\
-	__ilog2_u64(n)				\
+		1) :				\
+	-1)
+
+/**
+ * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * @n: parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+	__builtin_constant_p(n) ?	\
+	const_ilog2(n) :		\
+	(sizeof(n) <= 4) ?		\
+	__ilog2_u32(n) :		\
+	__ilog2_u64(n)			\
  )
 
 /**
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 7ae177c8e3993c0c39ac6539d149509b575cc1ec..4c36af6edd79b60081611d4729ad03570b471fc5 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -15,7 +15,7 @@ struct scsi_cmnd;
 struct scsi_lun;
 struct scsi_sense_hdr;
 
-typedef unsigned int __bitwise blist_flags_t;
+typedef __u64 __bitwise blist_flags_t;
 
 struct scsi_mode_data {
 	__u32	length;
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index ea67c32e870e9c330d3d89b1a9a1366077869809..3fdb322d4c4bbeaba31fc0ac0a5a912d924637f6 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -6,55 +6,80 @@
  */
 
 /* Only scan LUN 0 */
-#define BLIST_NOLUN		((__force blist_flags_t)(1 << 0))
+#define BLIST_NOLUN		((__force blist_flags_t)(1ULL << 0))
 /* Known to have LUNs, force scanning.
  * DEPRECATED: Use max_luns=N */
-#define BLIST_FORCELUN		((__force blist_flags_t)(1 << 1))
+#define BLIST_FORCELUN		((__force blist_flags_t)(1ULL << 1))
 /* Flag for broken handshaking */
-#define BLIST_BORKEN		((__force blist_flags_t)(1 << 2))
+#define BLIST_BORKEN		((__force blist_flags_t)(1ULL << 2))
 /* unlock by special command */
-#define BLIST_KEY		((__force blist_flags_t)(1 << 3))
+#define BLIST_KEY		((__force blist_flags_t)(1ULL << 3))
 /* Do not use LUNs in parallel */
-#define BLIST_SINGLELUN		((__force blist_flags_t)(1 << 4))
+#define BLIST_SINGLELUN		((__force blist_flags_t)(1ULL << 4))
 /* Buggy Tagged Command Queuing */
-#define BLIST_NOTQ		((__force blist_flags_t)(1 << 5))
+#define BLIST_NOTQ		((__force blist_flags_t)(1ULL << 5))
 /* Non consecutive LUN numbering */
-#define BLIST_SPARSELUN		((__force blist_flags_t)(1 << 6))
+#define BLIST_SPARSELUN		((__force blist_flags_t)(1ULL << 6))
 /* Avoid LUNS >= 5 */
-#define BLIST_MAX5LUN		((__force blist_flags_t)(1 << 7))
+#define BLIST_MAX5LUN		((__force blist_flags_t)(1ULL << 7))
 /* Treat as (removable) CD-ROM */
-#define BLIST_ISROM		((__force blist_flags_t)(1 << 8))
+#define BLIST_ISROM		((__force blist_flags_t)(1ULL << 8))
 /* LUNs past 7 on a SCSI-2 device */
-#define BLIST_LARGELUN		((__force blist_flags_t)(1 << 9))
+#define BLIST_LARGELUN		((__force blist_flags_t)(1ULL << 9))
 /* override additional length field */
-#define BLIST_INQUIRY_36	((__force blist_flags_t)(1 << 10))
+#define BLIST_INQUIRY_36	((__force blist_flags_t)(1ULL << 10))
+#define __BLIST_UNUSED_11	((__force blist_flags_t)(1ULL << 11))
 /* do not do automatic start on add */
-#define BLIST_NOSTARTONADD	((__force blist_flags_t)(1 << 12))
+#define BLIST_NOSTARTONADD	((__force blist_flags_t)(1ULL << 12))
+#define __BLIST_UNUSED_13	((__force blist_flags_t)(1ULL << 13))
+#define __BLIST_UNUSED_14	((__force blist_flags_t)(1ULL << 14))
+#define __BLIST_UNUSED_15	((__force blist_flags_t)(1ULL << 15))
+#define __BLIST_UNUSED_16	((__force blist_flags_t)(1ULL << 16))
 /* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */
-#define BLIST_REPORTLUN2	((__force blist_flags_t)(1 << 17))
+#define BLIST_REPORTLUN2	((__force blist_flags_t)(1ULL << 17))
 /* don't try REPORT_LUNS scan (SCSI-3 devs) */
-#define BLIST_NOREPORTLUN	((__force blist_flags_t)(1 << 18))
+#define BLIST_NOREPORTLUN	((__force blist_flags_t)(1ULL << 18))
 /* don't use PREVENT-ALLOW commands */
-#define BLIST_NOT_LOCKABLE	((__force blist_flags_t)(1 << 19))
+#define BLIST_NOT_LOCKABLE	((__force blist_flags_t)(1ULL << 19))
 /* device is actually for RAID config */
-#define BLIST_NO_ULD_ATTACH	((__force blist_flags_t)(1 << 20))
+#define BLIST_NO_ULD_ATTACH	((__force blist_flags_t)(1ULL << 20))
 /* select without ATN */
-#define BLIST_SELECT_NO_ATN	((__force blist_flags_t)(1 << 21))
+#define BLIST_SELECT_NO_ATN	((__force blist_flags_t)(1ULL << 21))
 /* retry HARDWARE_ERROR */
-#define BLIST_RETRY_HWERROR	((__force blist_flags_t)(1 << 22))
+#define BLIST_RETRY_HWERROR	((__force blist_flags_t)(1ULL << 22))
 /* maximum 512 sector cdb length */
-#define BLIST_MAX_512		((__force blist_flags_t)(1 << 23))
+#define BLIST_MAX_512		((__force blist_flags_t)(1ULL << 23))
+#define __BLIST_UNUSED_24	((__force blist_flags_t)(1ULL << 24))
 /* Disable T10 PI (DIF) */
-#define BLIST_NO_DIF		((__force blist_flags_t)(1 << 25))
+#define BLIST_NO_DIF		((__force blist_flags_t)(1ULL << 25))
 /* Ignore SBC-3 VPD pages */
-#define BLIST_SKIP_VPD_PAGES	((__force blist_flags_t)(1 << 26))
+#define BLIST_SKIP_VPD_PAGES	((__force blist_flags_t)(1ULL << 26))
+#define __BLIST_UNUSED_27	((__force blist_flags_t)(1ULL << 27))
 /* Attempt to read VPD pages */
-#define BLIST_TRY_VPD_PAGES	((__force blist_flags_t)(1 << 28))
+#define BLIST_TRY_VPD_PAGES	((__force blist_flags_t)(1ULL << 28))
 /* don't try to issue RSOC */
-#define BLIST_NO_RSOC		((__force blist_flags_t)(1 << 29))
+#define BLIST_NO_RSOC		((__force blist_flags_t)(1ULL << 29))
 /* maximum 1024 sector cdb length */
-#define BLIST_MAX_1024		((__force blist_flags_t)(1 << 30))
+#define BLIST_MAX_1024		((__force blist_flags_t)(1ULL << 30))
 /* Use UNMAP limit for WRITE SAME */
-#define BLIST_UNMAP_LIMIT_WS	((__force blist_flags_t)(1 << 31))
+#define BLIST_UNMAP_LIMIT_WS	((__force blist_flags_t)(1ULL << 31))
+/* Always retry ABORTED_COMMAND with Internal Target Failure */
+#define BLIST_RETRY_ITF		((__force blist_flags_t)(1ULL << 32))
+/* Always retry ABORTED_COMMAND with ASC 0xc1 */
+#define BLIST_RETRY_ASC_C1	((__force blist_flags_t)(1ULL << 33))
+
+#define __BLIST_LAST_USED BLIST_RETRY_ASC_C1
+
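+/*
+ * __BLIST_LAST_USED is a single bit, so OR-ing it with (itself - 1)
+ * sets that bit and all bits below it; the complement is a mask of
+ * every unused bit above the highest flag currently defined.
+ */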
+#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
+			       (__force blist_flags_t) \
+			       ((__force __u64)__BLIST_LAST_USED - 1ULL)))
+#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_11 | \
+			     __BLIST_UNUSED_13 | \
+			     __BLIST_UNUSED_14 | \
+			     __BLIST_UNUSED_15 | \
+			     __BLIST_UNUSED_16 | \
+			     __BLIST_UNUSED_24 | \
+			     __BLIST_UNUSED_27 | \
+			     __BLIST_HIGH_UNUSED)
 
 #endif
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 0be80f72646b1f86f40a811c86d1946cd4ea59ac..6e299349b15876d3302cc784576dd84cff6f1d66 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -9,21 +9,22 @@
 
 #define TCMU_VERSION "2.0"
 
-/*
+/**
+ * DOC: Ring Design
  * Ring Design
  * -----------
  *
  * The mmaped area is divided into three parts:
- * 1) The mailbox (struct tcmu_mailbox, below)
- * 2) The command ring
- * 3) Everything beyond the command ring (data)
+ * 1) The mailbox (struct tcmu_mailbox, below);
+ * 2) The command ring;
+ * 3) Everything beyond the command ring (data).
  *
  * The mailbox tells userspace the offset of the command ring from the
  * start of the shared memory region, and how big the command ring is.
  *
  * The kernel passes SCSI commands to userspace by putting a struct
  * tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking
- * userspace via uio's interrupt mechanism.
+ * userspace via UIO's interrupt mechanism.
  *
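+ * A minimal userspace polling loop might look like this (a sketch
+ * only; it assumes the shared region has been mmap-ed at char *map):
+ *
+ *	struct tcmu_mailbox *mb = (void *)map;
+ *
+ *	while (mb->cmd_tail != mb->cmd_head) {
+ *		struct tcmu_cmd_entry *ent =
+ *			(void *)(map + mb->cmdr_off + mb->cmd_tail);
+ *		(process or skip ent, then advance cmd_tail past it,
+ *		 modulo cmdr_size)
+ *	}
+ *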
  * tcmu_cmd_entry contains a header. If the header type is PAD,
  * userspace should skip hdr->length bytes (mod cmdr_size) to find the