diff --git a/Documentation/ABI/testing/sysfs-driver-w1_therm b/Documentation/ABI/testing/sysfs-driver-w1_therm
index 6a37dc33ffdb567af4224e6ef2e9453383ca44d3..74642c73d29c79ee6b613bea5e73526b9f2b5e17 100644
--- a/Documentation/ABI/testing/sysfs-driver-w1_therm
+++ b/Documentation/ABI/testing/sysfs-driver-w1_therm
@@ -14,7 +14,7 @@ Users:		any user space application which wants to communicate with
 		w1_term device
 
 
-What:		/sys/bus/w1/devices/.../eeprom
+What:		/sys/bus/w1/devices/.../eeprom_cmd
 Date:		May 2020
 Contact:	Akira Shimahara <akira215corp@gmail.com>
 Description:
diff --git a/Documentation/admin-guide/spkguide.txt b/Documentation/admin-guide/spkguide.txt
index 3782f6a09e97a6a6d5d84f59c9d29ff677761177..5ff6a0fe87d1e5b06c910a8c9fb2eee20d9c5b4a 100644
--- a/Documentation/admin-guide/spkguide.txt
+++ b/Documentation/admin-guide/spkguide.txt
@@ -344,6 +344,7 @@ spk key_slash = say_attributes
 spk key_8 = speakup_paste
 shift spk key_m = say_first_char
  ctrl spk key_semicolon = say_last_char
+spk key_r = read_all_doc
 
 5.  The Speakup Sys System
 
diff --git a/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml b/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml
index 5dd7ea8a78e4bafb38d4e5419df9469db9d49582..c65b9458c0b6a46a90fc3652a7c22ff14a1c9f3f 100644
--- a/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml
+++ b/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml
@@ -92,7 +92,7 @@ required:
 
 patternProperties:
   "^usb-phy@[a-f0-9]+$":
-    allOf: [ $ref: "../usb/ingenic,jz4770-phy.yaml#" ]
+    allOf: [ $ref: "../phy/ingenic,phy-usb.yaml#" ]
 
 additionalProperties: false
 
diff --git a/Documentation/devicetree/bindings/extcon/extcon-fsa9480.txt b/Documentation/devicetree/bindings/extcon/extcon-fsa9480.txt
deleted file mode 100644
index 624bd76f468e9a3565babc12b5fbca8b786deef2..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/extcon/extcon-fsa9480.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-FAIRCHILD SEMICONDUCTOR FSA9480 MICROUSB SWITCH
-
-The FSA9480 is a USB port accessory detector and switch. The FSA9480 is fully
-controlled using I2C and enables USB data, stereo and mono audio, video,
-microphone, and UART data to use a common connector port.
-
-Required properties:
- - compatible : Must be one of
-   "fcs,fsa9480"
-   "fcs,fsa880"
- - reg : Specifies i2c slave address. Must be 0x25.
- - interrupts : Should contain one entry specifying interrupt signal of
-   interrupt parent to which interrupt pin of the chip is connected.
-
- Example:
-	musb@25 {
-		compatible = "fcs,fsa9480";
-		reg = <0x25>;
-		interrupt-parent = <&gph2>;
-		interrupts = <7 0>;
-	};
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usbc-tusb320.yaml b/Documentation/devicetree/bindings/extcon/extcon-usbc-tusb320.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9875b4d5c356e470208bc3259f5a2182b161f0f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-usbc-tusb320.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/extcon-usbc-tusb320.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI TUSB320 USB Type-C CC Logic controller
+
+maintainers:
+  - Michael Auchter <michael.auchter@ni.com>
+
+properties:
+  compatible:
+    const: ti,tusb320
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+additionalProperties: false
+
+examples:
+  - |
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        tusb320@61 {
+            compatible = "ti,tusb320";
+            reg = <0x61>;
+            interrupt-parent = <&gpio>;
+            interrupts = <27 1>;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/extcon/fcs,fsa880.yaml b/Documentation/devicetree/bindings/extcon/fcs,fsa880.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ef6a246a1337816771b6d1f5c6e53b4822685067
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/fcs,fsa880.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/fcs,fsa880.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Fairchild Semiconductor FSA880, FSA9480 and compatibles
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+  The FSA880 and FSA9480 are USB port accessory detectors and switches.
+  The switch is fully controlled using I2C and enables USB data, stereo
+  and mono audio, video, microphone, and UART data to use a common
+  connector port. Compatible switches exist from other manufacturers.
+
+properties:
+  compatible:
+    enum:
+      - fcs,fsa880
+      - fcs,fsa9480
+      - ti,tsu6111
+
+  reg:
+    maxItems: 1
+    description: The I2C address for an FSA880 compatible device is
+      usually 0x25.
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        usb-switch@25 {
+            compatible = "fcs,fsa880";
+            reg = <0x25>;
+            interrupt-parent = <&gpio>;
+            interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
index 0668c45a156d3324fa41f920870b72057fb9567c..ef93c3b95424d911b64f99d1ecdc14d37097ff16 100644
--- a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
@@ -7,6 +7,7 @@ Required properties:
 	      "mediatek,mt7622-efuse", "mediatek,efuse": for MT7622
 	      "mediatek,mt7623-efuse", "mediatek,efuse": for MT7623
 	      "mediatek,mt8173-efuse" or "mediatek,efuse": for MT8173
+	      "mediatek,mt8516-efuse", "mediatek,efuse": for MT8516
 - reg: Should contain registers location and length
 
 = Data cells =
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
index 1a18b6bab35e758c80f6a090a6ad62a4c92d7714..992777c90a0bf3803ed7fb2edfe0c4a065353a2f 100644
--- a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
+++ b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
@@ -14,7 +14,18 @@ allOf:
 
 properties:
   compatible:
-    const: qcom,qfprom
+    items:
+      - enum:
+          - qcom,apq8064-qfprom
+          - qcom,apq8084-qfprom
+          - qcom,msm8974-qfprom
+          - qcom,msm8916-qfprom
+          - qcom,msm8996-qfprom
+          - qcom,msm8998-qfprom
+          - qcom,qcs404-qfprom
+          - qcom,sc7180-qfprom
+          - qcom,sdm845-qfprom
+      - const: qcom,qfprom
 
   reg:
     # If the QFPROM is read-only OS image then only the corrected region
@@ -60,7 +71,7 @@ examples:
       #size-cells = <2>;
 
       efuse@784000 {
-        compatible = "qcom,qfprom";
+        compatible = "qcom,sc7180-qfprom", "qcom,qfprom";
         reg = <0 0x00784000 0 0x8ff>,
               <0 0x00780000 0 0x7a0>,
               <0 0x00782000 0 0x100>,
@@ -85,7 +96,7 @@ examples:
       #size-cells = <2>;
 
       efuse@784000 {
-        compatible = "qcom,qfprom";
+        compatible = "qcom,sdm845-qfprom", "qcom,qfprom";
         reg = <0 0x00784000 0 0x8ff>;
         #address-cells = <1>;
         #size-cells = <1>;
diff --git a/Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml b/Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..be485f5008870d81b3055d32a373bf5dc94e1725
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2020 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/amlogic,axg-mipi-dphy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic AXG MIPI D-PHY
+
+maintainers:
+  - Neil Armstrong <narmstrong@baylibre.com>
+
+properties:
+  compatible:
+    enum:
+      - amlogic,axg-mipi-dphy
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: pclk
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    items:
+      - const: phy
+
+  "#phy-cells":
+    const: 0
+
+  phys:
+    maxItems: 1
+
+  phy-names:
+    items:
+      - const: analog
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+  - phys
+  - phy-names
+  - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    phy@ff640000 {
+            compatible = "amlogic,axg-mipi-dphy";
+            reg = <0xff640000 0x100>;
+            clocks = <&clk_mipi_dsi_phy>;
+            clock-names = "pclk";
+            resets = <&reset_phy>;
+            reset-names = "phy";
+            phys = <&mipi_pcie_analog_dphy>;
+            phy-names = "analog";
+            #phy-cells = <0>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
index 18c1ec5e19ad8de9624a8772bedcbbb9bf096123..4d01f3124e1c95d8665a32b0fd0877a7ef8014b7 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
@@ -9,27 +9,32 @@ title: Amlogic AXG shared MIPI/PCIE analog PHY
 maintainers:
   - Remi Pommarel <repk@triplefau.lt>
 
+description: |+
+  The Everything-Else Power Domains node should be the child of a syscon
+  node with the required property:
+
+  - compatible: Should be the following:
+                "amlogic,meson-gx-hhi-sysctrl", "simple-mfd", "syscon"
+
+  Refer to the the bindings described in
+  Documentation/devicetree/bindings/mfd/syscon.yaml
+
 properties:
   compatible:
     const: amlogic,axg-mipi-pcie-analog-phy
 
-  reg:
-    maxItems: 1
-
   "#phy-cells":
-    const: 1
+    const: 0
 
 required:
   - compatible
-  - reg
   - "#phy-cells"
 
 additionalProperties: false
 
 examples:
   - |
-    mpphy: phy@0 {
+    mpphy: phy {
           compatible = "amlogic,axg-mipi-pcie-analog-phy";
-          reg = <0x0 0xc>;
-          #phy-cells = <1>;
+          #phy-cells = <0>;
     };
diff --git a/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..58c3ef8004ad83d0b3d6f70b61730681344ccfe1
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,sata-phy.yaml
@@ -0,0 +1,148 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/brcm,sata-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Broadcom SATA3 PHY
+
+maintainers:
+  - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+  $nodename:
+    pattern: "^sata[-|_]phy(@.*)?$"
+
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - brcm,bcm7216-sata-phy
+              - brcm,bcm7425-sata-phy
+              - brcm,bcm7445-sata-phy
+              - brcm,bcm63138-sata-phy
+          - const: brcm,phy-sata3
+      - items:
+          - const: brcm,iproc-nsp-sata-phy
+      - items:
+          - const: brcm,iproc-ns2-sata-phy
+      - items:
+          - const: brcm,iproc-sr-sata-phy
+
+  reg:
+    minItems: 1
+    maxItems: 2
+
+  reg-names:
+    minItems: 1
+    maxItems: 2
+    items:
+      - const: phy
+      - const: phy-ctrl
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+patternProperties:
+  "^sata-phy@[0-9]+$":
+    type: object
+    description: |
+      Each port's PHY should be represented as a sub-node.
+
+    properties:
+      reg:
+        description: The SATA PHY port number
+        maxItems: 1
+
+      "#phy-cells":
+        const: 0
+
+      "brcm,enable-ssc":
+        $ref: /schemas/types.yaml#/definitions/flag
+        description: |
+          Use spread spectrum clocking (SSC) on this port
+          This property is not applicable for "brcm,iproc-ns2-sata-phy",
+          "brcm,iproc-nsp-sata-phy" and "brcm,iproc-sr-sata-phy".
+
+      "brcm,rxaeq-mode":
+        $ref: /schemas/types.yaml#/definitions/string
+        description:
+          String that indicates the desired RX equalizer mode.
+        enum:
+          - off
+          - auto
+          - manual
+
+      "brcm,rxaeq-value":
+        $ref: /schemas/types.yaml#/definitions/uint32
+        description: |
+            When 'brcm,rxaeq-mode' is set to "manual", provides the RX
+            equalizer value that should be used.
+        minimum: 0
+        maximum: 63
+
+      "brcm,tx-amplitude-millivolt":
+        description: |
+            Transmit amplitude voltage in millivolt.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [400, 500, 600, 800]
+
+    required:
+      - reg
+      - "#phy-cells"
+
+    additionalProperties: false
+
+if:
+  properties:
+    compatible:
+      items:
+        const: brcm,iproc-ns2-sata-phy
+then:
+  properties:
+    reg:
+      maxItems: 2
+    reg-names:
+      items:
+        - const: "phy"
+        - const: "phy-ctrl"
+else:
+  properties:
+    reg:
+      maxItems: 1
+    reg-names:
+      maxItems: 1
+      items:
+        - const: "phy"
+
+required:
+  - compatible
+  - "#address-cells"
+  - "#size-cells"
+  - reg
+  - reg-names
+
+additionalProperties: false
+
+examples:
+  - |
+    sata_phy@f0458100 {
+        compatible = "brcm,bcm7445-sata-phy", "brcm,phy-sata3";
+        reg = <0xf0458100 0x1e00>;
+        reg-names = "phy";
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        sata-phy@0 {
+                reg = <0>;
+                #phy-cells = <0>;
+        };
+
+        sata-phy@1 {
+                reg = <1>;
+                #phy-cells = <0>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
deleted file mode 100644
index c03ad2198410435e822b0f8389d9e7c1b0c0ab33..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-* Broadcom SATA3 PHY
-
-Required properties:
-- compatible: should be one or more of
-     "brcm,bcm7216-sata-phy"
-     "brcm,bcm7425-sata-phy"
-     "brcm,bcm7445-sata-phy"
-     "brcm,iproc-ns2-sata-phy"
-     "brcm,iproc-nsp-sata-phy"
-     "brcm,phy-sata3"
-     "brcm,iproc-sr-sata-phy"
-     "brcm,bcm63138-sata-phy"
-- address-cells: should be 1
-- size-cells: should be 0
-- reg: register ranges for the PHY PCB interface
-- reg-names: should be "phy" and "phy-ctrl"
-     The "phy-ctrl" registers are only required for
-     "brcm,iproc-ns2-sata-phy" and "brcm,iproc-sr-sata-phy".
-
-Sub-nodes:
-  Each port's PHY should be represented as a sub-node.
-
-Sub-nodes required properties:
-- reg: the PHY number
-- phy-cells: generic PHY binding; must be 0
-
-Sub-nodes optional properties:
-- brcm,enable-ssc: use spread spectrum clocking (SSC) on this port
-     This property is not applicable for "brcm,iproc-ns2-sata-phy",
-     "brcm,iproc-nsp-sata-phy" and "brcm,iproc-sr-sata-phy".
-
-- brcm,rxaeq-mode: string that indicates the desired RX equalizer
-  mode, possible values are:
-	"off" (equivalent to not specifying the property)
-	"auto"
-	"manual" (brcm,rxaeq-value is used in that case)
-
-- brcm,rxaeq-value: when 'rxaeq-mode' is set to "manual", provides the RX
-  equalizer value that should be used. Allowed range is 0..63.
-
-Example
-	sata-phy@f0458100 {
-		compatible = "brcm,bcm7445-sata-phy", "brcm,phy-sata3";
-		reg = <0xf0458100 0x1e00>, <0xf045804c 0x10>;
-		reg-names = "phy";
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		sata-phy@0 {
-			reg = <0>;
-			#phy-cells = <0>;
-		};
-
-		sata-phy@1 {
-			reg = <1>;
-			#phy-cells = <0>;
-		};
-	};
diff --git a/Documentation/devicetree/bindings/usb/ingenic,jz4770-phy.yaml b/Documentation/devicetree/bindings/phy/ingenic,phy-usb.yaml
similarity index 89%
rename from Documentation/devicetree/bindings/usb/ingenic,jz4770-phy.yaml
rename to Documentation/devicetree/bindings/phy/ingenic,phy-usb.yaml
index 2d61166ea5cf73807f7ea1f02777b283ebbee4e2..0fd93d71fe5a935de96cb6a3581fb2ba161f2613 100644
--- a/Documentation/devicetree/bindings/usb/ingenic,jz4770-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/ingenic,phy-usb.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/usb/ingenic,jz4770-phy.yaml#
+$id: http://devicetree.org/schemas/phy/ingenic,phy-usb.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Ingenic SoCs USB PHY devicetree bindings
@@ -17,9 +17,11 @@ properties:
   compatible:
     enum:
       - ingenic,jz4770-phy
+      - ingenic,jz4775-phy
       - ingenic,jz4780-phy
       - ingenic,x1000-phy
       - ingenic,x1830-phy
+      - ingenic,x2000-phy
 
   reg:
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/intel,phy-keembay-usb.yaml b/Documentation/devicetree/bindings/phy/intel,phy-keembay-usb.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a217bb8ac5bc0887a88528d82a3c8a27bcbd70a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/intel,phy-keembay-usb.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/intel,phy-keembay-usb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Keem Bay USB PHY bindings
+
+maintainers:
+  - Wan Ahmad Zainie <wan.ahmad.zainie.wan.mohamad@intel.com>
+
+properties:
+  compatible:
+    const: intel,keembay-usb-phy
+
+  reg:
+    items:
+      - description: USB APB CPR (clock, power, reset) register
+      - description: USB APB slave register
+
+  reg-names:
+    items:
+      - const: cpr-apb-base
+      - const: slv-apb-base
+
+  '#phy-cells':
+    const: 0
+
+required:
+  - compatible
+  - reg
+  - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    usb-phy@20400000 {
+          compatible = "intel,keembay-usb-phy";
+          reg = <0x20400000 0x1c>,
+                <0x20480000 0xd0>;
+          reg-names = "cpr-apb-base", "slv-apb-base";
+          #phy-cells = <0>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml b/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml
index 00609ace677c9a7ebd87dc1bb2eadda9de44f80b..ff255aa4cc1033cb5e9e2f3702af6b020ad57b29 100644
--- a/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/marvell,mmp3-hsic-phy.yaml
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-or-later
+# SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
 # Copyright 2019 Lubomir Rintel <lkundrak@v3.sk>
 %YAML 1.2
 ---
@@ -18,27 +18,20 @@ properties:
     maxItems: 1
     description: base address of the device
 
-  reset-gpios:
-    maxItems: 1
-    description: GPIO connected to reset
-
   "#phy-cells":
     const: 0
 
 required:
   - compatible
   - reg
-  - reset-gpios
   - "#phy-cells"
 
 additionalProperties: false
 
 examples:
   - |
-    #include <dt-bindings/gpio/gpio.h>
     hsic-phy@f0001800 {
             compatible = "marvell,mmp3-hsic-phy";
             reg = <0xf0001800 0x40>;
-            reset-gpios = <&gpio 63 GPIO_ACTIVE_HIGH>;
             #phy-cells = <0>;
     };
diff --git a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml b/Documentation/devicetree/bindings/phy/mediatek,mt7621-pci-phy.yaml
similarity index 92%
rename from drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml
rename to Documentation/devicetree/bindings/phy/mediatek,mt7621-pci-phy.yaml
index cf32bbc45b5d9f84f9ccc4d0328d79a26d5ff3a4..0ccaded3f245c4b67c3be2243b544d1be9312046 100644
--- a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/mediatek,mt7621-pci-phy.yaml
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
 $id: "http://devicetree.org/schemas/phy/mediatek,mt7621-pci-phy.yaml#"
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt
deleted file mode 100644
index 03f5939d3d19344415e6eaf00a069a46ed3c53cf..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-Cadence Sierra PHY
------------------------
-
-Required properties:
-- compatible:	Must be "cdns,sierra-phy-t0" for Sierra in Cadence platform
-		Must be "ti,sierra-phy-t0" for Sierra in TI's J721E SoC.
-- resets:	Must contain an entry for each in reset-names.
-		See ../reset/reset.txt for details.
-- reset-names:	Must include "sierra_reset" and "sierra_apb".
-		"sierra_reset" must control the reset line to the PHY.
-		"sierra_apb" must control the reset line to the APB PHY
-		interface ("sierra_apb" is optional).
-- reg:		register range for the PHY.
-- #address-cells: Must be 1
-- #size-cells:	Must be 0
-
-Optional properties:
-- clocks:		Must contain an entry in clock-names.
-			See ../clocks/clock-bindings.txt for details.
-- clock-names:		Must contain "cmn_refclk_dig_div" and
-			"cmn_refclk1_dig_div" for configuring the frequency of
-			the clock to the lanes. "phy_clk" is deprecated.
-- cdns,autoconf:	A boolean property whose presence indicates that the
-			PHY registers will be configured by hardware. If not
-			present, all sub-node optional properties must be
-			provided.
-
-Sub-nodes:
-  Each group of PHY lanes with a single master lane should be represented as
-  a sub-node. Note that the actual configuration of each lane is determined by
-  hardware strapping, and must match the configuration specified here.
-
-Sub-node required properties:
-- #phy-cells:	Generic PHY binding; must be 0.
-- reg:		The master lane number.  This is the lowest numbered lane
-		in the lane group.
-- resets:	Must contain one entry which controls the reset line for the
-		master lane of the sub-node.
-		See ../reset/reset.txt for details.
-
-Sub-node optional properties:
-- cdns,num-lanes:	Number of lanes in this group.  From 1 to 4.  The
-			group is made up of consecutive lanes.
-- cdns,phy-type:	Can be PHY_TYPE_PCIE or PHY_TYPE_USB3, depending on
-			configuration of lanes.
-
-Example:
-	pcie_phy4: pcie-phy@fd240000 {
-		compatible = "cdns,sierra-phy-t0";
-		reg = <0x0 0xfd240000 0x0 0x40000>;
-		resets = <&phyrst 0>, <&phyrst 1>;
-		reset-names = "sierra_reset", "sierra_apb";
-		clocks = <&phyclock>;
-		clock-names = "phy_clk";
-		#address-cells = <1>;
-		#size-cells = <0>;
-		pcie0_phy0: pcie-phy@0 {
-				reg = <0>;
-				resets = <&phyrst 2>;
-				cdns,num-lanes = <2>;
-				#phy-cells = <0>;
-				cdns,phy-type = <PHY_TYPE_PCIE>;
-		};
-		pcie0_phy1: pcie-phy@2 {
-				reg = <2>;
-				resets = <&phyrst 4>;
-				cdns,num-lanes = <1>;
-				#phy-cells = <0>;
-				cdns,phy-type = <PHY_TYPE_PCIE>;
-		};
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d210843863df084b646b5f07224117c7086562fd
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/phy-cadence-sierra.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Cadence Sierra PHY binding
+
+description:
+  This binding describes the Cadence Sierra PHY. Sierra PHY supports multilink
+  multiprotocol combinations including protocols such as PCIe, USB etc.
+
+maintainers:
+  - Swapnil Jakhade <sjakhade@cadence.com>
+  - Yuti Amonkar <yamonkar@cadence.com>
+
+properties:
+  compatible:
+    enum:
+      - cdns,sierra-phy-t0
+      - ti,sierra-phy-t0
+
+  '#address-cells':
+    const: 1
+
+  '#size-cells':
+    const: 0
+
+  resets:
+    minItems: 1
+    maxItems: 2
+    items:
+      - description: Sierra PHY reset.
+      - description: Sierra APB reset. This is optional.
+
+  reset-names:
+    minItems: 1
+    maxItems: 2
+    items:
+      - const: sierra_reset
+      - const: sierra_apb
+
+  reg:
+    maxItems: 1
+    description:
+      Offset of the Sierra PHY configuration registers.
+
+  reg-names:
+    const: serdes
+
+  clocks:
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: cmn_refclk_dig_div
+      - const: cmn_refclk1_dig_div
+
+  cdns,autoconf:
+    type: boolean
+    description:
+      A boolean property whose presence indicates that the PHY registers will be
+      configured by hardware. If not present, all sub-node optional properties
+      must be provided.
+
+patternProperties:
+  '^phy@[0-9a-f]$':
+    type: object
+    description:
+      Each group of PHY lanes with a single master lane should be represented as
+      a sub-node. Note that the actual configuration of each lane is determined
+      by hardware strapping, and must match the configuration specified here.
+    properties:
+      reg:
+        description:
+          The master lane number. This is the lowest numbered lane in the lane group.
+        minimum: 0
+        maximum: 15
+
+      resets:
+        minItems: 1
+        maxItems: 4
+        description:
+          Contains list of resets, one per lane, to get all the link lanes out of reset.
+
+      "#phy-cells":
+        const: 0
+
+      cdns,phy-type:
+        description:
+          Specifies the type of PHY for which the group of PHY lanes is used.
+          Refer to include/dt-bindings/phy/phy.h. Constants from the header should be used.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [2, 4]
+
+      cdns,num-lanes:
+        description:
+          Number of lanes in this group. The group is made up of consecutive lanes.
+        $ref: /schemas/types.yaml#/definitions/uint32
+        minimum: 1
+        maximum: 16
+
+    required:
+      - reg
+      - resets
+      - "#phy-cells"
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - "#address-cells"
+  - "#size-cells"
+  - reg
+  - resets
+  - reset-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/phy/phy.h>
+
+    bus {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        sierra-phy@fd240000 {
+            compatible = "cdns,sierra-phy-t0";
+            reg = <0x0 0xfd240000 0x0 0x40000>;
+            resets = <&phyrst 0>, <&phyrst 1>;
+            reset-names = "sierra_reset", "sierra_apb";
+            clocks = <&cmn_refclk_dig_div>, <&cmn_refclk1_dig_div>;
+            clock-names = "cmn_refclk_dig_div", "cmn_refclk1_dig_div";
+            #address-cells = <1>;
+            #size-cells = <0>;
+            pcie0_phy0: phy@0 {
+                reg = <0>;
+                resets = <&phyrst 2>;
+                cdns,num-lanes = <2>;
+                #phy-cells = <0>;
+                cdns,phy-type = <PHY_TYPE_PCIE>;
+            };
+            pcie0_phy1: phy@2 {
+                reg = <2>;
+                resets = <&phyrst 4>;
+                cdns,num-lanes = <1>;
+                #phy-cells = <0>;
+                cdns,phy-type = <PHY_TYPE_PCIE>;
+            };
+        };
+    };
diff --git a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.txt b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.txt
deleted file mode 100644
index 725ae71ae6535a0bdb8ccbf2ed83e168d3002d56..0000000000000000000000000000000000000000
--- a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-STMicroelectronics STM32 USB HS PHY controller
-
-The STM32 USBPHYC block contains a dual port High Speed UTMI+ PHY and a UTMI
-switch. It controls PHY configuration and status, and the UTMI+ switch that
-selects either OTG or HOST controller for the second PHY port. It also sets
-PLL configuration.
-
-USBPHYC
-      |_ PLL
-      |
-      |_ PHY port#1 _________________ HOST controller
-      |                    _                 |
-      |                  / 1|________________|
-      |_ PHY port#2 ----|   |________________
-      |                  \_0|                |
-      |_ UTMI switch_______|          OTG controller
-
-
-Phy provider node
-=================
-
-Required properties:
-- compatible: must be "st,stm32mp1-usbphyc"
-- reg: address and length of the usb phy control register set
-- clocks: phandle + clock specifier for the PLL phy clock
-- #address-cells: number of address cells for phys sub-nodes, must be <1>
-- #size-cells: number of size cells for phys sub-nodes, must be <0>
-
-Optional properties:
-- assigned-clocks: phandle + clock specifier for the PLL phy clock
-- assigned-clock-parents: the PLL phy clock parent
-- resets: phandle + reset specifier
-
-Required nodes: one sub-node per port the controller provides.
-
-Phy sub-nodes
-==============
-
-Required properties:
-- reg: phy port index
-- phy-supply: phandle to the regulator providing 3V3 power to the PHY,
-	      see phy-bindings.txt in the same directory.
-- vdda1v1-supply: phandle to the regulator providing 1V1 power to the PHY
-- vdda1v8-supply: phandle to the regulator providing 1V8 power to the PHY
-- #phy-cells: see phy-bindings.txt in the same directory, must be <0> for PHY
-  port#1 and must be <1> for PHY port#2, to select USB controller
-
-
-Example:
-		usbphyc: usb-phy@5a006000 {
-			compatible = "st,stm32mp1-usbphyc";
-			reg = <0x5a006000 0x1000>;
-			clocks = <&rcc_clk USBPHY_K>;
-			resets = <&rcc_rst USBPHY_R>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			usbphyc_port0: usb-phy@0 {
-				reg = <0>;
-				phy-supply = <&vdd_usb>;
-				vdda1v1-supply = <&reg11>;
-				vdda1v8-supply = <&reg18>
-				#phy-cells = <0>;
-			};
-
-			usbphyc_port1: usb-phy@1 {
-				reg = <1>;
-				phy-supply = <&vdd_usb>;
-				vdda1v1-supply = <&reg11>;
-				vdda1v8-supply = <&reg18>
-				#phy-cells = <1>;
-			};
-		};
diff --git a/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0ba61979b970d700f66887f19306432c8c797f69
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-stm32-usbphyc.yaml
@@ -0,0 +1,138 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/phy-stm32-usbphyc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 USB HS PHY controller binding
+
+description: |
+
+  The STM32 USBPHYC block contains a dual port High Speed UTMI+ PHY and a UTMI
+  switch. It controls PHY configuration and status, and the UTMI+ switch that
+  selects either OTG or HOST controller for the second PHY port. It also sets
+  PLL configuration.
+
+  USBPHYC
+  |_ PLL
+  |
+  |_ PHY port#1 _________________ HOST controller
+  |                   __                 |
+  |                  / 1|________________|
+  |_ PHY port#2 ----|   |________________
+  |                  \_0|                |
+  |_ UTMI switch_______|          OTG controller
+
+maintainers:
+  - Amelie Delaunay <amelie.delaunay@st.com>
+
+properties:
+  compatible:
+    const: st,stm32mp1-usbphyc
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  "#address-cells":
+    const: 1
+
+  "#size-cells":
+    const: 0
+
+#Required child nodes:
+
+patternProperties:
+  "^usb-phy@[0|1]$":
+    type: object
+    description:
+      Each port the controller provides must be represented as a sub-node.
+
+    properties:
+      reg:
+        description: phy port index.
+        maxItems: 1
+
+      phy-supply:
+        description: regulator providing 3V3 power supply to the PHY.
+
+      vdda1v1-supply:
+        description: regulator providing 1V1 power supply to the PLL block
+
+      vdda1v8-supply:
+        description: regulator providing 1V8 power supply to the PLL block
+
+      "#phy-cells":
+        enum: [ 0x0, 0x1 ]
+
+    allOf:
+      - if:
+          properties:
+            reg:
+              const: 0
+        then:
+          properties:
+            "#phy-cells":
+              const: 0
+        else:
+          properties:
+            "#phy-cells":
+              const: 1
+              description:
+                The value is used to select UTMI switch output.
+                0 for OTG controller and 1 for Host controller.
+
+    required:
+      - reg
+      - phy-supply
+      - vdda1v1-supply
+      - vdda1v8-supply
+      - "#phy-cells"
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - "#address-cells"
+  - "#size-cells"
+  - usb-phy@0
+  - usb-phy@1
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    #include <dt-bindings/reset/stm32mp1-resets.h>
+    usbphyc: usbphyc@5a006000 {
+        compatible = "st,stm32mp1-usbphyc";
+        reg = <0x5a006000 0x1000>;
+        clocks = <&rcc USBPHY_K>;
+        resets = <&rcc USBPHY_R>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        usbphyc_port0: usb-phy@0 {
+            reg = <0>;
+            phy-supply = <&vdd_usb>;
+            vdda1v1-supply = <&reg11>;
+            vdda1v8-supply = <&reg18>;
+            #phy-cells = <0>;
+        };
+
+        usbphyc_port1: usb-phy@1 {
+            reg = <1>;
+            phy-supply = <&vdd_usb>;
+            vdda1v1-supply = <&reg11>;
+            vdda1v8-supply = <&reg18>;
+            #phy-cells = <1>;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
index 185cdea9cf81a03be6150efca0a5a0f5f812482b..ec05db374645da14030afa99e97ae562b6c98021 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
@@ -31,6 +31,9 @@ properties:
       - qcom,sdm845-qmp-usb3-uni-phy
       - qcom,sm8150-qmp-ufs-phy
       - qcom,sm8250-qmp-ufs-phy
+      - qcom,sm8250-qmp-gen3x1-pcie-phy
+      - qcom,sm8250-qmp-gen3x2-pcie-phy
+      - qcom,sm8250-qmp-modem-pcie-phy
 
   reg:
     items:
@@ -259,6 +262,9 @@ allOf:
             enum:
               - qcom,sdm845-qhp-pcie-phy
               - qcom,sdm845-qmp-pcie-phy
+              - qcom,sm8250-qmp-gen3x1-pcie-phy
+              - qcom,sm8250-qmp-gen3x2-pcie-phy
+              - qcom,sm8250-qmp-modem-pcie-phy
     then:
       properties:
         clocks:
diff --git a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
index e728786f21e0beaa9dcbe25a1a518e2c64d05e59..00aa2d349e55ccbbaec80fd2281fe78f9781b44b 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
@@ -16,6 +16,11 @@ Optional properties:
  - drive-impedance-ohm: Specifies the drive impedance in Ohm.
                         Possible values are 33, 40, 50, 66 and 100.
                         If not set, the default value of 50 will be applied.
+ - enable-strobe-pulldown: Enable internal pull-down for the strobe line.
+                           If not set, pull-down is not used.
+ - output-tapdelay-select: Specifies the phyctrl_otapdlysec register.
+                           If not set, the register defaults to 0x4.
+                           Maximum value 0xf.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/phy/samsung-phy.txt b/Documentation/devicetree/bindings/phy/samsung-phy.txt
index 7510830a79bdcb58588ce02ff64791381879f562..8f51aee91101172cb28a100cc4c4c6f88e4b221a 100644
--- a/Documentation/devicetree/bindings/phy/samsung-phy.txt
+++ b/Documentation/devicetree/bindings/phy/samsung-phy.txt
@@ -47,6 +47,7 @@ Required properties:
 	- "samsung,exynos4210-usb2-phy"
 	- "samsung,exynos4x12-usb2-phy"
 	- "samsung,exynos5250-usb2-phy"
+	- "samsung,exynos5420-usb2-phy"
 	- "samsung,s5pv210-usb2-phy"
 - reg : a list of registers used by phy driver
 	- first and obligatory is the location of phy modules registers
diff --git a/Documentation/w1/slaves/w1_therm.rst b/Documentation/w1/slaves/w1_therm.rst
index e39202e2b0004c29efb7637fed9b5870c0af4321..c3c9ed7a356c926427cdecd9d4944d52c9ee6519 100644
--- a/Documentation/w1/slaves/w1_therm.rst
+++ b/Documentation/w1/slaves/w1_therm.rst
@@ -82,7 +82,7 @@ resolution is read back from the chip and verified.
 
 Note: Changing the resolution reverts the conversion time to default.
 
-The write-only sysfs entry ``eeprom`` is an alternative for EEPROM operations.
+The write-only sysfs entry ``eeprom_cmd`` is an alternative for EEPROM operations.
 Write ``save`` to save device RAM to EEPROM. Write ``restore`` to restore EEPROM
 data in device RAM.
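For a quick sense of the renamed interface, a minimal user-space sketch that writes ``save`` to the new ``eeprom_cmd`` entry; the w1 slave id in the path is a placeholder, not a real device, and writing ``restore`` instead copies the EEPROM contents back into device RAM.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* The slave id below is a placeholder; use a real one from
	 * /sys/bus/w1/devices/. */
	const char *path = "/sys/bus/w1/devices/28-0000052a8e1f/eeprom_cmd";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "save" copies device RAM to EEPROM; "restore" does the reverse. */
	if (write(fd, "save", strlen("save")) < 0)
		perror("write");
	close(fd);
	return 0;
}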
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 2147671b4e53c696a371702eefba33bbd53614c9..a2bee6cf91bc28c2f4a4c8c842537dd2f7dae485 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7741,9 +7741,9 @@ F:	drivers/clocksource/h8300_*.c
 F:	drivers/irqchip/irq-renesas-h8*.c
 
 HABANALABS PCI DRIVER
-M:	Oded Gabbay <oded.gabbay@gmail.com>
+M:	Oded Gabbay <ogabbay@kernel.org>
 S:	Supported
-T:	git https://github.com/HabanaAI/linux.git
+T:	git https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/linux.git
 F:	Documentation/ABI/testing/debugfs-driver-habanalabs
 F:	Documentation/ABI/testing/sysfs-driver-habanalabs
 F:	drivers/misc/habanalabs/
@@ -11171,6 +11171,12 @@ S:	Maintained
 F:	Documentation/devicetree/bindings/i2c/i2c-mt7621.txt
 F:	drivers/i2c/busses/i2c-mt7621.c
 
+MEDIATEK MT7621 PHY PCI DRIVER
+M:	Sergio Paracuellos <sergio.paracuellos@gmail.com>
+S:	Maintained
+F:	Documentation/devicetree/bindings/phy/mediatek,mt7621-pci-phy.yaml
+F:	drivers/phy/ralink/phy-mt7621-pci.c
+
 MEDIATEK NAND CONTROLLER DRIVER
 L:	linux-mtd@lists.infradead.org
 S:	Orphan
@@ -14647,6 +14653,14 @@ F:	Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
 F:	drivers/mailbox/qcom-ipcc.c
 F:	include/dt-bindings/mailbox/qcom-ipcc.h
 
+QUALCOMM IPQ4019 USB PHY DRIVER
+M:	Robert Marko <robert.marko@sartura.hr>
+M:	Luka Perkov <luka.perkov@sartura.hr>
+L:	linux-arm-msm@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/phy/qcom-usb-ipq4019-phy.yaml
+F:	drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
+
 QUALCOMM IPQ4019 VQMMC REGULATOR DRIVER
 M:	Robert Marko <robert.marko@sartura.hr>
 M:	Luka Perkov <luka.perkov@sartura.hr>
@@ -15508,6 +15522,14 @@ L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/fbdev/s3c-fb.c
 
+SAMSUNG INTERCONNECT DRIVERS
+M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
+M:	Artur Świgoń <a.swigon@samsung.com>
+L:	linux-pm@vger.kernel.org
+L:	linux-samsung-soc@vger.kernel.org
+S:	Supported
+F:	drivers/interconnect/samsung/
+
 SAMSUNG LAPTOP DRIVER
 M:	Corentin Chary <corentin.chary@gmail.com>
 L:	platform-driver-x86@vger.kernel.org
@@ -16624,8 +16646,10 @@ F:	Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
 F:	drivers/net/ethernet/toshiba/spider_net*
 
 SPMI SUBSYSTEM
-R:	Stephen Boyd <sboyd@kernel.org>
-L:	linux-arm-msm@vger.kernel.org
+M:	Stephen Boyd <sboyd@kernel.org>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sboyd/spmi.git
 F:	Documentation/devicetree/bindings/spmi/
 F:	drivers/spmi/
 F:	include/dt-bindings/spmi/spmi.h
diff --git a/drivers/accessibility/speakup/i18n.h b/drivers/accessibility/speakup/i18n.h
index 2ec6e659d02b8e79c5a81208aeb5a7d55c5aacb8..2a607d2632340ac4ff0c3e57ea12b271b463a10c 100644
--- a/drivers/accessibility/speakup/i18n.h
+++ b/drivers/accessibility/speakup/i18n.h
@@ -23,12 +23,15 @@ enum msg_index_t {
 	MSG_OFF = MSG_STATUS_START,
 	MSG_ON,
 	MSG_NO_WINDOW,
+
+	/* These must be ordered the same as enum cursor_track */
 	MSG_CURSOR_MSGS_START,
 	MSG_CURSORING_OFF = MSG_CURSOR_MSGS_START,
 	MSG_CURSORING_ON,
 	MSG_HIGHLIGHT_TRACKING,
 	MSG_READ_WINDOW,
 	MSG_READ_ALL,
+
 	MSG_EDIT_DONE,
 	MSG_WINDOW_ALREADY_SET,
 	MSG_END_BEFORE_START,
@@ -41,11 +44,14 @@ enum msg_index_t {
 	MSG_LEAVING_HELP,
 	MSG_IS_UNASSIGNED,
 	MSG_HELP_INFO,
+
+	/* These must be ordered the same as enum edge */
 	MSG_EDGE_MSGS_START,
 	MSG_EDGE_TOP  = MSG_EDGE_MSGS_START,
 	MSG_EDGE_BOTTOM,
 	MSG_EDGE_LEFT,
 	MSG_EDGE_RIGHT,
+
 	MSG_NUMBER,
 	MSG_SPACE,
 	MSG_START, /* A little confusing, given our convention. */
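The ordering comments added above matter because, presumably, the driver resolves a status message by adding the mode value to the first message index of its block. A standalone sketch of that indexing pattern (the values and strings are made up for illustration, not the driver's actual tables):

#include <stdio.h>

/* Names mirror the driver but this is not the driver's code. Messages for a
 * block are found by adding the enum value to the block's first message
 * index, so both enumerations must stay in the same order. */
enum cursor_track { CT_Off, CT_On, CT_Highlight, CT_Window };
enum msg_index { MSG_CURSOR_MSGS_START = 10 };

static const char *messages[] = {
	[MSG_CURSOR_MSGS_START + CT_Off]       = "cursoring off",
	[MSG_CURSOR_MSGS_START + CT_On]        = "cursoring on",
	[MSG_CURSOR_MSGS_START + CT_Highlight] = "highlight tracking",
	[MSG_CURSOR_MSGS_START + CT_Window]    = "read window",
};

int main(void)
{
	enum cursor_track track = CT_Highlight;

	printf("%s\n", messages[MSG_CURSOR_MSGS_START + track]);
	return 0;
}

Reordering either enumeration silently shifts every lookup, which is exactly what the added comments guard against.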
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 48019660a0967922e5c14b8d60f384219ecf4297..428fceaf9d50e9386b8c8ea58920dad0868d055d 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -90,19 +90,18 @@ const u_char spk_key_defaults[] = {
 #include "speakupmap.h"
 };
 
-/* Speakup Cursor Track Variables */
-static int cursor_track = 1, prev_cursor_track = 1;
-
-/* cursor track modes, must be ordered same as cursor_msgs */
-enum {
+/* cursor track modes, must be ordered same as cursor_msgs in enum msg_index_t */
+enum cursor_track {
 	CT_Off = 0,
 	CT_On,
 	CT_Highlight,
 	CT_Window,
-	CT_Max
+	CT_Max,
+	read_all_mode = CT_Max,
 };
 
-#define read_all_mode CT_Max
+/* Speakup Cursor Track Variables */
+static enum cursor_track cursor_track = CT_On, prev_cursor_track = CT_On;
 
 static struct tty_struct *tty;
 
@@ -404,15 +403,17 @@ static void say_attributes(struct vc_data *vc)
 	synth_printf("%s\n", spk_msg_get(MSG_COLORS_START + bg));
 }
 
-enum {
-	edge_top = 1,
+/* must be ordered same as edge_msgs in enum msg_index_t */
+enum edge {
+	edge_none = 0,
+	edge_top,
 	edge_bottom,
 	edge_left,
 	edge_right,
 	edge_quiet
 };
 
-static void announce_edge(struct vc_data *vc, int msg_id)
+static void announce_edge(struct vc_data *vc, enum edge msg_id)
 {
 	if (spk_bleeps & 1)
 		bleep(spk_y);
@@ -607,7 +608,8 @@ static void say_prev_word(struct vc_data *vc)
 {
 	u_char temp;
 	u16 ch;
-	u_short edge_said = 0, last_state = 0, state = 0;
+	enum edge edge_said = edge_none;
+	u_short last_state = 0, state = 0;
 
 	spk_parked |= 0x01;
 
@@ -652,7 +654,7 @@ static void say_prev_word(struct vc_data *vc)
 	}
 	if (spk_x == 0 && edge_said == edge_quiet)
 		edge_said = edge_left;
-	if (edge_said > 0 && edge_said < edge_quiet)
+	if (edge_said > edge_none && edge_said < edge_quiet)
 		announce_edge(vc, edge_said);
 	say_word(vc);
 }
@@ -661,7 +663,8 @@ static void say_next_word(struct vc_data *vc)
 {
 	u_char temp;
 	u16 ch;
-	u_short edge_said = 0, last_state = 2, state = 0;
+	enum edge edge_said = edge_none;
+	u_short last_state = 2, state = 0;
 
 	spk_parked |= 0x01;
 	if (spk_x == vc->vc_cols - 1 && spk_y == vc->vc_rows - 1) {
@@ -693,7 +696,7 @@ static void say_next_word(struct vc_data *vc)
 		spk_pos += 2;
 		last_state = state;
 	}
-	if (edge_said > 0)
+	if (edge_said > edge_none)
 		announce_edge(vc, edge_said);
 	say_word(vc);
 }
@@ -1365,31 +1368,30 @@ static void speakup_deallocate(struct vc_data *vc)
 	speakup_console[vc_num] = NULL;
 }
 
+enum read_all_command {
+	RA_NEXT_SENT = KVAL(K_DOWN)+1,
+	RA_PREV_LINE = KVAL(K_LEFT)+1,
+	RA_NEXT_LINE = KVAL(K_RIGHT)+1,
+	RA_PREV_SENT = KVAL(K_UP)+1,
+	RA_DOWN_ARROW,
+	RA_TIMER,
+	RA_FIND_NEXT_SENT,
+	RA_FIND_PREV_SENT,
+};
+
 static u_char is_cursor;
 static u_long old_cursor_pos, old_cursor_x, old_cursor_y;
 static int cursor_con;
 
 static void reset_highlight_buffers(struct vc_data *);
 
-static int read_all_key;
+static enum read_all_command read_all_key;
 
 static int in_keyboard_notifier;
 
-static void start_read_all_timer(struct vc_data *vc, int command);
-
-enum {
-	RA_NOTHING,
-	RA_NEXT_SENT,
-	RA_PREV_LINE,
-	RA_NEXT_LINE,
-	RA_PREV_SENT,
-	RA_DOWN_ARROW,
-	RA_TIMER,
-	RA_FIND_NEXT_SENT,
-	RA_FIND_PREV_SENT,
-};
+static void start_read_all_timer(struct vc_data *vc, enum read_all_command command);
 
-static void kbd_fakekey2(struct vc_data *vc, int command)
+static void kbd_fakekey2(struct vc_data *vc, enum read_all_command command)
 {
 	del_timer(&cursor_timer);
 	speakup_fake_down_arrow();
@@ -1426,7 +1428,7 @@ static void stop_read_all(struct vc_data *vc)
 	spk_do_flush();
 }
 
-static void start_read_all_timer(struct vc_data *vc, int command)
+static void start_read_all_timer(struct vc_data *vc, enum read_all_command command)
 {
 	struct var_t *cursor_timeout;
 
@@ -1437,7 +1439,7 @@ static void start_read_all_timer(struct vc_data *vc, int command)
 		  jiffies + msecs_to_jiffies(cursor_timeout->u.n.value));
 }
 
-static void handle_cursor_read_all(struct vc_data *vc, int command)
+static void handle_cursor_read_all(struct vc_data *vc, enum read_all_command command)
 {
 	int indcount, sentcount, rv, sn;
 
diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
index 780214b5ca16ea98e2809f1a0a6398604c62dea3..ab6d61e80b1cbac8d66951190a89d5f6c99ee7f7 100644
--- a/drivers/accessibility/speakup/speakup_dectlk.c
+++ b/drivers/accessibility/speakup/speakup_dectlk.c
@@ -37,7 +37,7 @@ static unsigned char get_index(struct spk_synth *synth);
 static int in_escape;
 static int is_flushing;
 
-static spinlock_t flush_lock;
+static DEFINE_SPINLOCK(flush_lock);
 static DECLARE_WAIT_QUEUE_HEAD(flush);
 
 static struct var_t vars[] = {
diff --git a/drivers/accessibility/speakup/speakup_dummy.c b/drivers/accessibility/speakup/speakup_dummy.c
index e393438af81bd652aeb3ffee58a5a2f2e8d5dd5d..63c2f29432823ce9cbcb28ef2e019986c83857c8 100644
--- a/drivers/accessibility/speakup/speakup_dummy.c
+++ b/drivers/accessibility/speakup/speakup_dummy.c
@@ -80,6 +80,11 @@ static struct attribute *synth_attrs[] = {
 	NULL,	/* need to NULL terminate the list of attributes */
 };
 
+static void read_buff_add(u_char c)
+{
+	pr_info("speakup_dummy: got character %02x\n", c);
+}
+
 static struct spk_synth synth_dummy = {
 	.name = "dummy",
 	.version = DRV_VERSION,
@@ -103,7 +108,7 @@ static struct spk_synth synth_dummy = {
 	.flush = spk_synth_flush,
 	.is_alive = spk_synth_is_alive_restart,
 	.synth_adjust = NULL,
-	.read_buff_add = NULL,
+	.read_buff_add = read_buff_add,
 	.get_index = NULL,
 	.indexing = {
 		.command = NULL,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b5117576792bc6ac6cc06da69d2cf6f43c95261f..1338209f9f865ecb870f3a19fc9b0d9c45b2d1a0 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -68,11 +68,9 @@
 #include <linux/sizes.h>
 
 #include <uapi/linux/android/binder.h>
-#include <uapi/linux/android/binderfs.h>
 
 #include <asm/cacheflush.h>
 
-#include "binder_alloc.h"
 #include "binder_internal.h"
 #include "binder_trace.h"
 
@@ -160,24 +158,6 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
 #define to_binder_fd_array_object(hdr) \
 	container_of(hdr, struct binder_fd_array_object, hdr)
 
-enum binder_stat_types {
-	BINDER_STAT_PROC,
-	BINDER_STAT_THREAD,
-	BINDER_STAT_NODE,
-	BINDER_STAT_REF,
-	BINDER_STAT_DEATH,
-	BINDER_STAT_TRANSACTION,
-	BINDER_STAT_TRANSACTION_COMPLETE,
-	BINDER_STAT_COUNT
-};
-
-struct binder_stats {
-	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
-	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
-	atomic_t obj_created[BINDER_STAT_COUNT];
-	atomic_t obj_deleted[BINDER_STAT_COUNT];
-};
-
 static struct binder_stats binder_stats;
 
 static inline void binder_stats_deleted(enum binder_stat_types type)
@@ -213,278 +193,11 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
 	return e;
 }
 
-/**
- * struct binder_work - work enqueued on a worklist
- * @entry:             node enqueued on list
- * @type:              type of work to be performed
- *
- * There are separate work lists for proc, thread, and node (async).
- */
-struct binder_work {
-	struct list_head entry;
-
-	enum binder_work_type {
-		BINDER_WORK_TRANSACTION = 1,
-		BINDER_WORK_TRANSACTION_COMPLETE,
-		BINDER_WORK_RETURN_ERROR,
-		BINDER_WORK_NODE,
-		BINDER_WORK_DEAD_BINDER,
-		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
-		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
-	} type;
-};
-
-struct binder_error {
-	struct binder_work work;
-	uint32_t cmd;
-};
-
-/**
- * struct binder_node - binder node bookkeeping
- * @debug_id:             unique ID for debugging
- *                        (invariant after initialized)
- * @lock:                 lock for node fields
- * @work:                 worklist element for node work
- *                        (protected by @proc->inner_lock)
- * @rb_node:              element for proc->nodes tree
- *                        (protected by @proc->inner_lock)
- * @dead_node:            element for binder_dead_nodes list
- *                        (protected by binder_dead_nodes_lock)
- * @proc:                 binder_proc that owns this node
- *                        (invariant after initialized)
- * @refs:                 list of references on this node
- *                        (protected by @lock)
- * @internal_strong_refs: used to take strong references when
- *                        initiating a transaction
- *                        (protected by @proc->inner_lock if @proc
- *                        and by @lock)
- * @local_weak_refs:      weak user refs from local process
- *                        (protected by @proc->inner_lock if @proc
- *                        and by @lock)
- * @local_strong_refs:    strong user refs from local process
- *                        (protected by @proc->inner_lock if @proc
- *                        and by @lock)
- * @tmp_refs:             temporary kernel refs
- *                        (protected by @proc->inner_lock while @proc
- *                        is valid, and by binder_dead_nodes_lock
- *                        if @proc is NULL. During inc/dec and node release
- *                        it is also protected by @lock to provide safety
- *                        as the node dies and @proc becomes NULL)
- * @ptr:                  userspace pointer for node
- *                        (invariant, no lock needed)
- * @cookie:               userspace cookie for node
- *                        (invariant, no lock needed)
- * @has_strong_ref:       userspace notified of strong ref
- *                        (protected by @proc->inner_lock if @proc
- *                        and by @lock)
- * @pending_strong_ref:   userspace has acked notification of strong ref
- *                        (protected by @proc->inner_lock if @proc
- *                        and by @lock)
- * @has_weak_ref:         userspace notified of weak ref
- *                        (protected by @proc->inner_lock if @proc
- *                        and by @lock)
- * @pending_weak_ref:     userspace has acked notification of weak ref
- *                        (protected by @proc->inner_lock if @proc
- *                        and by @lock)
- * @has_async_transaction: async transaction to node in progress
- *                        (protected by @lock)
- * @accept_fds:           file descriptor operations supported for node
- *                        (invariant after initialized)
- * @min_priority:         minimum scheduling priority
- *                        (invariant after initialized)
- * @txn_security_ctx:     require sender's security context
- *                        (invariant after initialized)
- * @async_todo:           list of async work items
- *                        (protected by @proc->inner_lock)
- *
- * Bookkeeping structure for binder nodes.
- */
-struct binder_node {
-	int debug_id;
-	spinlock_t lock;
-	struct binder_work work;
-	union {
-		struct rb_node rb_node;
-		struct hlist_node dead_node;
-	};
-	struct binder_proc *proc;
-	struct hlist_head refs;
-	int internal_strong_refs;
-	int local_weak_refs;
-	int local_strong_refs;
-	int tmp_refs;
-	binder_uintptr_t ptr;
-	binder_uintptr_t cookie;
-	struct {
-		/*
-		 * bitfield elements protected by
-		 * proc inner_lock
-		 */
-		u8 has_strong_ref:1;
-		u8 pending_strong_ref:1;
-		u8 has_weak_ref:1;
-		u8 pending_weak_ref:1;
-	};
-	struct {
-		/*
-		 * invariant after initialization
-		 */
-		u8 accept_fds:1;
-		u8 txn_security_ctx:1;
-		u8 min_priority;
-	};
-	bool has_async_transaction;
-	struct list_head async_todo;
-};
-
-struct binder_ref_death {
-	/**
-	 * @work: worklist element for death notifications
-	 *        (protected by inner_lock of the proc that
-	 *        this ref belongs to)
-	 */
-	struct binder_work work;
-	binder_uintptr_t cookie;
-};
-
-/**
- * struct binder_ref_data - binder_ref counts and id
- * @debug_id:        unique ID for the ref
- * @desc:            unique userspace handle for ref
- * @strong:          strong ref count (debugging only if not locked)
- * @weak:            weak ref count (debugging only if not locked)
- *
- * Structure to hold ref count and ref id information. Since
- * the actual ref can only be accessed with a lock, this structure
- * is used to return information about the ref to callers of
- * ref inc/dec functions.
- */
-struct binder_ref_data {
-	int debug_id;
-	uint32_t desc;
-	int strong;
-	int weak;
-};
-
-/**
- * struct binder_ref - struct to track references on nodes
- * @data:        binder_ref_data containing id, handle, and current refcounts
- * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
- * @rb_node_node: node for lookup by @node in proc's rb_tree
- * @node_entry:  list entry for node->refs list in target node
- *               (protected by @node->lock)
- * @proc:        binder_proc containing ref
- * @node:        binder_node of target node. When cleaning up a
- *               ref for deletion in binder_cleanup_ref, a non-NULL
- *               @node indicates the node must be freed
- * @death:       pointer to death notification (ref_death) if requested
- *               (protected by @node->lock)
- *
- * Structure to track references from procA to target node (on procB). This
- * structure is unsafe to access without holding @proc->outer_lock.
- */
-struct binder_ref {
-	/* Lookups needed: */
-	/*   node + proc => ref (transaction) */
-	/*   desc + proc => ref (transaction, inc/dec ref) */
-	/*   node => refs + procs (proc exit) */
-	struct binder_ref_data data;
-	struct rb_node rb_node_desc;
-	struct rb_node rb_node_node;
-	struct hlist_node node_entry;
-	struct binder_proc *proc;
-	struct binder_node *node;
-	struct binder_ref_death *death;
-};
-
 enum binder_deferred_state {
 	BINDER_DEFERRED_FLUSH        = 0x01,
 	BINDER_DEFERRED_RELEASE      = 0x02,
 };
 
-/**
- * struct binder_proc - binder process bookkeeping
- * @proc_node:            element for binder_procs list
- * @threads:              rbtree of binder_threads in this proc
- *                        (protected by @inner_lock)
- * @nodes:                rbtree of binder nodes associated with
- *                        this proc ordered by node->ptr
- *                        (protected by @inner_lock)
- * @refs_by_desc:         rbtree of refs ordered by ref->desc
- *                        (protected by @outer_lock)
- * @refs_by_node:         rbtree of refs ordered by ref->node
- *                        (protected by @outer_lock)
- * @waiting_threads:      threads currently waiting for proc work
- *                        (protected by @inner_lock)
- * @pid                   PID of group_leader of process
- *                        (invariant after initialized)
- * @tsk                   task_struct for group_leader of process
- *                        (invariant after initialized)
- * @deferred_work_node:   element for binder_deferred_list
- *                        (protected by binder_deferred_lock)
- * @deferred_work:        bitmap of deferred work to perform
- *                        (protected by binder_deferred_lock)
- * @is_dead:              process is dead and awaiting free
- *                        when outstanding transactions are cleaned up
- *                        (protected by @inner_lock)
- * @todo:                 list of work for this process
- *                        (protected by @inner_lock)
- * @stats:                per-process binder statistics
- *                        (atomics, no lock needed)
- * @delivered_death:      list of delivered death notification
- *                        (protected by @inner_lock)
- * @max_threads:          cap on number of binder threads
- *                        (protected by @inner_lock)
- * @requested_threads:    number of binder threads requested but not
- *                        yet started. In current implementation, can
- *                        only be 0 or 1.
- *                        (protected by @inner_lock)
- * @requested_threads_started: number binder threads started
- *                        (protected by @inner_lock)
- * @tmp_ref:              temporary reference to indicate proc is in use
- *                        (protected by @inner_lock)
- * @default_priority:     default scheduler priority
- *                        (invariant after initialized)
- * @debugfs_entry:        debugfs node
- * @alloc:                binder allocator bookkeeping
- * @context:              binder_context for this proc
- *                        (invariant after initialized)
- * @inner_lock:           can nest under outer_lock and/or node lock
- * @outer_lock:           no nesting under innor or node lock
- *                        Lock order: 1) outer, 2) node, 3) inner
- * @binderfs_entry:       process-specific binderfs log file
- *
- * Bookkeeping structure for binder processes
- */
-struct binder_proc {
-	struct hlist_node proc_node;
-	struct rb_root threads;
-	struct rb_root nodes;
-	struct rb_root refs_by_desc;
-	struct rb_root refs_by_node;
-	struct list_head waiting_threads;
-	int pid;
-	struct task_struct *tsk;
-	struct hlist_node deferred_work_node;
-	int deferred_work;
-	bool is_dead;
-
-	struct list_head todo;
-	struct binder_stats stats;
-	struct list_head delivered_death;
-	int max_threads;
-	int requested_threads;
-	int requested_threads_started;
-	int tmp_ref;
-	long default_priority;
-	struct dentry *debugfs_entry;
-	struct binder_alloc alloc;
-	struct binder_context *context;
-	spinlock_t inner_lock;
-	spinlock_t outer_lock;
-	struct dentry *binderfs_entry;
-};
-
 enum {
 	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
 	BINDER_LOOPER_STATE_ENTERED     = 0x02,
@@ -494,125 +207,6 @@ enum {
 	BINDER_LOOPER_STATE_POLL        = 0x20,
 };
 
-/**
- * struct binder_thread - binder thread bookkeeping
- * @proc:                 binder process for this thread
- *                        (invariant after initialization)
- * @rb_node:              element for proc->threads rbtree
- *                        (protected by @proc->inner_lock)
- * @waiting_thread_node:  element for @proc->waiting_threads list
- *                        (protected by @proc->inner_lock)
- * @pid:                  PID for this thread
- *                        (invariant after initialization)
- * @looper:               bitmap of looping state
- *                        (only accessed by this thread)
- * @looper_needs_return:  looping thread needs to exit driver
- *                        (no lock needed)
- * @transaction_stack:    stack of in-progress transactions for this thread
- *                        (protected by @proc->inner_lock)
- * @todo:                 list of work to do for this thread
- *                        (protected by @proc->inner_lock)
- * @process_todo:         whether work in @todo should be processed
- *                        (protected by @proc->inner_lock)
- * @return_error:         transaction errors reported by this thread
- *                        (only accessed by this thread)
- * @reply_error:          transaction errors reported by target thread
- *                        (protected by @proc->inner_lock)
- * @wait:                 wait queue for thread work
- * @stats:                per-thread statistics
- *                        (atomics, no lock needed)
- * @tmp_ref:              temporary reference to indicate thread is in use
- *                        (atomic since @proc->inner_lock cannot
- *                        always be acquired)
- * @is_dead:              thread is dead and awaiting free
- *                        when outstanding transactions are cleaned up
- *                        (protected by @proc->inner_lock)
- *
- * Bookkeeping structure for binder threads.
- */
-struct binder_thread {
-	struct binder_proc *proc;
-	struct rb_node rb_node;
-	struct list_head waiting_thread_node;
-	int pid;
-	int looper;              /* only modified by this thread */
-	bool looper_need_return; /* can be written by other thread */
-	struct binder_transaction *transaction_stack;
-	struct list_head todo;
-	bool process_todo;
-	struct binder_error return_error;
-	struct binder_error reply_error;
-	wait_queue_head_t wait;
-	struct binder_stats stats;
-	atomic_t tmp_ref;
-	bool is_dead;
-};
-
-/**
- * struct binder_txn_fd_fixup - transaction fd fixup list element
- * @fixup_entry:          list entry
- * @file:                 struct file to be associated with new fd
- * @offset:               offset in buffer data to this fixup
- *
- * List element for fd fixups in a transaction. Since file
- * descriptors need to be allocated in the context of the
- * target process, we pass each fd to be processed in this
- * struct.
- */
-struct binder_txn_fd_fixup {
-	struct list_head fixup_entry;
-	struct file *file;
-	size_t offset;
-};
-
-struct binder_transaction {
-	int debug_id;
-	struct binder_work work;
-	struct binder_thread *from;
-	struct binder_transaction *from_parent;
-	struct binder_proc *to_proc;
-	struct binder_thread *to_thread;
-	struct binder_transaction *to_parent;
-	unsigned need_reply:1;
-	/* unsigned is_dead:1; */	/* not used at the moment */
-
-	struct binder_buffer *buffer;
-	unsigned int	code;
-	unsigned int	flags;
-	long	priority;
-	long	saved_priority;
-	kuid_t	sender_euid;
-	struct list_head fd_fixups;
-	binder_uintptr_t security_ctx;
-	/**
-	 * @lock:  protects @from, @to_proc, and @to_thread
-	 *
-	 * @from, @to_proc, and @to_thread can be set to NULL
-	 * during thread teardown
-	 */
-	spinlock_t lock;
-};
-
-/**
- * struct binder_object - union of flat binder object types
- * @hdr:   generic object header
- * @fbo:   binder object (nodes and refs)
- * @fdo:   file descriptor object
- * @bbo:   binder buffer pointer
- * @fdao:  file descriptor array
- *
- * Used for type-independent object copies
- */
-struct binder_object {
-	union {
-		struct binder_object_header hdr;
-		struct flat_binder_object fbo;
-		struct binder_fd_object fdo;
-		struct binder_buffer_object bbo;
-		struct binder_fd_array_object fdao;
-	};
-};
-
 /**
  * binder_proc_lock() - Acquire outer lock for given binder_proc
  * @proc:         struct binder_proc to acquire
@@ -1892,6 +1486,20 @@ static void binder_free_txn_fixups(struct binder_transaction *t)
 	}
 }
 
+static void binder_txn_latency_free(struct binder_transaction *t)
+{
+	int from_proc, from_thread, to_proc, to_thread;
+
+	spin_lock(&t->lock);
+	from_proc = t->from ? t->from->proc->pid : 0;
+	from_thread = t->from ? t->from->pid : 0;
+	to_proc = t->to_proc ? t->to_proc->pid : 0;
+	to_thread = t->to_thread ? t->to_thread->pid : 0;
+	spin_unlock(&t->lock);
+
+	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
+}
+
 static void binder_free_transaction(struct binder_transaction *t)
 {
 	struct binder_proc *target_proc = t->to_proc;
@@ -1902,6 +1510,8 @@ static void binder_free_transaction(struct binder_transaction *t)
 			t->buffer->transaction = NULL;
 		binder_inner_proc_unlock(target_proc);
 	}
+	if (trace_binder_txn_latency_free_enabled())
+		binder_txn_latency_free(t);
 	/*
 	 * If the transaction has no target_proc, then
 	 * t->buffer->transaction has already been cleared.
@@ -3103,7 +2713,7 @@ static void binder_transaction(struct binder_proc *proc,
 		if (extra_buffers_size < added_size) {
 			/* integer overflow of extra_buffers_size */
 			return_error = BR_FAILED_REPLY;
-			return_error_param = EINVAL;
+			return_error_param = -EINVAL;
 			return_error_line = __LINE__;
 			goto err_bad_extra_size;
 		}
@@ -3146,6 +2756,7 @@ static void binder_transaction(struct binder_proc *proc,
 	t->buffer->debug_id = t->debug_id;
 	t->buffer->transaction = t;
 	t->buffer->target_node = target_node;
+	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
 	trace_binder_transaction_alloc_buf(t->buffer);
 
 	if (binder_alloc_copy_user_to_buffer(
@@ -3479,6 +3090,8 @@ static void binder_transaction(struct binder_proc *proc,
 	kfree(tcomplete);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 err_alloc_tcomplete_failed:
+	if (trace_binder_txn_latency_free_enabled())
+		binder_txn_latency_free(t);
 	kfree(t);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
 err_alloc_t_failed:
@@ -3614,6 +3227,7 @@ static int binder_thread_write(struct binder_proc *proc,
 			ret = -1;
 			if (increment && !target) {
 				struct binder_node *ctx_mgr_node;
+
 				mutex_lock(&context->context_mgr_node_lock);
 				ctx_mgr_node = context->binder_context_mgr_node;
 				if (ctx_mgr_node) {
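
Both call sites above guard binder_txn_latency_free() with trace_binder_txn_latency_free_enabled(), so the t->lock acquisition and the PID snapshot are only paid while the tracepoint is actually on; the *_enabled() helper generated for every tracepoint boils down to a static-key test. A minimal sketch of the same guard pattern, using a hypothetical trace_foo_done event purely for illustration:

    /* Sketch only: struct foo and trace_foo_done are hypothetical. */
    struct foo {
            spinlock_t lock;
            struct task_struct *owner;      /* may be cleared concurrently */
            int seq;
    };

    static void foo_emit_done(struct foo *f)
    {
            int owner_pid, seq;

            spin_lock(&f->lock);            /* snapshot racy fields */
            owner_pid = f->owner ? f->owner->pid : 0;
            seq = f->seq;
            spin_unlock(&f->lock);

            trace_foo_done(f, owner_pid, seq);
    }

    static void foo_complete(struct foo *f)
    {
            /* static-key test, essentially free while tracing is off */
            if (trace_foo_done_enabled())
                    foo_emit_done(f);
    }
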
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2f846b7ae8b823bd99b927528577c43a3cab4967..7caf74ad24053a49327e07eeb902feb527376d90 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -696,6 +696,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 	binder_insert_free_buffer(alloc, buffer);
 }
 
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+				   struct binder_buffer *buffer);
 /**
  * binder_alloc_free_buf() - free a binder buffer
  * @alloc:	binder_alloc for this proc
@@ -706,6 +708,18 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 void binder_alloc_free_buf(struct binder_alloc *alloc,
 			    struct binder_buffer *buffer)
 {
+	/*
+	 * We could eliminate the call to binder_alloc_clear_buf()
+	 * from binder_alloc_deferred_release() by moving this to
+	 * binder_free_buf_locked(). However, that could
+	 * increase contention for the alloc mutex if clear_on_free
+	 * is used frequently for large buffers. The mutex is not
+	 * needed for correctness here.
+	 */
+	if (buffer->clear_on_free) {
+		binder_alloc_clear_buf(alloc, buffer);
+		buffer->clear_on_free = false;
+	}
 	mutex_lock(&alloc->mutex);
 	binder_free_buf_locked(alloc, buffer);
 	mutex_unlock(&alloc->mutex);
@@ -802,6 +816,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		/* Transaction should already have been freed */
 		BUG_ON(buffer->transaction);
 
+		if (buffer->clear_on_free) {
+			binder_alloc_clear_buf(alloc, buffer);
+			buffer->clear_on_free = false;
+		}
 		binder_free_buf_locked(alloc, buffer);
 		buffers++;
 	}
@@ -1135,6 +1153,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
 	return lru_page->page_ptr;
 }
 
+/**
+ * binder_alloc_clear_buf() - zero out buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be cleared
+ *
+ * memset the given buffer to 0
+ */
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+				   struct binder_buffer *buffer)
+{
+	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
+	binder_size_t buffer_offset = 0;
+
+	while (bytes) {
+		unsigned long size;
+		struct page *page;
+		pgoff_t pgoff;
+		void *kptr;
+
+		page = binder_alloc_get_page(alloc, buffer,
+					     buffer_offset, &pgoff);
+		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+		kptr = kmap(page) + pgoff;
+		memset(kptr, 0, size);
+		kunmap(page);
+		bytes -= size;
+		buffer_offset += size;
+	}
+}
+
 /**
  * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
  * @alloc: binder_alloc for this proc
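
The clear_on_free handling above is driven entirely by the sender: binder_transaction() sets buffer->clear_on_free from TF_CLEAR_BUF (see the binder.c hunk earlier), and binder_alloc_clear_buf() then zeroes the buffer page by page before it is recycled. A hedged sketch of a sender opting in, assuming the usual binder ioctl plumbing around it:

    /* Sketch only: opting a transaction into zero-on-free for a sensitive
     * payload. TF_CLEAR_BUF is the UAPI flag tested via
     * (t->flags & TF_CLEAR_BUF) in binder_transaction(); the remaining
     * fields are filled in exactly as for any other binder transaction.
     */
    struct binder_transaction_data tr;

    memset(&tr, 0, sizeof(tr));
    tr.flags |= TF_CLEAR_BUF;   /* driver memsets the buffer when freeing it */
    /* ... set target, code and data pointers, then send BC_TRANSACTION ... */
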
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 55d8b4106766a6104fbcc58c5eb5d3a7d5c15cb6..6e8e001381af4ba2c2f73a2ab7bbca1feff5397f 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -23,6 +23,7 @@ struct binder_transaction;
  * @entry:              entry alloc->buffers
  * @rb_node:            node for allocated_buffers/free_buffers rb trees
  * @free:               %true if buffer is free
+ * @clear_on_free:      %true if buffer must be zeroed after use
  * @allow_user_free:    %true if user is allowed to free buffer
  * @async_transaction:  %true if buffer is in use for an async txn
  * @debug_id:           unique ID for debugging
@@ -41,9 +42,10 @@ struct binder_buffer {
 	struct rb_node rb_node; /* free entry by size or allocated entry */
 				/* by address */
 	unsigned free:1;
+	unsigned clear_on_free:1;
 	unsigned allow_user_free:1;
 	unsigned async_transaction:1;
-	unsigned debug_id:29;
+	unsigned debug_id:28;
 
 	struct binder_transaction *transaction;
 
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 283d3cb9c16e5ffca25e171d7eb18b26e059f61f..6cd79011e35d5d4c4499969ff37ba0a09c24736f 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -12,6 +12,8 @@
 #include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/uidgid.h>
+#include <uapi/linux/android/binderfs.h>
+#include "binder_alloc.h"
 
 struct binder_context {
 	struct binder_node *binder_context_mgr_node;
@@ -141,6 +143,410 @@ struct binder_transaction_log {
 	struct binder_transaction_log_entry entry[32];
 };
 
+enum binder_stat_types {
+	BINDER_STAT_PROC,
+	BINDER_STAT_THREAD,
+	BINDER_STAT_NODE,
+	BINDER_STAT_REF,
+	BINDER_STAT_DEATH,
+	BINDER_STAT_TRANSACTION,
+	BINDER_STAT_TRANSACTION_COMPLETE,
+	BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+	atomic_t obj_created[BINDER_STAT_COUNT];
+	atomic_t obj_deleted[BINDER_STAT_COUNT];
+};
+
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry:             node enqueued on list
+ * @type:              type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
+struct binder_work {
+	struct list_head entry;
+
+	enum binder_work_type {
+		BINDER_WORK_TRANSACTION = 1,
+		BINDER_WORK_TRANSACTION_COMPLETE,
+		BINDER_WORK_RETURN_ERROR,
+		BINDER_WORK_NODE,
+		BINDER_WORK_DEAD_BINDER,
+		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+	} type;
+};
+
+struct binder_error {
+	struct binder_work work;
+	uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id:             unique ID for debugging
+ *                        (invariant after initialized)
+ * @lock:                 lock for node fields
+ * @work:                 worklist element for node work
+ *                        (protected by @proc->inner_lock)
+ * @rb_node:              element for proc->nodes tree
+ *                        (protected by @proc->inner_lock)
+ * @dead_node:            element for binder_dead_nodes list
+ *                        (protected by binder_dead_nodes_lock)
+ * @proc:                 binder_proc that owns this node
+ *                        (invariant after initialized)
+ * @refs:                 list of references on this node
+ *                        (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ *                        initiating a transaction
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @local_weak_refs:      weak user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @local_strong_refs:    strong user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @tmp_refs:             temporary kernel refs
+ *                        (protected by @proc->inner_lock while @proc
+ *                        is valid, and by binder_dead_nodes_lock
+ *                        if @proc is NULL. During inc/dec and node release
+ *                        it is also protected by @lock to provide safety
+ *                        as the node dies and @proc becomes NULL)
+ * @ptr:                  userspace pointer for node
+ *                        (invariant, no lock needed)
+ * @cookie:               userspace cookie for node
+ *                        (invariant, no lock needed)
+ * @has_strong_ref:       userspace notified of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @pending_strong_ref:   userspace has acked notification of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @has_weak_ref:         userspace notified of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @pending_weak_ref:     userspace has acked notification of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ *                        (protected by @lock)
+ * @accept_fds:           file descriptor operations supported for node
+ *                        (invariant after initialized)
+ * @min_priority:         minimum scheduling priority
+ *                        (invariant after initialized)
+ * @txn_security_ctx:     require sender's security context
+ *                        (invariant after initialized)
+ * @async_todo:           list of async work items
+ *                        (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
+struct binder_node {
+	int debug_id;
+	spinlock_t lock;
+	struct binder_work work;
+	union {
+		struct rb_node rb_node;
+		struct hlist_node dead_node;
+	};
+	struct binder_proc *proc;
+	struct hlist_head refs;
+	int internal_strong_refs;
+	int local_weak_refs;
+	int local_strong_refs;
+	int tmp_refs;
+	binder_uintptr_t ptr;
+	binder_uintptr_t cookie;
+	struct {
+		/*
+		 * bitfield elements protected by
+		 * proc inner_lock
+		 */
+		u8 has_strong_ref:1;
+		u8 pending_strong_ref:1;
+		u8 has_weak_ref:1;
+		u8 pending_weak_ref:1;
+	};
+	struct {
+		/*
+		 * invariant after initialization
+		 */
+		u8 accept_fds:1;
+		u8 txn_security_ctx:1;
+		u8 min_priority;
+	};
+	bool has_async_transaction;
+	struct list_head async_todo;
+};
+
+struct binder_ref_death {
+	/**
+	 * @work: worklist element for death notifications
+	 *        (protected by inner_lock of the proc that
+	 *        this ref belongs to)
+	 */
+	struct binder_work work;
+	binder_uintptr_t cookie;
+};
+
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id:        unique ID for the ref
+ * @desc:            unique userspace handle for ref
+ * @strong:          strong ref count (debugging only if not locked)
+ * @weak:            weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+	int debug_id;
+	uint32_t desc;
+	int strong;
+	int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data:        binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry:  list entry for node->refs list in target node
+ *               (protected by @node->lock)
+ * @proc:        binder_proc containing ref
+ * @node:        binder_node of target node. When cleaning up a
+ *               ref for deletion in binder_cleanup_ref, a non-NULL
+ *               @node indicates the node must be freed
+ * @death:       pointer to death notification (ref_death) if requested
+ *               (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
+struct binder_ref {
+	/* Lookups needed: */
+	/*   node + proc => ref (transaction) */
+	/*   desc + proc => ref (transaction, inc/dec ref) */
+	/*   node => refs + procs (proc exit) */
+	struct binder_ref_data data;
+	struct rb_node rb_node_desc;
+	struct rb_node rb_node_node;
+	struct hlist_node node_entry;
+	struct binder_proc *proc;
+	struct binder_node *node;
+	struct binder_ref_death *death;
+};
+
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node:            element for binder_procs list
+ * @threads:              rbtree of binder_threads in this proc
+ *                        (protected by @inner_lock)
+ * @nodes:                rbtree of binder nodes associated with
+ *                        this proc ordered by node->ptr
+ *                        (protected by @inner_lock)
+ * @refs_by_desc:         rbtree of refs ordered by ref->desc
+ *                        (protected by @outer_lock)
+ * @refs_by_node:         rbtree of refs ordered by ref->node
+ *                        (protected by @outer_lock)
+ * @waiting_threads:      threads currently waiting for proc work
+ *                        (protected by @inner_lock)
+ * @pid:                  PID of group_leader of process
+ *                        (invariant after initialized)
+ * @tsk:                  task_struct for group_leader of process
+ *                        (invariant after initialized)
+ * @deferred_work_node:   element for binder_deferred_list
+ *                        (protected by binder_deferred_lock)
+ * @deferred_work:        bitmap of deferred work to perform
+ *                        (protected by binder_deferred_lock)
+ * @is_dead:              process is dead and awaiting free
+ *                        when outstanding transactions are cleaned up
+ *                        (protected by @inner_lock)
+ * @todo:                 list of work for this process
+ *                        (protected by @inner_lock)
+ * @stats:                per-process binder statistics
+ *                        (atomics, no lock needed)
+ * @delivered_death:      list of delivered death notifications
+ *                        (protected by @inner_lock)
+ * @max_threads:          cap on number of binder threads
+ *                        (protected by @inner_lock)
+ * @requested_threads:    number of binder threads requested but not
+ *                        yet started. In current implementation, can
+ *                        only be 0 or 1.
+ *                        (protected by @inner_lock)
+ * @requested_threads_started: number of binder threads started
+ *                        (protected by @inner_lock)
+ * @tmp_ref:              temporary reference to indicate proc is in use
+ *                        (protected by @inner_lock)
+ * @default_priority:     default scheduler priority
+ *                        (invariant after initialized)
+ * @debugfs_entry:        debugfs node
+ * @alloc:                binder allocator bookkeeping
+ * @context:              binder_context for this proc
+ *                        (invariant after initialized)
+ * @inner_lock:           can nest under outer_lock and/or node lock
+ * @outer_lock:           no nesting under inner or node lock
+ *                        Lock order: 1) outer, 2) node, 3) inner
+ * @binderfs_entry:       process-specific binderfs log file
+ *
+ * Bookkeeping structure for binder processes.
+ */
+struct binder_proc {
+	struct hlist_node proc_node;
+	struct rb_root threads;
+	struct rb_root nodes;
+	struct rb_root refs_by_desc;
+	struct rb_root refs_by_node;
+	struct list_head waiting_threads;
+	int pid;
+	struct task_struct *tsk;
+	struct hlist_node deferred_work_node;
+	int deferred_work;
+	bool is_dead;
+
+	struct list_head todo;
+	struct binder_stats stats;
+	struct list_head delivered_death;
+	int max_threads;
+	int requested_threads;
+	int requested_threads_started;
+	int tmp_ref;
+	long default_priority;
+	struct dentry *debugfs_entry;
+	struct binder_alloc alloc;
+	struct binder_context *context;
+	spinlock_t inner_lock;
+	spinlock_t outer_lock;
+	struct dentry *binderfs_entry;
+};
+
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc:                 binder process for this thread
+ *                        (invariant after initialization)
+ * @rb_node:              element for proc->threads rbtree
+ *                        (protected by @proc->inner_lock)
+ * @waiting_thread_node:  element for @proc->waiting_threads list
+ *                        (protected by @proc->inner_lock)
+ * @pid:                  PID for this thread
+ *                        (invariant after initialization)
+ * @looper:               bitmap of looping state
+ *                        (only accessed by this thread)
+ * @looper_need_return:   looping thread needs to exit driver
+ *                        (no lock needed)
+ * @transaction_stack:    stack of in-progress transactions for this thread
+ *                        (protected by @proc->inner_lock)
+ * @todo:                 list of work to do for this thread
+ *                        (protected by @proc->inner_lock)
+ * @process_todo:         whether work in @todo should be processed
+ *                        (protected by @proc->inner_lock)
+ * @return_error:         transaction errors reported by this thread
+ *                        (only accessed by this thread)
+ * @reply_error:          transaction errors reported by target thread
+ *                        (protected by @proc->inner_lock)
+ * @wait:                 wait queue for thread work
+ * @stats:                per-thread statistics
+ *                        (atomics, no lock needed)
+ * @tmp_ref:              temporary reference to indicate thread is in use
+ *                        (atomic since @proc->inner_lock cannot
+ *                        always be acquired)
+ * @is_dead:              thread is dead and awaiting free
+ *                        when outstanding transactions are cleaned up
+ *                        (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder threads.
+ */
+struct binder_thread {
+	struct binder_proc *proc;
+	struct rb_node rb_node;
+	struct list_head waiting_thread_node;
+	int pid;
+	int looper;              /* only modified by this thread */
+	bool looper_need_return; /* can be written by other thread */
+	struct binder_transaction *transaction_stack;
+	struct list_head todo;
+	bool process_todo;
+	struct binder_error return_error;
+	struct binder_error reply_error;
+	wait_queue_head_t wait;
+	struct binder_stats stats;
+	atomic_t tmp_ref;
+	bool is_dead;
+};
+
+/**
+ * struct binder_txn_fd_fixup - transaction fd fixup list element
+ * @fixup_entry:          list entry
+ * @file:                 struct file to be associated with new fd
+ * @offset:               offset in buffer data to this fixup
+ *
+ * List element for fd fixups in a transaction. Since file
+ * descriptors need to be allocated in the context of the
+ * target process, we pass each fd to be processed in this
+ * struct.
+ */
+struct binder_txn_fd_fixup {
+	struct list_head fixup_entry;
+	struct file *file;
+	size_t offset;
+};
+
+struct binder_transaction {
+	int debug_id;
+	struct binder_work work;
+	struct binder_thread *from;
+	struct binder_transaction *from_parent;
+	struct binder_proc *to_proc;
+	struct binder_thread *to_thread;
+	struct binder_transaction *to_parent;
+	unsigned need_reply:1;
+	/* unsigned is_dead:1; */       /* not used at the moment */
+
+	struct binder_buffer *buffer;
+	unsigned int    code;
+	unsigned int    flags;
+	long    priority;
+	long    saved_priority;
+	kuid_t  sender_euid;
+	struct list_head fd_fixups;
+	binder_uintptr_t security_ctx;
+	/**
+	 * @lock:  protects @from, @to_proc, and @to_thread
+	 *
+	 * @from, @to_proc, and @to_thread can be set to NULL
+	 * during thread teardown
+	 */
+	spinlock_t lock;
+};
+
+/**
+ * struct binder_object - union of flat binder object types
+ * @hdr:   generic object header
+ * @fbo:   binder object (nodes and refs)
+ * @fdo:   file descriptor object
+ * @bbo:   binder buffer pointer
+ * @fdao:  file descriptor array
+ *
+ * Used for type-independent object copies
+ */
+struct binder_object {
+	union {
+		struct binder_object_header hdr;
+		struct flat_binder_object fbo;
+		struct binder_fd_object fdo;
+		struct binder_buffer_object bbo;
+		struct binder_fd_array_object fdao;
+	};
+};
+
 extern struct binder_transaction_log binder_transaction_log;
 extern struct binder_transaction_log binder_transaction_log_failed;
 #endif /* _LINUX_BINDER_INTERNAL_H */
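
The binder_proc kerneldoc moved above also restates the lock hierarchy: outer lock first, then the node lock, then the inner lock. A short sketch of a call site honouring that order, using the locking helpers defined in binder.c (shown only to illustrate the ordering rule, not an actual code path in this patch):

    binder_proc_lock(proc);                 /* 1) @outer_lock */
    binder_node_lock(node);                 /* 2) @node->lock */
    binder_inner_proc_lock(proc);           /* 3) @inner_lock */

    /* ... touch state protected by the node and inner locks ... */

    binder_inner_proc_unlock(proc);
    binder_node_unlock(node);
    binder_proc_unlock(proc);
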
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 6731c3cd814550ae9364e9fc071706baf04df5d4..8eeccdc64724b12b18e63a3ebfff8edf0c733ecb 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -95,6 +95,35 @@ TRACE_EVENT(binder_wait_for_work,
 		  __entry->thread_todo)
 );
 
+TRACE_EVENT(binder_txn_latency_free,
+	TP_PROTO(struct binder_transaction *t,
+		 int from_proc, int from_thread,
+		 int to_proc, int to_thread),
+	TP_ARGS(t, from_proc, from_thread, to_proc, to_thread),
+	TP_STRUCT__entry(
+		__field(int, debug_id)
+		__field(int, from_proc)
+		__field(int, from_thread)
+		__field(int, to_proc)
+		__field(int, to_thread)
+		__field(unsigned int, code)
+		__field(unsigned int, flags)
+	),
+	TP_fast_assign(
+		__entry->debug_id = t->debug_id;
+		__entry->from_proc = from_proc;
+		__entry->from_thread = from_thread;
+		__entry->to_proc = to_proc;
+		__entry->to_thread = to_thread;
+		__entry->code = t->code;
+		__entry->flags = t->flags;
+	),
+	TP_printk("transaction=%d from %d:%d to %d:%d code=0x%x flags=0x%x",
+		  __entry->debug_id, __entry->from_proc, __entry->from_thread,
+		  __entry->to_proc, __entry->to_thread, __entry->code,
+		  __entry->flags)
+);
+
 TRACE_EVENT(binder_transaction,
 	TP_PROTO(bool reply, struct binder_transaction *t,
 		 struct binder_node *target_node),
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 91dc015963a8b368e3580e1a0b58793fef753e80..68488a7ad0d6a8c9ee20df4abed6d95fff9cab0b 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -670,9 +670,7 @@ int dprc_setup(struct fsl_mc_device *mc_dev)
 		goto error_cleanup_open;
 	}
 
-	if (major_ver < DPRC_MIN_VER_MAJOR ||
-	    (major_ver == DPRC_MIN_VER_MAJOR &&
-	     minor_ver < DPRC_MIN_VER_MINOR)) {
+	if (major_ver < DPRC_MIN_VER_MAJOR) {
 		dev_err(&mc_dev->dev,
 			"ERROR: DPRC version %d.%d not supported\n",
 			major_ver, minor_ver);
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index 57b097caf2550c9d36cb61a53fd8cefa6edb810b..27b0a01bad9b0327c95f1333e1df03ceb91f26b9 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -576,6 +576,8 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
 	rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
 	region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
 	region_desc->size = le32_to_cpu(rsp_params->size);
+	region_desc->type = rsp_params->type;
+	region_desc->flags = le32_to_cpu(rsp_params->flags);
 	if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 3))
 		region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
 	else
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index e71a6f52ea0cfdee7a35cca2c440476e1cd025e2..2d7c764bb7dcfc2a62c7fcc54dd5d30d18dc384d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -292,8 +292,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
 		goto error;
 
 	mc_adev = resource->data;
-	if (!mc_adev)
+	if (!mc_adev) {
+		error = -EINVAL;
 		goto error;
+	}
 
 	mc_adev->consumer_link = device_link_add(&mc_dev->dev,
 						 &mc_adev->dev,
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 76a6ee505d33de98c455d3cf457e67c8e70071c2..b8e6acdf932e631319111b07d9abc4f4ee51c930 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -60,6 +60,9 @@ struct fsl_mc_addr_translation_range {
 	phys_addr_t start_phys_addr;
 };
 
+#define FSL_MC_GCR1	0x0
+#define GCR1_P1_STOP	BIT(31)
+
 #define FSL_MC_FAPR	0x28
 #define MC_FAPR_PL	BIT(18)
 #define MC_FAPR_BMT	BIT(17)
@@ -967,24 +970,42 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, mc);
 
 	plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (plat_res)
+	if (plat_res) {
 		mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
+		if (IS_ERR(mc->fsl_mc_regs))
+			return PTR_ERR(mc->fsl_mc_regs);
+	}
 
-	if (mc->fsl_mc_regs && IS_ENABLED(CONFIG_ACPI) &&
-	    !dev_of_node(&pdev->dev)) {
-		mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
+	if (mc->fsl_mc_regs) {
 		/*
-		 * HW ORs the PL and BMT bit, places the result in bit 15 of
-		 * the StreamID and ORs in the ICID. Calculate it accordingly.
+		 * Some bootloaders pause the MC firmware before booting the
+		 * kernel, so that the MC does not cause faults as soon as the
+		 * SMMU probes, since there is no SMMU configuration in place
+		 * for the MC at that point.
+		 * By now the MC's SMMU setup should be complete, so make sure
+		 * it is resumed.
 		 */
-		mc_stream_id = (mc_stream_id & 0xffff) |
+		writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) & (~GCR1_P1_STOP),
+		       mc->fsl_mc_regs + FSL_MC_GCR1);
+
+		if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) {
+			mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
+			/*
+			 * HW ORs the PL and BMT bit, places the result in bit
+			 * 14 of the StreamID and ORs in the ICID. Calculate it
+			 * accordingly.
+			 */
+			mc_stream_id = (mc_stream_id & 0xffff) |
 				((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ?
-					0x4000 : 0);
-		error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT,
-					      &mc_stream_id);
-		if (error)
-			dev_warn(&pdev->dev, "failed to configure dma: %d.\n",
-				 error);
+					BIT(14) : 0);
+			error = acpi_dma_configure_id(&pdev->dev,
+						      DEV_DMA_COHERENT,
+						      &mc_stream_id);
+			if (error)
+				dev_warn(&pdev->dev,
+					 "failed to configure dma: %d.\n",
+					 error);
+		}
 	}
 
 	/*
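
The probe hunk above does two things with the newly mapped control registers: it clears GCR1_P1_STOP so that an MC firmware paused by the bootloader is resumed now that its SMMU configuration is in place, and it folds the PL/BMT indication into bit 14 of the StreamID. The nested readl()/writel() resume write, spelled out as an open-coded sketch for clarity:

    /* Sketch of the GCR1 read-modify-write performed above. */
    u32 gcr1 = readl(mc->fsl_mc_regs + FSL_MC_GCR1);

    gcr1 &= ~GCR1_P1_STOP;      /* clear the stop bit so the MC firmware runs */
    writel(gcr1, mc->fsl_mc_regs + FSL_MC_GCR1);
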
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index 85ca5fdee5810d32b9662374669ee9583fc5ce9d..c932387641fa1cd767a3d257b48e25ebbf30d599 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -211,12 +211,13 @@ struct dprc_cmd_get_obj_region {
 
 struct dprc_rsp_get_obj_region {
 	/* response word 0 */
-	__le64 pad;
+	__le64 pad0;
 	/* response word 1 */
 	__le64 base_offset;
 	/* response word 2 */
 	__le32 size;
-	__le32 pad2;
+	u8 type;
+	u8 pad2[3];
 	/* response word 3 */
 	__le32 flags;
 	__le32 pad3;
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
index e841c1097fb4d8b41ae62930640bf0d8bd12e608..da5cd0c9fc620ab595e742c422f1a22a2a84c7b9 100644
--- a/drivers/bus/mhi/Kconfig
+++ b/drivers/bus/mhi/Kconfig
@@ -20,3 +20,12 @@ config MHI_BUS_DEBUG
 	  Enable debugfs support for use with the MHI transport. Allows
 	  reading and/or modifying some values within the MHI controller
 	  for debug and test purposes.
+
+config MHI_BUS_PCI_GENERIC
+	tristate "MHI PCI controller driver"
+	depends on MHI_BUS
+	depends on PCI
+	help
+	  This driver provides an MHI PCI controller driver for devices such
+	  as Qualcomm SDX55 based PCIe modems.
+
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 19e6443b72df4645ade41a232a4f3a40b6bf7b9d..0a2d778d6fb42f31c80b5faedd40fea83531fc4f 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -1,2 +1,6 @@
 # core layer
 obj-y += core/
+
+obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o
+mhi_pci_generic-y += pci_generic.o
+
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index 24422f5c3d808d8dfa6aada5196eba96c4e09398..c2546bf229fb362d49d666596715a1dab4291403 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -92,6 +92,9 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
 	 * image download completion.
 	 */
 	ee = mhi_get_exec_env(mhi_cntrl);
+	if (ee == MHI_EE_MAX)
+		goto error_exit_rddm;
+
 	if (ee != MHI_EE_RDDM) {
 		dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n");
 		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
@@ -139,15 +142,17 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
 	ee = mhi_get_exec_env(mhi_cntrl);
 	ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
 
-	dev_err(dev, "Did not complete RDDM transfer\n");
-	dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee));
 	dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status);
 
+error_exit_rddm:
+	dev_err(dev, "RDDM transfer failed. Current EE: %s\n",
+		TO_MHI_EXEC_STR(ee));
+
 	return -EIO;
 }
 
 /* Download RDDM image from device */
-int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
+int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
 {
 	void __iomem *base = mhi_cntrl->bhie;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -169,9 +174,9 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
 
 	return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
 }
-EXPORT_SYMBOL_GPL(mhi_download_rddm_img);
+EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
 
-static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
+static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
 			    const struct mhi_buf *mhi_buf)
 {
 	void __iomem *base = mhi_cntrl->bhie;
@@ -187,7 +192,7 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
 	}
 
 	sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
-	dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n",
+	dev_dbg(dev, "Starting image download via BHIe. Sequence ID: %u\n",
 		sequence_id);
 	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
 		      upper_32_bits(mhi_buf->dma_addr));
@@ -218,7 +223,7 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
 	return (!ret) ? -ETIMEDOUT : 0;
 }
 
-static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
+static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
 			   dma_addr_t dma_addr,
 			   size_t size)
 {
@@ -245,7 +250,7 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
 	}
 
 	session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
-	dev_dbg(dev, "Starting SBL download via BHI. Session ID:%u\n",
+	dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
 		session_id);
 	mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
 	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
@@ -365,7 +370,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
 	size_t remainder = firmware->size;
 	size_t to_cpy;
 	const u8 *buf = firmware->data;
-	int i = 0;
 	struct mhi_buf *mhi_buf = img_info->mhi_buf;
 	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
 
@@ -377,7 +381,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
 
 		buf += to_cpy;
 		remainder -= to_cpy;
-		i++;
 		bhi_vec++;
 		mhi_buf++;
 	}
@@ -425,13 +428,13 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 						     !mhi_cntrl->seg_len))) {
 		dev_err(dev,
 			"No firmware image defined or !sbl_size || !seg_len\n");
-		return;
+		goto error_fw_load;
 	}
 
 	ret = request_firmware(&firmware, fw_name, dev);
 	if (ret) {
 		dev_err(dev, "Error loading firmware: %d\n", ret);
-		return;
+		goto error_fw_load;
 	}
 
 	size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
@@ -443,25 +446,25 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 	buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
 	if (!buf) {
 		release_firmware(firmware);
-		return;
+		goto error_fw_load;
 	}
 
-	/* Download SBL image */
+	/* Download image using BHI */
 	memcpy(buf, firmware->data, size);
-	ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
+	ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
 	mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
 
-	if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
-		release_firmware(firmware);
-
 	/* Error or in EDL mode, we're done */
 	if (ret) {
-		dev_err(dev, "MHI did not load SBL, ret:%d\n", ret);
-		return;
+		dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
+		release_firmware(firmware);
+		goto error_fw_load;
 	}
 
-	if (mhi_cntrl->ee == MHI_EE_EDL)
+	if (mhi_cntrl->ee == MHI_EE_EDL) {
+		release_firmware(firmware);
 		return;
+	}
 
 	write_lock_irq(&mhi_cntrl->pm_lock);
 	mhi_cntrl->dev_state = MHI_STATE_RESET;
@@ -474,13 +477,17 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 	if (mhi_cntrl->fbc_download) {
 		ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
 					   firmware->size);
-		if (ret)
-			goto error_alloc_fw_table;
+		if (ret) {
+			release_firmware(firmware);
+			goto error_fw_load;
+		}
 
 		/* Load the firmware into BHIE vec table */
 		mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
 	}
 
+	release_firmware(firmware);
+
 fw_load_ee_pthru:
 	/* Transitioning into MHI RESET->READY state */
 	ret = mhi_ready_state_transition(mhi_cntrl);
@@ -490,7 +497,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 
 	if (ret) {
 		dev_err(dev, "MHI did not enter READY state\n");
-		goto error_read;
+		goto error_ready_state;
 	}
 
 	/* Wait for the SBL event */
@@ -501,25 +508,27 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 
 	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
 		dev_err(dev, "MHI did not enter SBL\n");
-		goto error_read;
+		goto error_ready_state;
 	}
 
 	/* Start full firmware image download */
 	image_info = mhi_cntrl->fbc_image;
-	ret = mhi_fw_load_amss(mhi_cntrl,
+	ret = mhi_fw_load_bhie(mhi_cntrl,
 			       /* Vector table is the last entry */
 			       &image_info->mhi_buf[image_info->entries - 1]);
-	if (ret)
-		dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
-
-	release_firmware(firmware);
+	if (ret) {
+		dev_err(dev, "MHI did not load image over BHIe, ret: %d\n",
+			ret);
+		goto error_fw_load;
+	}
 
 	return;
 
-error_read:
+error_ready_state:
 	mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
 	mhi_cntrl->fbc_image = NULL;
 
-error_alloc_fw_table:
-	release_firmware(firmware);
+error_fw_load:
+	mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+	wake_up_all(&mhi_cntrl->state_event);
 }
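
Every firmware-load failure above now funnels through error_fw_load, which marks the controller with MHI_PM_FW_DL_ERR and wakes state_event so that sleepers notice the failure instead of timing out. A hedged sketch of the waiter side this wake_up_all() unblocks, modelled on the wait_event_timeout() calls used elsewhere in this file:

    /* Sketch: a caller waiting for the device to reach mission mode bails
     * out as soon as the PM state flips to an error state such as
     * MHI_PM_FW_DL_ERR.
     */
    ret = wait_event_timeout(mhi_cntrl->state_event,
                             mhi_cntrl->ee == MHI_EE_AMSS ||
                             MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                             msecs_to_jiffies(mhi_cntrl->timeout_ms));
    if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
            return -EIO;
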
diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/core/debugfs.c
index 3a48801e01f4a8a3c03a572ddfdc50b3f1b92b6c..7d43138ce66d8408a1f09c91a2a76ec0f8693a15 100644
--- a/drivers/bus/mhi/core/debugfs.c
+++ b/drivers/bus/mhi/core/debugfs.c
@@ -159,7 +159,9 @@ static int mhi_debugfs_devices_show(struct seq_file *m, void *d)
 		return -ENODEV;
 	}
 
-	device_for_each_child(mhi_cntrl->cntrl_dev, m, mhi_device_info_show);
+	/* Show controller and client(s) info */
+	mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m);
+	device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show);
 
 	return 0;
 }
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 381fdea2eb9fe4b623a7eeb9458732ad521b3f8c..f0697f433c2f1bb8d32ad71ca24148b5a6d3cbfa 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -8,6 +8,7 @@
 #include <linux/device.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
+#include <linux/idr.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/mhi.h>
@@ -18,6 +19,8 @@
 #include <linux/wait.h>
 #include "internal.h"
 
+static DEFINE_IDA(mhi_controller_ida);
+
 const char * const mhi_ee_str[MHI_EE_MAX] = {
 	[MHI_EE_PBL] = "PBL",
 	[MHI_EE_SBL] = "SBL",
@@ -610,7 +613,7 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
 {
 	struct mhi_event *mhi_event;
 	const struct mhi_event_config *event_cfg;
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	struct device *dev = mhi_cntrl->cntrl_dev;
 	int i, num;
 
 	num = config->num_events;
@@ -692,7 +695,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
 			const struct mhi_controller_config *config)
 {
 	const struct mhi_channel_config *ch_cfg;
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	struct device *dev = mhi_cntrl->cntrl_dev;
 	int i;
 	u32 chan;
 
@@ -857,7 +860,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
 
 	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
 	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
-	    !mhi_cntrl->write_reg)
+	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
 		return -EINVAL;
 
 	ret = parse_config(mhi_cntrl, config);
@@ -868,7 +871,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
 				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
 	if (!mhi_cntrl->mhi_cmd) {
 		ret = -ENOMEM;
-		goto error_alloc_cmd;
+		goto err_free_event;
 	}
 
 	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
@@ -879,6 +882,14 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
 	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
 	init_waitqueue_head(&mhi_cntrl->state_event);
 
+	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
+				("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
+	if (!mhi_cntrl->hiprio_wq) {
+		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
+		ret = -ENOMEM;
+		goto err_free_cmd;
+	}
+
 	mhi_cmd = mhi_cntrl->mhi_cmd;
 	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
 		spin_lock_init(&mhi_cmd->lock);
@@ -922,7 +933,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
 	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
 			   SOC_HW_VERSION_OFFS, &soc_info);
 	if (ret)
-		goto error_alloc_dev;
+		goto err_destroy_wq;
 
 	mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
 					SOC_HW_VERSION_FAM_NUM_SHFT;
@@ -933,25 +944,31 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
 	mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
 					SOC_HW_VERSION_MINOR_VER_SHFT;
 
+	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
+	if (mhi_cntrl->index < 0) {
+		ret = mhi_cntrl->index;
+		goto err_destroy_wq;
+	}
+
 	/* Register controller with MHI bus */
 	mhi_dev = mhi_alloc_device(mhi_cntrl);
 	if (IS_ERR(mhi_dev)) {
 		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
 		ret = PTR_ERR(mhi_dev);
-		goto error_alloc_dev;
+		goto err_ida_free;
 	}
 
 	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
 	mhi_dev->mhi_cntrl = mhi_cntrl;
-	dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));
-	mhi_dev->name = dev_name(mhi_cntrl->cntrl_dev);
+	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
+	mhi_dev->name = dev_name(&mhi_dev->dev);
 
 	/* Init wakeup source */
 	device_init_wakeup(&mhi_dev->dev, true);
 
 	ret = device_add(&mhi_dev->dev);
 	if (ret)
-		goto error_add_dev;
+		goto err_release_dev;
 
 	mhi_cntrl->mhi_dev = mhi_dev;
 
@@ -959,15 +976,17 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
 
 	return 0;
 
-error_add_dev:
+err_release_dev:
 	put_device(&mhi_dev->dev);
-
-error_alloc_dev:
+err_ida_free:
+	ida_free(&mhi_controller_ida, mhi_cntrl->index);
+err_destroy_wq:
+	destroy_workqueue(mhi_cntrl->hiprio_wq);
+err_free_cmd:
 	kfree(mhi_cntrl->mhi_cmd);
-
-error_alloc_cmd:
-	vfree(mhi_cntrl->mhi_chan);
+err_free_event:
 	kfree(mhi_cntrl->mhi_event);
+	vfree(mhi_cntrl->mhi_chan);
 
 	return ret;
 }
@@ -981,6 +1000,7 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
 
 	mhi_destroy_debugfs(mhi_cntrl);
 
+	destroy_workqueue(mhi_cntrl->hiprio_wq);
 	kfree(mhi_cntrl->mhi_cmd);
 	kfree(mhi_cntrl->mhi_event);
 
@@ -995,6 +1015,8 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
 
 	device_del(&mhi_dev->dev);
 	put_device(&mhi_dev->dev);
+
+	ida_free(&mhi_controller_ida, mhi_cntrl->index);
 }
 EXPORT_SYMBOL_GPL(mhi_unregister_controller);
 
@@ -1121,7 +1143,15 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
 	device_initialize(dev);
 	dev->bus = &mhi_bus_type;
 	dev->release = mhi_release_device;
-	dev->parent = mhi_cntrl->cntrl_dev;
+
+	if (mhi_cntrl->mhi_dev) {
+		/* for MHI client devices, parent is the MHI controller device */
+		dev->parent = &mhi_cntrl->mhi_dev->dev;
+	} else {
+		/* for MHI controller device, parent is the bus device (e.g. pci device) */
+		dev->parent = mhi_cntrl->cntrl_dev;
+	}
+
 	mhi_dev->mhi_cntrl = mhi_cntrl;
 	mhi_dev->dev_wake = 0;
 
@@ -1267,10 +1297,8 @@ static int mhi_driver_remove(struct device *dev)
 		mutex_unlock(&mhi_chan->mutex);
 	}
 
-	read_lock_bh(&mhi_cntrl->pm_lock);
 	while (mhi_dev->dev_wake)
 		mhi_device_put(mhi_dev);
-	read_unlock_bh(&mhi_cntrl->pm_lock);
 
 	return 0;
 }
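
With the init.c changes above, each controller takes an index from mhi_controller_ida and registers its device as mhi%d, and client devices are reparented under that controller device rather than the underlying PCI/platform device. A generic sketch of the ida_alloc()/ida_free() pairing behind the naming, error unwind included (example_ida is a stand-in name):

    static DEFINE_IDA(example_ida);         /* stand-in for mhi_controller_ida */

    int idx = ida_alloc(&example_ida, GFP_KERNEL);

    if (idx < 0)
            return idx;                     /* e.g. -ENOMEM */
    dev_set_name(&mhi_dev->dev, "mhi%d", idx);

    /* ... and on teardown or a later error path: */
    ida_free(&example_ida, idx);
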
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 33c23203c5315c30f734a0d8b98aea46db310961..6f80ec30c0cdcc342ad0faa128bda13e13b96473 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -153,8 +153,8 @@ extern struct bus_type mhi_bus_type;
 #define BHI_SERIALNU (0x40)
 #define BHI_SBLANTIROLLVER (0x44)
 #define BHI_NUMSEG (0x48)
-#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
-#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
+#define BHI_MSMHWID(n) (0x4C + (0x4 * (n)))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n)))
 #define BHI_RSVD5 (0xC4)
 #define BHI_STATUS_MASK (0xC0000000)
 #define BHI_STATUS_SHIFT (30)
@@ -608,12 +608,10 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(
 					struct mhi_controller *mhi_cntrl,
 					enum mhi_pm_state state);
 const char *to_mhi_pm_state_str(enum mhi_pm_state state);
-enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
 int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
 			       enum dev_st_transition state);
 void mhi_pm_st_worker(struct work_struct *work);
 void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
-void mhi_fw_load_worker(struct work_struct *work);
 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
 int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
 void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index ba9e721d61b71d67c6fbf10843beeff90b104d99..d34d7e90e38d9c5f9bf58ead5f945b23e273d648 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -123,6 +123,7 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
 
 	return (ret) ? MHI_EE_MAX : exec;
 }
+EXPORT_SYMBOL_GPL(mhi_get_exec_env);
 
 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
 {
@@ -132,6 +133,7 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
 				     MHISTATUS_MHISTATE_SHIFT, &state);
 	return ret ? MHI_STATE_MAX : state;
 }
+EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
 
 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
 			 struct mhi_buf_info *buf_info)
@@ -329,7 +331,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
 		/* Channel name is same for both UL and DL */
 		mhi_dev->name = mhi_chan->name;
 		dev_set_name(&mhi_dev->dev, "%s_%s",
-			     dev_name(mhi_cntrl->cntrl_dev),
+			     dev_name(&mhi_cntrl->mhi_dev->dev),
 			     mhi_dev->name);
 
 		/* Init wakeup source if available */
@@ -399,6 +401,10 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
 
 	 /* If device supports RDDM don't bother processing SYS error */
 	if (mhi_cntrl->rddm_image) {
+		/* host may be performing a device power down already */
+		if (!mhi_is_active(mhi_cntrl))
+			goto exit_intvec;
+
 		if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
 			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
 			wake_up_all(&mhi_cntrl->state_event);
@@ -735,11 +741,6 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 			{
 				enum mhi_pm_state new_state;
 
-				/* skip SYS_ERROR handling if RDDM supported */
-				if (mhi_cntrl->ee == MHI_EE_RDDM ||
-				    mhi_cntrl->rddm_image)
-					break;
-
 				dev_dbg(dev, "System error detected\n");
 				write_lock_irq(&mhi_cntrl->pm_lock);
 				new_state = mhi_tryset_pm_state(mhi_cntrl,
@@ -1235,7 +1236,8 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
 	/* no more processing events for this channel */
 	mutex_lock(&mhi_chan->mutex);
 	write_lock_irq(&mhi_chan->lock);
-	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
+	    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
 		write_unlock_irq(&mhi_chan->lock);
 		mutex_unlock(&mhi_chan->mutex);
 		return;
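
The main.c hunks above export mhi_get_exec_env() and mhi_get_mhi_state() (the former's prototype also leaves internal.h, presumably moving to a public header not shown here), so out-of-core controller drivers such as the new pci_generic one can query device state directly. A hedged sketch of such a caller:

    /* Sketch: a controller driver logging the current execution environment
     * and MHI state through the newly exported helpers.
     */
    enum mhi_ee_type ee = mhi_get_exec_env(mhi_cntrl);
    enum mhi_state state = mhi_get_mhi_state(mhi_cntrl);

    dev_dbg(mhi_cntrl->cntrl_dev, "exec env: %d, MHI state: %d\n", ee, state);
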
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index 3de7b1639ec6a5c27b7c1bc8f91ed7c43668adb2..681960c72d2a8629560b7d7c65c48a22cd0eec80 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -37,9 +37,10 @@
  *     M0 -> FW_DL_ERR
  *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
  * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
- * L2: SHUTDOWN_PROCESS -> DISABLE
+ * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
+ *     SHUTDOWN_PROCESS -> DISABLE
  * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
- *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ *     LD_ERR_FATAL_DETECT -> DISABLE
  */
 static struct mhi_pm_transitions const dev_state_transitions[] = {
 	/* L0 States */
@@ -72,7 +73,7 @@ static struct mhi_pm_transitions const dev_state_transitions[] = {
 	{
 		MHI_PM_M3,
 		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
-		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+		MHI_PM_LD_ERR_FATAL_DETECT
 	},
 	{
 		MHI_PM_M3_EXIT,
@@ -103,7 +104,7 @@ static struct mhi_pm_transitions const dev_state_transitions[] = {
 	/* L3 States */
 	{
 		MHI_PM_LD_ERR_FATAL_DETECT,
-		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
 	},
 };
 
@@ -383,10 +384,14 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 	write_lock_irq(&mhi_cntrl->pm_lock);
 	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
 		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
-	write_unlock_irq(&mhi_cntrl->pm_lock);
 
-	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		wake_up_all(&mhi_cntrl->state_event);
 		return -EIO;
+	}
+	write_unlock_irq(&mhi_cntrl->pm_lock);
 
 	wake_up_all(&mhi_cntrl->state_event);
 
@@ -440,9 +445,119 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
 	return ret;
 }
 
-/* Handle SYS_ERR and Shutdown transitions */
-static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
-				      enum mhi_pm_state transition_state)
+/* Handle shutdown transitions */
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum mhi_pm_state cur_state;
+	struct mhi_event *mhi_event;
+	struct mhi_cmd_ctxt *cmd_ctxt;
+	struct mhi_cmd *mhi_cmd;
+	struct mhi_event_ctxt *er_ctxt;
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	int ret, i;
+
+	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	/* Trigger MHI RESET so that the device will not access host memory */
+	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+		u32 in_reset = -1;
+		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
+
+		dev_dbg(dev, "Triggering MHI Reset in device\n");
+		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+		/* Wait for the reset bit to be cleared by the device */
+		ret = wait_event_timeout(mhi_cntrl->state_event,
+					 mhi_read_reg_field(mhi_cntrl,
+							    mhi_cntrl->regs,
+							    MHICTRL,
+							    MHICTRL_RESET_MASK,
+							    MHICTRL_RESET_SHIFT,
+							    &in_reset) ||
+					!in_reset, timeout);
+		if (!ret || in_reset)
+			dev_err(dev, "Device failed to exit MHI Reset state\n");
+
+		/*
+		 * Device will clear BHI_INTVEC as a part of RESET processing,
+		 * hence re-program it
+		 */
+		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+	}
+
+	dev_dbg(dev,
+		 "Waiting for all pending event ring processing to complete\n");
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (mhi_event->offload_ev)
+			continue;
+		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
+		tasklet_kill(&mhi_event->task);
+	}
+
+	/* Release lock and wait for all pending threads to complete */
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+	dev_dbg(dev, "Waiting for all pending threads to complete\n");
+	wake_up_all(&mhi_cntrl->state_event);
+
+	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
+	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
+	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
+
+	/* Reset the ev rings and cmd rings */
+	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
+	mhi_cmd = mhi_cntrl->mhi_cmd;
+	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
+	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+		struct mhi_ring *ring = &mhi_cmd->ring;
+
+		ring->rp = ring->base;
+		ring->wp = ring->base;
+		cmd_ctxt->rp = cmd_ctxt->rbase;
+		cmd_ctxt->wp = cmd_ctxt->rbase;
+	}
+
+	mhi_event = mhi_cntrl->mhi_event;
+	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+		     mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		/* Skip offload events */
+		if (mhi_event->offload_ev)
+			continue;
+
+		ring->rp = ring->base;
+		ring->wp = ring->base;
+		er_ctxt->rp = er_ctxt->rbase;
+		er_ctxt->wp = er_ctxt->rbase;
+	}
+
+	/* Move to disable state */
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	if (unlikely(cur_state != MHI_PM_DISABLE))
+		dev_err(dev, "Error moving from PM state: %s to: %s\n",
+			to_mhi_pm_state_str(cur_state),
+			to_mhi_pm_state_str(MHI_PM_DISABLE));
+
+	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+}
+
+/* Handle system error transitions */
+static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
 {
 	enum mhi_pm_state cur_state, prev_state;
 	struct mhi_event *mhi_event;
@@ -454,33 +569,30 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 
 	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
 		to_mhi_pm_state_str(mhi_cntrl->pm_state),
-		to_mhi_pm_state_str(transition_state));
+		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
 
 	/* We must notify MHI control driver so it can clean up first */
-	if (transition_state == MHI_PM_SYS_ERR_PROCESS)
-		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
+	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
 
 	mutex_lock(&mhi_cntrl->pm_mutex);
 	write_lock_irq(&mhi_cntrl->pm_lock);
 	prev_state = mhi_cntrl->pm_state;
-	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
-	if (cur_state == transition_state) {
-		mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
-		mhi_cntrl->dev_state = MHI_STATE_RESET;
-	}
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
 	write_unlock_irq(&mhi_cntrl->pm_lock);
 
+	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
+		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
+			to_mhi_pm_state_str(cur_state),
+			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
+		goto exit_sys_error_transition;
+	}
+
+	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+	mhi_cntrl->dev_state = MHI_STATE_RESET;
+
 	/* Wake up threads waiting for state transition */
 	wake_up_all(&mhi_cntrl->state_event);
 
-	if (cur_state != transition_state) {
-		dev_err(dev, "Failed to transition to state: %s from: %s\n",
-			to_mhi_pm_state_str(transition_state),
-			to_mhi_pm_state_str(cur_state));
-		mutex_unlock(&mhi_cntrl->pm_mutex);
-		return;
-	}
-
 	/* Trigger MHI RESET so that the device will not access host memory */
 	if (MHI_REG_ACCESS_VALID(prev_state)) {
 		u32 in_reset = -1;
@@ -498,10 +610,9 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 							    MHICTRL_RESET_SHIFT,
 							    &in_reset) ||
 					!in_reset, timeout);
-		if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
+		if (!ret || in_reset) {
 			dev_err(dev, "Device failed to exit MHI Reset state\n");
-			mutex_unlock(&mhi_cntrl->pm_mutex);
-			return;
+			goto exit_sys_error_transition;
 		}
 
 		/*
@@ -512,7 +623,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 	}
 
 	dev_dbg(dev,
-		 "Waiting for all pending event ring processing to complete\n");
+		"Waiting for all pending event ring processing to complete\n");
 	mhi_event = mhi_cntrl->mhi_event;
 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
 		if (mhi_event->offload_ev)
@@ -526,7 +637,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 	wake_up_all(&mhi_cntrl->state_event);
 
 	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
-	device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
+	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
 
 	mutex_lock(&mhi_cntrl->pm_mutex);
 
@@ -549,7 +660,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 	mhi_event = mhi_cntrl->mhi_event;
 	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
-		     mhi_event++) {
+	     mhi_event++) {
 		struct mhi_ring *ring = &mhi_event->ring;
 
 		/* Skip offload events */
@@ -562,19 +673,9 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
 		er_ctxt->wp = er_ctxt->rbase;
 	}
 
-	if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
-		mhi_ready_state_transition(mhi_cntrl);
-	} else {
-		/* Move to disable state */
-		write_lock_irq(&mhi_cntrl->pm_lock);
-		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
-		write_unlock_irq(&mhi_cntrl->pm_lock);
-		if (unlikely(cur_state != MHI_PM_DISABLE))
-			dev_err(dev, "Error moving from PM state: %s to: %s\n",
-				to_mhi_pm_state_str(cur_state),
-				to_mhi_pm_state_str(MHI_PM_DISABLE));
-	}
+	mhi_ready_state_transition(mhi_cntrl);
 
+exit_sys_error_transition:
 	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
 		to_mhi_pm_state_str(mhi_cntrl->pm_state),
 		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
@@ -597,7 +698,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
 	list_add_tail(&item->node, &mhi_cntrl->transition_list);
 	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
 
-	schedule_work(&mhi_cntrl->st_worker);
+	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);
 
 	return 0;
 }
@@ -662,12 +763,10 @@ void mhi_pm_st_worker(struct work_struct *work)
 			mhi_ready_state_transition(mhi_cntrl);
 			break;
 		case DEV_ST_TRANSITION_SYS_ERR:
-			mhi_pm_disable_transition
-				(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+			mhi_pm_sys_error_transition(mhi_cntrl);
 			break;
 		case DEV_ST_TRANSITION_DISABLE:
-			mhi_pm_disable_transition
-				(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+			mhi_pm_disable_transition(mhi_cntrl);
 			break;
 		default:
 			break;
@@ -827,6 +926,10 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
 
 	/* Wake up the device */
 	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		return -EIO;
+	}
 	mhi_cntrl->wake_get(mhi_cntrl, true);
 	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
 		mhi_trigger_resume(mhi_cntrl);
@@ -918,9 +1021,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 
 	dev_info(dev, "Requested to power ON\n");
 
-	if (mhi_cntrl->nr_irqs < 1)
-		return -EINVAL;
-
 	/* Supply default wake routines if not provided by controller driver */
 	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
 	    !mhi_cntrl->wake_toggle) {
@@ -1033,29 +1133,39 @@ EXPORT_SYMBOL_GPL(mhi_async_power_up);
 
 void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
 {
-	enum mhi_pm_state cur_state;
+	enum mhi_pm_state cur_state, transition_state;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 
 	/* If it's not a graceful shutdown, force MHI to linkdown state */
-	if (!graceful) {
-		mutex_lock(&mhi_cntrl->pm_mutex);
-		write_lock_irq(&mhi_cntrl->pm_lock);
-		cur_state = mhi_tryset_pm_state(mhi_cntrl,
-						MHI_PM_LD_ERR_FATAL_DETECT);
-		write_unlock_irq(&mhi_cntrl->pm_lock);
-		mutex_unlock(&mhi_cntrl->pm_mutex);
-		if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
-			dev_dbg(dev, "Failed to move to state: %s from: %s\n",
-				to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
-				to_mhi_pm_state_str(mhi_cntrl->pm_state));
+	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
+			   MHI_PM_LD_ERR_FATAL_DETECT;
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
+	if (cur_state != transition_state) {
+		dev_err(dev, "Failed to move to state: %s from: %s\n",
+			to_mhi_pm_state_str(transition_state),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		/* Force link down or error fatal detected state */
+		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
 	}
 
+	/* mark device inactive to avoid any further host processing */
+	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+	mhi_cntrl->dev_state = MHI_STATE_RESET;
+
+	wake_up_all(&mhi_cntrl->state_event);
+
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
 	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
 
 	/* Wait for shutdown to complete */
 	flush_work(&mhi_cntrl->st_worker);
 
-	mhi_deinit_free_irq(mhi_cntrl);
+	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
 
 	if (!mhi_cntrl->pre_init) {
 		/* Free all allocated resources */
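For context on the reworked shutdown path above: mhi_power_down() now maps the graceful flag onto the target PM state before queueing DEV_ST_TRANSITION_DISABLE, and the disable work runs on the high-priority workqueue. A minimal teardown sketch for a controller driver that previously called mhi_sync_power_up(); the mhi_example_remove() name is hypothetical, the MHI calls themselves appear in this series:

#include <linux/mhi.h>

static void mhi_example_remove(struct mhi_controller *mhi_cntrl)
{
	/*
	 * graceful = true selects MHI_PM_SHUTDOWN_PROCESS; passing false
	 * forces MHI_PM_LD_ERR_FATAL_DETECT (e.g. after a link error).
	 */
	mhi_power_down(mhi_cntrl, true);

	/* Frees channel/event contexts allocated at prepare time */
	mhi_unprepare_after_power_down(mhi_cntrl);

	mhi_unregister_controller(mhi_cntrl);
}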
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
new file mode 100644
index 0000000000000000000000000000000000000000..f5bee76ea0618ef02277f736f9989b188afd4d29
--- /dev/null
+++ b/drivers/bus/mhi/pci_generic.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MHI PCI driver - MHI over PCI controller driver
+ *
+ * This module is a generic driver for registering MHI-over-PCI devices,
+ * such as PCIe QCOM modems.
+ *
+ * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define MHI_PCI_DEFAULT_BAR_NUM 0
+
+/**
+ * struct mhi_pci_dev_info - MHI PCI device specific information
+ * @config: MHI controller configuration
+ * @name: name of the PCI module
+ * @fw: firmware path (if any)
+ * @edl: emergency download mode firmware path (if any)
+ * @bar_num: PCI base address register to use for MHI MMIO register space
+ * @dma_data_width: DMA transfer word size (32 or 64 bits)
+ */
+struct mhi_pci_dev_info {
+	const struct mhi_controller_config *config;
+	const char *name;
+	const char *fw;
+	const char *edl;
+	unsigned int bar_num;
+	unsigned int dma_data_width;
+};
+
+#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
+	{						\
+		.num = ch_num,				\
+		.name = ch_name,			\
+		.num_elements = el_count,		\
+		.event_ring = ev_ring,			\
+		.dir = DMA_TO_DEVICE,			\
+		.ee_mask = BIT(MHI_EE_AMSS),		\
+		.pollcfg = 0,				\
+		.doorbell = MHI_DB_BRST_DISABLE,	\
+		.lpm_notify = false,			\
+		.offload_channel = false,		\
+		.doorbell_mode_switch = false,		\
+	}						\
+
+#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
+	{						\
+		.num = ch_num,				\
+		.name = ch_name,			\
+		.num_elements = el_count,		\
+		.event_ring = ev_ring,			\
+		.dir = DMA_FROM_DEVICE,			\
+		.ee_mask = BIT(MHI_EE_AMSS),		\
+		.pollcfg = 0,				\
+		.doorbell = MHI_DB_BRST_DISABLE,	\
+		.lpm_notify = false,			\
+		.offload_channel = false,		\
+		.doorbell_mode_switch = false,		\
+	}
+
+#define MHI_EVENT_CONFIG_CTRL(ev_ring)		\
+	{					\
+		.num_elements = 64,		\
+		.irq_moderation_ms = 0,		\
+		.irq = (ev_ring) + 1,		\
+		.priority = 1,			\
+		.mode = MHI_DB_BRST_DISABLE,	\
+		.data_type = MHI_ER_CTRL,	\
+		.hardware_event = false,	\
+		.client_managed = false,	\
+		.offload_channel = false,	\
+	}
+
+#define MHI_EVENT_CONFIG_DATA(ev_ring)		\
+	{					\
+		.num_elements = 128,		\
+		.irq_moderation_ms = 5,		\
+		.irq = (ev_ring) + 1,		\
+		.priority = 1,			\
+		.mode = MHI_DB_BRST_DISABLE,	\
+		.data_type = MHI_ER_DATA,	\
+		.hardware_event = false,	\
+		.client_managed = false,	\
+		.offload_channel = false,	\
+	}
+
+#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, ch_num) \
+	{					\
+		.num_elements = 128,		\
+		.irq_moderation_ms = 5,		\
+		.irq = (ev_ring) + 1,		\
+		.priority = 1,			\
+		.mode = MHI_DB_BRST_DISABLE,	\
+		.data_type = MHI_ER_DATA,	\
+		.hardware_event = true,		\
+		.client_managed = false,	\
+		.offload_channel = false,	\
+		.channel = ch_num,		\
+	}
+
+static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
+	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
+	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
+	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
+	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
+	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
+	MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
+	MHI_CHANNEL_CONFIG_UL(100, "IP_HW0", 128, 1),
+	MHI_CHANNEL_CONFIG_DL(101, "IP_HW0", 128, 2),
+};
+
+static const struct mhi_event_config modem_qcom_v1_mhi_events[] = {
+	/* first ring is control+data ring */
+	MHI_EVENT_CONFIG_CTRL(0),
+	/* Hardware channels request dedicated hardware event rings */
+	MHI_EVENT_CONFIG_HW_DATA(1, 100),
+	MHI_EVENT_CONFIG_HW_DATA(2, 101)
+};
+
+static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
+	.max_channels = 128,
+	.timeout_ms = 5000,
+	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
+	.ch_cfg = modem_qcom_v1_mhi_channels,
+	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
+	.event_cfg = modem_qcom_v1_mhi_events,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
+	.name = "qcom-sdx55m",
+	.fw = "qcom/sdx55m/sbl1.mbn",
+	.edl = "qcom/sdx55m/edl.mbn",
+	.config = &modem_qcom_v1_mhiv_config,
+	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+	.dma_data_width = 32
+};
+
+static const struct pci_device_id mhi_pci_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
+		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
+	{  }
+};
+MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
+
+static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
+			    void __iomem *addr, u32 *out)
+{
+	*out = readl(addr);
+	return 0;
+}
+
+static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
+			      void __iomem *addr, u32 val)
+{
+	writel(val, addr);
+}
+
+static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
+			      enum mhi_callback cb)
+{
+	/* Nothing to do for now */
+}
+
+static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
+			 unsigned int bar_num, u64 dma_mask)
+{
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+	int err;
+
+	err = pci_assign_resource(pdev, bar_num);
+	if (err)
+		return err;
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
+		return err;
+	}
+
+	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
+	if (err) {
+		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
+		return err;
+	}
+	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
+
+	err = pci_set_dma_mask(pdev, dma_mask);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
+		return err;
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, dma_mask);
+	if (err) {
+		dev_err(&pdev->dev, "set consistent dma mask failed\n");
+		return err;
+	}
+
+	pci_set_master(pdev);
+
+	return 0;
+}
+
+static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
+			    const struct mhi_controller_config *mhi_cntrl_config)
+{
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+	int nr_vectors, i;
+	int *irq;
+
+	/*
+	 * Alloc one MSI vector for BHI + one vector per event ring, ideally...
+	 * No explicit pci_free_irq_vectors required, done by pcim_release.
+	 */
+	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
+
+	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+	if (nr_vectors < 0) {
+		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
+			nr_vectors);
+		return nr_vectors;
+	}
+
+	if (nr_vectors < mhi_cntrl->nr_irqs) {
+		dev_warn(&pdev->dev, "Not enough MSI vectors (%d/%d), use shared MSI\n",
+			 nr_vectors, mhi_cntrl_config->num_events);
+	}
+
+	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
+	if (!irq)
+		return -ENOMEM;
+
+	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
+		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
+
+		irq[i] = pci_irq_vector(pdev, vector);
+	}
+
+	mhi_cntrl->irq = irq;
+
+	return 0;
+}
+
+static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+	/* no PM for now */
+	return 0;
+}
+
+static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+	/* no PM for now */
+}
+
+static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
+	const struct mhi_controller_config *mhi_cntrl_config;
+	struct mhi_controller *mhi_cntrl;
+	int err;
+
+	dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name);
+
+	mhi_cntrl = mhi_alloc_controller();
+	if (!mhi_cntrl)
+		return -ENOMEM;
+
+	mhi_cntrl_config = info->config;
+	mhi_cntrl->cntrl_dev = &pdev->dev;
+	mhi_cntrl->iova_start = 0;
+	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+	mhi_cntrl->fw_image = info->fw;
+	mhi_cntrl->edl_image = info->edl;
+
+	mhi_cntrl->read_reg = mhi_pci_read_reg;
+	mhi_cntrl->write_reg = mhi_pci_write_reg;
+	mhi_cntrl->status_cb = mhi_pci_status_cb;
+	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
+	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
+
+	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+	if (err)
+		goto err_release;
+
+	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
+	if (err)
+		goto err_release;
+
+	pci_set_drvdata(pdev, mhi_cntrl);
+
+	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
+	if (err)
+		goto err_release;
+
+	/* MHI bus does not power up the controller by default */
+	err = mhi_prepare_for_power_up(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+		goto err_unregister;
+	}
+
+	err = mhi_sync_power_up(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to power up MHI controller\n");
+		goto err_unprepare;
+	}
+
+	return 0;
+
+err_unprepare:
+	mhi_unprepare_after_power_down(mhi_cntrl);
+err_unregister:
+	mhi_unregister_controller(mhi_cntrl);
+err_release:
+	mhi_free_controller(mhi_cntrl);
+
+	return err;
+}
+
+static void mhi_pci_remove(struct pci_dev *pdev)
+{
+	struct mhi_controller *mhi_cntrl = pci_get_drvdata(pdev);
+
+	mhi_power_down(mhi_cntrl, true);
+	mhi_unprepare_after_power_down(mhi_cntrl);
+	mhi_unregister_controller(mhi_cntrl);
+	mhi_free_controller(mhi_cntrl);
+}
+
+static struct pci_driver mhi_pci_driver = {
+	.name		= "mhi-pci-generic",
+	.id_table	= mhi_pci_id_table,
+	.probe		= mhi_pci_probe,
+	.remove		= mhi_pci_remove
+};
+module_pci_driver(mhi_pci_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 0ec73917d8ddb8f6e6be9e3dc7ea53ec3a83fc55..862c2fd933c7beace6e01d6e573d5f510ee01cb4 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -622,7 +622,6 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
 			break;
 		case LPSETIRQ:
 			return -EINVAL;
-			break;
 		case LPGETIRQ:
 			if (copy_to_user(argp, &LP_IRQ(minor),
 					sizeof(int)))
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index f6a147427029a83bd966f22cc8cd7d72ec85c770..ca5141ed5ef349db6e5409e73fe56fa4b73b05b6 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -60,7 +60,7 @@ static DEFINE_MUTEX(misc_mtx);
 /*
  * Assigned numbers, used for dynamic minors
  */
-#define DYNAMIC_MINORS 64 /* like dynamic majors */
+#define DYNAMIC_MINORS 128 /* like dynamic majors */
 static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
 
 #ifdef CONFIG_PROC_FS
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index e43c876a92232d9fc0e8a18269c5c4fd7342a3ff..11272d605ecd501503c15f6144db467deb9be14e 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -403,7 +403,6 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
 	
 		default:
 			return -ENOTTY;
-			break;
 	} /* switch */
 
 	PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index aac507bff135c06dfd6bce81752b7f717aa601ba..af58ebca2bf619b09ee9c29ed7aeb1950f53646f 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -186,4 +186,12 @@ config EXTCON_USBC_CROS_EC
 	  Say Y here to enable USB Type C cable detection extcon support when
 	  using Chrome OS EC based USB Type-C ports.
 
+config EXTCON_USBC_TUSB320
+	tristate "TI TUSB320 USB-C extcon support"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	  Say Y here to enable USB Type-C cable detection extcon support
+	  using a TI TUSB320.
+
 endif
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 52096fd8a216c7ca08ba43dac24d7ac85f2c6e75..fe10a1b7d18b5c2d5c10d0c9c03860208059348e 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_EXTCON_RT8973A)	+= extcon-rt8973a.o
 obj-$(CONFIG_EXTCON_SM5502)	+= extcon-sm5502.o
 obj-$(CONFIG_EXTCON_USB_GPIO)	+= extcon-usb-gpio.o
 obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o
+obj-$(CONFIG_EXTCON_USBC_TUSB320) += extcon-usbc-tusb320.o
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index 8405512f5199d4ba337146e5d721c7d871a4a5a2..08bdedbcdb0db06cf8f0c8b4ad3ecdf541e12715 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -364,6 +364,7 @@ MODULE_DEVICE_TABLE(i2c, fsa9480_id);
 static const struct of_device_id fsa9480_of_match[] = {
 	{ .compatible = "fcs,fsa9480", },
 	{ .compatible = "fcs,fsa880", },
+	{ .compatible = "ti,tsu6111", },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, fsa9480_of_match);
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 4a410fd2ea9aec2adaf6a969b06552537122795d..92af97e00828fafdd1148a1e3e3cdedf787ad753 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1277,4 +1277,4 @@ module_platform_driver(max77693_muic_driver);
 MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver");
 MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:extcon-max77693");
+MODULE_ALIAS("platform:max77693-muic");
diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
new file mode 100644
index 0000000000000000000000000000000000000000..805af73b41521c71bf281fa1bd8a9b76d1d2fbf1
--- /dev/null
+++ b/drivers/extcon/extcon-usbc-tusb320.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/extcon/extcon-usbc-tusb320.c - TUSB320 extcon driver
+ *
+ * Copyright (C) 2020 National Instruments Corporation
+ * Author: Michael Auchter <michael.auchter@ni.com>
+ */
+
+#include <linux/extcon-provider.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define TUSB320_REG9				0x9
+#define TUSB320_REG9_ATTACHED_STATE_SHIFT	6
+#define TUSB320_REG9_ATTACHED_STATE_MASK	0x3
+#define TUSB320_REG9_CABLE_DIRECTION		BIT(5)
+#define TUSB320_REG9_INTERRUPT_STATUS		BIT(4)
+#define TUSB320_ATTACHED_STATE_NONE		0x0
+#define TUSB320_ATTACHED_STATE_DFP		0x1
+#define TUSB320_ATTACHED_STATE_UFP		0x2
+#define TUSB320_ATTACHED_STATE_ACC		0x3
+
+struct tusb320_priv {
+	struct device *dev;
+	struct regmap *regmap;
+	struct extcon_dev *edev;
+};
+
+static const char * const tusb_attached_states[] = {
+	[TUSB320_ATTACHED_STATE_NONE] = "not attached",
+	[TUSB320_ATTACHED_STATE_DFP]  = "downstream facing port",
+	[TUSB320_ATTACHED_STATE_UFP]  = "upstream facing port",
+	[TUSB320_ATTACHED_STATE_ACC]  = "accessory",
+};
+
+static const unsigned int tusb320_extcon_cable[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_NONE,
+};
+
+static int tusb320_check_signature(struct tusb320_priv *priv)
+{
+	static const char sig[] = { '\0', 'T', 'U', 'S', 'B', '3', '2', '0' };
+	unsigned val;
+	int i, ret;
+
+	for (i = 0; i < sizeof(sig); i++) {
+		ret = regmap_read(priv->regmap, sizeof(sig) - 1 - i, &val);
+		if (ret < 0)
+			return ret;
+		if (val != sig[i]) {
+			dev_err(priv->dev, "signature mismatch!\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
+{
+	struct tusb320_priv *priv = dev_id;
+	int state, polarity;
+	unsigned reg;
+
+	if (regmap_read(priv->regmap, TUSB320_REG9, &reg)) {
+		dev_err(priv->dev, "error during i2c read!\n");
+		return IRQ_NONE;
+	}
+
+	if (!(reg & TUSB320_REG9_INTERRUPT_STATUS))
+		return IRQ_NONE;
+
+	state = (reg >> TUSB320_REG9_ATTACHED_STATE_SHIFT) &
+		TUSB320_REG9_ATTACHED_STATE_MASK;
+	polarity = !!(reg & TUSB320_REG9_CABLE_DIRECTION);
+
+	dev_dbg(priv->dev, "attached state: %s, polarity: %d\n",
+		tusb_attached_states[state], polarity);
+
+	extcon_set_state(priv->edev, EXTCON_USB,
+			 state == TUSB320_ATTACHED_STATE_UFP);
+	extcon_set_state(priv->edev, EXTCON_USB_HOST,
+			 state == TUSB320_ATTACHED_STATE_DFP);
+	extcon_set_property(priv->edev, EXTCON_USB,
+			    EXTCON_PROP_USB_TYPEC_POLARITY,
+			    (union extcon_property_value)polarity);
+	extcon_set_property(priv->edev, EXTCON_USB_HOST,
+			    EXTCON_PROP_USB_TYPEC_POLARITY,
+			    (union extcon_property_value)polarity);
+	extcon_sync(priv->edev, EXTCON_USB);
+	extcon_sync(priv->edev, EXTCON_USB_HOST);
+
+	regmap_write(priv->regmap, TUSB320_REG9, reg);
+
+	return IRQ_HANDLED;
+}
+
+static const struct regmap_config tusb320_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+static int tusb320_extcon_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct tusb320_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dev = &client->dev;
+
+	priv->regmap = devm_regmap_init_i2c(client, &tusb320_regmap_config);
+	if (IS_ERR(priv->regmap))
+		return PTR_ERR(priv->regmap);
+
+	ret = tusb320_check_signature(priv);
+	if (ret)
+		return ret;
+
+	priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable);
+	if (IS_ERR(priv->edev)) {
+		dev_err(priv->dev, "failed to allocate extcon device\n");
+		return PTR_ERR(priv->edev);
+	}
+
+	ret = devm_extcon_dev_register(priv->dev, priv->edev);
+	if (ret < 0) {
+		dev_err(priv->dev, "failed to register extcon device\n");
+		return ret;
+	}
+
+	extcon_set_property_capability(priv->edev, EXTCON_USB,
+				       EXTCON_PROP_USB_TYPEC_POLARITY);
+	extcon_set_property_capability(priv->edev, EXTCON_USB_HOST,
+				       EXTCON_PROP_USB_TYPEC_POLARITY);
+
+	/* update initial state */
+	tusb320_irq_handler(client->irq, priv);
+
+	ret = devm_request_threaded_irq(priv->dev, client->irq, NULL,
+					tusb320_irq_handler,
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					client->name, priv);
+
+	return ret;
+}
+
+static const struct of_device_id tusb320_extcon_dt_match[] = {
+	{ .compatible = "ti,tusb320", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match);
+
+static struct i2c_driver tusb320_extcon_driver = {
+	.probe		= tusb320_extcon_probe,
+	.driver		= {
+		.name	= "extcon-tusb320",
+		.of_match_table = tusb320_extcon_dt_match,
+	},
+};
+
+static int __init tusb320_init(void)
+{
+	return i2c_add_driver(&tusb320_extcon_driver);
+}
+subsys_initcall(tusb320_init);
+
+static void __exit tusb320_exit(void)
+{
+	i2c_del_driver(&tusb320_extcon_driver);
+}
+module_exit(tusb320_exit);
+
+MODULE_AUTHOR("Michael Auchter <michael.auchter@ni.com>");
+MODULE_DESCRIPTION("TI TUSB320 extcon driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 3315e3c215864c24b10b2b447bfac1db15d08fa6..3f14dffb9669693ede1298a45398ad81545e93f5 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -51,7 +51,7 @@ config ARM_SCPI_PROTOCOL
 	  provides a mechanism for inter-processor communication between SCP
 	  and AP.
 
-	  SCP controls most of the power managament on the Application
+	  SCP controls most of the power management on the Application
 	  Processors. It offers control and management of: the core/cluster
 	  power states, various power domain DVFS including the core/cluster,
 	  certain system clocks configuration, thermal sensors and many
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 7d9367b220107b95f9c18ec6d7cdf9f81d155beb..3d77f26c1e8c938796f4b15a3b68459ad100c6b0 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -17,7 +17,6 @@
 #include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/ioctl.h>
@@ -85,7 +84,6 @@
 struct gsmi_buf {
 	u8 *start;			/* start of buffer */
 	size_t length;			/* length of buffer */
-	dma_addr_t handle;		/* dma allocation handle */
 	u32 address;			/* physical address of buffer */
 };
 
@@ -97,7 +95,7 @@ static struct gsmi_device {
 	spinlock_t lock;		/* serialize access to SMIs */
 	u16 smi_cmd;			/* SMI command port */
 	int handshake_type;		/* firmware handler interlock type */
-	struct dma_pool *dma_pool;	/* DMA buffer pool */
+	struct kmem_cache *mem_pool;	/* kmem cache for gsmi_buf allocations */
 } gsmi_dev;
 
 /* Packed structures for communicating with the firmware */
@@ -157,8 +155,7 @@ static struct gsmi_buf *gsmi_buf_alloc(void)
 	}
 
 	/* allocate buffer in 32bit address space */
-	smibuf->start = dma_pool_alloc(gsmi_dev.dma_pool, GFP_KERNEL,
-				       &smibuf->handle);
+	smibuf->start = kmem_cache_alloc(gsmi_dev.mem_pool, GFP_KERNEL);
 	if (!smibuf->start) {
 		printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
 		kfree(smibuf);
@@ -176,8 +173,7 @@ static void gsmi_buf_free(struct gsmi_buf *smibuf)
 {
 	if (smibuf) {
 		if (smibuf->start)
-			dma_pool_free(gsmi_dev.dma_pool, smibuf->start,
-				      smibuf->handle);
+			kmem_cache_free(gsmi_dev.mem_pool, smibuf->start);
 		kfree(smibuf);
 	}
 }
@@ -914,9 +910,20 @@ static __init int gsmi_init(void)
 	spin_lock_init(&gsmi_dev.lock);
 
 	ret = -ENOMEM;
-	gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
-					     GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
-	if (!gsmi_dev.dma_pool)
+
+	/*
+	 * The SLAB cache is created using SLAB_CACHE_DMA32 to ensure that
+	 * allocations for gsmi_buf come from the DMA32 memory zone. These
+	 * buffers have nothing to do with DMA. They are required for
+	 * communication with firmware executing in SMI mode, which can only
+	 * access the bottom 4GiB of physical memory. Since the DMA32 memory
+	 * zone guarantees allocation under the 4GiB boundary, this driver
+	 * creates a SLAB cache with the SLAB_CACHE_DMA32 flag.
+	 */
+	gsmi_dev.mem_pool = kmem_cache_create("gsmi", GSMI_BUF_SIZE,
+					      GSMI_BUF_ALIGN,
+					      SLAB_CACHE_DMA32, NULL);
+	if (!gsmi_dev.mem_pool)
 		goto out_err;
 
 	/*
@@ -1032,7 +1039,7 @@ static __init int gsmi_init(void)
 	gsmi_buf_free(gsmi_dev.param_buf);
 	gsmi_buf_free(gsmi_dev.data_buf);
 	gsmi_buf_free(gsmi_dev.name_buf);
-	dma_pool_destroy(gsmi_dev.dma_pool);
+	kmem_cache_destroy(gsmi_dev.mem_pool);
 	platform_device_unregister(gsmi_dev.pdev);
 	pr_info("gsmi: failed to load: %d\n", ret);
 #ifdef CONFIG_PM
@@ -1057,7 +1064,7 @@ static void __exit gsmi_exit(void)
 	gsmi_buf_free(gsmi_dev.param_buf);
 	gsmi_buf_free(gsmi_dev.data_buf);
 	gsmi_buf_free(gsmi_dev.name_buf);
-	dma_pool_destroy(gsmi_dev.dma_pool);
+	kmem_cache_destroy(gsmi_dev.mem_pool);
 	platform_device_unregister(gsmi_dev.pdev);
 #ifdef CONFIG_PM
 	platform_driver_unregister(&gsmi_driver_info);
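The kmem_cache conversion above relies only on zone placement of the allocations, not on any DMA mapping. A small standalone sketch of the idea; the "smi_example" cache name and the 2048-byte size/alignment are placeholders rather than the driver's actual GSMI_BUF_SIZE/GSMI_BUF_ALIGN values:

#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/slab.h>

static struct kmem_cache *smi_cache;

static int smi_cache_example_init(void)
{
	void *buf;

	/* Objects from this cache are placed in ZONE_DMA32 (below 4 GiB) */
	smi_cache = kmem_cache_create("smi_example", 2048, 2048,
				      SLAB_CACHE_DMA32, NULL);
	if (!smi_cache)
		return -ENOMEM;

	buf = kmem_cache_alloc(smi_cache, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* The SMI handler only sees a 32-bit physical address */
	WARN_ON(virt_to_phys(buf) >= SZ_4G);

	kmem_cache_free(smi_cache, buf);
	return 0;
}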
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 2371d08bdd17a34f88efa61af116fbbfb340590c..30259dc9b805a41dcc31692f09519ba213bca253 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Defines interfaces for interacting wtih the Raspberry Pi firmware's
+ * Defines interfaces for interacting with the Raspberry Pi firmware's
  * property channel.
  *
  * Copyright © 2015 Broadcom
diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c
index 99b9cc0e70f08fbf462310f6eb822712cc807757..b008a6b8d2d3a82f3b78e68ddeff94da9a410d9a 100644
--- a/drivers/fpga/altera-pr-ip-core-plat.c
+++ b/drivers/fpga/altera-pr-ip-core-plat.c
@@ -28,15 +28,6 @@ static int alt_pr_platform_probe(struct platform_device *pdev)
 	return alt_pr_register(dev, reg_base);
 }
 
-static int alt_pr_platform_remove(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-
-	alt_pr_unregister(dev);
-
-	return 0;
-}
-
 static const struct of_device_id alt_pr_of_match[] = {
 	{ .compatible = "altr,a10-pr-ip", },
 	{},
@@ -46,7 +37,6 @@ MODULE_DEVICE_TABLE(of, alt_pr_of_match);
 
 static struct platform_driver alt_pr_platform_driver = {
 	.probe = alt_pr_platform_probe,
-	.remove = alt_pr_platform_remove,
 	.driver = {
 		.name	= "alt_a10_pr_ip",
 		.of_match_table = alt_pr_of_match,
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index 2cf25fd5e897957ac4582cfdd8f832669909d52a..5b130c4d988299c1869116a2811a9f56a7fad14a 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -195,9 +195,7 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
 	if (!mgr)
 		return -ENOMEM;
 
-	dev_set_drvdata(dev, mgr);
-
-	return fpga_mgr_register(mgr);
+	return devm_fpga_mgr_register(dev, mgr);
 }
 EXPORT_SYMBOL_GPL(alt_pr_register);
 
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 0221dee8dd4c6b3e90f159ec024f09e4ee847390..23bfd4d1ad0f742b44c399894147ed825345e23a 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -307,18 +307,7 @@ static int altera_ps_probe(struct spi_device *spi)
 	if (!mgr)
 		return -ENOMEM;
 
-	spi_set_drvdata(spi, mgr);
-
-	return fpga_mgr_register(mgr);
-}
-
-static int altera_ps_remove(struct spi_device *spi)
-{
-	struct fpga_manager *mgr = spi_get_drvdata(spi);
-
-	fpga_mgr_unregister(mgr);
-
-	return 0;
+	return devm_fpga_mgr_register(&spi->dev, mgr);
 }
 
 static const struct spi_device_id altera_ps_spi_ids[] = {
@@ -337,7 +326,6 @@ static struct spi_driver altera_ps_driver = {
 	},
 	.id_table = altera_ps_spi_ids,
 	.probe = altera_ps_probe,
-	.remove = altera_ps_remove,
 };
 
 module_spi_driver(altera_ps_driver)
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
index b3f7eee3c93f629f155974d1ff0a165191f64339..d5861d13b306904ccb66b8e9614f8be107240d58 100644
--- a/drivers/fpga/dfl-fme-mgr.c
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -314,18 +314,8 @@ static int fme_mgr_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	mgr->compat_id = compat_id;
-	platform_set_drvdata(pdev, mgr);
 
-	return fpga_mgr_register(mgr);
-}
-
-static int fme_mgr_remove(struct platform_device *pdev)
-{
-	struct fpga_manager *mgr = platform_get_drvdata(pdev);
-
-	fpga_mgr_unregister(mgr);
-
-	return 0;
+	return devm_fpga_mgr_register(dev, mgr);
 }
 
 static struct platform_driver fme_mgr_driver = {
@@ -333,7 +323,6 @@ static struct platform_driver fme_mgr_driver = {
 		.name    = DFL_FPGA_FME_MGR,
 	},
 	.probe   = fme_mgr_probe,
-	.remove  = fme_mgr_remove,
 };
 
 module_platform_driver(fme_mgr_driver);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index f38bab01432e2eae6a1bc7d7bd541837c488bcb0..b85bc47c91a9a127d5da6a162e44ac5cff673141 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -21,6 +21,10 @@
 static DEFINE_IDA(fpga_mgr_ida);
 static struct class *fpga_mgr_class;
 
+struct fpga_mgr_devres {
+	struct fpga_manager *mgr;
+};
+
 /**
  * fpga_image_info_alloc - Allocate a FPGA image info struct
  * @dev: owning device
@@ -625,9 +629,9 @@ EXPORT_SYMBOL_GPL(fpga_mgr_free);
 
 static void devm_fpga_mgr_release(struct device *dev, void *res)
 {
-	struct fpga_manager *mgr = *(struct fpga_manager **)res;
+	struct fpga_mgr_devres *dr = res;
 
-	fpga_mgr_free(mgr);
+	fpga_mgr_free(dr->mgr);
 }
 
 /**
@@ -651,21 +655,21 @@ struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
 					  const struct fpga_manager_ops *mops,
 					  void *priv)
 {
-	struct fpga_manager **ptr, *mgr;
+	struct fpga_mgr_devres *dr;
 
-	ptr = devres_alloc(devm_fpga_mgr_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
+	dr = devres_alloc(devm_fpga_mgr_release, sizeof(*dr), GFP_KERNEL);
+	if (!dr)
 		return NULL;
 
-	mgr = fpga_mgr_create(dev, name, mops, priv);
-	if (!mgr) {
-		devres_free(ptr);
-	} else {
-		*ptr = mgr;
-		devres_add(dev, ptr);
+	dr->mgr = fpga_mgr_create(dev, name, mops, priv);
+	if (!dr->mgr) {
+		devres_free(dr);
+		return NULL;
 	}
 
-	return mgr;
+	devres_add(dev, dr);
+
+	return dr->mgr;
 }
 EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);
 
@@ -722,6 +726,59 @@ void fpga_mgr_unregister(struct fpga_manager *mgr)
 }
 EXPORT_SYMBOL_GPL(fpga_mgr_unregister);
 
+static int fpga_mgr_devres_match(struct device *dev, void *res,
+				 void *match_data)
+{
+	struct fpga_mgr_devres *dr = res;
+
+	return match_data == dr->mgr;
+}
+
+static void devm_fpga_mgr_unregister(struct device *dev, void *res)
+{
+	struct fpga_mgr_devres *dr = res;
+
+	fpga_mgr_unregister(dr->mgr);
+}
+
+/**
+ * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
+ * @dev: managing device for this FPGA manager
+ * @mgr: fpga manager struct
+ *
+ * This is the devres variant of fpga_mgr_register() for which the unregister
+ * function will be called automatically when the managing device is detached.
+ */
+int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr)
+{
+	struct fpga_mgr_devres *dr;
+	int ret;
+
+	/*
+	 * Make sure that the struct fpga_manager * that is passed in is
+	 * managed itself.
+	 */
+	if (WARN_ON(!devres_find(dev, devm_fpga_mgr_release,
+				 fpga_mgr_devres_match, mgr)))
+		return -EINVAL;
+
+	dr = devres_alloc(devm_fpga_mgr_unregister, sizeof(*dr), GFP_KERNEL);
+	if (!dr)
+		return -ENOMEM;
+
+	ret = fpga_mgr_register(mgr);
+	if (ret) {
+		devres_free(dr);
+		return ret;
+	}
+
+	dr->mgr = mgr;
+	devres_add(dev, dr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_mgr_register);
+
 static void fpga_mgr_dev_release(struct device *dev)
 {
 }
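The driver conversions below all reduce to the same pattern: create the manager with devm_fpga_mgr_create() (so the devres_find() check in devm_fpga_mgr_register() passes) and then register it, dropping the .remove callback and the drvdata bookkeeping entirely. A condensed probe sketch, with example_fpga_ops standing in for a real ops table:

#include <linux/fpga/fpga-mgr.h>
#include <linux/platform_device.h>

static int example_fpga_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fpga_manager *mgr;

	/* Must be the devres-managed create variant, or registration fails */
	mgr = devm_fpga_mgr_create(dev, "example FPGA manager",
				   &example_fpga_ops, NULL);
	if (!mgr)
		return -ENOMEM;

	/* Unregistered automatically when the device is unbound */
	return devm_fpga_mgr_register(dev, mgr);
}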
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 8d689fea0dabc1172dd35c0cbab056ad70c4542f..69dec5af23c366f7acb2e6198b2357d0e3316acb 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -183,18 +183,7 @@ static int ice40_fpga_probe(struct spi_device *spi)
 	if (!mgr)
 		return -ENOMEM;
 
-	spi_set_drvdata(spi, mgr);
-
-	return fpga_mgr_register(mgr);
-}
-
-static int ice40_fpga_remove(struct spi_device *spi)
-{
-	struct fpga_manager *mgr = spi_get_drvdata(spi);
-
-	fpga_mgr_unregister(mgr);
-
-	return 0;
+	return devm_fpga_mgr_register(dev, mgr);
 }
 
 static const struct of_device_id ice40_fpga_of_match[] = {
@@ -205,7 +194,6 @@ MODULE_DEVICE_TABLE(of, ice40_fpga_of_match);
 
 static struct spi_driver ice40_fpga_driver = {
 	.probe = ice40_fpga_probe,
-	.remove = ice40_fpga_remove,
 	.driver = {
 		.name = "ice40spi",
 		.of_match_table = of_match_ptr(ice40_fpga_of_match),
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index b316369156fe6b5087916c63484568c3901f9085..114a64d2b7a4d68f81b50f73f9c38446a370d3d5 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -371,18 +371,7 @@ static int machxo2_spi_probe(struct spi_device *spi)
 	if (!mgr)
 		return -ENOMEM;
 
-	spi_set_drvdata(spi, mgr);
-
-	return fpga_mgr_register(mgr);
-}
-
-static int machxo2_spi_remove(struct spi_device *spi)
-{
-	struct fpga_manager *mgr = spi_get_drvdata(spi);
-
-	fpga_mgr_unregister(mgr);
-
-	return 0;
+	return devm_fpga_mgr_register(dev, mgr);
 }
 
 static const struct of_device_id of_match[] = {
@@ -403,7 +392,6 @@ static struct spi_driver machxo2_spi_driver = {
 		.of_match_table = of_match_ptr(of_match),
 	},
 	.probe = machxo2_spi_probe,
-	.remove = machxo2_spi_remove,
 	.id_table = lattice_ids,
 };
 
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 4a8a2fcd4e6c938d006401dd22b392f06f102f10..1f467173fc1f3574a47468353e5ef156e3453606 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -576,18 +576,7 @@ static int socfpga_fpga_probe(struct platform_device *pdev)
 	if (!mgr)
 		return -ENOMEM;
 
-	platform_set_drvdata(pdev, mgr);
-
-	return fpga_mgr_register(mgr);
-}
-
-static int socfpga_fpga_remove(struct platform_device *pdev)
-{
-	struct fpga_manager *mgr = platform_get_drvdata(pdev);
-
-	fpga_mgr_unregister(mgr);
-
-	return 0;
+	return devm_fpga_mgr_register(dev, mgr);
 }
 
 #ifdef CONFIG_OF
@@ -601,7 +590,6 @@ MODULE_DEVICE_TABLE(of, socfpga_fpga_of_match);
 
 static struct platform_driver socfpga_fpga_driver = {
 	.probe = socfpga_fpga_probe,
-	.remove = socfpga_fpga_remove,
 	.driver = {
 		.name	= "socfpga_fpga_manager",
 		.of_match_table = of_match_ptr(socfpga_fpga_of_match),
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 2888ff000e4d1bdbfc5dc1ce85268be05188b92a..101f016c6ed8c8997d3cc0f35388a9ae00bf8d92 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -127,18 +127,7 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
 	if (!mgr)
 		return -ENOMEM;
 
-	platform_set_drvdata(pdev, mgr);
-
-	return fpga_mgr_register(mgr);
-}
-
-static int ts73xx_fpga_remove(struct platform_device *pdev)
-{
-	struct fpga_manager *mgr = platform_get_drvdata(pdev);
-
-	fpga_mgr_unregister(mgr);
-
-	return 0;
+	return devm_fpga_mgr_register(kdev, mgr);
 }
 
 static struct platform_driver ts73xx_fpga_driver = {
@@ -146,7 +135,6 @@ static struct platform_driver ts73xx_fpga_driver = {
 		.name	= "ts73xx-fpga-mgr",
 	},
 	.probe	= ts73xx_fpga_probe,
-	.remove	= ts73xx_fpga_remove,
 };
 module_platform_driver(ts73xx_fpga_driver);
 
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 824abbbd631e433e9d43c1778d6b8ff30c4e946d..27defa98092ddce55d56d3ce3d980cc8104d7481 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -259,18 +259,7 @@ static int xilinx_spi_probe(struct spi_device *spi)
 	if (!mgr)
 		return -ENOMEM;
 
-	spi_set_drvdata(spi, mgr);
-
-	return fpga_mgr_register(mgr);
-}
-
-static int xilinx_spi_remove(struct spi_device *spi)
-{
-	struct fpga_manager *mgr = spi_get_drvdata(spi);
-
-	fpga_mgr_unregister(mgr);
-
-	return 0;
+	return devm_fpga_mgr_register(&spi->dev, mgr);
 }
 
 static const struct of_device_id xlnx_spi_of_match[] = {
@@ -285,7 +274,6 @@ static struct spi_driver xilinx_slave_spi_driver = {
 		.of_match_table = of_match_ptr(xlnx_spi_of_match),
 	},
 	.probe = xilinx_spi_probe,
-	.remove = xilinx_spi_remove,
 };
 
 module_spi_driver(xilinx_slave_spi_driver)
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index 4a1139e05280c4749c515f1bdb9574c8ce64ce80..125743c9797ffbaad12ed7fb8e2fcc4ce5f589b8 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -95,7 +95,6 @@ static int zynqmp_fpga_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct zynqmp_fpga_priv *priv;
 	struct fpga_manager *mgr;
-	int ret;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -108,24 +107,7 @@ static int zynqmp_fpga_probe(struct platform_device *pdev)
 	if (!mgr)
 		return -ENOMEM;
 
-	platform_set_drvdata(pdev, mgr);
-
-	ret = fpga_mgr_register(mgr);
-	if (ret) {
-		dev_err(dev, "unable to register FPGA manager");
-		return ret;
-	}
-
-	return 0;
-}
-
-static int zynqmp_fpga_remove(struct platform_device *pdev)
-{
-	struct fpga_manager *mgr = platform_get_drvdata(pdev);
-
-	fpga_mgr_unregister(mgr);
-
-	return 0;
+	return devm_fpga_mgr_register(dev, mgr);
 }
 
 static const struct of_device_id zynqmp_fpga_of_match[] = {
@@ -137,7 +119,6 @@ MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match);
 
 static struct platform_driver zynqmp_fpga_driver = {
 	.probe = zynqmp_fpga_probe,
-	.remove = zynqmp_fpga_remove,
 	.driver = {
 		.name = "zynqmp_fpga_manager",
 		.of_match_table = of_match_ptr(zynqmp_fpga_of_match),
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
index c006ec008a1aae87d6b5b3a0e2d64b1f1cf41fd6..90dbe58ca1edca5c204732b616412a802309d884 100644
--- a/drivers/fsi/fsi-master-aspeed.c
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -8,6 +8,7 @@
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
@@ -19,6 +20,7 @@
 
 struct fsi_master_aspeed {
 	struct fsi_master	master;
+	struct mutex		lock;	/* protect HW access */
 	struct device		*dev;
 	void __iomem		*base;
 	struct clk		*clk;
@@ -254,6 +256,8 @@ static int aspeed_master_read(struct fsi_master *master, int link,
 	addr |= id << 21;
 	addr += link * FSI_HUB_LINK_SIZE;
 
+	mutex_lock(&aspeed->lock);
+
 	switch (size) {
 	case 1:
 		ret = opb_readb(aspeed, fsi_base + addr, val);
@@ -265,14 +269,14 @@ static int aspeed_master_read(struct fsi_master *master, int link,
 		ret = opb_readl(aspeed, fsi_base + addr, val);
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto done;
 	}
 
 	ret = check_errors(aspeed, ret);
-	if (ret)
-		return ret;
-
-	return 0;
+done:
+	mutex_unlock(&aspeed->lock);
+	return ret;
 }
 
 static int aspeed_master_write(struct fsi_master *master, int link,
@@ -287,6 +291,8 @@ static int aspeed_master_write(struct fsi_master *master, int link,
 	addr |= id << 21;
 	addr += link * FSI_HUB_LINK_SIZE;
 
+	mutex_lock(&aspeed->lock);
+
 	switch (size) {
 	case 1:
 		ret = opb_writeb(aspeed, fsi_base + addr, *(u8 *)val);
@@ -298,14 +304,14 @@ static int aspeed_master_write(struct fsi_master *master, int link,
 		ret = opb_writel(aspeed, fsi_base + addr, *(__be32 *)val);
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto done;
 	}
 
 	ret = check_errors(aspeed, ret);
-	if (ret)
-		return ret;
-
-	return 0;
+done:
+	mutex_unlock(&aspeed->lock);
+	return ret;
 }
 
 static int aspeed_master_link_enable(struct fsi_master *master, int link,
@@ -320,17 +326,21 @@ static int aspeed_master_link_enable(struct fsi_master *master, int link,
 
 	reg = cpu_to_be32(0x80000000 >> bit);
 
-	if (!enable)
-		return opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx),
-				  reg);
+	mutex_lock(&aspeed->lock);
+
+	if (!enable) {
+		ret = opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx), reg);
+		goto done;
+	}
 
 	ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg);
 	if (ret)
-		return ret;
+		goto done;
 
 	mdelay(FSI_LINK_ENABLE_SETUP_TIME);
-
-	return 0;
+done:
+	mutex_unlock(&aspeed->lock);
+	return ret;
 }
 
 static int aspeed_master_term(struct fsi_master *master, int link, uint8_t id)
@@ -431,9 +441,11 @@ static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *att
 {
 	struct fsi_master_aspeed *aspeed = dev_get_drvdata(dev);
 
+	mutex_lock(&aspeed->lock);
 	gpiod_set_value(aspeed->cfam_reset_gpio, 1);
 	usleep_range(900, 1000);
 	gpiod_set_value(aspeed->cfam_reset_gpio, 0);
+	mutex_unlock(&aspeed->lock);
 
 	return count;
 }
@@ -597,6 +609,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
 
 	dev_set_drvdata(&pdev->dev, aspeed);
 
+	mutex_init(&aspeed->lock);
 	aspeed_master_init(aspeed);
 
 	rc = fsi_master_register(&aspeed->master);
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index c1198245461d4b7b3001bc23995d0d5d4cfe9af7..7b44ba22cbe1dbd89212a4955471ea98c2141ba0 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -110,6 +110,14 @@ config CORESIGHT_SOURCE_ETM4X
 	  To compile this driver as a module, choose M here: the
 	  module will be called coresight-etm4x.
 
+config ETM4X_IMPDEF_FEATURE
+	bool "Control implementation defined overflow support in ETM 4.x driver"
+	depends on CORESIGHT_SOURCE_ETM4X
+	help
+	  This option provides implementation-defined control for CoreSight
+	  ETM 4.x tracer modules that can't reduce their commit rate
+	  automatically. It avoids overflow between the ETM tracer module
+	  and the CPU core.
+
 config CORESIGHT_STM
 	tristate "CoreSight System Trace Macrocell driver"
 	depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 99430f6cf5a5d1ba0fc6d3de587056d7a7a37cf9..a61313f320bda220e54c41e7ef536edffb91474e 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -567,7 +567,7 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
 	return ret;
 }
 
-static int __exit catu_remove(struct amba_device *adev)
+static int catu_remove(struct amba_device *adev)
 {
 	struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
 
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index cc9e8025c533c018942d5fb5e9b751fb8873a228..4ba801dffcb7985c4f23763a7faf43ff83c76213 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -418,7 +418,7 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
 			if (ret) {
 				coresight_control_assoc_ectdev(csdev, false);
 				return ret;
-			};
+			}
 		}
 		csdev->enable = true;
 	}
@@ -432,7 +432,7 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
  *  coresight_disable_source - Drop the reference count by 1 and disable
  *  the device if there are no users left.
  *
- *  @csdev - The coresight device to disable
+ *  @csdev: The coresight device to disable
  *
  *  Returns true if the device has been disabled.
  */
@@ -663,6 +663,9 @@ struct coresight_device *coresight_get_sink_by_id(u32 id)
 /**
  * coresight_get_ref- Helper function to increase reference count to module
  * and device.
+ *
+ * @csdev: The coresight device to get a reference on.
+ *
  * Return true in successful case and power up the device.
  * Return false when failed to get reference of module.
  */
@@ -682,6 +685,8 @@ static inline bool coresight_get_ref(struct coresight_device *csdev)
 /**
  * coresight_put_ref- Helper function to decrease reference count to module
  * and device. Power off the device.
+ *
+ * @csdev: The coresight device to decrement a reference from.
  */
 static inline void coresight_put_ref(struct coresight_device *csdev)
 {
@@ -744,6 +749,7 @@ static void coresight_drop_device(struct coresight_device *csdev)
 /**
  * _coresight_build_path - recursively build a path from a @csdev to a sink.
  * @csdev:	The device to start from.
+ * @sink:	The final sink we want in this path.
  * @path:	The list to add devices to.
  *
  * The tree of Coresight device is traversed until an activated sink is
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index d28eae93e55c8e26d2f159310633ff323af73759..61dbc1afd8da506a84f6ade2b58be670f1c651bf 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -836,7 +836,7 @@ static void cti_device_release(struct device *dev)
 	if (drvdata->csdev_release)
 		drvdata->csdev_release(dev);
 }
-static int __exit cti_remove(struct amba_device *adev)
+static int cti_remove(struct amba_device *adev)
 {
 	struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev);
 
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 248cc82c838e722a8d60ecb6711327ffbd6e0bef..0cf6f0b947b6f8849d3732f9e0fa7a062e245323 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -176,6 +176,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
 	unsigned long flags;
 	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 	struct perf_output_handle *handle = data;
+	struct cs_buffers *buf = etm_perf_sink_config(handle);
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 
@@ -186,7 +187,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
 	}
 
 	/* Get a handle on the pid of the process to monitor */
-	pid = task_pid_nr(handle->event->owner);
+	pid = buf->pid;
 
 	if (drvdata->pid != -1 && drvdata->pid != pid) {
 		ret = -EBUSY;
@@ -383,6 +384,7 @@ static void *etb_alloc_buffer(struct coresight_device *csdev,
 	if (!buf)
 		return NULL;
 
+	buf->pid = task_pid_nr(event->owner);
 	buf->snapshot = overwrite;
 	buf->nr_pages = nr_pages;
 	buf->data_pages = pages;
@@ -801,7 +803,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
 	return ret;
 }
 
-static int __exit etb_remove(struct amba_device *adev)
+static int etb_remove(struct amba_device *adev)
 {
 	struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev);
 
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 47f610b1c2b18ab48fd82f477871792ed7f129a3..5bf5a5a4ce6d15265831afac116f193ca2ea4026 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -902,14 +902,14 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 	return 0;
 }
 
-static void __exit clear_etmdrvdata(void *info)
+static void clear_etmdrvdata(void *info)
 {
 	int cpu = *(int *)info;
 
 	etmdrvdata[cpu] = NULL;
 }
 
-static int __exit etm_remove(struct amba_device *adev)
+static int etm_remove(struct amba_device *adev)
 {
 	struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index abd706b216ac909ff98a7555538dece54f183e86..b20b6ff17cf65805b442a4d71294406d7d96417f 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -3,6 +3,7 @@
  * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
@@ -28,7 +29,9 @@
 #include <linux/perf_event.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
+
 #include <asm/sections.h>
+#include <asm/sysreg.h>
 #include <asm/local.h>
 #include <asm/virt.h>
 
@@ -103,6 +106,97 @@ struct etm4_enable_arg {
 	int rc;
 };
 
+#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
+
+#define HISI_HIP08_AMBA_ID		0x000b6d01
+#define ETM4_AMBA_MASK			0xfffff
+#define HISI_HIP08_CORE_COMMIT_MASK	0x3000
+#define HISI_HIP08_CORE_COMMIT_SHIFT	12
+#define HISI_HIP08_CORE_COMMIT_FULL	0b00
+#define HISI_HIP08_CORE_COMMIT_LVL_1	0b01
+#define HISI_HIP08_CORE_COMMIT_REG	sys_reg(3, 1, 15, 2, 5)
+
+struct etm4_arch_features {
+	void (*arch_callback)(bool enable);
+};
+
+static bool etm4_hisi_match_pid(unsigned int id)
+{
+	return (id & ETM4_AMBA_MASK) == HISI_HIP08_AMBA_ID;
+}
+
+static void etm4_hisi_config_core_commit(bool enable)
+{
+	u8 commit = enable ? HISI_HIP08_CORE_COMMIT_LVL_1 :
+		    HISI_HIP08_CORE_COMMIT_FULL;
+	u64 val;
+
+	/*
+	 * Bits 12 and 13 of HISI_HIP08_CORE_COMMIT_REG are used together
+	 * to set the core-commit level: 2'b00 means the CPU runs at full
+	 * speed, while 2'b01, 2'b10 and 2'b11 reduce the pipeline speed,
+	 * with 2'b01 being level-1 speed (the minimum). So bits 12 and 13
+	 * should be cleared together.
+	 */
+	val = read_sysreg_s(HISI_HIP08_CORE_COMMIT_REG);
+	val &= ~HISI_HIP08_CORE_COMMIT_MASK;
+	val |= commit << HISI_HIP08_CORE_COMMIT_SHIFT;
+	write_sysreg_s(val, HISI_HIP08_CORE_COMMIT_REG);
+}
+
+static struct etm4_arch_features etm4_features[] = {
+	[ETM4_IMPDEF_HISI_CORE_COMMIT] = {
+		.arch_callback = etm4_hisi_config_core_commit,
+	},
+	{},
+};
+
+static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+	struct etm4_arch_features *ftr;
+	int bit;
+
+	for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
+		ftr = &etm4_features[bit];
+
+		if (ftr->arch_callback)
+			ftr->arch_callback(true);
+	}
+}
+
+static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+	struct etm4_arch_features *ftr;
+	int bit;
+
+	for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
+		ftr = &etm4_features[bit];
+
+		if (ftr->arch_callback)
+			ftr->arch_callback(false);
+	}
+}
+
+static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
+				      unsigned int id)
+{
+	if (etm4_hisi_match_pid(id))
+		set_bit(ETM4_IMPDEF_HISI_CORE_COMMIT, drvdata->arch_features);
+}
+#else
+static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+}
+
+static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+}
+
+static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
+				     unsigned int id)
+{
+}
+#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
+
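
The implementation-defined feature handling above boils down to a per-device bitmap plus a table of callbacks: etm4_check_arch_features() sets a bit when the AMBA part ID matches, and the enable/disable paths walk the set bits and invoke the matching callback. A minimal stand-alone sketch of that dispatch pattern (the names below are placeholders for illustration, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

enum impdef_feature { FEAT_CORE_COMMIT, FEAT_MAX };

struct feature_ops { void (*callback)(bool enable); };

static void core_commit_cb(bool enable)
{
	printf("core-commit %s\n", enable ? "limited" : "full speed");
}

static const struct feature_ops features[FEAT_MAX] = {
	[FEAT_CORE_COMMIT] = { .callback = core_commit_cb },
};

/* One bit per feature; set at probe time when the part ID matches. */
static unsigned long arch_features;

static void run_features(bool enable)
{
	for (int bit = 0; bit < FEAT_MAX; bit++)
		if (((arch_features >> bit) & 1) && features[bit].callback)
			features[bit].callback(enable);
}

int main(void)
{
	arch_features |= 1UL << FEAT_CORE_COMMIT;	/* "PID matched" */
	run_features(true);				/* on trace enable */
	run_features(false);				/* on trace disable */
	return 0;
}
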
 static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
 {
 	int i, rc;
@@ -110,6 +204,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
 	struct device *etm_dev = &drvdata->csdev->dev;
 
 	CS_UNLOCK(drvdata->base);
+	etm4_enable_arch_specific(drvdata);
 
 	etm4_os_unlock(drvdata);
 
@@ -124,8 +219,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
 	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
 		dev_err(etm_dev,
 			"timeout while waiting for Idle Trace Status\n");
-
-	writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
+	if (drvdata->nr_pe)
+		writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
 	writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
 	/* nothing specific implemented */
 	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
@@ -141,8 +236,9 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
 	writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
 	writel_relaxed(config->vissctlr,
 		       drvdata->base + TRCVISSCTLR);
-	writel_relaxed(config->vipcssctlr,
-		       drvdata->base + TRCVIPCSSCTLR);
+	if (drvdata->nr_pe_cmp)
+		writel_relaxed(config->vipcssctlr,
+			       drvdata->base + TRCVIPCSSCTLR);
 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
 		writel_relaxed(config->seq_ctrl[i],
 			       drvdata->base + TRCSEQEVRn(i));
@@ -187,13 +283,15 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
 		writeq_relaxed(config->ctxid_pid[i],
 			       drvdata->base + TRCCIDCVRn(i));
 	writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
-	writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
+	if (drvdata->numcidc > 4)
+		writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
 
 	for (i = 0; i < drvdata->numvmidc; i++)
 		writeq_relaxed(config->vmid_val[i],
 			       drvdata->base + TRCVMIDCVRn(i));
 	writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
-	writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
+	if (drvdata->numvmidc > 4)
+		writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
 
 	if (!drvdata->skip_power_up) {
 		/*
@@ -476,6 +574,7 @@ static void etm4_disable_hw(void *info)
 	int i;
 
 	CS_UNLOCK(drvdata->base);
+	etm4_disable_arch_specific(drvdata);
 
 	if (!drvdata->skip_power_up) {
 		/* power can be removed from the trace unit now */
@@ -722,8 +821,13 @@ static void etm4_init_arch_data(void *info)
 	else
 		drvdata->sysstall = false;
 
-	/* NUMPROC, bits[30:28] the number of PEs available for tracing */
-	drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
+	/*
+	 * NUMPROC - the number of PEs available for tracing, 5 bits
+	 *         = {TRCIDR3.bits[13:12], TRCIDR3.bits[30:28]}
+	 *  bits[4:3] = TRCIDR3.bits[13:12] (since ETM v4.2, otherwise RES0)
+	 *  bits[2:0] = TRCIDR3.bits[30:28]
+	 */
+	drvdata->nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30);
 
 	/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
 	if (BMVAL(etmidr3, 31, 31))
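
For reference, the 5-bit NUMPROC value is stitched together from two disjoint TRCIDR3 fields, exactly as the comment above describes. A stand-alone sketch of the same bit surgery (bmval() is a local stand-in for the driver's BMVAL() helper, and the TRCIDR3 value is made up):

#include <stdint.h>
#include <stdio.h>

/* Extract bits [hi:lo] of val, mirroring the driver's BMVAL() macro. */
static uint32_t bmval(uint32_t val, unsigned int lo, unsigned int hi)
{
	return (val >> lo) & ((1u << (hi - lo + 1)) - 1);
}

int main(void)
{
	uint32_t trcidr3 = 0x70003000;	/* bits[30:28] = 7, bits[13:12] = 3 */
	uint32_t nr_pe = (bmval(trcidr3, 12, 13) << 3) | bmval(trcidr3, 28, 30);

	printf("NUMPROC = %u\n", nr_pe);	/* (3 << 3) | 7 = 31 */
	return 0;
}
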
@@ -779,7 +883,7 @@ static void etm4_init_arch_data(void *info)
 	 * LPOVERRIDE, bit[23] implementation supports
 	 * low-power state override
 	 */
-	if (BMVAL(etmidr5, 23, 23))
+	if (BMVAL(etmidr5, 23, 23) && (!drvdata->skip_power_up))
 		drvdata->lpoverride = true;
 	else
 		drvdata->lpoverride = false;
@@ -1178,7 +1282,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
 	state = drvdata->save_state;
 
 	state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR);
-	state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
+	if (drvdata->nr_pe)
+		state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
 	state->trcconfigr = readl(drvdata->base + TRCCONFIGR);
 	state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
 	state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
@@ -1194,7 +1299,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
 	state->trcvictlr = readl(drvdata->base + TRCVICTLR);
 	state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR);
 	state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR);
-	state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
+	if (drvdata->nr_pe_cmp)
+		state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
 	state->trcvdctlr = readl(drvdata->base + TRCVDCTLR);
 	state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
 	state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
@@ -1240,10 +1346,12 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
 		state->trcvmidcvr[i] = readq(drvdata->base + TRCVMIDCVRn(i));
 
 	state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
-	state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
+	if (drvdata->numcidc > 4)
+		state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
 
 	state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
-	state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
+	if (drvdata->numvmidc > 4)
+		state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
 
 	state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
 
@@ -1283,7 +1391,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
 	writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
 
 	writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR);
-	writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
+	if (drvdata->nr_pe)
+		writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
 	writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR);
 	writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
 	writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
@@ -1299,7 +1408,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
 	writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR);
 	writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR);
 	writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR);
-	writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
+	if (drvdata->nr_pe_cmp)
+		writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
 	writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR);
 	writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
 	writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
@@ -1350,10 +1460,12 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
 			       drvdata->base + TRCVMIDCVRn(i));
 
 	writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
-	writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
+	if (drvdata->numcidc > 4)
+		writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
 
 	writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
-	writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
+	if (drvdata->numvmidc > 4)
+		writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
 
 	writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
 
@@ -1547,6 +1659,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 		drvdata->boot_enable = true;
 	}
 
+	etm4_check_arch_features(drvdata, id->id);
+
 	return 0;
 }
 
@@ -1559,14 +1673,14 @@ static struct amba_cs_uci_id uci_id_etm4[] = {
 	}
 };
 
-static void __exit clear_etmdrvdata(void *info)
+static void clear_etmdrvdata(void *info)
 {
 	int cpu = *(int *)info;
 
 	etmdrvdata[cpu] = NULL;
 }
 
-static int __exit etm4_remove(struct amba_device *adev)
+static int etm4_remove(struct amba_device *adev)
 {
 	struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index eefc7371c6c4d537e408beaa3425864a927b6c15..3dd3e0633328fc2434a6feb6a9de82228c7d13cc 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -8,6 +8,7 @@
 
 #include <asm/local.h>
 #include <linux/spinlock.h>
+#include <linux/types.h>
 #include "coresight-priv.h"
 
 /*
@@ -203,6 +204,11 @@
 /* Interpretation of resource numbers change at ETM v4.3 architecture */
 #define ETM4X_ARCH_4V3	0x43
 
+enum etm_impdef_type {
+	ETM4_IMPDEF_HISI_CORE_COMMIT,
+	ETM4_IMPDEF_FEATURE_MAX,
+};
+
 /**
  * struct etmv4_config - configuration information related to an ETMv4
  * @mode:	Controls various modes supported by this ETM.
@@ -415,6 +421,7 @@ struct etmv4_save_state {
  * @state_needs_restore: True when there is context to restore after PM exit
  * @skip_power_up: Indicates if an implementation can skip powering up
  *		   the trace unit.
+ * @arch_features: Bitmap of arch features of etmv4 devices.
  */
 struct etmv4_drvdata {
 	void __iomem			*base;
@@ -463,6 +470,7 @@ struct etmv4_drvdata {
 	struct etmv4_save_state		*save_state;
 	bool				state_needs_restore;
 	bool				skip_power_up;
+	DECLARE_BITMAP(arch_features, ETM4_IMPDEF_FEATURE_MAX);
 };
 
 /* Address comparator access types */
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index af40814ce5603d6240a7a53c1a151a346136cd17..071c723227dbdadddb10ccc33f52100b7bf3604d 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -274,7 +274,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
 	return ret;
 }
 
-static int __exit funnel_remove(struct device *dev)
+static int funnel_remove(struct device *dev)
 {
 	struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
 
@@ -328,7 +328,7 @@ static int static_funnel_probe(struct platform_device *pdev)
 	return ret;
 }
 
-static int __exit static_funnel_remove(struct platform_device *pdev)
+static int static_funnel_remove(struct platform_device *pdev)
 {
 	funnel_remove(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
@@ -356,7 +356,7 @@ static struct platform_driver static_funnel_driver = {
 	.remove          = static_funnel_remove,
 	.driver         = {
 		.name   = "coresight-static-funnel",
-		.owner	= THIS_MODULE,
+		/* THIS_MODULE is taken care of by platform_driver_register() */
 		.of_match_table = static_funnel_match,
 		.acpi_match_table = ACPI_PTR(static_funnel_ids),
 		.pm	= &funnel_dev_pm_ops,
@@ -370,7 +370,7 @@ static int dynamic_funnel_probe(struct amba_device *adev,
 	return funnel_probe(&adev->dev, &adev->res);
 }
 
-static int __exit dynamic_funnel_remove(struct amba_device *adev)
+static int dynamic_funnel_remove(struct amba_device *adev)
 {
 	return funnel_remove(&adev->dev);
 }
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 65a29293b6cb9a9ef9b930fefd30872946e49224..f5f654ea29946dd53d842492ba279fb6e18172c7 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -87,6 +87,7 @@ enum cs_mode {
  * struct cs_buffer - keep track of a recording session' specifics
  * @cur:	index of the current buffer
  * @nr_pages:	max number of pages granted to us
+ * @pid:	PID this cs_buffer belongs to
  * @offset:	offset within the current buffer
  * @data_size:	how much we collected in this run
  * @snapshot:	is this run in snapshot mode
@@ -95,6 +96,7 @@ enum cs_mode {
 struct cs_buffers {
 	unsigned int		cur;
 	unsigned int		nr_pages;
+	pid_t			pid;
 	unsigned long		offset;
 	local_t			data_size;
 	bool			snapshot;
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 62afdde0e5eaba34320dae3f4d14d002d7bd6c01..7e2a2b7f503f4340ff5d3873a28155769546318d 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -291,7 +291,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
 	return ret;
 }
 
-static int __exit replicator_remove(struct device *dev)
+static int replicator_remove(struct device *dev)
 {
 	struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
 
@@ -318,7 +318,7 @@ static int static_replicator_probe(struct platform_device *pdev)
 	return ret;
 }
 
-static int __exit static_replicator_remove(struct platform_device *pdev)
+static int static_replicator_remove(struct platform_device *pdev)
 {
 	replicator_remove(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
@@ -374,7 +374,7 @@ static struct platform_driver static_replicator_driver = {
 	.remove         = static_replicator_remove,
 	.driver         = {
 		.name   = "coresight-static-replicator",
-		.owner	= THIS_MODULE,
+		/* THIS_MODULE is taken care of by platform_driver_register() */
 		.of_match_table = of_match_ptr(static_replicator_match),
 		.acpi_match_table = ACPI_PTR(static_replicator_acpi_ids),
 		.pm	= &replicator_dev_pm_ops,
@@ -388,7 +388,7 @@ static int dynamic_replicator_probe(struct amba_device *adev,
 	return replicator_probe(&adev->dev, &adev->res);
 }
 
-static int __exit dynamic_replicator_remove(struct amba_device *adev)
+static int dynamic_replicator_remove(struct amba_device *adev)
 {
 	return replicator_remove(&adev->dev);
 }
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index b0ad912651a99e4feec2b9986712d8ba98f18c1e..99791773f682ff561119953fc304b1fa7bf10263 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -96,7 +96,7 @@ module_param_named(
 	boot_nr_channel, boot_nr_channel, int, S_IRUGO
 );
 
-/**
+/*
  * struct channel_space - central management entity for extended ports
  * @base:		memory mapped base address where channels start.
  * @phys:		physical base address of channel region.
@@ -951,7 +951,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
 	return ret;
 }
 
-static int __exit stm_remove(struct amba_device *adev)
+static int stm_remove(struct amba_device *adev)
 {
 	struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
 
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 5653e0945c74b66d25e5ab54d8b018132f868f32..8169dff5a9f6a789552d61bf0db7f5bcc2c11185 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -559,7 +559,7 @@ static void tmc_shutdown(struct amba_device *adev)
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 }
 
-static int __exit tmc_remove(struct amba_device *adev)
+static int tmc_remove(struct amba_device *adev)
 {
 	struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
 
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 44402d413ebbdf02c8ab293f27a6dd1b6e4096c8..989d965f3d901116febcab17cbb42ded636e2809 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -227,6 +227,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 	struct perf_output_handle *handle = data;
+	struct cs_buffers *buf = etm_perf_sink_config(handle);
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	do {
@@ -243,7 +244,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
 		}
 
 		/* Get a handle on the pid of the process to monitor */
-		pid = task_pid_nr(handle->event->owner);
+		pid = buf->pid;
 
 		if (drvdata->pid != -1 && drvdata->pid != pid) {
 			ret = -EBUSY;
@@ -399,6 +400,7 @@ static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
 	if (!buf)
 		return NULL;
 
+	buf->pid = task_pid_nr(event->owner);
 	buf->snapshot = overwrite;
 	buf->nr_pages = nr_pages;
 	buf->data_pages = pages;
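
With this change the buffer records its owning process once, at allocation time, and the sink enable path compares against that stored pid, refusing a second owner while the first session is active. A toy model of that ownership check (stand-alone, illustrative names only):

#include <stdio.h>
#include <sys/types.h>

struct sink { pid_t owner; };		/* -1 means free, like drvdata->pid */

static int sink_claim(struct sink *s, pid_t pid)
{
	if (s->owner != -1 && s->owner != pid)
		return -1;		/* busy: owned by another process */
	s->owner = pid;
	return 0;
}

int main(void)
{
	struct sink etf = { .owner = -1 };

	printf("claim by 100: %d\n", sink_claim(&etf, 100));	/* 0  */
	printf("claim by 200: %d\n", sink_claim(&etf, 200));	/* -1 */
	return 0;
}
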
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 714f9e867e5f6abac57010d8449139b866471004..bf5230e39c5bede1c28bbb1366abfecc0476b67d 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -217,6 +217,8 @@ static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
 		} else {
 			page = alloc_pages_node(node,
 						GFP_KERNEL | __GFP_ZERO, 0);
+			if (!page)
+				goto err;
 		}
 		paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
 		if (dma_mapping_error(real_dev, paddr))
@@ -954,11 +956,11 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
 		dev_dbg(&drvdata->csdev->dev,
 			"tmc memory error detected, truncating buffer\n");
 		etr_buf->len = 0;
-		etr_buf->full = 0;
+		etr_buf->full = false;
 		return;
 	}
 
-	etr_buf->full = status & TMC_STS_FULL;
+	etr_buf->full = !!(status & TMC_STS_FULL);
 
 	WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
 
@@ -1550,7 +1552,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
 
 	/* Insert barrier packets at the beginning, if there was an overflow */
 	if (lost)
-		tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
+		tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
 	tmc_etr_sync_perf_buffer(etr_perf, offset, size);
 
 	/*
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 566c57e035961e09b3021206a349e96da4541f32..d5dfee9ee55698b8606da6ad107e9618f1614631 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -49,7 +49,7 @@
 
 DEFINE_CORESIGHT_DEVLIST(tpiu_devs, "tpiu");
 
-/**
+/*
  * @base:	memory mapped base address for this component.
  * @atclk:	optional clock for the core parts of the TPIU.
  * @csdev:	component vitals needed by the framework.
@@ -173,7 +173,7 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
 	return PTR_ERR(drvdata->csdev);
 }
 
-static int __exit tpiu_remove(struct amba_device *adev)
+static int tpiu_remove(struct amba_device *adev)
 {
 	struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
 
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
index 5b7204ee2eb2ab20b403461c71a34b746b5de29d..d637a89d4695c527ef225c2036a46b045b596e2b 100644
--- a/drivers/interconnect/Kconfig
+++ b/drivers/interconnect/Kconfig
@@ -13,5 +13,6 @@ if INTERCONNECT
 
 source "drivers/interconnect/imx/Kconfig"
 source "drivers/interconnect/qcom/Kconfig"
+source "drivers/interconnect/samsung/Kconfig"
 
 endif
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index d203520b0a5671ac471e38801cad7d3c4a6f2bff..97d393fd638d7939e3321ed4984cd325b29e1180 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -6,3 +6,4 @@ icc-core-objs				:= core.o bulk.o
 obj-$(CONFIG_INTERCONNECT)		+= icc-core.o
 obj-$(CONFIG_INTERCONNECT_IMX)		+= imx/
 obj-$(CONFIG_INTERCONNECT_QCOM)		+= qcom/
+obj-$(CONFIG_INTERCONNECT_SAMSUNG)	+= samsung/
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index 887d13721e521e64f1b0d7ba3b58a8b5f2f133f0..1cc565bce2f4da956be6a358d04a7c18657d5996 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -41,17 +41,10 @@ struct bcm_voter {
 
 static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b)
 {
-	const struct qcom_icc_bcm *bcm_a =
-			list_entry(a, struct qcom_icc_bcm, list);
-	const struct qcom_icc_bcm *bcm_b =
-			list_entry(b, struct qcom_icc_bcm, list);
-
-	if (bcm_a->aux_data.vcd < bcm_b->aux_data.vcd)
-		return -1;
-	else if (bcm_a->aux_data.vcd == bcm_b->aux_data.vcd)
-		return 0;
-	else
-		return 1;
+	const struct qcom_icc_bcm *bcm_a = list_entry(a, struct qcom_icc_bcm, list);
+	const struct qcom_icc_bcm *bcm_b = list_entry(b, struct qcom_icc_bcm, list);
+
+	return bcm_a->aux_data.vcd - bcm_b->aux_data.vcd;
 }
 
 static u64 bcm_div(u64 num, u32 base)
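
Returning the plain difference works here because the vcd values are small non-negative integers, so the subtraction cannot wrap. The same comparator shape applies anywhere a total order on small keys is needed, for example with qsort() in this illustrative stand-alone sketch:

#include <stdio.h>
#include <stdlib.h>

struct bcm { unsigned int vcd; };

static int cmp_vcd(const void *a, const void *b)
{
	/* Safe as long as vcd stays far below INT_MAX. */
	return (int)((const struct bcm *)a)->vcd -
	       (int)((const struct bcm *)b)->vcd;
}

int main(void)
{
	struct bcm bcms[] = { { 3 }, { 1 }, { 2 } };

	qsort(bcms, 3, sizeof(bcms[0]), cmp_vcd);
	for (int i = 0; i < 3; i++)
		printf("%u ", bcms[i].vcd);
	printf("\n");		/* 1 2 3 */
	return 0;
}
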
diff --git a/drivers/interconnect/samsung/Kconfig b/drivers/interconnect/samsung/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..6820e4f772cce28b1194ca0bf09d4edeae0fdb26
--- /dev/null
+++ b/drivers/interconnect/samsung/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INTERCONNECT_SAMSUNG
+	bool "Samsung SoC interconnect drivers"
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	help
+	  Interconnect drivers for Samsung SoCs.
+
+config INTERCONNECT_EXYNOS
+	tristate "Exynos generic interconnect driver"
+	depends on INTERCONNECT_SAMSUNG
+	default y if ARCH_EXYNOS
+	help
+	  Generic interconnect driver for Exynos SoCs.
diff --git a/drivers/interconnect/samsung/Makefile b/drivers/interconnect/samsung/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..e19d1df7da489c07898f137d31940720d0dfb2be
--- /dev/null
+++ b/drivers/interconnect/samsung/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+exynos-interconnect-objs		:= exynos.o
+
+obj-$(CONFIG_INTERCONNECT_EXYNOS)	+= exynos-interconnect.o
diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c
new file mode 100644
index 0000000000000000000000000000000000000000..6559d8cf80687bf8d4034f52ea1f150c33c5f13c
--- /dev/null
+++ b/drivers/interconnect/samsung/exynos.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Exynos generic interconnect provider driver
+ *
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Artur Świgoń <a.swigon@samsung.com>
+ *          Sylwester Nawrocki <s.nawrocki@samsung.com>
+ */
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+
+#define EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO	8
+
+struct exynos_icc_priv {
+	struct device *dev;
+
+	/* One interconnect node per provider */
+	struct icc_provider provider;
+	struct icc_node *node;
+
+	struct dev_pm_qos_request qos_req;
+	u32 bus_clk_ratio;
+};
+
+static struct icc_node *exynos_icc_get_parent(struct device_node *np)
+{
+	struct of_phandle_args args;
+	struct icc_node_data *icc_node_data;
+	struct icc_node *icc_node;
+	int num, ret;
+
+	num = of_count_phandle_with_args(np, "interconnects",
+					 "#interconnect-cells");
+	if (num < 1)
+		return NULL; /* parent nodes are optional */
+
+	/* Get the interconnect target node */
+	ret = of_parse_phandle_with_args(np, "interconnects",
+					"#interconnect-cells", 0, &args);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	icc_node_data = of_icc_get_from_provider(&args);
+	of_node_put(args.np);
+
+	if (IS_ERR(icc_node_data))
+		return ERR_CAST(icc_node_data);
+
+	icc_node = icc_node_data->node;
+	kfree(icc_node_data);
+
+	return icc_node;
+}
+
+static int exynos_generic_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+	struct exynos_icc_priv *src_priv = src->data, *dst_priv = dst->data;
+	s32 src_freq = max(src->avg_bw, src->peak_bw) / src_priv->bus_clk_ratio;
+	s32 dst_freq = max(dst->avg_bw, dst->peak_bw) / dst_priv->bus_clk_ratio;
+	int ret;
+
+	ret = dev_pm_qos_update_request(&src_priv->qos_req, src_freq);
+	if (ret < 0) {
+		dev_err(src_priv->dev, "failed to update PM QoS of %s (src)\n",
+			src->name);
+		return ret;
+	}
+
+	ret = dev_pm_qos_update_request(&dst_priv->qos_req, dst_freq);
+	if (ret < 0) {
+		dev_err(dst_priv->dev, "failed to update PM QoS of %s (dst)\n",
+			dst->name);
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec,
+						 void *data)
+{
+	struct exynos_icc_priv *priv = data;
+
+	if (spec->np != priv->dev->parent->of_node)
+		return ERR_PTR(-EINVAL);
+
+	return priv->node;
+}
+
+static int exynos_generic_icc_remove(struct platform_device *pdev)
+{
+	struct exynos_icc_priv *priv = platform_get_drvdata(pdev);
+	struct icc_node *parent_node, *node = priv->node;
+
+	parent_node = exynos_icc_get_parent(priv->dev->parent->of_node);
+	if (parent_node && !IS_ERR(parent_node))
+		icc_link_destroy(node, parent_node);
+
+	icc_nodes_remove(&priv->provider);
+	icc_provider_del(&priv->provider);
+
+	return 0;
+}
+
+static int exynos_generic_icc_probe(struct platform_device *pdev)
+{
+	struct device *bus_dev = pdev->dev.parent;
+	struct exynos_icc_priv *priv;
+	struct icc_provider *provider;
+	struct icc_node *icc_node, *icc_parent_node;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = &pdev->dev;
+	platform_set_drvdata(pdev, priv);
+
+	provider = &priv->provider;
+
+	provider->set = exynos_generic_icc_set;
+	provider->aggregate = icc_std_aggregate;
+	provider->xlate = exynos_generic_icc_xlate;
+	provider->dev = bus_dev;
+	provider->inter_set = true;
+	provider->data = priv;
+
+	ret = icc_provider_add(provider);
+	if (ret < 0)
+		return ret;
+
+	icc_node = icc_node_create(pdev->id);
+	if (IS_ERR(icc_node)) {
+		ret = PTR_ERR(icc_node);
+		goto err_prov_del;
+	}
+
+	priv->node = icc_node;
+	icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
+					bus_dev->of_node);
+	if (of_property_read_u32(bus_dev->of_node, "samsung,data-clock-ratio",
+				 &priv->bus_clk_ratio))
+		priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO;
+
+	/*
+	 * Register a PM QoS request for the parent (devfreq) device.
+	 */
+	ret = dev_pm_qos_add_request(bus_dev, &priv->qos_req,
+				     DEV_PM_QOS_MIN_FREQUENCY, 0);
+	if (ret < 0)
+		goto err_node_del;
+
+	icc_node->data = priv;
+	icc_node_add(icc_node, provider);
+
+	icc_parent_node = exynos_icc_get_parent(bus_dev->of_node);
+	if (IS_ERR(icc_parent_node)) {
+		ret = PTR_ERR(icc_parent_node);
+		goto err_pmqos_del;
+	}
+	if (icc_parent_node) {
+		ret = icc_link_create(icc_node, icc_parent_node->id);
+		if (ret < 0)
+			goto err_pmqos_del;
+	}
+
+	return 0;
+
+err_pmqos_del:
+	dev_pm_qos_remove_request(&priv->qos_req);
+err_node_del:
+	icc_nodes_remove(provider);
+err_prov_del:
+	icc_provider_del(provider);
+	return ret;
+}
+
+static struct platform_driver exynos_generic_icc_driver = {
+	.driver = {
+		.name = "exynos-generic-icc",
+		.sync_state = icc_sync_state,
+	},
+	.probe = exynos_generic_icc_probe,
+	.remove = exynos_generic_icc_remove,
+};
+module_platform_driver(exynos_generic_icc_driver);
+
+MODULE_DESCRIPTION("Exynos generic interconnect driver");
+MODULE_AUTHOR("Artur Świgoń <a.swigon@samsung.com>");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:exynos-generic-icc");
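
exynos_generic_icc_set() turns the aggregated bandwidth into a minimum-frequency QoS request by dividing by the bus clock ratio (default 8, overridable through the samsung,data-clock-ratio property). A quick stand-alone model of that conversion, assuming the interconnect framework's kBps bandwidths and kHz QoS frequencies; the numbers are made up for illustration:

#include <stdio.h>

#define DEFAULT_BUS_CLK_RATIO	8

/* Mirror of the freq = max(avg_bw, peak_bw) / ratio conversion. */
static unsigned int bw_to_min_freq(unsigned int avg_bw_kbps,
				   unsigned int peak_bw_kbps,
				   unsigned int ratio)
{
	unsigned int bw = avg_bw_kbps > peak_bw_kbps ? avg_bw_kbps : peak_bw_kbps;

	return bw / ratio;
}

int main(void)
{
	/* e.g. 1.6 GB/s peak demand -> 200000 kHz minimum bus frequency */
	printf("%u\n", bw_to_min_freq(800000, 1600000, DEFAULT_BUS_CLK_RATIO));
	return 0;
}
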
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index d480a514c983792082e4715c7a9990812db372ba..3940714e4397b07f5ea8d616e75cf18920909c1c 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -544,7 +544,6 @@ static void ipoctal_set_termios(struct tty_struct *tty,
 		break;
 	default:
 		return;
-		break;
 	}
 
 	baud = tty_get_baud_rate(tty);
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index 5bdf574723144c8952e6b2cd0e83207e8b5ac45e..92c0611034b0183dcb1d22ac3f14b25ec39ccf9b 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -2265,11 +2265,6 @@ static int altera_check_crc(u8 *p, s32 program_size)
 				"actual %04x\n", __func__, local_expected,
 				local_actual);
 			break;
-		case -ENODATA:
-			printk(KERN_ERR "%s: expected CRC not found, "
-				"actual CRC = %04x\n", __func__,
-				local_actual);
-			break;
 		case -EIO:
 			printk(KERN_ERR "%s: error: format isn't "
 				"recognized.\n", __func__);
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 80d87e8a0bea980aeafa111f98cf4968a6f4551d..fb9a1b49ff6de77a911d90c40f57a498fa49cd60 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -899,7 +899,7 @@ struct c2port_device *c2port_device_register(char *name,
 		unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set))
 		return ERR_PTR(-EINVAL);
 
-	c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
+	c2dev = kzalloc(sizeof(struct c2port_device), GFP_KERNEL);
 	if (unlikely(!c2dev))
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index b85279f1fc5ee2b05fdb3f683b60999f58531444..b2676e7f50271d50e5d4fb16fd95db3f1344bd87 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -73,6 +73,9 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
 
 	pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
 	pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+	pcr->rtd3_en = rtsx_reg_to_rtd3_uhsii(reg);
+
 	if (rtsx_check_mmc_support(reg))
 		pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
 	pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
@@ -278,15 +281,28 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
 
 	rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
 
-	if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
+	if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A))
 		rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN);
-		rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00);
-		rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20);
+
+	if (pcr->rtd3_en) {
+		if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
+			rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x01);
+			rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x30);
+		} else {
+			rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x01);
+			rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x33);
+		}
 	} else {
-		rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30);
-		rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00);
+		if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
+			rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00);
+			rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20);
+		} else {
+			rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30);
+			rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00);
+		}
 	}
 
 	/*
 	 * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
 	 * to drive low, and we forcibly request clock.
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 5d15607027e9e05a5eaedbd9a9ba40321e810ef9..2700d199775026a8205efb6eced55b63ed4be2b4 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -20,6 +20,8 @@
 #include <linux/rtsx_pci.h>
 #include <linux/mmc/card.h>
 #include <asm/unaligned.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 
 #include "rtsx_pcr.h"
 #include "rts5261.h"
@@ -89,9 +91,15 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
 	if (pcr->aspm_enabled == enable)
 		return;
 
-	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
-					   PCI_EXP_LNKCTL_ASPMC,
-					   enable ? pcr->aspm_en : 0);
+	if (pcr->aspm_en & 0x02)
+		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+			FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+	else
+		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
+			FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
+
+	if (!enable && (pcr->aspm_en & 0x02))
+		mdelay(10);
 
 	pcr->aspm_enabled = enable;
 }
@@ -144,6 +152,12 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr)
 	if (pcr->remove_pci)
 		return;
 
+	if (pcr->rtd3_en && pcr->is_runtime_suspended) {
+		pm_runtime_get(&(pcr->pci->dev));
+		pcr->is_runtime_suspended = false;
+	}
+
 	if (pcr->state != PDEV_STAT_RUN) {
 		pcr->state = PDEV_STAT_RUN;
 		if (pcr->ops->enable_auto_blink)
@@ -1075,6 +1089,16 @@ static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
 	rtsx_comm_pm_power_saving(pcr);
 }
 
+static void rtsx_pci_rtd3_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, rtd3_work);
+
+	pcr_dbg(pcr, "--> %s\n", __func__);
+	if (!pcr->is_runtime_suspended)
+		pm_runtime_put(&(pcr->pci->dev));
+}
+
 static void rtsx_pci_idle_work(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
@@ -1094,6 +1118,9 @@ static void rtsx_pci_idle_work(struct work_struct *work)
 	rtsx_pm_power_saving(pcr);
 
 	mutex_unlock(&pcr->pcr_mutex);
+
+	if (pcr->rtd3_en)
+		mod_delayed_work(system_wq, &pcr->rtd3_work, msecs_to_jiffies(10000));
 }
 
 static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
@@ -1283,7 +1310,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
 	/* Wait SSC power stable */
 	udelay(200);
 
-	rtsx_pci_disable_aspm(pcr);
+	rtsx_disable_aspm(pcr);
 	if (pcr->ops->optimize_phy) {
 		err = pcr->ops->optimize_phy(pcr);
 		if (err < 0)
@@ -1357,8 +1384,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
 	rtsx_pci_init_ocp(pcr);
 
 	/* Enable clk_request_n to enable clock power management */
-	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
-				   PCI_EXP_LNKCTL_CLKREQ_EN);
+	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+					0, PCI_EXP_LNKCTL_CLKREQ_EN);
 	/* Enter L1 when host tx idle */
 	pci_write_config_byte(pdev, 0x70F, 0x5B);
 
@@ -1368,6 +1395,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
 			return err;
 	}
 
+	rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
+
 	/* No CD interrupt if probing driver with card inserted.
 	 * So we need to initialize pcr->card_exist here.
 	 */
@@ -1571,6 +1600,15 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
 		rtsx_pcr_cells[i].platform_data = handle;
 		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
 	}
+
+	if (pcr->rtd3_en) {
+		INIT_DELAYED_WORK(&pcr->rtd3_work, rtsx_pci_rtd3_work);
+		pm_runtime_allow(&pcidev->dev);
+		pm_runtime_enable(&pcidev->dev);
+		pcr->is_runtime_suspended = false;
+	}
+
 	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
 			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
 	if (ret < 0)
@@ -1608,6 +1646,9 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
 	struct rtsx_pcr *pcr = handle->pcr;
 
+	if (pcr->rtd3_en)
+		pm_runtime_get_noresume(&pcr->pci->dev);
+
 	pcr->remove_pci = true;
 
 	/* Disable interrupts at the pcr level */
@@ -1618,6 +1659,8 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
 
 	cancel_delayed_work_sync(&pcr->carddet_work);
 	cancel_delayed_work_sync(&pcr->idle_work);
+	if (pcr->rtd3_en)
+		cancel_delayed_work_sync(&pcr->rtd3_work);
 
 	mfd_remove_devices(&pcidev->dev);
 
@@ -1635,6 +1678,11 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
 	idr_remove(&rtsx_pci_idr, pcr->id);
 	spin_unlock(&rtsx_pci_lock);
 
+	if (pcr->rtd3_en) {
+		pm_runtime_disable(&pcr->pci->dev);
+		pm_runtime_put_noidle(&pcr->pci->dev);
+	}
+
 	kfree(pcr->slots);
 	kfree(pcr);
 	kfree(handle);
@@ -1716,13 +1764,77 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
 		pci_disable_msi(pcr->pci);
 }
 
+static int rtsx_pci_runtime_suspend(struct device *device)
+{
+	struct pci_dev *pcidev = to_pci_dev(device);
+	struct pcr_handle *handle;
+	struct rtsx_pcr *pcr;
+
+	handle = pci_get_drvdata(pcidev);
+	pcr = handle->pcr;
+	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+
+	cancel_delayed_work(&pcr->carddet_work);
+	cancel_delayed_work(&pcr->rtd3_work);
+	cancel_delayed_work(&pcr->idle_work);
+
+	mutex_lock(&pcr->pcr_mutex);
+	rtsx_pci_power_off(pcr, HOST_ENTER_S3);
+
+	free_irq(pcr->irq, (void *)pcr);
+
+	mutex_unlock(&pcr->pcr_mutex);
+
+	pcr->is_runtime_suspended = true;
+
+	return 0;
+}
+
+static int rtsx_pci_runtime_resume(struct device *device)
+{
+	struct pci_dev *pcidev = to_pci_dev(device);
+	struct pcr_handle *handle;
+	struct rtsx_pcr *pcr;
+	int ret = 0;
+
+	handle = pci_get_drvdata(pcidev);
+	pcr = handle->pcr;
+	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
+
+	mutex_lock(&pcr->pcr_mutex);
+
+	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
+	rtsx_pci_acquire_irq(pcr);
+	synchronize_irq(pcr->irq);
+
+	if (pcr->ops->fetch_vendor_settings)
+		pcr->ops->fetch_vendor_settings(pcr);
+
+	rtsx_pci_init_hw(pcr);
+
+	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
+		pcr->slots[RTSX_SD_CARD].card_event(
+				pcr->slots[RTSX_SD_CARD].p_dev);
+	}
+
+	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
+
+	mutex_unlock(&pcr->pcr_mutex);
+	return ret;
+}
+
 #else /* CONFIG_PM */
 
 #define rtsx_pci_shutdown NULL
+#define rtsx_pci_runtime_suspend NULL
+#define rtsx_pci_runtime_resume NULL
 
 #endif /* CONFIG_PM */
 
-static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume);
+static const struct dev_pm_ops rtsx_pci_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
+	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, NULL)
+};
 
 static struct pci_driver rtsx_pci_driver = {
 	.name = DRV_NAME_RTSX_PCI,
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index fe5f4ca0f9374392ec83fe925cc61e2ceb7659a3..daf057c4eea62759e3d03dbbd66f8db298b791f2 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -90,6 +90,7 @@ static inline u8 map_sd_drive(int idx)
 
 #define rtsx_check_mmc_support(reg)		((reg) & 0x10)
 #define rtsx_reg_to_rtd3(reg)				((reg) & 0x02)
+#define rtsx_reg_to_rtd3_uhsii(reg)				((reg) & 0x04)
 #define rtsx_reg_to_aspm(reg)			(((reg) >> 28) & 0x03)
 #define rtsx_reg_to_sd30_drive_sel_1v8(reg)	(((reg) >> 26) & 0x03)
 #define rtsx_reg_to_sd30_drive_sel_3v3(reg)	(((reg) >> 5) & 0x03)
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 3b7d8b7584f4d79714a99fa63197c1bc9facacf8..b76e4901b4a49893e60a9c8670ad9f98580a4453 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -22,6 +22,9 @@
  * mean that some AT25 products are EEPROMs, and others are FLASH.
  * Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
  * not this one!
+ *
+ * EEPROMs that can be used with this driver include, for example:
+ *   AT25M02, AT25128B
  */
 
 struct at25_data {
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index c9b8866180715ef3c2fde07f6f078f4179b573cd..2e1befbd1ad991035dce1b608a71326cd395cf9f 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -1089,24 +1089,9 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
 	}
 
 	/* check for 64-bit DMA address supported (DAC) */
-	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
-		err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
-		if (err) {
-			dev_err(&pci_dev->dev,
-				"err: DMA64 consistent mask error\n");
-			err = -EIO;
-			goto out_release_resources;
-		}
 	/* check for 32-bit DMA address supported (SAC) */
-	} else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
-		err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(&pci_dev->dev,
-				"err: DMA32 consistent mask error\n");
-			err = -EIO;
-			goto out_release_resources;
-		}
-	} else {
+	if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
 		dev_err(&pci_dev->dev,
 			"err: neither DMA32 nor DMA64 supported\n");
 		err = -EIO;
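
The rewritten probe path tries a 64-bit DMA mask first and only falls back to 32-bit when that fails, erroring out if neither is accepted. A minimal sketch of the fallback logic with the mask values spelled out; try_mask() is a stub standing in for dma_set_mask_and_coherent(), which is not available outside the kernel:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Stand-in for dma_set_mask_and_coherent(): pretend only 32-bit works. */
static int try_mask(uint64_t mask)
{
	return mask == DMA_BIT_MASK(32) ? 0 : -5 /* -EIO */;
}

int main(void)
{
	if (try_mask(DMA_BIT_MASK(64)) && try_mask(DMA_BIT_MASK(32))) {
		fprintf(stderr, "neither DMA32 nor DMA64 supported\n");
		return 1;
	}
	printf("DMA mask configured\n");
	return 0;
}
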
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index ada570f35a41acbfa9213f61f4194eb51776e9b9..6f6a904ab6ca05eb4e8a8039a8b6d275a52573a4 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -11,7 +11,6 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
-#include <linux/genalloc.h>
 
 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 {
@@ -68,9 +67,9 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 	bus_addr = cb->bus_address;
 	offset = 0;
 	list_for_each_entry(va_block, &cb->va_block_list, node) {
-		rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
-				list_is_last(&va_block->node,
-						&cb->va_block_list));
+		rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
+				va_block->size, list_is_last(&va_block->node,
+							&cb->va_block_list));
 		if (rc) {
 			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
 				va_block->start);
@@ -93,7 +92,7 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 	list_for_each_entry(va_block, &cb->va_block_list, node) {
 		if (offset <= 0)
 			break;
-		hl_mmu_unmap(ctx, va_block->start, va_block->size,
+		hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
 				offset <= va_block->size);
 		offset -= va_block->size;
 	}
@@ -120,7 +119,7 @@ static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 	mutex_lock(&ctx->mmu_lock);
 
 	list_for_each_entry(va_block, &cb->va_block_list, node)
-		if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
+		if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
 				list_is_last(&va_block->node,
 						&cb->va_block_list)))
 			dev_warn_ratelimited(hdev->dev,
@@ -376,17 +375,49 @@ int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
 	return rc;
 }
 
+static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+			u64 cb_handle, u32 *usage_cnt)
+{
+	struct hl_cb *cb;
+	u32 handle;
+	int rc = 0;
+
+	/* The CB handle was given to the user for mmap, so it must be shifted
+	 * back to the value that was allocated by the IDR module.
+	 */
+	cb_handle >>= PAGE_SHIFT;
+	handle = (u32) cb_handle;
+
+	spin_lock(&mgr->cb_lock);
+
+	cb = idr_find(&mgr->cb_handles, handle);
+	if (!cb) {
+		dev_err(hdev->dev,
+			"CB info failed, no match to handle 0x%x\n", handle);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	*usage_cnt = atomic_read(&cb->cs_cnt);
+
+out:
+	spin_unlock(&mgr->cb_lock);
+	return rc;
+}
+
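
The CB handle handed to user space is the IDR id shifted left by PAGE_SHIFT so it can double as an mmap offset (which the mmap path now also stashes in vma->vm_pgoff); hl_cb_info() simply reverses that shift before the idr_find() lookup. A tiny illustration of the encoding, assuming 4 KiB pages for the example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assuming 4 KiB pages for this sketch */

int main(void)
{
	uint32_t idr_id = 7;					/* from idr_alloc() */
	uint64_t user_handle = (uint64_t)idr_id << PAGE_SHIFT;	/* 0x7000 */

	/* The ioctl/mmap side shifts it back before looking up the CB. */
	printf("user handle 0x%llx -> idr id %u\n",
	       (unsigned long long)user_handle,
	       (uint32_t)(user_handle >> PAGE_SHIFT));
	return 0;
}
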
 int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
 {
 	union hl_cb_args *args = data;
 	struct hl_device *hdev = hpriv->hdev;
+	enum hl_device_status status;
 	u64 handle = 0;
+	u32 usage_cnt = 0;
 	int rc;
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, &status)) {
 		dev_warn_ratelimited(hdev->dev,
 			"Device is %s. Can't execute CB IOCTL\n",
-			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+			hdev->status[status]);
 		return -EBUSY;
 	}
 
@@ -413,6 +444,13 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
 					args->in.cb_handle);
 		break;
 
+	case HL_CB_OP_INFO:
+		rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
+				&usage_cnt);
+		memset(args, 0, sizeof(*args));
+		args->out.usage_cnt = usage_cnt;
+		break;
+
 	default:
 		rc = -ENOTTY;
 		break;
@@ -517,6 +555,7 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
 	}
 
 	cb->mmap_size = cb->size;
+	vma->vm_pgoff = handle;
 
 	return 0;
 
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index b2b974ecc431b16d0b6d0d6f05e5c1753ab0587e..beb482310a58640a255ad9c8a4c815326ad1411e 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -11,11 +11,25 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 
-#define HL_CS_FLAGS_SIG_WAIT	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT)
+#define HL_CS_FLAGS_TYPE_MASK	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
+				HL_CS_FLAGS_COLLECTIVE_WAIT)
+
+/**
+ * enum hl_cs_wait_status - cs wait status
+ * @CS_WAIT_STATUS_BUSY: cs was not completed yet
+ * @CS_WAIT_STATUS_COMPLETED: cs completed
+ * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
+ */
+enum hl_cs_wait_status {
+	CS_WAIT_STATUS_BUSY,
+	CS_WAIT_STATUS_COMPLETED,
+	CS_WAIT_STATUS_GONE
+};
 
 static void job_wq_completion(struct work_struct *work);
-static long _hl_cs_wait_ioctl(struct hl_device *hdev,
-		struct hl_ctx *ctx, u64 timeout_us, u64 seq);
+static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+				u64 timeout_us, u64 seq,
+				enum hl_cs_wait_status *status, s64 *timestamp);
 static void cs_do_release(struct kref *ref);
 
 static void hl_sob_reset(struct kref *ref)
@@ -38,6 +52,38 @@ void hl_sob_reset_error(struct kref *ref)
 			hw_sob->q_idx, hw_sob->sob_id);
 }
 
+/**
+ * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
+ * @sob_base: sob base id
+ * @sob_mask: sob user mask, each bit represents a sob offset from sob base
+ * @mask: generated mask
+ *
+ * Return: 0 if given parameters are valid
+ */
+int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
+{
+	int i;
+
+	if (sob_mask == 0)
+		return -EINVAL;
+
+	if (sob_mask == 0x1) {
+		*mask = ~(1 << (sob_base & 0x7));
+	} else {
+		/* find msb in order to verify sob range is valid */
+		for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
+			if (BIT(i) & sob_mask)
+				break;
+
+		if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
+			return -EINVAL;
+
+		*mask = ~sob_mask;
+	}
+
+	return 0;
+}
+
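
hl_gen_sob_mask() inverts the user-supplied SOB offset mask after checking that the highest requested offset still fits in the monitor's window starting at sob_base. A compilable copy of the same arithmetic for experimentation; the window size of 8 mirrors the BITS_PER_BYTE bound used above and is an assumption of this sketch:

#include <stdint.h>
#include <stdio.h>

#define MAX_SOBS_PER_MONITOR	8	/* assumed value for the sketch */

static int gen_sob_mask(uint16_t sob_base, uint8_t sob_mask, uint8_t *mask)
{
	int i;

	if (sob_mask == 0)
		return -1;

	if (sob_mask == 0x1) {
		*mask = ~(1 << (sob_base & 0x7));
	} else {
		/* Find the MSB to check that the SOB range stays in bounds. */
		for (i = 7; i >= 0; i--)
			if (sob_mask & (1 << i))
				break;
		if (i > MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1)
			return -1;
		*mask = ~sob_mask;
	}
	return 0;
}

int main(void)
{
	uint8_t mask;

	if (!gen_sob_mask(18, 0x0b, &mask))		/* SOBs 18, 19 and 21 */
		printf("mask = 0x%02x\n", mask);	/* 0xf4 */
	return 0;
}
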
 static void hl_fence_release(struct kref *kref)
 {
 	struct hl_fence *fence =
@@ -53,7 +99,8 @@ static void hl_fence_release(struct kref *kref)
 		goto free;
 
 	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
-			(hl_cs_cmpl->type == CS_TYPE_WAIT)) {
+		(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
+		(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)) {
 
 		dev_dbg(hdev->dev,
 			"CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
@@ -80,6 +127,10 @@ static void hl_fence_release(struct kref *kref)
 		 * hence the above scenario is avoided.
 		 */
 		kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
+
+		if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
+			hdev->asic_funcs->reset_sob_group(hdev,
+					hl_cs_cmpl->sob_group);
 	}
 
 free:
@@ -102,10 +153,11 @@ static void hl_fence_init(struct hl_fence *fence)
 {
 	kref_init(&fence->refcount);
 	fence->error = 0;
+	fence->timestamp = ktime_set(0, 0);
 	init_completion(&fence->completion);
 }
 
-static void cs_get(struct hl_cs *cs)
+void cs_get(struct hl_cs *cs)
 {
 	kref_get(&cs->refcount);
 }
@@ -120,6 +172,18 @@ static void cs_put(struct hl_cs *cs)
 	kref_put(&cs->refcount, cs_do_release);
 }
 
+static void cs_job_do_release(struct kref *ref)
+{
+	struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
+
+	kfree(job);
+}
+
+static void cs_job_put(struct hl_cs_job *job)
+{
+	kref_put(&job->refcount, cs_job_do_release);
+}
+
 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
 {
 	/*
@@ -169,10 +233,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
 			job->patched_cb = parser.patched_cb;
 			job->job_cb_size = parser.patched_cb_size;
 			job->contains_dma_pkt = parser.contains_dma_pkt;
-
-			spin_lock(&job->patched_cb->lock);
-			job->patched_cb->cs_cnt++;
-			spin_unlock(&job->patched_cb->lock);
+			atomic_inc(&job->patched_cb->cs_cnt);
 		}
 
 		/*
@@ -180,9 +241,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
 		 * original CB anymore because it was already parsed and
 		 * won't be accessed again for this CS
 		 */
-		spin_lock(&job->user_cb->lock);
-		job->user_cb->cs_cnt--;
-		spin_unlock(&job->user_cb->lock);
+		atomic_dec(&job->user_cb->cs_cnt);
 		hl_cb_put(job->user_cb);
 		job->user_cb = NULL;
 	} else if (!rc) {
@@ -192,7 +251,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
 	return rc;
 }
 
-static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
+static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
 {
 	struct hl_cs *cs = job->cs;
 
@@ -204,10 +263,7 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
 		 * created, so we need to check it's not NULL
 		 */
 		if (job->patched_cb) {
-			spin_lock(&job->patched_cb->lock);
-			job->patched_cb->cs_cnt--;
-			spin_unlock(&job->patched_cb->lock);
-
+			atomic_dec(&job->patched_cb->cs_cnt);
 			hl_cb_put(job->patched_cb);
 		}
 	}
@@ -215,13 +271,12 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
 	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
 	 * enabled, the user CB isn't released in cs_parser() and thus should be
 	 * released here.
+	 * This is also true for INT queue jobs which were allocated by the driver.
 	 */
-	if (job->queue_type == QUEUE_TYPE_HW &&
-			job->is_kernel_allocated_cb && hdev->mmu_enable) {
-		spin_lock(&job->user_cb->lock);
-		job->user_cb->cs_cnt--;
-		spin_unlock(&job->user_cb->lock);
-
+	if (job->is_kernel_allocated_cb &&
+		((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
+				job->queue_type == QUEUE_TYPE_INT)) {
+		atomic_dec(&job->user_cb->cs_cnt);
 		hl_cb_put(job->user_cb);
 	}
 
@@ -239,27 +294,12 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
 			job->queue_type == QUEUE_TYPE_HW)
 		cs_put(cs);
 
-	kfree(job);
-}
-
-static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx)
-{
-	hdev->aggregated_cs_counters.device_in_reset_drop_cnt +=
-			ctx->cs_counters.device_in_reset_drop_cnt;
-	hdev->aggregated_cs_counters.out_of_mem_drop_cnt +=
-			ctx->cs_counters.out_of_mem_drop_cnt;
-	hdev->aggregated_cs_counters.parsing_drop_cnt +=
-			ctx->cs_counters.parsing_drop_cnt;
-	hdev->aggregated_cs_counters.queue_full_drop_cnt +=
-			ctx->cs_counters.queue_full_drop_cnt;
-	hdev->aggregated_cs_counters.max_cs_in_flight_drop_cnt +=
-			ctx->cs_counters.max_cs_in_flight_drop_cnt;
+	cs_job_put(job);
 }
 
 static void cs_do_release(struct kref *ref)
 {
-	struct hl_cs *cs = container_of(ref, struct hl_cs,
-						refcount);
+	struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
 	struct hl_device *hdev = cs->ctx->hdev;
 	struct hl_cs_job *job, *tmp;
 
@@ -268,77 +308,78 @@ static void cs_do_release(struct kref *ref)
 	/*
 	 * Although if we reached here it means that all external jobs have
 	 * finished, because each one of them took refcnt to CS, we still
-	 * need to go over the internal jobs and free them. Otherwise, we
+	 * need to go over the internal jobs and complete them. Otherwise, we
 	 * will have leaked memory and what's worse, the CS object (and
 	 * potentially the CTX object) could be released, while the JOB
 	 * still holds a pointer to them (but no reference).
 	 */
 	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
-		free_job(hdev, job);
+		complete_job(hdev, job);
 
-	/* We also need to update CI for internal queues */
-	if (cs->submitted) {
-		hdev->asic_funcs->hw_queues_lock(hdev);
+	if (!cs->submitted) {
+		/* In case the wait for signal CS was submitted, the put occurs
+		 * in init_signal_wait_cs() or collective_wait_init_cs()
+		 * right before hanging on the PQ.
+		 */
+		if (cs->type == CS_TYPE_WAIT ||
+				cs->type == CS_TYPE_COLLECTIVE_WAIT)
+			hl_fence_put(cs->signal_fence);
 
-		hdev->cs_active_cnt--;
-		if (!hdev->cs_active_cnt) {
-			struct hl_device_idle_busy_ts *ts;
+		goto out;
+	}
 
-			ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
-			ts->busy_to_idle_ts = ktime_get();
+	hdev->asic_funcs->hw_queues_lock(hdev);
 
-			if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
-				hdev->idle_busy_ts_idx = 0;
-		} else if (hdev->cs_active_cnt < 0) {
-			dev_crit(hdev->dev, "CS active cnt %d is negative\n",
-				hdev->cs_active_cnt);
-		}
+	hdev->cs_active_cnt--;
+	if (!hdev->cs_active_cnt) {
+		struct hl_device_idle_busy_ts *ts;
 
-		hdev->asic_funcs->hw_queues_unlock(hdev);
+		ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++];
+		ts->busy_to_idle_ts = ktime_get();
 
-		hl_int_hw_queue_update_ci(cs);
+		if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE)
+			hdev->idle_busy_ts_idx = 0;
+	} else if (hdev->cs_active_cnt < 0) {
+		dev_crit(hdev->dev, "CS active cnt %d is negative\n",
+			hdev->cs_active_cnt);
+	}
 
-		spin_lock(&hdev->hw_queues_mirror_lock);
-		/* remove CS from hw_queues mirror list */
-		list_del_init(&cs->mirror_node);
-		spin_unlock(&hdev->hw_queues_mirror_lock);
+	hdev->asic_funcs->hw_queues_unlock(hdev);
 
-		/*
-		 * Don't cancel TDR in case this CS was timedout because we
-		 * might be running from the TDR context
-		 */
-		if ((!cs->timedout) &&
-			(hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
-			struct hl_cs *next;
+	/* Need to update CI for internal queues */
+	hl_int_hw_queue_update_ci(cs);
 
-			if (cs->tdr_active)
-				cancel_delayed_work_sync(&cs->work_tdr);
+	/* remove CS from CS mirror list */
+	spin_lock(&hdev->cs_mirror_lock);
+	list_del_init(&cs->mirror_node);
+	spin_unlock(&hdev->cs_mirror_lock);
 
-			spin_lock(&hdev->hw_queues_mirror_lock);
+	/* Don't cancel TDR in case this CS was timedout because we might be
+	 * running from the TDR context
+	 */
+	if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
+		struct hl_cs *next;
 
-			/* queue TDR for next CS */
-			next = list_first_entry_or_null(
-					&hdev->hw_queues_mirror_list,
-					struct hl_cs, mirror_node);
+		if (cs->tdr_active)
+			cancel_delayed_work_sync(&cs->work_tdr);
 
-			if ((next) && (!next->tdr_active)) {
-				next->tdr_active = true;
-				schedule_delayed_work(&next->work_tdr,
-							hdev->timeout_jiffies);
-			}
+		spin_lock(&hdev->cs_mirror_lock);
 
-			spin_unlock(&hdev->hw_queues_mirror_lock);
+		/* queue TDR for next CS */
+		next = list_first_entry_or_null(&hdev->cs_mirror_list,
+						struct hl_cs, mirror_node);
+
+		if (next && !next->tdr_active) {
+			next->tdr_active = true;
+			schedule_delayed_work(&next->work_tdr,
+						hdev->timeout_jiffies);
 		}
-	} else if (cs->type == CS_TYPE_WAIT) {
-		/*
-		 * In case the wait for signal CS was submitted, the put occurs
-		 * in init_signal_wait_cs() right before hanging on the PQ.
-		 */
-		hl_fence_put(cs->signal_fence);
+
+		spin_unlock(&hdev->cs_mirror_lock);
 	}
 
-	/*
-	 * Must be called before hl_ctx_put because inside we use ctx to get
+out:
+	/* Must be called before hl_ctx_put because inside we use ctx to get
 	 * the device
 	 */
 	hl_debugfs_remove_cs(cs);
@@ -356,9 +397,10 @@ static void cs_do_release(struct kref *ref)
 	else if (!cs->submitted)
 		cs->fence->error = -EBUSY;
 
+	if (cs->timestamp)
+		cs->fence->timestamp = ktime_get();
 	complete_all(&cs->fence->completion);
 	hl_fence_put(cs->fence);
-	cs_counters_aggregate(hdev, cs->ctx);
 
 	kfree(cs->jobs_in_queue_cnt);
 	kfree(cs);
@@ -384,24 +426,51 @@ static void cs_timedout(struct work_struct *work)
 
 	hdev = cs->ctx->hdev;
 
-	dev_err(hdev->dev,
-		"Command submission %llu has not finished in time!\n",
-		cs->sequence);
+	switch (cs->type) {
+	case CS_TYPE_SIGNAL:
+		dev_err(hdev->dev,
+			"Signal command submission %llu has not finished in time!\n",
+			cs->sequence);
+		break;
+
+	case CS_TYPE_WAIT:
+		dev_err(hdev->dev,
+			"Wait command submission %llu has not finished in time!\n",
+			cs->sequence);
+		break;
+
+	case CS_TYPE_COLLECTIVE_WAIT:
+		dev_err(hdev->dev,
+			"Collective Wait command submission %llu has not finished in time!\n",
+			cs->sequence);
+		break;
+
+	default:
+		dev_err(hdev->dev,
+			"Command submission %llu has not finished in time!\n",
+			cs->sequence);
+		break;
+	}
 
 	cs_put(cs);
 
 	if (hdev->reset_on_lockup)
 		hl_device_reset(hdev, false, false);
+	else
+		hdev->needs_reset = true;
 }
 
 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
 			enum hl_cs_type cs_type, struct hl_cs **cs_new)
 {
-	struct hl_cs_compl *cs_cmpl;
+	struct hl_cs_counters_atomic *cntr;
 	struct hl_fence *other = NULL;
+	struct hl_cs_compl *cs_cmpl;
 	struct hl_cs *cs;
 	int rc;
 
+	cntr = &hdev->aggregated_cs_counters;
+
 	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
 	if (!cs)
 		return -ENOMEM;
@@ -435,7 +504,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
 	if (other && !completion_done(&other->completion)) {
 		dev_dbg_ratelimited(hdev->dev,
 			"Rejecting CS because of too many in-flights CS\n");
-		ctx->cs_counters.max_cs_in_flight_drop_cnt++;
+		atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
+		atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
 		rc = -EAGAIN;
 		goto free_fence;
 	}
@@ -480,7 +550,7 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
 	struct hl_cs_job *job, *tmp;
 
 	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
-		free_job(hdev, job);
+		complete_job(hdev, job);
 }
 
 void hl_cs_rollback_all(struct hl_device *hdev)
@@ -493,8 +563,7 @@ void hl_cs_rollback_all(struct hl_device *hdev)
 		flush_workqueue(hdev->cq_wq[i]);
 
 	/* Make sure we don't have leftovers in the H/W queues mirror list */
-	list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
-				mirror_node) {
+	list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
 		cs_get(cs);
 		cs->aborted = true;
 		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
@@ -512,7 +581,7 @@ static void job_wq_completion(struct work_struct *work)
 	struct hl_device *hdev = cs->ctx->hdev;
 
 	/* job is no longer needed */
-	free_job(hdev, job);
+	complete_job(hdev, job);
 }
 
 static int validate_queue_index(struct hl_device *hdev,
@@ -547,9 +616,36 @@ static int validate_queue_index(struct hl_device *hdev,
 		return -EINVAL;
 	}
 
-	*queue_type = hw_queue_prop->type;
-	*is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb;
+	/* When the hw queue type isn't QUEUE_TYPE_HW, the
+	 * USER_ALLOC_CB flag is treated as "don't care".
+	 */
+	if (hw_queue_prop->type == QUEUE_TYPE_HW) {
+		if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
+			if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
+				dev_err(hdev->dev,
+					"Queue index %d doesn't support user CB\n",
+					chunk->queue_index);
+				return -EINVAL;
+			}
+
+			*is_kernel_allocated_cb = false;
+		} else {
+			if (!(hw_queue_prop->cb_alloc_flags &
+					CB_ALLOC_KERNEL)) {
+				dev_err(hdev->dev,
+					"Queue index %d doesn't support kernel CB\n",
+					chunk->queue_index);
+				return -EINVAL;
+			}
+
+			*is_kernel_allocated_cb = true;
+		}
+	} else {
+		*is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
+						& CB_ALLOC_KERNEL);
+	}
 
+	*queue_type = hw_queue_prop->type;
 	return 0;
 }
 
@@ -573,9 +669,7 @@ static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
 		goto release_cb;
 	}
 
-	spin_lock(&cb->lock);
-	cb->cs_cnt++;
-	spin_unlock(&cb->lock);
+	atomic_inc(&cb->cs_cnt);
 
 	return cb;
 
@@ -593,6 +687,7 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
 	if (!job)
 		return NULL;
 
+	kref_init(&job->refcount);
 	job->queue_type = queue_type;
 	job->is_kernel_allocated_cb = is_kernel_allocated_cb;
 
@@ -605,42 +700,115 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
 	return job;
 }
 
-static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
-				u32 num_chunks, u64 *cs_seq)
+static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
+{
+	if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
+		return CS_TYPE_SIGNAL;
+	else if (cs_type_flags & HL_CS_FLAGS_WAIT)
+		return CS_TYPE_WAIT;
+	else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
+		return CS_TYPE_COLLECTIVE_WAIT;
+	else
+		return CS_TYPE_DEFAULT;
+}
+
+static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
 {
 	struct hl_device *hdev = hpriv->hdev;
-	struct hl_cs_chunk *cs_chunk_array;
-	struct hl_cs_job *job;
-	struct hl_cs *cs;
-	struct hl_cb *cb;
-	bool int_queues_only = true;
-	u32 size_to_copy;
-	int rc, i;
+	struct hl_ctx *ctx = hpriv->ctx;
+	u32 cs_type_flags, num_chunks;
+	enum hl_device_status status;
+	enum hl_cs_type cs_type;
 
-	*cs_seq = ULLONG_MAX;
+	if (!hl_device_operational(hdev, &status)) {
+		dev_warn_ratelimited(hdev->dev,
+			"Device is %s. Can't submit new CS\n",
+			hdev->status[status]);
+		return -EBUSY;
+	}
+
+	cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
+
+	if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
+		dev_err(hdev->dev,
+			"CS type flags are mutually exclusive, context %d\n",
+			ctx->asid);
+		return -EINVAL;
+	}
+
+	cs_type = hl_cs_get_cs_type(cs_type_flags);
+	num_chunks = args->in.num_chunks_execute;
+
+	if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
+					!hdev->supports_sync_stream)) {
+		dev_err(hdev->dev, "Sync stream CS is not supported\n");
+		return -EINVAL;
+	}
+
+	if (cs_type == CS_TYPE_DEFAULT) {
+		if (!num_chunks) {
+			dev_err(hdev->dev,
+				"Got execute CS with 0 chunks, context %d\n",
+				ctx->asid);
+			return -EINVAL;
+		}
+	} else if (num_chunks != 1) {
+		dev_err(hdev->dev,
+			"Sync stream CS mandates one chunk only, context %d\n",
+			ctx->asid);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int hl_cs_copy_chunk_array(struct hl_device *hdev,
+					struct hl_cs_chunk **cs_chunk_array,
+					void __user *chunks, u32 num_chunks)
+{
+	u32 size_to_copy;
 
 	if (num_chunks > HL_MAX_JOBS_PER_CS) {
 		dev_err(hdev->dev,
 			"Number of chunks can NOT be larger than %d\n",
 			HL_MAX_JOBS_PER_CS);
-		rc = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
-	cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
+	*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
 					GFP_ATOMIC);
-	if (!cs_chunk_array) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!*cs_chunk_array)
+		return -ENOMEM;
 
 	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
-	if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
+	if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
 		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
-		rc = -EFAULT;
-		goto free_cs_chunk_array;
+		kfree(*cs_chunk_array);
+		return -EFAULT;
 	}
 
+	return 0;
+}
+
+static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
+				u32 num_chunks, u64 *cs_seq, bool timestamp)
+{
+	bool int_queues_only = true;
+	struct hl_device *hdev = hpriv->hdev;
+	struct hl_cs_chunk *cs_chunk_array;
+	struct hl_cs_counters_atomic *cntr;
+	struct hl_cs_job *job;
+	struct hl_cs *cs;
+	struct hl_cb *cb;
+	int rc, i;
+
+	cntr = &hdev->aggregated_cs_counters;
+	*cs_seq = ULLONG_MAX;
+
+	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+	if (rc)
+		goto out;
+
 	/* increment refcnt for context */
 	hl_ctx_get(hdev, hpriv->ctx);
 
@@ -650,6 +818,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 		goto free_cs_chunk_array;
 	}
 
+	cs->timestamp = !!timestamp;
 	*cs_seq = cs->sequence;
 
 	hl_debugfs_add_cs(cs);
@@ -663,14 +832,17 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 		rc = validate_queue_index(hdev, chunk, &queue_type,
 						&is_kernel_allocated_cb);
 		if (rc) {
-			hpriv->ctx->cs_counters.parsing_drop_cnt++;
+			atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+			atomic64_inc(&cntr->parsing_drop_cnt);
 			goto free_cs_object;
 		}
 
 		if (is_kernel_allocated_cb) {
 			cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
 			if (!cb) {
-				hpriv->ctx->cs_counters.parsing_drop_cnt++;
+				atomic64_inc(
+				&hpriv->ctx->cs_counters.parsing_drop_cnt);
+				atomic64_inc(&cntr->parsing_drop_cnt);
 				rc = -EINVAL;
 				goto free_cs_object;
 			}
@@ -684,7 +856,9 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 		job = hl_cs_allocate_job(hdev, queue_type,
 						is_kernel_allocated_cb);
 		if (!job) {
-			hpriv->ctx->cs_counters.out_of_mem_drop_cnt++;
+			atomic64_inc(
+			&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+			atomic64_inc(&cntr->out_of_mem_drop_cnt);
 			dev_err(hdev->dev, "Failed to allocate a new job\n");
 			rc = -ENOMEM;
 			if (is_kernel_allocated_cb)
@@ -717,7 +891,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 
 		rc = cs_parser(hpriv, job);
 		if (rc) {
-			hpriv->ctx->cs_counters.parsing_drop_cnt++;
+			atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+			atomic64_inc(&cntr->parsing_drop_cnt);
 			dev_err(hdev->dev,
 				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
 				cs->ctx->asid, cs->sequence, job->id, rc);
@@ -726,7 +901,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 	}
 
 	if (int_queues_only) {
-		hpriv->ctx->cs_counters.parsing_drop_cnt++;
+		atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+		atomic64_inc(&cntr->parsing_drop_cnt);
 		dev_err(hdev->dev,
 			"Reject CS %d.%llu because only internal queues jobs are present\n",
 			cs->ctx->asid, cs->sequence);
@@ -747,9 +923,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 	goto put_cs;
 
 release_cb:
-	spin_lock(&cb->lock);
-	cb->cs_cnt--;
-	spin_unlock(&cb->lock);
+	atomic_dec(&cb->cs_cnt);
 	hl_cb_put(cb);
 free_cs_object:
 	cs_rollback(hdev, cs);
@@ -764,47 +938,234 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 	return rc;
 }
 
-static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
-				void __user *chunks, u32 num_chunks,
+static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
 				u64 *cs_seq)
 {
 	struct hl_device *hdev = hpriv->hdev;
 	struct hl_ctx *ctx = hpriv->ctx;
-	struct hl_cs_chunk *cs_chunk_array, *chunk;
-	struct hw_queue_properties *hw_queue_prop;
-	struct hl_fence *sig_fence = NULL;
-	struct hl_cs_job *job;
-	struct hl_cs *cs;
-	struct hl_cb *cb;
-	enum hl_queue_type q_type;
-	u64 *signal_seq_arr = NULL, signal_seq;
-	u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size;
-	int rc;
+	bool need_soft_reset = false;
+	int rc = 0, do_ctx_switch;
+	void __user *chunks;
+	u32 num_chunks, tmp;
+	int ret;
 
-	*cs_seq = ULLONG_MAX;
+	do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
 
-	if (num_chunks > HL_MAX_JOBS_PER_CS) {
+	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
+		mutex_lock(&hpriv->restore_phase_mutex);
+
+		if (do_ctx_switch) {
+			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
+			if (rc) {
+				dev_err_ratelimited(hdev->dev,
+					"Failed to switch to context %d, rejecting CS! %d\n",
+					ctx->asid, rc);
+				/*
+				 * If we timedout, or if the device is not IDLE
+				 * while we want to do context-switch (-EBUSY),
+				 * we need to soft-reset because QMAN is
+				 * probably stuck. However, we can't call to
+				 * reset here directly because of deadlock, so
+				 * need to do it at the very end of this
+				 * function
+				 */
+				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
+					need_soft_reset = true;
+				mutex_unlock(&hpriv->restore_phase_mutex);
+				goto out;
+			}
+		}
+
+		hdev->asic_funcs->restore_phase_topology(hdev);
+
+		chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
+		num_chunks = args->in.num_chunks_restore;
+
+		if (!num_chunks) {
+			dev_dbg(hdev->dev,
+				"Need to run restore phase but restore CS is empty\n");
+			rc = 0;
+		} else {
+			rc = cs_ioctl_default(hpriv, chunks, num_chunks,
+						cs_seq, false);
+		}
+
+		mutex_unlock(&hpriv->restore_phase_mutex);
+
+		if (rc) {
+			dev_err(hdev->dev,
+				"Failed to submit restore CS for context %d (%d)\n",
+				ctx->asid, rc);
+			goto out;
+		}
+
+		/* Need to wait for restore completion before execution phase */
+		if (num_chunks) {
+			enum hl_cs_wait_status status;
+wait_again:
+			ret = _hl_cs_wait_ioctl(hdev, ctx,
+					jiffies_to_usecs(hdev->timeout_jiffies),
+					*cs_seq, &status, NULL);
+			if (ret) {
+				if (ret == -ERESTARTSYS) {
+					usleep_range(100, 200);
+					goto wait_again;
+				}
+
+				dev_err(hdev->dev,
+					"Restore CS for context %d failed to complete %d\n",
+					ctx->asid, ret);
+				rc = -ENOEXEC;
+				goto out;
+			}
+		}
+
+		ctx->thread_ctx_switch_wait_token = 1;
+
+	} else if (!ctx->thread_ctx_switch_wait_token) {
+		rc = hl_poll_timeout_memory(hdev,
+			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
+			100, jiffies_to_usecs(hdev->timeout_jiffies), false);
+
+		if (rc == -ETIMEDOUT) {
+			dev_err(hdev->dev,
+				"context switch phase timeout (%d)\n", tmp);
+			goto out;
+		}
+	}
+
+out:
+	if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
+		hl_device_reset(hdev, false, false);
+
+	return rc;
+}
+
+static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
+		struct hl_cs_chunk *chunk, u64 *signal_seq)
+{
+	u64 *signal_seq_arr = NULL;
+	u32 size_to_copy, signal_seq_arr_len;
+	int rc = 0;
+
+	signal_seq_arr_len = chunk->num_signal_seq_arr;
+
+	/* currently only one signal seq is supported */
+	if (signal_seq_arr_len != 1) {
 		dev_err(hdev->dev,
-			"Number of chunks can NOT be larger than %d\n",
-			HL_MAX_JOBS_PER_CS);
-		rc = -EINVAL;
-		goto out;
+			"Wait for signal CS supports only one signal CS seq\n");
+		return -EINVAL;
 	}
 
-	cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
+	signal_seq_arr = kmalloc_array(signal_seq_arr_len,
+					sizeof(*signal_seq_arr),
 					GFP_ATOMIC);
-	if (!cs_chunk_array) {
-		rc = -ENOMEM;
+	if (!signal_seq_arr)
+		return -ENOMEM;
+
+	size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
+	if (copy_from_user(signal_seq_arr,
+				u64_to_user_ptr(chunk->signal_seq_arr),
+				size_to_copy)) {
+		dev_err(hdev->dev,
+			"Failed to copy signal seq array from user\n");
+		rc = -EFAULT;
 		goto out;
 	}
 
-	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
-	if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
-		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
-		rc = -EFAULT;
-		goto free_cs_chunk_array;
+	/* currently it is guaranteed to have only one signal seq */
+	*signal_seq = signal_seq_arr[0];
+
+out:
+	kfree(signal_seq_arr);
+
+	return rc;
+}
+
+static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
+		struct hl_ctx *ctx, struct hl_cs *cs, enum hl_queue_type q_type,
+		u32 q_idx)
+{
+	struct hl_cs_counters_atomic *cntr;
+	struct hl_cs_job *job;
+	struct hl_cb *cb;
+	u32 cb_size;
+
+	cntr = &hdev->aggregated_cs_counters;
+
+	job = hl_cs_allocate_job(hdev, q_type, true);
+	if (!job) {
+		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+		atomic64_inc(&cntr->out_of_mem_drop_cnt);
+		dev_err(hdev->dev, "Failed to allocate a new job\n");
+		return -ENOMEM;
 	}
 
+	if (cs->type == CS_TYPE_WAIT)
+		cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
+	else
+		cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
+
+	cb = hl_cb_kernel_create(hdev, cb_size,
+				q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
+	if (!cb) {
+		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+		atomic64_inc(&cntr->out_of_mem_drop_cnt);
+		kfree(job);
+		return -EFAULT;
+	}
+
+	job->id = 0;
+	job->cs = cs;
+	job->user_cb = cb;
+	atomic_inc(&job->user_cb->cs_cnt);
+	job->user_cb_size = cb_size;
+	job->hw_queue_id = q_idx;
+
+	/*
+	 * No need in parsing, user CB is the patched CB.
+	 * We call hl_cb_destroy() out of two reasons - we don't need the CB in
+	 * the CB idr anymore and to decrement its refcount as it was
+	 * incremented inside hl_cb_kernel_create().
+	 */
+	job->patched_cb = job->user_cb;
+	job->job_cb_size = job->user_cb_size;
+	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+	/* increment refcount as for external queues we get completion */
+	cs_get(cs);
+
+	cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+
+	list_add_tail(&job->cs_node, &cs->job_list);
+
+	hl_debugfs_add_job(hdev, job);
+
+	return 0;
+}
+
+static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
+				void __user *chunks, u32 num_chunks,
+				u64 *cs_seq, bool timestamp)
+{
+	struct hl_cs_chunk *cs_chunk_array, *chunk;
+	struct hw_queue_properties *hw_queue_prop;
+	struct hl_device *hdev = hpriv->hdev;
+	struct hl_cs_compl *sig_waitcs_cmpl;
+	u32 q_idx, collective_engine_id = 0;
+	struct hl_fence *sig_fence = NULL;
+	struct hl_ctx *ctx = hpriv->ctx;
+	enum hl_queue_type q_type;
+	struct hl_cs *cs;
+	u64 signal_seq;
+	int rc;
+
+	*cs_seq = ULLONG_MAX;
+
+	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+	if (rc)
+		goto out;
+
 	/* currently it is guaranteed to have only one chunk */
 	chunk = &cs_chunk_array[0];
 
@@ -819,60 +1180,43 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
 	hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
 	q_type = hw_queue_prop->type;
 
-	if ((q_idx >= hdev->asic_prop.max_queues) ||
-			(!hw_queue_prop->supports_sync_stream)) {
-		dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
+	if (!hw_queue_prop->supports_sync_stream) {
+		dev_err(hdev->dev,
+			"Queue index %d does not support sync stream operations\n",
+			q_idx);
 		rc = -EINVAL;
 		goto free_cs_chunk_array;
 	}
 
-	if (cs_type == CS_TYPE_WAIT) {
-		struct hl_cs_compl *sig_waitcs_cmpl;
-
-		signal_seq_arr_len = chunk->num_signal_seq_arr;
-
-		/* currently only one signal seq is supported */
-		if (signal_seq_arr_len != 1) {
+	if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
+		if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
 			dev_err(hdev->dev,
-				"Wait for signal CS supports only one signal CS seq\n");
+				"Queue index %d is invalid\n", q_idx);
 			rc = -EINVAL;
 			goto free_cs_chunk_array;
 		}
 
-		signal_seq_arr = kmalloc_array(signal_seq_arr_len,
-						sizeof(*signal_seq_arr),
-						GFP_ATOMIC);
-		if (!signal_seq_arr) {
-			rc = -ENOMEM;
-			goto free_cs_chunk_array;
-		}
+		collective_engine_id = chunk->collective_engine_id;
+	}
 
-		size_to_copy = chunk->num_signal_seq_arr *
-				sizeof(*signal_seq_arr);
-		if (copy_from_user(signal_seq_arr,
-					u64_to_user_ptr(chunk->signal_seq_arr),
-					size_to_copy)) {
-			dev_err(hdev->dev,
-				"Failed to copy signal seq array from user\n");
-			rc = -EFAULT;
-			goto free_signal_seq_array;
-		}
+	if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
+		rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq);
+		if (rc)
+			goto free_cs_chunk_array;
 
-		/* currently it is guaranteed to have only one signal seq */
-		signal_seq = signal_seq_arr[0];
 		sig_fence = hl_ctx_get_fence(ctx, signal_seq);
 		if (IS_ERR(sig_fence)) {
 			dev_err(hdev->dev,
 				"Failed to get signal CS with seq 0x%llx\n",
 				signal_seq);
 			rc = PTR_ERR(sig_fence);
-			goto free_signal_seq_array;
+			goto free_cs_chunk_array;
 		}
 
 		if (!sig_fence) {
 			/* signal CS already finished */
 			rc = 0;
-			goto free_signal_seq_array;
+			goto free_cs_chunk_array;
 		}
 
 		sig_waitcs_cmpl =
@@ -884,14 +1228,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
 				signal_seq);
 			hl_fence_put(sig_fence);
 			rc = -EINVAL;
-			goto free_signal_seq_array;
+			goto free_cs_chunk_array;
 		}
 
 		if (completion_done(&sig_fence->completion)) {
 			/* signal CS already finished */
 			hl_fence_put(sig_fence);
 			rc = 0;
-			goto free_signal_seq_array;
+			goto free_cs_chunk_array;
 		}
 	}
 
@@ -900,70 +1244,37 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
 
 	rc = allocate_cs(hdev, ctx, cs_type, &cs);
 	if (rc) {
-		if (cs_type == CS_TYPE_WAIT)
+		if (cs_type == CS_TYPE_WAIT ||
+			cs_type == CS_TYPE_COLLECTIVE_WAIT)
 			hl_fence_put(sig_fence);
 		hl_ctx_put(ctx);
-		goto free_signal_seq_array;
+		goto free_cs_chunk_array;
 	}
 
+	cs->timestamp = !!timestamp;
+
 	/*
 	 * Save the signal CS fence for later initialization right before
 	 * hanging the wait CS on the queue.
 	 */
-	if (cs->type == CS_TYPE_WAIT)
+	if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT)
 		cs->signal_fence = sig_fence;
 
 	hl_debugfs_add_cs(cs);
 
 	*cs_seq = cs->sequence;
 
-	job = hl_cs_allocate_job(hdev, q_type, true);
-	if (!job) {
-		ctx->cs_counters.out_of_mem_drop_cnt++;
-		dev_err(hdev->dev, "Failed to allocate a new job\n");
-		rc = -ENOMEM;
-		goto put_cs;
-	}
-
-	if (cs->type == CS_TYPE_WAIT)
-		cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
+	if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
+		rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
+				q_idx);
+	else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
+		rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
+				cs, q_idx, collective_engine_id);
 	else
-		cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
-
-	cb = hl_cb_kernel_create(hdev, cb_size,
-				q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
-	if (!cb) {
-		ctx->cs_counters.out_of_mem_drop_cnt++;
-		kfree(job);
-		rc = -EFAULT;
-		goto put_cs;
-	}
-
-	job->id = 0;
-	job->cs = cs;
-	job->user_cb = cb;
-	job->user_cb->cs_cnt++;
-	job->user_cb_size = cb_size;
-	job->hw_queue_id = q_idx;
-
-	/*
-	 * No need in parsing, user CB is the patched CB.
-	 * We call hl_cb_destroy() out of two reasons - we don't need the CB in
-	 * the CB idr anymore and to decrement its refcount as it was
-	 * incremented inside hl_cb_kernel_create().
-	 */
-	job->patched_cb = job->user_cb;
-	job->job_cb_size = job->user_cb_size;
-	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
-
-	cs->jobs_in_queue_cnt[job->hw_queue_id]++;
-
-	list_add_tail(&job->cs_node, &cs->job_list);
-
-	/* increment refcount as for external queues we get completion */
-	cs_get(cs);
+		rc = -EINVAL;
 
-	hl_debugfs_add_job(hdev, job);
+	if (rc)
+		goto free_cs_object;
 
 	rc = hl_hw_queue_schedule_cs(cs);
 	if (rc) {
@@ -984,9 +1295,6 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
 put_cs:
 	/* We finished with the CS in this function, so put the ref */
 	cs_put(cs);
-free_signal_seq_array:
-	if (cs_type == CS_TYPE_WAIT)
-		kfree(signal_seq_arr);
 free_cs_chunk_array:
 	kfree(cs_chunk_array);
 out:
@@ -995,156 +1303,39 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
 
 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
 {
-	struct hl_device *hdev = hpriv->hdev;
 	union hl_cs_args *args = data;
-	struct hl_ctx *ctx = hpriv->ctx;
-	void __user *chunks_execute, *chunks_restore;
 	enum hl_cs_type cs_type;
-	u32 num_chunks_execute, num_chunks_restore, sig_wait_flags;
 	u64 cs_seq = ULONG_MAX;
-	int rc, do_ctx_switch;
-	bool need_soft_reset = false;
-
-	if (hl_device_disabled_or_in_reset(hdev)) {
-		dev_warn_ratelimited(hdev->dev,
-			"Device is %s. Can't submit new CS\n",
-			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
-		rc = -EBUSY;
-		goto out;
-	}
-
-	sig_wait_flags = args->in.cs_flags & HL_CS_FLAGS_SIG_WAIT;
+	void __user *chunks;
+	u32 num_chunks;
+	int rc;
 
-	if (unlikely(sig_wait_flags == HL_CS_FLAGS_SIG_WAIT)) {
-		dev_err(hdev->dev,
-			"Signal and wait CS flags are mutually exclusive, context %d\n",
-		ctx->asid);
-		rc = -EINVAL;
+	rc = hl_cs_sanity_checks(hpriv, args);
+	if (rc)
 		goto out;
-	}
 
-	if (unlikely((sig_wait_flags & HL_CS_FLAGS_SIG_WAIT) &&
-			(!hdev->supports_sync_stream))) {
-		dev_err(hdev->dev, "Sync stream CS is not supported\n");
-		rc = -EINVAL;
+	rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
+	if (rc)
 		goto out;
-	}
 
-	if (args->in.cs_flags & HL_CS_FLAGS_SIGNAL)
-		cs_type = CS_TYPE_SIGNAL;
-	else if (args->in.cs_flags & HL_CS_FLAGS_WAIT)
-		cs_type = CS_TYPE_WAIT;
-	else
-		cs_type = CS_TYPE_DEFAULT;
-
-	chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute;
-	num_chunks_execute = args->in.num_chunks_execute;
-
-	if (cs_type == CS_TYPE_DEFAULT) {
-		if (!num_chunks_execute) {
-			dev_err(hdev->dev,
-				"Got execute CS with 0 chunks, context %d\n",
-				ctx->asid);
-			rc = -EINVAL;
-			goto out;
-		}
-	} else if (num_chunks_execute != 1) {
-		dev_err(hdev->dev,
-			"Sync stream CS mandates one chunk only, context %d\n",
-			ctx->asid);
-		rc = -EINVAL;
-		goto out;
+	cs_type = hl_cs_get_cs_type(args->in.cs_flags &
+					~HL_CS_FLAGS_FORCE_RESTORE);
+	chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
+	num_chunks = args->in.num_chunks_execute;
+
+	switch (cs_type) {
+	case CS_TYPE_SIGNAL:
+	case CS_TYPE_WAIT:
+	case CS_TYPE_COLLECTIVE_WAIT:
+		rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
+			&cs_seq, args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
+		break;
+	default:
+		rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
+				args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP);
+		break;
 	}
 
-	do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
-
-	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
-		long ret;
-
-		chunks_restore =
-			(void __user *) (uintptr_t) args->in.chunks_restore;
-		num_chunks_restore = args->in.num_chunks_restore;
-
-		mutex_lock(&hpriv->restore_phase_mutex);
-
-		if (do_ctx_switch) {
-			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
-			if (rc) {
-				dev_err_ratelimited(hdev->dev,
-					"Failed to switch to context %d, rejecting CS! %d\n",
-					ctx->asid, rc);
-				/*
-				 * If we timedout, or if the device is not IDLE
-				 * while we want to do context-switch (-EBUSY),
-				 * we need to soft-reset because QMAN is
-				 * probably stuck. However, we can't call to
-				 * reset here directly because of deadlock, so
-				 * need to do it at the very end of this
-				 * function
-				 */
-				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
-					need_soft_reset = true;
-				mutex_unlock(&hpriv->restore_phase_mutex);
-				goto out;
-			}
-		}
-
-		hdev->asic_funcs->restore_phase_topology(hdev);
-
-		if (!num_chunks_restore) {
-			dev_dbg(hdev->dev,
-			"Need to run restore phase but restore CS is empty\n");
-			rc = 0;
-		} else {
-			rc = cs_ioctl_default(hpriv, chunks_restore,
-						num_chunks_restore, &cs_seq);
-		}
-
-		mutex_unlock(&hpriv->restore_phase_mutex);
-
-		if (rc) {
-			dev_err(hdev->dev,
-				"Failed to submit restore CS for context %d (%d)\n",
-				ctx->asid, rc);
-			goto out;
-		}
-
-		/* Need to wait for restore completion before execution phase */
-		if (num_chunks_restore) {
-			ret = _hl_cs_wait_ioctl(hdev, ctx,
-					jiffies_to_usecs(hdev->timeout_jiffies),
-					cs_seq);
-			if (ret <= 0) {
-				dev_err(hdev->dev,
-					"Restore CS for context %d failed to complete %ld\n",
-					ctx->asid, ret);
-				rc = -ENOEXEC;
-				goto out;
-			}
-		}
-
-		ctx->thread_ctx_switch_wait_token = 1;
-	} else if (!ctx->thread_ctx_switch_wait_token) {
-		u32 tmp;
-
-		rc = hl_poll_timeout_memory(hdev,
-			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
-			100, jiffies_to_usecs(hdev->timeout_jiffies), false);
-
-		if (rc == -ETIMEDOUT) {
-			dev_err(hdev->dev,
-				"context switch phase timeout (%d)\n", tmp);
-			goto out;
-		}
-	}
-
-	if (cs_type == CS_TYPE_DEFAULT)
-		rc = cs_ioctl_default(hpriv, chunks_execute, num_chunks_execute,
-					&cs_seq);
-	else
-		rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks_execute,
-						num_chunks_execute, &cs_seq);
-
 out:
 	if (rc != -EAGAIN) {
 		memset(args, 0, sizeof(*args));
@@ -1152,18 +1343,20 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
 		args->out.seq = cs_seq;
 	}
 
-	if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset))
-		hl_device_reset(hdev, false, false);
-
 	return rc;
 }
 
-static long _hl_cs_wait_ioctl(struct hl_device *hdev,
-		struct hl_ctx *ctx, u64 timeout_us, u64 seq)
+static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+				u64 timeout_us, u64 seq,
+				enum hl_cs_wait_status *status, s64 *timestamp)
 {
 	struct hl_fence *fence;
 	unsigned long timeout;
-	long rc;
+	int rc = 0;
+	long completion_rc;
+
+	if (timestamp)
+		*timestamp = 0;
 
 	if (timeout_us == MAX_SCHEDULE_TIMEOUT)
 		timeout = timeout_us;
@@ -1181,11 +1374,20 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
 				seq, ctx->cs_sequence);
 	} else if (fence) {
 		if (!timeout_us)
-			rc = completion_done(&fence->completion);
+			completion_rc = completion_done(&fence->completion);
 		else
-			rc = wait_for_completion_interruptible_timeout(
+			completion_rc =
+				wait_for_completion_interruptible_timeout(
 					&fence->completion, timeout);
 
+		if (completion_rc > 0) {
+			*status = CS_WAIT_STATUS_COMPLETED;
+			if (timestamp)
+				*timestamp = ktime_to_ns(fence->timestamp);
+		} else {
+			*status = CS_WAIT_STATUS_BUSY;
+		}
+
 		if (fence->error == -ETIMEDOUT)
 			rc = -ETIMEDOUT;
 		else if (fence->error == -EIO)
@@ -1196,7 +1398,7 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
 		dev_dbg(hdev->dev,
 			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
 			seq, ctx->cs_sequence);
-		rc = 1;
+		*status = CS_WAIT_STATUS_GONE;
 	}
 
 	hl_ctx_put(ctx);
@@ -1208,14 +1410,17 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 {
 	struct hl_device *hdev = hpriv->hdev;
 	union hl_wait_cs_args *args = data;
+	enum hl_cs_wait_status status;
 	u64 seq = args->in.seq;
-	long rc;
+	s64 timestamp;
+	int rc;
 
-	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq);
+	rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
+				&status, &timestamp);
 
 	memset(args, 0, sizeof(*args));
 
-	if (rc < 0) {
+	if (rc) {
 		if (rc == -ERESTARTSYS) {
 			dev_err_ratelimited(hdev->dev,
 				"user process got signal while waiting for CS handle %llu\n",
@@ -1236,10 +1441,23 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
 		return rc;
 	}
 
-	if (rc == 0)
-		args->out.status = HL_WAIT_CS_STATUS_BUSY;
-	else
+	if (timestamp) {
+		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
+		args->out.timestamp_nsec = timestamp;
+	}
+
+	switch (status) {
+	case CS_WAIT_STATUS_GONE:
+		args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
+		fallthrough;
+	case CS_WAIT_STATUS_COMPLETED:
 		args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
+		break;
+	case CS_WAIT_STATUS_BUSY:
+	default:
+		args->out.status = HL_WAIT_CS_STATUS_BUSY;
+		break;
+	}
 
 	return 0;
 }
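
For reference, a minimal user-space sketch of how the new timestamp reporting could be consumed. It assumes the uapi definitions from include/uapi/misc/habanalabs.h, in particular the HL_IOCTL_WAIT_CS ioctl number and the installed header path, which are not shown in this hunk; the field names match the args->in/args->out accesses above.

/* Illustrative sketch only - not part of this patch.
 * Wait for a previously submitted CS and, if the driver reported a valid
 * timestamp (HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD), print the completion
 * time in nanoseconds.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* assumed uapi header install path */

static int wait_cs_with_timestamp(int fd, __u64 seq, __u64 timeout_us)
{
	union hl_wait_cs_args args;

	memset(&args, 0, sizeof(args));
	args.in.seq = seq;
	args.in.timeout_us = timeout_us;

	if (ioctl(fd, HL_IOCTL_WAIT_CS, &args) < 0)
		return -1;

	if (args.out.status == HL_WAIT_CS_STATUS_COMPLETED &&
	    (args.out.flags & HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD))
		printf("CS %llu completed at %lld ns\n",
		       (unsigned long long)seq,
		       (long long)args.out.timestamp_nsec);

	return 0;
}
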
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index 7a59dd7c6450b1869945565f3ecfe0e6003fa5bb..f65e6559149bc8c08f3c62208618b3570af33ba6 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -40,10 +40,14 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
 		if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
 			hl_device_set_debug_mode(hdev, false);
 
+		hdev->asic_funcs->ctx_fini(ctx);
 		hl_cb_va_pool_fini(ctx);
 		hl_vm_ctx_fini(ctx);
 		hl_asid_free(hdev, ctx->asid);
 
+		/* Scrub both SRAM and DRAM */
+		hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
+
 		if ((!hdev->pldm) && (hdev->pdev) &&
 				(!hdev->asic_funcs->is_device_idle(hdev,
 							&idle_mask, NULL)))
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 912ddfa360b1375e79d8b6d37e69bda169485ad5..cef716643979d738d1d92f11f1a046a07f9992ed 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -22,9 +22,10 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
 				u8 i2c_reg, long *val)
 {
 	struct cpucp_packet pkt;
+	u64 result;
 	int rc;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -EBUSY;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -36,7 +37,9 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
 	pkt.i2c_reg = i2c_reg;
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-						0, val);
+						0, &result);
+
+	*val = (long) result;
 
 	if (rc)
 		dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
@@ -50,7 +53,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
 	struct cpucp_packet pkt;
 	int rc;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -EBUSY;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -76,7 +79,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
 	struct cpucp_packet pkt;
 	int rc;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -113,7 +116,7 @@ static int command_buffers_show(struct seq_file *s, void *data)
 			"   %03llu        %d    0x%08x      %d          %d          %d\n",
 			cb->id, cb->ctx->asid, cb->size,
 			kref_read(&cb->refcount),
-			cb->mmap, cb->cs_cnt);
+			cb->mmap, atomic_read(&cb->cs_cnt));
 	}
 
 	spin_unlock(&dev_entry->cb_spinlock);
@@ -168,18 +171,19 @@ static int command_submission_jobs_show(struct seq_file *s, void *data)
 		if (first) {
 			first = false;
 			seq_puts(s, "\n");
-			seq_puts(s, " JOB ID   CS ID    CTX ASID   H/W Queue\n");
-			seq_puts(s, "---------------------------------------\n");
+			seq_puts(s, " JOB ID   CS ID    CTX ASID   JOB RefCnt   H/W Queue\n");
+			seq_puts(s, "----------------------------------------------------\n");
 		}
 		if (job->cs)
 			seq_printf(s,
-				"    %02d       %llu         %d         %d\n",
+				"   %02d      %llu        %d          %d           %d\n",
 				job->id, job->cs->sequence, job->cs->ctx->asid,
-				job->hw_queue_id);
+				kref_read(&job->refcount), job->hw_queue_id);
 		else
 			seq_printf(s,
-				"    %02d       0         %d         %d\n",
-				job->id, HL_KERNEL_ASID_ID, job->hw_queue_id);
+				"   %02d      0        %d          %d           %d\n",
+				job->id, HL_KERNEL_ASID_ID,
+				kref_read(&job->refcount), job->hw_queue_id);
 	}
 
 	spin_unlock(&dev_entry->cs_job_spinlock);
@@ -300,93 +304,15 @@ static int vm_show(struct seq_file *s, void *data)
 	return 0;
 }
 
-/* these inline functions are copied from mmu.c */
-static inline u64 get_hop0_addr(struct hl_ctx *ctx)
-{
-	return ctx->hdev->asic_prop.mmu_pgt_addr +
-			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
-static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
-					u64 virt_addr, u64 mask, u64 shift)
-{
-	return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
-			((virt_addr & mask) >> shift);
-}
-
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
-					struct hl_mmu_properties *mmu_specs,
-					u64 hop_addr, u64 vaddr)
-{
-	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask,
-					mmu_specs->hop0_shift);
-}
-
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
-					struct hl_mmu_properties *mmu_specs,
-					u64 hop_addr, u64 vaddr)
-{
-	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask,
-					mmu_specs->hop1_shift);
-}
-
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
-					struct hl_mmu_properties *mmu_specs,
-					u64 hop_addr, u64 vaddr)
-{
-	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask,
-					mmu_specs->hop2_shift);
-}
-
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
-					struct hl_mmu_properties *mmu_specs,
-					u64 hop_addr, u64 vaddr)
-{
-	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask,
-					mmu_specs->hop3_shift);
-}
-
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
-					struct hl_mmu_properties *mmu_specs,
-					u64 hop_addr, u64 vaddr)
-{
-	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask,
-					mmu_specs->hop4_shift);
-}
-
-static inline u64 get_hop5_pte_addr(struct hl_ctx *ctx,
-					struct hl_mmu_properties *mmu_specs,
-					u64 hop_addr, u64 vaddr)
-{
-	return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop5_mask,
-					mmu_specs->hop5_shift);
-}
-
-static inline u64 get_next_hop_addr(u64 curr_pte)
-{
-	if (curr_pte & PAGE_PRESENT_MASK)
-		return curr_pte & HOP_PHYS_ADDR_MASK;
-	else
-		return ULLONG_MAX;
-}
-
 static int mmu_show(struct seq_file *s, void *data)
 {
 	struct hl_debugfs_entry *entry = s->private;
 	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
 	struct hl_device *hdev = dev_entry->hdev;
-	struct asic_fixed_properties *prop = &hdev->asic_prop;
-	struct hl_mmu_properties *mmu_prop;
 	struct hl_ctx *ctx;
-	bool is_dram_addr;
-
-	u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
-		hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
-		hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0,
-		hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0,
-		hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0,
-		hop5_addr = 0, hop5_pte_addr = 0, hop5_pte = 0,
-		virt_addr = dev_entry->mmu_addr;
+	struct hl_mmu_hop_info hops_info;
+	u64 virt_addr = dev_entry->mmu_addr;
+	int i;
 
 	if (!hdev->mmu_enable)
 		return 0;
@@ -401,132 +327,24 @@ static int mmu_show(struct seq_file *s, void *data)
 		return 0;
 	}
 
-	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
-						prop->dmmu.start_addr,
-						prop->dmmu.end_addr);
-
-	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
-	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
-
-	mutex_lock(&ctx->mmu_lock);
-
-	/* the following lookup is copied from unmap() in mmu.c */
-
-	hop0_addr = get_hop0_addr(ctx);
-	hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
-	hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
-	hop1_addr = get_next_hop_addr(hop0_pte);
-
-	if (hop1_addr == ULLONG_MAX)
-		goto not_mapped;
-
-	hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
-	hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
-	hop2_addr = get_next_hop_addr(hop1_pte);
-
-	if (hop2_addr == ULLONG_MAX)
-		goto not_mapped;
-
-	hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
-	hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
-	hop3_addr = get_next_hop_addr(hop2_pte);
-
-	if (hop3_addr == ULLONG_MAX)
-		goto not_mapped;
-
-	hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
-	hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
-
-	if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) {
-		if (!(hop3_pte & LAST_MASK)) {
-			hop4_addr = get_next_hop_addr(hop3_pte);
-
-			if (hop4_addr == ULLONG_MAX)
-				goto not_mapped;
-
-			hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop,
-							hop4_addr, virt_addr);
-			hop4_pte = hdev->asic_funcs->read_pte(hdev,
-								hop4_pte_addr);
-			if (!(hop4_pte & PAGE_PRESENT_MASK))
-				goto not_mapped;
-		} else {
-			if (!(hop3_pte & PAGE_PRESENT_MASK))
-				goto not_mapped;
-		}
-	} else {
-		hop4_addr = get_next_hop_addr(hop3_pte);
-
-		if (hop4_addr == ULLONG_MAX)
-			goto not_mapped;
-
-		hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop,
-						hop4_addr, virt_addr);
-		hop4_pte = hdev->asic_funcs->read_pte(hdev,
-							hop4_pte_addr);
-		if (!(hop4_pte & LAST_MASK)) {
-			hop5_addr = get_next_hop_addr(hop4_pte);
-
-			if (hop5_addr == ULLONG_MAX)
-				goto not_mapped;
-
-			hop5_pte_addr = get_hop5_pte_addr(ctx, mmu_prop,
-							hop5_addr, virt_addr);
-			hop5_pte = hdev->asic_funcs->read_pte(hdev,
-								hop5_pte_addr);
-			if (!(hop5_pte & PAGE_PRESENT_MASK))
-				goto not_mapped;
-		} else {
-			if (!(hop4_pte & PAGE_PRESENT_MASK))
-				goto not_mapped;
-		}
+	if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) {
+		dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+				virt_addr);
+		return 0;
 	}
 
 	seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
 			dev_entry->mmu_asid, dev_entry->mmu_addr);
 
-	seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr);
-	seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr);
-	seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte);
-
-	seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr);
-	seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr);
-	seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte);
-
-	seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr);
-	seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr);
-	seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte);
-
-	seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr);
-	seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr);
-	seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte);
-
-	if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) {
-		if (!(hop3_pte & LAST_MASK)) {
-			seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
-			seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
-			seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
-		}
-	} else {
-		seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
-		seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
-		seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
-
-		if (!(hop4_pte & LAST_MASK)) {
-			seq_printf(s, "hop5_addr: 0x%llx\n", hop5_addr);
-			seq_printf(s, "hop5_pte_addr: 0x%llx\n", hop5_pte_addr);
-			seq_printf(s, "hop5_pte: 0x%llx\n", hop5_pte);
-		}
+	for (i = 0 ; i < hops_info.used_hops ; i++) {
+		seq_printf(s, "hop%d_addr: 0x%llx\n",
+				i, hops_info.hop_info[i].hop_addr);
+		seq_printf(s, "hop%d_pte_addr: 0x%llx\n",
+				i, hops_info.hop_info[i].hop_pte_addr);
+		seq_printf(s, "hop%d_pte: 0x%llx\n",
+				i, hops_info.hop_info[i].hop_pte_val);
 	}
 
-	goto out;
-
-not_mapped:
-	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
-			virt_addr);
-out:
-	mutex_unlock(&ctx->mmu_lock);
-
 	return 0;
 }
 
@@ -597,7 +415,7 @@ static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
 	if (!hdev->mmu_enable)
 		goto out;
 
-	if (hdev->dram_supports_virtual_memory &&
+	if (prop->dram_supports_virtual_memory &&
 		(addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
 		return true;
 
@@ -616,78 +434,20 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
 				u64 *phys_addr)
 {
 	struct hl_ctx *ctx = hdev->compute_ctx;
-	struct asic_fixed_properties *prop = &hdev->asic_prop;
-	struct hl_mmu_properties *mmu_prop;
-	u64 hop_addr, hop_pte_addr, hop_pte;
-	u64 offset_mask = HOP4_MASK | FLAGS_MASK;
 	int rc = 0;
-	bool is_dram_addr;
 
 	if (!ctx) {
 		dev_err(hdev->dev, "no ctx available\n");
 		return -EINVAL;
 	}
 
-	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
-						prop->dmmu.start_addr,
-						prop->dmmu.end_addr);
-
-	/* shifts and masks are the same in PMMU and HPMMU, use one of them */
-	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
-
-	mutex_lock(&ctx->mmu_lock);
-
-	/* hop 0 */
-	hop_addr = get_hop0_addr(ctx);
-	hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
-	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
-
-	/* hop 1 */
-	hop_addr = get_next_hop_addr(hop_pte);
-	if (hop_addr == ULLONG_MAX)
-		goto not_mapped;
-	hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
-	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
-
-	/* hop 2 */
-	hop_addr = get_next_hop_addr(hop_pte);
-	if (hop_addr == ULLONG_MAX)
-		goto not_mapped;
-	hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
-	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
-
-	/* hop 3 */
-	hop_addr = get_next_hop_addr(hop_pte);
-	if (hop_addr == ULLONG_MAX)
-		goto not_mapped;
-	hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
-	hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
-
-	if (!(hop_pte & LAST_MASK)) {
-		/* hop 4 */
-		hop_addr = get_next_hop_addr(hop_pte);
-		if (hop_addr == ULLONG_MAX)
-			goto not_mapped;
-		hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr,
-							virt_addr);
-		hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
-
-		offset_mask = FLAGS_MASK;
+	rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
+	if (rc) {
+		dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+				virt_addr);
+		rc = -EINVAL;
 	}
 
-	if (!(hop_pte & PAGE_PRESENT_MASK))
-		goto not_mapped;
-
-	*phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask);
-
-	goto out;
-
-not_mapped:
-	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
-			virt_addr);
-	rc = -EINVAL;
-out:
-	mutex_unlock(&ctx->mmu_lock);
 	return rc;
 }
 
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 783bbdcb1e6183f48c2370dd4efe239ef481a15e..5871162a84425ef895852296a1b7b44b02ef166b 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -10,20 +10,9 @@
 #include "habanalabs.h"
 
 #include <linux/pci.h>
-#include <linux/sched/signal.h>
 #include <linux/hwmon.h>
 #include <uapi/misc/habanalabs.h>
 
-#define HL_PLDM_PENDING_RESET_PER_SEC	(HL_PENDING_RESET_PER_SEC * 10)
-
-bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
-{
-	if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
-		return true;
-	else
-		return false;
-}
-
 enum hl_device_status hl_device_status(struct hl_device *hdev)
 {
 	enum hl_device_status status;
@@ -32,12 +21,34 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
 		status = HL_DEVICE_STATUS_MALFUNCTION;
 	else if (atomic_read(&hdev->in_reset))
 		status = HL_DEVICE_STATUS_IN_RESET;
+	else if (hdev->needs_reset)
+		status = HL_DEVICE_STATUS_NEEDS_RESET;
 	else
 		status = HL_DEVICE_STATUS_OPERATIONAL;
 
 	return status;
 }
 
+bool hl_device_operational(struct hl_device *hdev,
+		enum hl_device_status *status)
+{
+	enum hl_device_status current_status;
+
+	current_status = hl_device_status(hdev);
+	if (status)
+		*status = current_status;
+
+	switch (current_status) {
+	case HL_DEVICE_STATUS_IN_RESET:
+	case HL_DEVICE_STATUS_MALFUNCTION:
+	case HL_DEVICE_STATUS_NEEDS_RESET:
+		return false;
+	case HL_DEVICE_STATUS_OPERATIONAL:
+	default:
+		return true;
+	}
+}
+
 static void hpriv_release(struct kref *ref)
 {
 	struct hl_fpriv *hpriv;
@@ -243,6 +254,26 @@ static void device_cdev_sysfs_del(struct hl_device *hdev)
 	put_device(hdev->dev_ctrl);
 }
 
+static void device_hard_reset_pending(struct work_struct *work)
+{
+	struct hl_device_reset_work *device_reset_work =
+		container_of(work, struct hl_device_reset_work,
+				reset_work.work);
+	struct hl_device *hdev = device_reset_work->hdev;
+	int rc;
+
+	rc = hl_device_reset(hdev, true, true);
+	if ((rc == -EBUSY) && !hdev->device_fini_pending) {
+		dev_info(hdev->dev,
+			"Could not reset device. Will try again in %u seconds",
+			HL_PENDING_RESET_PER_SEC);
+
+		queue_delayed_work(device_reset_work->wq,
+			&device_reset_work->reset_work,
+			msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
+	}
+}
+
 /*
  * device_early_init - do some early initialization for the habanalabs device
  *
@@ -327,17 +358,32 @@ static int device_early_init(struct hl_device *hdev)
 
 	hl_cb_mgr_init(&hdev->kernel_cb_mgr);
 
+	hdev->device_reset_work.wq =
+			create_singlethread_workqueue("hl_device_reset");
+	if (!hdev->device_reset_work.wq) {
+		rc = -ENOMEM;
+		dev_err(hdev->dev, "Failed to create device reset WQ\n");
+		goto free_cb_mgr;
+	}
+
+	INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work,
+			device_hard_reset_pending);
+	hdev->device_reset_work.hdev = hdev;
+	hdev->device_fini_pending = 0;
+
 	mutex_init(&hdev->send_cpu_message_lock);
 	mutex_init(&hdev->debug_lock);
 	mutex_init(&hdev->mmu_cache_lock);
-	INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
-	spin_lock_init(&hdev->hw_queues_mirror_lock);
+	INIT_LIST_HEAD(&hdev->cs_mirror_list);
+	spin_lock_init(&hdev->cs_mirror_lock);
 	INIT_LIST_HEAD(&hdev->fpriv_list);
 	mutex_init(&hdev->fpriv_list_lock);
 	atomic_set(&hdev->in_reset, 0);
 
 	return 0;
 
+free_cb_mgr:
+	hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
 free_idle_busy_ts_arr:
 	kfree(hdev->idle_busy_ts_arr);
 free_chip_info:
@@ -380,6 +426,7 @@ static void device_early_fini(struct hl_device *hdev)
 	kfree(hdev->hl_chip_info);
 
 	destroy_workqueue(hdev->eq_wq);
+	destroy_workqueue(hdev->device_reset_work.wq);
 
 	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
 		destroy_workqueue(hdev->cq_wq[i]);
@@ -412,7 +459,7 @@ static void hl_device_heartbeat(struct work_struct *work)
 	struct hl_device *hdev = container_of(work, struct hl_device,
 						work_heartbeat.work);
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		goto reschedule;
 
 	if (!hdev->asic_funcs->send_heartbeat(hdev))
@@ -758,16 +805,12 @@ int hl_device_resume(struct hl_device *hdev)
 	return rc;
 }
 
-static int device_kill_open_processes(struct hl_device *hdev)
+static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
 {
-	u16 pending_total, pending_cnt;
 	struct hl_fpriv	*hpriv;
 	struct task_struct *task = NULL;
+	u32 pending_cnt;
 
-	if (hdev->pldm)
-		pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
-	else
-		pending_total = HL_PENDING_RESET_PER_SEC;
 
 	/* Giving time for user to close FD, and for processes that are inside
 	 * hl_device_open to finish
@@ -775,6 +818,19 @@ static int device_kill_open_processes(struct hl_device *hdev)
 	if (!list_empty(&hdev->fpriv_list))
 		ssleep(1);
 
+	if (timeout) {
+		pending_cnt = timeout;
+	} else {
+		if (hdev->process_kill_trial_cnt) {
+			/* Processes have already been killed */
+			pending_cnt = 1;
+			goto wait_for_processes;
+		} else {
+			/* Wait a small period after process kill */
+			pending_cnt = HL_PENDING_RESET_PER_SEC;
+		}
+	}
+
 	mutex_lock(&hdev->fpriv_list_lock);
 
 	/* This section must be protected because we are dereferencing
@@ -794,16 +850,18 @@ static int device_kill_open_processes(struct hl_device *hdev)
 
 	mutex_unlock(&hdev->fpriv_list_lock);
 
-	/* We killed the open users, but because the driver cleans up after the
-	 * user contexts are closed (e.g. mmu mappings), we need to wait again
-	 * to make sure the cleaning phase is finished before continuing with
-	 * the reset
+	/*
+	 * We killed the open users, but that doesn't mean they are closed.
+	 * It could be that they are running a long cleanup phase in the
+	 * driver, e.g. MMU unmappings, or running another long teardown flow
+	 * even before our cleanup.
+	 * Therefore we need to wait again to make sure they are closed before
+	 * continuing with the reset.
+	 */
 
-	pending_cnt = pending_total;
-
+wait_for_processes:
 	while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
-		dev_info(hdev->dev,
+		dev_dbg(hdev->dev,
 			"Waiting for all unmap operations to finish before hard reset\n");
 
 		pending_cnt--;
@@ -811,18 +869,17 @@ static int device_kill_open_processes(struct hl_device *hdev)
 		ssleep(1);
 	}
 
-	return list_empty(&hdev->fpriv_list) ? 0 : -EBUSY;
-}
+	/* All processes exited successfully */
+	if (list_empty(&hdev->fpriv_list))
+		return 0;
 
-static void device_hard_reset_pending(struct work_struct *work)
-{
-	struct hl_device_reset_work *device_reset_work =
-		container_of(work, struct hl_device_reset_work, reset_work);
-	struct hl_device *hdev = device_reset_work->hdev;
+	/* Give up waiting for processes to exit */
+	if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
+		return -ETIME;
 
-	hl_device_reset(hdev, true, true);
+	hdev->process_kill_trial_cnt++;
 
-	kfree(device_reset_work);
+	return -EBUSY;
 }
 
 /*
@@ -859,6 +916,10 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
 		hard_reset = true;
 	}
 
+	/* Re-entry of reset thread */
+	if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
+		goto kill_processes;
+
 	/*
 	 * Prevent concurrency in this function - only one reset should be
 	 * done at any given time. Only need to perform this if we didn't
@@ -904,26 +965,17 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
 
 again:
 	if ((hard_reset) && (!from_hard_reset_thread)) {
-		struct hl_device_reset_work *device_reset_work;
-
 		hdev->hard_reset_pending = true;
 
-		device_reset_work = kzalloc(sizeof(*device_reset_work),
-						GFP_ATOMIC);
-		if (!device_reset_work) {
-			rc = -ENOMEM;
-			goto out_err;
-		}
+		hdev->process_kill_trial_cnt = 0;
 
 		/*
 		 * Because the reset function can't run from interrupt or
 		 * from heartbeat work, we need to call the reset function
 		 * from a dedicated work
 		 */
-		INIT_WORK(&device_reset_work->reset_work,
-				device_hard_reset_pending);
-		device_reset_work->hdev = hdev;
-		schedule_work(&device_reset_work->reset_work);
+		queue_delayed_work(hdev->device_reset_work.wq,
+			&hdev->device_reset_work.reset_work, 0);
 
 		return 0;
 	}
@@ -949,12 +1001,25 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
 	/* Go over all the queues, release all CS and their jobs */
 	hl_cs_rollback_all(hdev);
 
+kill_processes:
 	if (hard_reset) {
 		/* Kill processes here after CS rollback. This is because the
 		 * process can't really exit until all its CSs are done, which
 		 * is what we do in cs rollback
 		 */
-		rc = device_kill_open_processes(hdev);
+		rc = device_kill_open_processes(hdev, 0);
+
+		if (rc == -EBUSY) {
+			if (hdev->device_fini_pending) {
+				dev_crit(hdev->dev,
+					"Failed to kill all open processes, stopping hard reset\n");
+				goto out_err;
+			}
+
+			/* signal reset thread to reschedule */
+			return rc;
+		}
+
 		if (rc) {
 			dev_crit(hdev->dev,
 				"Failed to kill all open processes, stopping hard reset\n");
@@ -1089,6 +1154,7 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
 	}
 
 	atomic_set(&hdev->in_reset, 0);
+	hdev->needs_reset = false;
 
 	if (hard_reset)
 		hdev->hard_reset_cnt++;
@@ -1261,13 +1327,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
 
 	hl_debugfs_add_device(hdev);
 
-	if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
-		dev_info(hdev->dev,
-			"H/W state is dirty, must reset before initializing\n");
-		hdev->asic_funcs->halt_engines(hdev, true);
-		hdev->asic_funcs->hw_fini(hdev, true);
-	}
-
 	/*
 	 * From this point, in case of an error, add char devices and create
 	 * sysfs nodes as part of the error flow, to allow debugging.
@@ -1398,11 +1457,14 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
  */
 void hl_device_fini(struct hl_device *hdev)
 {
-	int i, rc;
 	ktime_t timeout;
+	int i, rc;
 
 	dev_info(hdev->dev, "Removing device\n");
 
+	hdev->device_fini_pending = 1;
+	flush_delayed_work(&hdev->device_reset_work.reset_work);
+
 	/*
 	 * This function is competing with the reset function, so try to
 	 * take the reset atomic and if we are already in middle of reset,
@@ -1458,7 +1520,11 @@ void hl_device_fini(struct hl_device *hdev)
 	 * can't really exit until all its CSs are done, which is what we
 	 * do in cs rollback
 	 */
-	rc = device_kill_open_processes(hdev);
+	dev_info(hdev->dev,
+		"Waiting for all processes to exit (timeout of %u seconds)",
+		HL_PENDING_RESET_LONG_SEC);
+
+	rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC);
 	if (rc)
 		dev_crit(hdev->dev, "Failed to kill all open processes\n");
 
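
A minimal sketch of the caller pattern the new hl_device_operational() helper is meant for; it mirrors hl_cs_sanity_checks() earlier in this patch and uses only names that appear in these hunks (hdev->status[] is the driver's existing status-string table).

/* Illustrative sketch only - reject new work unless the device is
 * operational, reporting the textual status (in reset / malfunction /
 * needs reset).
 */
static int demo_check_device_state(struct hl_device *hdev)
{
	enum hl_device_status status;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't submit new CS\n",
			hdev->status[status]);
		return -EBUSY;
	}

	return 0;
}
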
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index cd41c7ceb0e78ce292bcc37e7179199a283fa9f1..0e1c629e9800ab4ad834765f2b4643fc84639f63 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -9,8 +9,6 @@
 #include "../include/common/hl_boot_if.h"
 
 #include <linux/firmware.h>
-#include <linux/genalloc.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/slab.h>
 
 #define FW_FILE_MAX_SIZE	0x1400000 /* maximum size of 20MB */
@@ -20,16 +18,18 @@
  * @hdev: pointer to hl_device structure.
  * @fw_name: the firmware image name
  * @dst: IO memory mapped address space to copy firmware to
+ * @src_offset: offset in src FW to copy from
+ * @size: amount of bytes to copy (0 to copy the whole binary)
  *
  * Copy fw code from firmware file to device memory.
  *
  * Return: 0 on success, non-zero for failure.
  */
 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
-				void __iomem *dst)
+				void __iomem *dst, u32 src_offset, u32 size)
 {
 	const struct firmware *fw;
-	const u64 *fw_data;
+	const void *fw_data;
 	size_t fw_size;
 	int rc;
 
@@ -57,9 +57,20 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
 		goto out;
 	}
 
-	fw_data = (const u64 *) fw->data;
+	if (size - src_offset > fw_size) {
+			"size to copy (%u) and offset (%u) are invalid\n",
+			"size to copy(%u) and offset(%u) are invalid\n",
+			size, src_offset);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (size)
+		fw_size = size;
+
+	fw_data = (const void *) fw->data;
 
-	memcpy_toio(dst, fw_data, fw_size);
+	memcpy_toio(dst, fw_data + src_offset, fw_size);
 
 out:
 	release_firmware(fw);
@@ -77,7 +88,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
 }
 
 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
-				u16 len, u32 timeout, long *result)
+				u16 len, u32 timeout, u64 *result)
 {
 	struct cpucp_packet *pkt;
 	dma_addr_t pkt_dma_addr;
@@ -132,7 +143,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
 						>> CPUCP_PKT_CTL_OPCODE_SHIFT);
 		rc = -EIO;
 	} else if (result) {
-		*result = (long) le64_to_cpu(pkt->result);
+		*result = le64_to_cpu(pkt->result);
 	}
 
 out:
@@ -146,7 +157,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
 int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
 {
 	struct cpucp_packet pkt;
-	long result;
+	u64 result;
 	int rc;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -169,7 +180,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
 {
 	struct cpucp_unmask_irq_arr_packet *pkt;
 	size_t total_pkt_size;
-	long result;
+	u64 result;
 	int rc;
 
 	total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
@@ -208,7 +219,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
 int hl_fw_test_cpu_queue(struct hl_device *hdev)
 {
 	struct cpucp_packet test_pkt = {};
-	long result;
+	u64 result;
 	int rc;
 
 	test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
@@ -221,7 +232,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev)
 	if (!rc) {
 		if (result != CPUCP_PACKET_FENCE_VAL)
 			dev_err(hdev->dev,
-				"CPU queue test failed (0x%08lX)\n", result);
+				"CPU queue test failed (%#08llx)\n", result);
 	} else {
 		dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
 	}
@@ -252,7 +263,7 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
 int hl_fw_send_heartbeat(struct hl_device *hdev)
 {
 	struct cpucp_packet hb_pkt = {};
-	long result;
+	u64 result;
 	int rc;
 
 	hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
@@ -268,13 +279,14 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
 	return rc;
 }
 
-int hl_fw_cpucp_info_get(struct hl_device *hdev)
+int hl_fw_cpucp_info_get(struct hl_device *hdev,
+			u32 cpu_security_boot_status_reg)
 {
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	struct cpucp_packet pkt = {};
 	void *cpucp_info_cpu_addr;
 	dma_addr_t cpucp_info_dma_addr;
-	long result;
+	u64 result;
 	int rc;
 
 	cpucp_info_cpu_addr =
@@ -313,6 +325,11 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev)
 		goto out;
 	}
 
+	/* Read FW application security bits again */
+	if (hdev->asic_prop.fw_security_status_valid)
+		hdev->asic_prop.fw_app_security_map =
+				RREG32(cpu_security_boot_status_reg);
+
 out:
 	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
 			sizeof(struct cpucp_info), cpucp_info_cpu_addr);
@@ -325,7 +342,7 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
 	struct cpucp_packet pkt = {};
 	void *eeprom_info_cpu_addr;
 	dma_addr_t eeprom_info_dma_addr;
-	long result;
+	u64 result;
 	int rc;
 
 	eeprom_info_cpu_addr =
@@ -368,7 +385,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
 		struct hl_info_pci_counters *counters)
 {
 	struct cpucp_packet pkt = {};
-	long result;
+	u64 result;
 	int rc;
 
 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
@@ -415,7 +432,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
 {
 	struct cpucp_packet pkt = {};
-	long result;
+	u64 result;
 	int rc;
 
 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
@@ -435,9 +452,36 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
 	return rc;
 }
 
-static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
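+/**
+ * hl_fw_cpucp_pll_info_get - fetch the output frequencies of a given PLL
+ * @hdev: pointer to hl_device structure.
+ * @pll_index: index of the PLL to query.
+ * @pll_freq_arr: array to be filled with the four PLL output frequencies
+ *                reported by the firmware.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */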
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index,
+		u16 *pll_freq_arr)
 {
-	u32 err_val;
+	struct cpucp_packet pkt;
+	u64 result;
+	int rc;
+
+	memset(&pkt, 0, sizeof(pkt));
+
+	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
+				CPUCP_PKT_CTL_OPCODE_SHIFT);
+	pkt.pll_type = __cpu_to_le16(pll_index);
+
+	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+	if (rc)
+		dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
+
+	pll_freq_arr[0] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT0_MASK, result);
+	pll_freq_arr[1] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT1_MASK, result);
+	pll_freq_arr[2] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT2_MASK, result);
+	pll_freq_arr[3] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT3_MASK, result);
+
+	return rc;
+}
+
+static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
+		u32 cpu_security_boot_status_reg)
+{
+	u32 err_val, security_val;
 
 	/* Some of the firmware status codes are deprecated in newer f/w
 	 * versions. In those versions, the errors are reported
@@ -472,6 +516,18 @@ static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
 	if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
 		dev_err(hdev->dev,
 			"Device boot error - NIC F/W initialization failed\n");
+	if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY)
+		dev_warn(hdev->dev,
+			"Device boot warning - security not ready\n");
+	if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL)
+		dev_err(hdev->dev, "Device boot error - security failure\n");
+	if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL)
+		dev_err(hdev->dev, "Device boot error - eFuse failure\n");
+
+	security_val = RREG32(cpu_security_boot_status_reg);
+	if (security_val & CPU_BOOT_DEV_STS0_ENABLED)
+		dev_dbg(hdev->dev, "Device security status %#x\n",
+				security_val);
 }
 
 static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
@@ -524,10 +580,12 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
 	}
 }
 
-int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg,
-				u32 boot_err0_reg, u32 timeout)
+int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
+		u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
+		u32 timeout)
 {
-	u32 status;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	u32 status, security_status;
 	int rc;
 
 	if (!hdev->cpu_enable)
@@ -557,23 +615,52 @@ int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg,
 	if (rc) {
 		dev_err(hdev->dev, "Failed to read preboot version\n");
 		detect_cpu_boot_status(hdev, status);
-		fw_read_errors(hdev, boot_err0_reg);
+		fw_read_errors(hdev, boot_err0_reg,
+				cpu_security_boot_status_reg);
 		return -EIO;
 	}
 
-	hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT);
+	rc = hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT);
+	if (rc)
+		return rc;
+
+	security_status = RREG32(cpu_security_boot_status_reg);
+
+	/* We read security status multiple times during boot:
+	 * 1. preboot - we check if fw security feature is supported
+	 * 2. boot cpu - we get boot cpu security status
+	 * 3. FW application - we get FW application security status
+	 *
+	 * Preboot:
+	 * Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set
+	 * check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
+	 */
+	if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
+		hdev->asic_prop.fw_security_status_valid = 1;
+		prop->fw_security_disabled =
+			!(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN);
+	} else {
+		hdev->asic_prop.fw_security_status_valid = 0;
+		prop->fw_security_disabled = true;
+	}
+
+	dev_info(hdev->dev, "firmware-level security is %s\n",
+		prop->fw_security_disabled ? "disabled" : "enabled");
 
 	return 0;
 }
 
 int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
 			u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
-			u32 boot_err0_reg, bool skip_bmc,
-			u32 cpu_timeout, u32 boot_fit_timeout)
+			u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
+			bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout)
 {
 	u32 status;
 	int rc;
 
+	if (!(hdev->fw_loading & FW_TYPE_BOOT_CPU))
+		return 0;
+
 	dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
 		cpu_timeout / USEC_PER_SEC);
 
@@ -631,17 +718,24 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
 		10000,
 		cpu_timeout);
 
+	dev_dbg(hdev->dev, "uboot status = %d\n", status);
+
 	/* Read U-Boot version now in case we will later fail */
 	hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
 
+	/* Read boot_cpu security bits */
+	if (hdev->asic_prop.fw_security_status_valid)
+		hdev->asic_prop.fw_boot_cpu_security_map =
+				RREG32(cpu_security_boot_status_reg);
+
 	if (rc) {
 		detect_cpu_boot_status(hdev, status);
 		rc = -EIO;
 		goto out;
 	}
 
-	if (!hdev->fw_loading) {
-		dev_info(hdev->dev, "Skip loading FW\n");
+	if (!(hdev->fw_loading & FW_TYPE_LINUX)) {
+		dev_info(hdev->dev, "Skip loading Linux F/W\n");
 		goto out;
 	}
 
@@ -702,10 +796,23 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
 		goto out;
 	}
 
+	/* Read FW application security bits */
+	if (hdev->asic_prop.fw_security_status_valid) {
+		hdev->asic_prop.fw_app_security_map =
+				RREG32(cpu_security_boot_status_reg);
+
+		if (hdev->asic_prop.fw_app_security_map &
+				CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+			hdev->asic_prop.hard_reset_done_by_fw = true;
+	}
+
+	dev_dbg(hdev->dev, "Firmware hard-reset is %s\n",
+		hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled");
+
 	dev_info(hdev->dev, "Successfully loaded firmware to device\n");
 
 out:
-	fw_read_errors(hdev, boot_err0_reg);
+	fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg);
 
 	return rc;
 }
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 6ed974d2def0e6f8098622f132e092aeafcc6dfe..571eda6ef5ab09b77e2ce2a7a43c609efd82397d 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -10,6 +10,7 @@
 
 #include "../include/common/cpucp_if.h"
 #include "../include/common/qman_if.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
 #include <uapi/misc/habanalabs.h>
 
 #include <linux/cdev.h>
@@ -19,6 +20,10 @@
 #include <linux/scatterlist.h>
 #include <linux/hashtable.h>
 #include <linux/bitfield.h>
+#include <linux/genalloc.h>
+#include <linux/sched/signal.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/coresight.h>
 
 #define HL_NAME				"habanalabs"
 
@@ -36,7 +41,9 @@
 #define HL_MMAP_OFFSET_VALUE_MASK	(0x3FFFFFFFFFFFull >> PAGE_SHIFT)
 #define HL_MMAP_OFFSET_VALUE_GET(off)	(off & HL_MMAP_OFFSET_VALUE_MASK)
 
-#define HL_PENDING_RESET_PER_SEC	30
+#define HL_PENDING_RESET_PER_SEC	10
+#define HL_PENDING_RESET_MAX_TRIALS	60 /* 10 minutes */
+#define HL_PENDING_RESET_LONG_SEC	60
 
 #define HL_HARD_RESET_MAX_TIMEOUT	120
 
@@ -61,15 +68,29 @@
 /* MMU */
 #define MMU_HASH_TABLE_BITS		7 /* 1 << 7 buckets */
 
+/**
+ * enum hl_mmu_page_table_location - MMU page table location
+ * @MMU_DR_PGT: page-table is located in device DRAM.
+ * @MMU_HR_PGT: page-table is located in host memory.
+ * @MMU_NUM_PGT_LOCATIONS: number of page-table locations currently supported.
+ */
+enum hl_mmu_page_table_location {
+	MMU_DR_PGT = 0,		/* device-dram-resident MMU PGT */
+	MMU_HR_PGT,		/* host resident MMU PGT */
+	MMU_NUM_PGT_LOCATIONS	/* num of PGT locations */
+};
+
 /*
  * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
  * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
  */
-#define HL_RSVD_SOBS			4
-#define HL_RSVD_MONS			2
+#define HL_RSVD_SOBS			2
+#define HL_RSVD_MONS			1
 
-#define HL_RSVD_SOBS_IN_USE		2
-#define HL_RSVD_MONS_IN_USE		1
+/*
+ * HL_COLLECTIVE_RSVD_MSTR_MONS 'collective' reserved monitors per QMAN stream
+ */
+#define HL_COLLECTIVE_RSVD_MSTR_MONS	2
 
 #define HL_MAX_SOB_VAL			(1 << 15)
 
@@ -80,6 +101,28 @@
 
 #define HL_MAX_DCORES			4
 
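+/* Each bit in sob_mask selects one SOB, so a monitor covers up to 8 SOBs */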
+#define HL_MAX_SOBS_PER_MONITOR	8
+
+/**
+ * struct hl_gen_wait_properties - properties for generating a wait CB
+ * @data: command buffer
+ * @q_idx: queue id, used to extract the fence register address
+ * @size: offset in command buffer
+ * @sob_base: SOB base to use in this wait CB
+ * @sob_val: SOB value to wait for
+ * @mon_id: monitor to use in this wait CB
+ * @sob_mask: each bit represents a SOB offset from sob_base to be used
+ */
+struct hl_gen_wait_properties {
+	void	*data;
+	u32	q_idx;
+	u32	size;
+	u16	sob_base;
+	u16	sob_val;
+	u16	mon_id;
+	u8	sob_mask;
+};
+
 /**
  * struct pgt_info - MMU hop page info.
  * @node: hash linked-list node for the pgts shadow hash of pgts.
@@ -124,6 +167,18 @@ enum hl_fw_component {
 	FW_COMP_PREBOOT
 };
 
+/**
+ * enum hl_fw_types - F/W types to load
+ * @FW_TYPE_LINUX: Linux image for device CPU
+ * @FW_TYPE_BOOT_CPU: Boot image for device CPU
+ * @FW_TYPE_ALL_TYPES: Mask for all types
+ */
+enum hl_fw_types {
+	FW_TYPE_LINUX = 0x1,
+	FW_TYPE_BOOT_CPU = 0x2,
+	FW_TYPE_ALL_TYPES = (FW_TYPE_LINUX | FW_TYPE_BOOT_CPU)
+};
+
 /**
  * enum hl_queue_type - Supported QUEUE types.
  * @QUEUE_TYPE_NA: queue is not available.
@@ -146,7 +201,8 @@ enum hl_queue_type {
 enum hl_cs_type {
 	CS_TYPE_DEFAULT,
 	CS_TYPE_SIGNAL,
-	CS_TYPE_WAIT
+	CS_TYPE_WAIT,
+	CS_TYPE_COLLECTIVE_WAIT
 };
 
 /*
@@ -175,6 +231,17 @@ struct hl_outbound_pci_region {
 	u64	size;
 };
 
+/*
+ * enum queue_cb_alloc_flags - Indicates whether the queue supports CBs
+ * allocated by the Kernel driver or by the User
+ * @CB_ALLOC_KERNEL: supports only CBs allocated by the Kernel driver
+ * @CB_ALLOC_USER: supports only CBs allocated by the User
+ */
+enum queue_cb_alloc_flags {
+	CB_ALLOC_KERNEL = 0x1,
+	CB_ALLOC_USER   = 0x2
+};
+
 /*
  * struct hl_hw_sob - H/W SOB info.
  * @hdev: habanalabs device structure.
@@ -189,19 +256,29 @@ struct hl_hw_sob {
 	u32			q_idx;
 };
 
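+/**
+ * enum hl_collective_mode - collective mode of a H/W queue
+ * @HL_COLLECTIVE_NOT_SUPPORTED: queue is not used for collective operations.
+ * @HL_COLLECTIVE_MASTER: queue acts as a collective master.
+ * @HL_COLLECTIVE_SLAVE: queue acts as a collective slave.
+ */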
+enum hl_collective_mode {
+	HL_COLLECTIVE_NOT_SUPPORTED = 0x0,
+	HL_COLLECTIVE_MASTER = 0x1,
+	HL_COLLECTIVE_SLAVE = 0x2
+};
+
 /**
  * struct hw_queue_properties - queue information.
  * @type: queue type.
+ * @cb_alloc_flags: bitmap which indicates if the hw queue supports CBs
+ *                  allocated by the Kernel driver and therefore, a CB handle
+ *                  can be provided for jobs on this queue. Otherwise, a CB
+ *                  address must be provided.
+ * @collective_mode: collective mode of current queue
  * @driver_only: true if only the driver is allowed to send a job to this queue,
  *               false otherwise.
- * @requires_kernel_cb: true if a CB handle must be provided for jobs on this
- *                      queue, false otherwise (a CB address must be provided).
  * @supports_sync_stream: True if queue supports sync stream
  */
 struct hw_queue_properties {
 	enum hl_queue_type	type;
+	enum queue_cb_alloc_flags cb_alloc_flags;
+	enum hl_collective_mode	collective_mode;
 	u8			driver_only;
-	u8			requires_kernel_cb;
 	u8			supports_sync_stream;
 };
 
@@ -227,6 +304,8 @@ enum hl_device_hw_state {
 	HL_DEVICE_HW_STATE_DIRTY
 };
 
+#define HL_MMU_VA_ALIGNMENT_NOT_NEEDED 0
+
 /**
  * struct hl_mmu_properties - ASIC specific MMU address translation properties.
  * @start_addr: virtual start address of the memory region.
@@ -245,6 +324,8 @@ enum hl_device_hw_state {
  * @hop5_mask: mask to get the PTE address in hop 5.
  * @page_size: default page size used to allocate memory.
  * @num_hops: The amount of hops supported by the translation table.
+ * @host_resident: true if the MMU page table resides in host memory rather
+ *                 than in device DRAM.
  */
 struct hl_mmu_properties {
 	u64	start_addr;
@@ -263,6 +344,7 @@ struct hl_mmu_properties {
 	u64	hop5_mask;
 	u32	page_size;
 	u32	num_hops;
+	u8	host_resident;
 };
 
 /**
@@ -314,6 +396,14 @@ struct hl_mmu_properties {
  * @cb_pool_cb_size: size of each CB in the CB pool.
  * @max_pending_cs: maximum of concurrent pending command submissions
  * @max_queues: maximum amount of queues in the system
+ * @fw_boot_cpu_security_map: bitmap representation of boot cpu security status
+ *                            reported by FW, bit description can be found in
+ *                            CPU_BOOT_DEV_STS*
+ * @fw_app_security_map: bitmap representation of application security status
+ *                       reported by FW, bit description can be found in
+ *                       CPU_BOOT_DEV_STS*
+ * @collective_first_sob: first sync object available for collective use
+ * @collective_first_mon: first monitor available for collective use
  * @sync_stream_first_sob: first sync object available for sync stream use
  * @sync_stream_first_mon: first monitor available for sync stream use
  * @first_available_user_sob: first sob available for the user
@@ -322,6 +412,10 @@ struct hl_mmu_properties {
  * @completion_queues_count: number of completion queues.
  * @fw_security_disabled: true if security measures are disabled in firmware,
  *                        false otherwise
+ * @fw_security_status_valid: security status bits are valid and can be fetched
+ *                            from BOOT_DEV_STS0
+ * @dram_supports_virtual_memory: is there an MMU towards the DRAM
+ * @hard_reset_done_by_fw: true if firmware is handling hard reset flow
  */
 struct asic_fixed_properties {
 	struct hw_queue_properties	*hw_queues_props;
@@ -366,6 +460,10 @@ struct asic_fixed_properties {
 	u32				cb_pool_cb_size;
 	u32				max_pending_cs;
 	u32				max_queues;
+	u32				fw_boot_cpu_security_map;
+	u32				fw_app_security_map;
+	u16				collective_first_sob;
+	u16				collective_first_mon;
 	u16				sync_stream_first_sob;
 	u16				sync_stream_first_mon;
 	u16				first_available_user_sob[HL_MAX_DCORES];
@@ -373,6 +471,9 @@ struct asic_fixed_properties {
 	u8				tpc_enabled_mask;
 	u8				completion_queues_count;
 	u8				fw_security_disabled;
+	u8				fw_security_status_valid;
+	u8				dram_supports_virtual_memory;
+	u8				hard_reset_done_by_fw;
 };
 
 /**
@@ -380,12 +481,14 @@ struct asic_fixed_properties {
  * @completion: fence is implemented using completion
  * @refcount: refcount for this fence
  * @error: mark this fence with error
+ * @timestamp: timestamp upon completion
  *
  */
 struct hl_fence {
 	struct completion	completion;
 	struct kref		refcount;
 	int			error;
+	ktime_t			timestamp;
 };
 
 /**
@@ -397,6 +500,7 @@ struct hl_fence {
  * @cs_seq: command submission sequence number.
  * @type: type of the CS - signal/wait.
  * @sob_val: the SOB value that is used in this signal/wait CS.
+ * @sob_group: the SOB group that is used in this collective wait CS.
  */
 struct hl_cs_compl {
 	struct hl_fence		base_fence;
@@ -406,6 +510,7 @@ struct hl_cs_compl {
 	u64			cs_seq;
 	enum hl_cs_type		type;
 	u16			sob_val;
+	u16			sob_group;
 };
 
 /*
@@ -427,7 +532,7 @@ struct hl_cb_mgr {
  * @refcount: reference counter for usage of the CB.
  * @hdev: pointer to device this CB belongs to.
  * @ctx: pointer to the CB owner's context.
- * @lock: spinlock to protect mmap/cs flows.
+ * @lock: spinlock to protect mmap flows.
  * @debugfs_list: node in debugfs list of command buffers.
  * @pool_list: node in pool list of command buffers.
  * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
@@ -456,7 +561,7 @@ struct hl_cb {
 	dma_addr_t		bus_address;
 	u32			mmap_size;
 	u32			size;
-	u32			cs_cnt;
+	atomic_t		cs_cnt;
 	u8			mmap;
 	u8			is_pool;
 	u8			is_internal;
@@ -468,6 +573,7 @@ struct hl_cb {
  * QUEUES
  */
 
+struct hl_cs;
 struct hl_cs_job;
 
 /* Queue length of external and HW queues */
@@ -490,10 +596,38 @@ struct hl_cs_job;
 #define HL_CPU_ACCESSIBLE_MEM_SIZE	SZ_2M
 
 /**
- * struct hl_hw_queue - describes a H/W transport queue.
+ * struct hl_sync_stream_properties -
+ *     describes a H/W queue sync stream properties
  * @hw_sob: array of the used H/W SOBs by this H/W queue.
+ * @next_sob_val: the next value to use for the currently used SOB.
+ * @base_sob_id: the base SOB id of the SOBs used by this queue.
+ * @base_mon_id: the base MON id of the MONs used by this queue.
+ * @collective_mstr_mon_id: the MON ids of the MONs used by this master queue
+ *                          in order to sync with all slave queues.
+ * @collective_slave_mon_id: the MON id used by this slave queue in order to
+ *                           sync with its master queue.
+ * @collective_sob_id: current SOB id used by this collective slave queue
+ *                     to signal its collective master queue upon completion.
+ * @curr_sob_offset: the id offset to the currently used SOB from the
+ *                   HL_RSVD_SOBS that are being used by this queue.
+ */
+struct hl_sync_stream_properties {
+	struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
+	u16		next_sob_val;
+	u16		base_sob_id;
+	u16		base_mon_id;
+	u16		collective_mstr_mon_id[HL_COLLECTIVE_RSVD_MSTR_MONS];
+	u16		collective_slave_mon_id;
+	u16		collective_sob_id;
+	u8		curr_sob_offset;
+};
+
+/**
+ * struct hl_hw_queue - describes a H/W transport queue.
  * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
+ * @sync_stream_prop: sync stream queue properties
  * @queue_type: type of queue.
+ * @collective_mode: collective mode of current queue
  * @kernel_address: holds the queue's kernel virtual address.
  * @bus_address: holds the queue's DMA address.
  * @pi: holds the queue's pi value.
@@ -502,33 +636,25 @@ struct hl_cs_job;
  * @cq_id: the id for the corresponding CQ for this H/W queue.
  * @msi_vec: the IRQ number of the H/W queue.
  * @int_queue_len: length of internal queue (number of entries).
- * @next_sob_val: the next value to use for the currently used SOB.
- * @base_sob_id: the base SOB id of the SOBs used by this queue.
- * @base_mon_id: the base MON id of the MONs used by this queue.
  * @valid: is the queue valid (we have array of 32 queues, not all of them
  *         exist).
- * @curr_sob_offset: the id offset to the currently used SOB from the
- *                   HL_RSVD_SOBS that are being used by this queue.
  * @supports_sync_stream: True if queue supports sync stream
  */
 struct hl_hw_queue {
-	struct hl_hw_sob	hw_sob[HL_RSVD_SOBS];
-	struct hl_cs_job	**shadow_queue;
-	enum hl_queue_type	queue_type;
-	void			*kernel_address;
-	dma_addr_t		bus_address;
-	u32			pi;
-	atomic_t		ci;
-	u32			hw_queue_id;
-	u32			cq_id;
-	u32			msi_vec;
-	u16			int_queue_len;
-	u16			next_sob_val;
-	u16			base_sob_id;
-	u16			base_mon_id;
-	u8			valid;
-	u8			curr_sob_offset;
-	u8			supports_sync_stream;
+	struct hl_cs_job			**shadow_queue;
+	struct hl_sync_stream_properties	sync_stream_prop;
+	enum hl_queue_type			queue_type;
+	enum hl_collective_mode			collective_mode;
+	void					*kernel_address;
+	dma_addr_t				bus_address;
+	u32					pi;
+	atomic_t				ci;
+	u32					hw_queue_id;
+	u32					cq_id;
+	u32					msi_vec;
+	u16					int_queue_len;
+	u8					valid;
+	u8					supports_sync_stream;
 };
 
 /**
@@ -650,6 +776,7 @@ enum div_select_defs {
  *                           dma_free_coherent(). This is ASIC function because
  *                           its implementation is not trivial when the driver
  *                           is loaded in simulation mode (not upstreamed).
+ * @scrub_device_mem: Scrub device memory given an address and size
  * @get_int_queue_base: get the internal queue base address.
  * @test_queues: run simple test on all queues for sanity check.
  * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
@@ -700,6 +827,7 @@ enum div_select_defs {
  * @wreg: Write a register. Needed for simulator support.
  * @halt_coresight: stop the ETF and ETR traces.
  * @ctx_init: context dependent initialization.
+ * @ctx_fini: context dependent cleanup.
  * @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz
  * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
  * @read_device_fw_version: read the device's firmware versions that are
@@ -711,9 +839,13 @@ enum div_select_defs {
  * @gen_signal_cb: Generate a signal CB.
  * @gen_wait_cb: Generate a wait CB.
  * @reset_sob: Reset a SOB.
+ * @reset_sob_group: Reset SOB group
  * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
  *                        firmware configuration
  * @get_device_time: Get the device time.
+ * @collective_wait_init_cs: Generate collective master/slave packets
+ *                           and place them in the relevant cs jobs
+ * @collective_wait_create_jobs: allocate collective wait cs jobs
  */
 struct hl_asic_funcs {
 	int (*early_init)(struct hl_device *hdev);
@@ -736,6 +868,7 @@ struct hl_asic_funcs {
 					dma_addr_t *dma_handle, gfp_t flag);
 	void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
 					void *cpu_addr, dma_addr_t dma_handle);
+	int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
 	void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
 				dma_addr_t *dma_handle, u16 *queue_len);
 	int (*test_queues)(struct hl_device *hdev);
@@ -794,28 +927,34 @@ struct hl_asic_funcs {
 	int (*get_eeprom_data)(struct hl_device *hdev, void *data,
 				size_t max_size);
 	int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
-				u16 len, u32 timeout, long *result);
-	enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
+				u16 len, u32 timeout, u64 *result);
 	int (*pci_bars_map)(struct hl_device *hdev);
 	int (*init_iatu)(struct hl_device *hdev);
 	u32 (*rreg)(struct hl_device *hdev, u32 reg);
 	void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
 	void (*halt_coresight)(struct hl_device *hdev);
 	int (*ctx_init)(struct hl_ctx *ctx);
+	void (*ctx_fini)(struct hl_ctx *ctx);
 	int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
 	u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
-	void (*read_device_fw_version)(struct hl_device *hdev,
+	int (*read_device_fw_version)(struct hl_device *hdev,
 					enum hl_fw_component fwc);
 	int (*load_firmware_to_device)(struct hl_device *hdev);
 	int (*load_boot_fit_to_device)(struct hl_device *hdev);
 	u32 (*get_signal_cb_size)(struct hl_device *hdev);
 	u32 (*get_wait_cb_size)(struct hl_device *hdev);
-	void (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id);
-	void (*gen_wait_cb)(struct hl_device *hdev, void *data, u16 sob_id,
-				u16 sob_val, u16 mon_id, u32 q_idx);
+	u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
+			u32 size);
+	u32 (*gen_wait_cb)(struct hl_device *hdev,
+			struct hl_gen_wait_properties *prop);
 	void (*reset_sob)(struct hl_device *hdev, void *data);
+	void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
 	void (*set_dma_mask_from_fw)(struct hl_device *hdev);
 	u64 (*get_device_time)(struct hl_device *hdev);
+	void (*collective_wait_init_cs)(struct hl_cs *cs);
+	int (*collective_wait_create_jobs)(struct hl_device *hdev,
+			struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
+			u32 collective_engine_id);
 };
 
 
@@ -825,18 +964,49 @@ struct hl_asic_funcs {
 
 #define HL_KERNEL_ASID_ID	0
 
+/**
+ * enum hl_va_range_type - virtual address range type.
+ * @HL_VA_RANGE_TYPE_HOST: range type of host pages
+ * @HL_VA_RANGE_TYPE_HOST_HUGE: range type of host huge pages
+ * @HL_VA_RANGE_TYPE_DRAM: range type of dram pages
+ */
+enum hl_va_range_type {
+	HL_VA_RANGE_TYPE_HOST,
+	HL_VA_RANGE_TYPE_HOST_HUGE,
+	HL_VA_RANGE_TYPE_DRAM,
+	HL_VA_RANGE_TYPE_MAX
+};
+
 /**
  * struct hl_va_range - virtual addresses range.
  * @lock: protects the virtual addresses list.
  * @list: list of virtual addresses blocks available for mappings.
  * @start_addr: range start address.
  * @end_addr: range end address.
+ * @page_size: page size of this va range.
  */
 struct hl_va_range {
 	struct mutex		lock;
 	struct list_head	list;
 	u64			start_addr;
 	u64			end_addr;
+	u32			page_size;
+};
+
+/**
+ * struct hl_cs_counters_atomic - command submission counters
+ * @out_of_mem_drop_cnt: dropped due to memory allocation issue
+ * @parsing_drop_cnt: dropped due to error in packet parsing
+ * @queue_full_drop_cnt: dropped due to queue full
+ * @device_in_reset_drop_cnt: dropped due to device in reset
+ * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
+ */
+struct hl_cs_counters_atomic {
+	atomic64_t out_of_mem_drop_cnt;
+	atomic64_t parsing_drop_cnt;
+	atomic64_t queue_full_drop_cnt;
+	atomic64_t device_in_reset_drop_cnt;
+	atomic64_t max_cs_in_flight_drop_cnt;
 };
 
 /**
@@ -849,14 +1019,12 @@ struct hl_va_range {
  * @refcount: reference counter for the context. Context is released only when
  *		this hits 0. It is incremented on CS and CS_WAIT.
  * @cs_pending: array of hl fence objects representing pending CS.
- * @host_va_range: holds available virtual addresses for host mappings.
- * @host_huge_va_range: holds available virtual addresses for host mappings
- *                      with huge pages.
- * @dram_va_range: holds available virtual addresses for DRAM mappings.
+ * @va_range: holds available virtual addresses for host and dram mappings.
  * @mem_hash_lock: protects the mem_hash.
  * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
  *            MMU hash or walking the PGT requires taking this lock.
  * @debugfs_list: node in debugfs list of contexts.
+ * @cs_counters: context command submission counters.
  * @cb_va_pool: device VA pool for command buffers which are mapped to the
  *              device's MMU.
  * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
@@ -879,26 +1047,24 @@ struct hl_va_range {
 struct hl_ctx {
 	DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
 	DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
-	struct hl_fpriv		*hpriv;
-	struct hl_device	*hdev;
-	struct kref		refcount;
-	struct hl_fence		**cs_pending;
-	struct hl_va_range	*host_va_range;
-	struct hl_va_range	*host_huge_va_range;
-	struct hl_va_range	*dram_va_range;
-	struct mutex		mem_hash_lock;
-	struct mutex		mmu_lock;
-	struct list_head	debugfs_list;
-	struct hl_cs_counters	cs_counters;
-	struct gen_pool		*cb_va_pool;
-	u64			cs_sequence;
-	u64			*dram_default_hops;
-	spinlock_t		cs_lock;
-	atomic64_t		dram_phys_mem;
-	atomic_t		thread_ctx_switch_token;
-	u32			thread_ctx_switch_wait_token;
-	u32			asid;
-	u32			handle;
+	struct hl_fpriv			*hpriv;
+	struct hl_device		*hdev;
+	struct kref			refcount;
+	struct hl_fence			**cs_pending;
+	struct hl_va_range		*va_range[HL_VA_RANGE_TYPE_MAX];
+	struct mutex			mem_hash_lock;
+	struct mutex			mmu_lock;
+	struct list_head		debugfs_list;
+	struct hl_cs_counters_atomic	cs_counters;
+	struct gen_pool			*cb_va_pool;
+	u64				cs_sequence;
+	u64				*dram_default_hops;
+	spinlock_t			cs_lock;
+	atomic64_t			dram_phys_mem;
+	atomic_t			thread_ctx_switch_token;
+	u32				thread_ctx_switch_wait_token;
+	u32				asid;
+	u32				handle;
 };
 
 /**
@@ -963,6 +1129,7 @@ struct hl_userptr {
  * @tdr_active: true if TDR was activated for this CS (to prevent
  *		double TDR activation).
  * @aborted: true if CS was aborted due to some device error.
+ * @timestamp: true if a timestamp must be captured upon completion
  */
 struct hl_cs {
 	u16			*jobs_in_queue_cnt;
@@ -983,6 +1150,7 @@ struct hl_cs {
 	u8			timedout;
 	u8			tdr_active;
 	u8			aborted;
+	u8			timestamp;
 };
 
 /**
@@ -996,6 +1164,7 @@ struct hl_cs {
  * @userptr_list: linked-list of userptr mappings that belong to this job and
  *			wait for completion.
  * @debugfs_list: node in debugfs list of command submission jobs.
+ * @refcount: reference counter for usage of the CS job.
  * @queue_type: the type of the H/W queue this job is submitted to.
  * @id: the id of this job inside a CS.
  * @hw_queue_id: the id of the H/W queue this job is submitted to.
@@ -1019,6 +1188,7 @@ struct hl_cs_job {
 	struct work_struct	finish_work;
 	struct list_head	userptr_list;
 	struct list_head	debugfs_list;
+	struct kref		refcount;
 	enum hl_queue_type	queue_type;
 	u32			id;
 	u32			hw_queue_id;
@@ -1067,7 +1237,6 @@ struct hl_cs_parser {
 	u8			contains_dma_pkt;
 };
 
-
 /*
  * MEMORY STRUCTURE
  */
@@ -1285,6 +1454,10 @@ struct hl_dbg_device_entry {
  * DEVICES
  */
 
+#define HL_STR_MAX	32
+
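+/* Number of device status values, used to size the status strings array */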
+#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_NEEDS_RESET + 1)
+
 /* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
  * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
  */
@@ -1428,11 +1601,13 @@ struct hwmon_chip_info;
 
 /**
  * struct hl_device_reset_work - reset workqueue task wrapper.
+ * @wq: work queue for device reset procedure.
  * @reset_work: reset work to be done.
  * @hdev: habanalabs device structure.
  */
 struct hl_device_reset_work {
-	struct work_struct		reset_work;
+	struct workqueue_struct		*wq;
+	struct delayed_work		reset_work;
 	struct hl_device		*hdev;
 };
 
@@ -1446,17 +1621,77 @@ struct hl_device_idle_busy_ts {
 	ktime_t				busy_to_idle_ts;
 };
 
+/**
+ * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop
+ * information.
+ * @virt_addr: the virtual address of the hop.
+ * @phys_addr: the physical address of the hop (used by the device MMU).
+ * @shadow_addr: The shadow of the hop used by the driver for walking the hops.
+ */
+struct hr_mmu_hop_addrs {
+	u64 virt_addr;
+	u64 phys_addr;
+	u64 shadow_addr;
+};
 
 /**
- * struct hl_mmu_priv - used for holding per-device mmu internal information.
+ * struct hl_mmu_hr_priv - used for holding per-device mmu host-resident
+ * page-table internal information.
  * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
  * @mmu_shadow_hop0: shadow array of hop0 tables.
  */
-struct hl_mmu_priv {
+struct hl_mmu_hr_priv {
+	struct gen_pool *mmu_pgt_pool;
+	struct hr_mmu_hop_addrs *mmu_shadow_hop0;
+};
+
+/**
+ * struct hl_mmu_dr_priv - used for holding per-device mmu device-resident
+ * page-table internal information.
+ * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
+ * @mmu_shadow_hop0: shadow array of hop0 tables.
+ */
+struct hl_mmu_dr_priv {
 	struct gen_pool *mmu_pgt_pool;
 	void *mmu_shadow_hop0;
 };
 
+/**
+ * struct hl_mmu_priv - used for holding per-device mmu internal information.
+ * @dr: information on the device-resident MMU, when exists.
+ * @hr: information on the host-resident MMU, when exists.
+ */
+struct hl_mmu_priv {
+	struct hl_mmu_dr_priv dr;
+	struct hl_mmu_hr_priv hr;
+};
+
+/**
+ * struct hl_mmu_per_hop_info - A structure describing one TLB HOP and its entry
+ *                that was created in order to translate a virtual address to a
+ *                physical one.
+ * @hop_addr: The address of the hop.
+ * @hop_pte_addr: The address of the hop entry.
+ * @hop_pte_val: The value in the hop entry.
+ */
+struct hl_mmu_per_hop_info {
+	u64 hop_addr;
+	u64 hop_pte_addr;
+	u64 hop_pte_val;
+};
+
+/**
+ * struct hl_mmu_hop_info - A structure describing the TLB hops and their
+ * hop-entries that were created in order to translate a virtual address to a
+ * physical one.
+ * @hop_info: Array holding the per-hop information used for the translation.
+ * @used_hops: The number of hops used for the translation.
+ */
+struct hl_mmu_hop_info {
+	struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS];
+	u32 used_hops;
+};
+
 /**
  * struct hl_mmu_funcs - Device related MMU functions.
  * @init: initialize the MMU module.
@@ -1468,6 +1703,9 @@ struct hl_mmu_priv {
  * @flush: flush all writes from all cores to reach device MMU.
  * @swap_out: marks all mapping of the given context as swapped out.
  * @swap_in: marks all mapping of the given context as swapped in.
+ * @get_tlb_info: returns the list of hops and hop-entries used that were
+ *                created in order to translate the given virtual address to a
+ *                physical one.
  */
 struct hl_mmu_funcs {
 	int (*init)(struct hl_device *hdev);
@@ -1482,6 +1720,8 @@ struct hl_mmu_funcs {
 	void (*flush)(struct hl_ctx *ctx);
 	void (*swap_out)(struct hl_ctx *ctx);
 	void (*swap_in)(struct hl_ctx *ctx);
+	int (*get_tlb_info)(struct hl_ctx *ctx,
+			u64 virt_addr, struct hl_mmu_hop_info *hops);
 };
 
 /**
@@ -1497,6 +1737,7 @@ struct hl_mmu_funcs {
  * @dev_ctrl: related kernel device structure for the control device
  * @work_freq: delayed work to lower device frequency if possible.
  * @work_heartbeat: delayed work for CPU-CP is-alive check.
+ * @device_reset_work: delayed work which performs hard reset
  * @asic_name: ASIC specific name.
  * @asic_type: ASIC specific type.
  * @completion_queue: array of hl_cq.
@@ -1505,8 +1746,8 @@ struct hl_mmu_funcs {
  * @eq_wq: work queue of event queue for executing work in process context.
  * @kernel_ctx: Kernel driver context structure.
  * @kernel_queues: array of hl_hw_queue.
- * @hw_queues_mirror_list: CS mirror list for TDR.
- * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
+ * @cs_mirror_list: CS mirror list for TDR.
+ * @cs_mirror_lock: protects cs_mirror_list.
  * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs.
  * @event_queue: event queue for IRQ from CPU-CP.
  * @dma_pool: DMA pool for small allocations.
@@ -1525,6 +1766,7 @@ struct hl_mmu_funcs {
  * @hwmon_dev: H/W monitor device.
  * @pm_mng_profile: current power management profile.
  * @hl_chip_info: ASIC's sensors information.
+ * @status: device status description strings, indexed by device status.
  * @hl_debugfs: device's debugfs manager.
  * @cb_pool: list of preallocated CBs.
  * @cb_pool_lock: protects the CB pool.
@@ -1572,13 +1814,12 @@ struct hl_mmu_funcs {
  * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
  * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
  *                   otherwise.
- * @dram_supports_virtual_memory: is MMU enabled towards DRAM.
  * @dram_default_page_mapping: is DRAM default page mapping enabled.
+ * @memory_scrub: true to perform device memory scrub in various locations,
+ *                such as context-switch, context close, page free, etc.
  * @pmmu_huge_range: is a different virtual addresses range used for PMMU with
  *                   huge pages.
  * @init_done: is the initialization of the device done.
- * @mmu_enable: is MMU enabled.
- * @mmu_huge_page_opt: is MMU huge pages optimization enabled.
  * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
  * @dma_mask: the dma mask that was set for this device
  * @in_debug: is device under debug. This, together with fpriv_list, enforces
@@ -1589,9 +1830,16 @@ struct hl_mmu_funcs {
  * @stop_on_err: true if engines should stop on error.
  * @supports_sync_stream: is sync stream supported.
  * @sync_stream_queue_idx: helper index for sync stream queues initialization.
+ * @collective_mon_idx: helper index for collective initialization
  * @supports_coresight: is CoreSight supported.
  * @supports_soft_reset: is soft reset supported.
  * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
+ * @needs_reset: true if reset_on_lockup is false and device should be reset
+ *               due to lockup.
+ * @process_kill_trial_cnt: number of times the reset thread has tried to
+ *                          kill the user processes
+ * @device_fini_pending: true if device_fini was called and might be
+ *                       waiting for the reset thread to finish
  */
 struct hl_device {
 	struct pci_dev			*pdev;
@@ -1604,15 +1852,17 @@ struct hl_device {
 	struct device			*dev_ctrl;
 	struct delayed_work		work_freq;
 	struct delayed_work		work_heartbeat;
-	char				asic_name[32];
+	struct hl_device_reset_work	device_reset_work;
+	char				asic_name[HL_STR_MAX];
+	char				status[HL_DEV_STS_MAX][HL_STR_MAX];
 	enum hl_asic_type		asic_type;
 	struct hl_cq			*completion_queue;
 	struct workqueue_struct		**cq_wq;
 	struct workqueue_struct		*eq_wq;
 	struct hl_ctx			*kernel_ctx;
 	struct hl_hw_queue		*kernel_queues;
-	struct list_head		hw_queues_mirror_list;
-	spinlock_t			hw_queues_mirror_lock;
+	struct list_head		cs_mirror_list;
+	spinlock_t			cs_mirror_lock;
 	struct hl_cb_mgr		kernel_cb_mgr;
 	struct hl_eq			event_queue;
 	struct dma_pool			*dma_pool;
@@ -1649,10 +1899,10 @@ struct hl_device {
 
 	struct hl_device_idle_busy_ts	*idle_busy_ts_arr;
 
-	struct hl_cs_counters		aggregated_cs_counters;
+	struct hl_cs_counters_atomic	aggregated_cs_counters;
 
 	struct hl_mmu_priv		mmu_priv;
-	struct hl_mmu_funcs		mmu_func;
+	struct hl_mmu_funcs		mmu_func[MMU_NUM_PGT_LOCATIONS];
 
 	atomic64_t			dram_used_mem;
 	u64				timeout_jiffies;
@@ -1677,8 +1927,8 @@ struct hl_device {
 	u8				hard_reset_pending;
 	u8				heartbeat;
 	u8				reset_on_lockup;
-	u8				dram_supports_virtual_memory;
 	u8				dram_default_page_mapping;
+	u8				memory_scrub;
 	u8				pmmu_huge_range;
 	u8				init_done;
 	u8				device_cpu_disabled;
@@ -1689,17 +1939,22 @@ struct hl_device {
 	u8				stop_on_err;
 	u8				supports_sync_stream;
 	u8				sync_stream_queue_idx;
+	u8				collective_mon_idx;
 	u8				supports_coresight;
 	u8				supports_soft_reset;
 	u8				supports_cb_mapping;
+	u8				needs_reset;
+	u8				process_kill_trial_cnt;
+	u8				device_fini_pending;
 
 	/* Parameters for bring-up */
+	u64				nic_ports_mask;
+	u64				fw_loading;
 	u8				mmu_enable;
 	u8				mmu_huge_page_opt;
 	u8				cpu_enable;
 	u8				reset_pcilink;
 	u8				cpu_queues_enable;
-	u8				fw_loading;
 	u8				pldm;
 	u8				axi_drain;
 	u8				sram_scrambler_enable;
@@ -1707,6 +1962,7 @@ struct hl_device {
 	u8				hard_reset_on_fw_events;
 	u8				bmc_enable;
 	u8				rl_enable;
+	u8				reset_on_preboot_fail;
 };
 
 
@@ -1793,7 +2049,8 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
 
 int hl_device_open(struct inode *inode, struct file *filp);
 int hl_device_open_ctrl(struct inode *inode, struct file *filp);
-bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
+bool hl_device_operational(struct hl_device *hdev,
+		enum hl_device_status *status);
 enum hl_device_status hl_device_status(struct hl_device *hdev);
 int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
 int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
@@ -1878,8 +2135,10 @@ void hl_cs_rollback_all(struct hl_device *hdev);
 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
 		enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
 void hl_sob_reset_error(struct kref *ref);
+int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
 void hl_fence_put(struct hl_fence *fence);
 void hl_fence_get(struct hl_fence *fence);
+void cs_get(struct hl_cs *cs);
 
 void goya_set_asic_funcs(struct hl_device *hdev);
 void gaudi_set_asic_funcs(struct hl_device *hdev);
@@ -1890,6 +2149,10 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx);
 int hl_vm_init(struct hl_device *hdev);
 void hl_vm_fini(struct hl_device *hdev);
 
+u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
+		enum hl_va_range_type type, u32 size, u32 alignment);
+int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
+		u64 start_addr, u64 size);
 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
 			struct hl_userptr *userptr);
 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
@@ -1903,20 +2166,26 @@ int hl_mmu_init(struct hl_device *hdev);
 void hl_mmu_fini(struct hl_device *hdev);
 int hl_mmu_ctx_init(struct hl_ctx *ctx);
 void hl_mmu_ctx_fini(struct hl_ctx *ctx);
-int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
 		u32 page_size, bool flush_pte);
-int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
+int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
 		bool flush_pte);
+int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
+					u64 phys_addr, u32 size);
+int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size);
 void hl_mmu_swap_out(struct hl_ctx *ctx);
 void hl_mmu_swap_in(struct hl_ctx *ctx);
 int hl_mmu_if_set_funcs(struct hl_device *hdev);
-void hl_mmu_v1_set_funcs(struct hl_device *hdev);
+void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
+int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
+int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
+			struct hl_mmu_hop_info *hops);
 
 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
-				void __iomem *dst);
+				void __iomem *dst, u32 src_offset, u32 size);
 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
-				u16 len, u32 timeout, long *result);
+				u16 len, u32 timeout, u64 *result);
 int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
 int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
 		size_t irq_arr_size);
@@ -1926,18 +2195,22 @@ void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
 void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
 					void *vaddr);
 int hl_fw_send_heartbeat(struct hl_device *hdev);
-int hl_fw_cpucp_info_get(struct hl_device *hdev);
+int hl_fw_cpucp_info_get(struct hl_device *hdev,
+			u32 cpu_security_boot_status_reg);
 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
 		struct hl_info_pci_counters *counters);
 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
 			u64 *total_energy);
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index,
+		u16 *pll_freq_arr);
 int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
 			u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
-			u32 boot_err0_reg, bool skip_bmc,
-			u32 cpu_timeout, u32 boot_fit_timeout);
-int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg,
-				u32 boot_err0_reg, u32 timeout);
+			u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
+			bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout);
+int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
+		u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
+		u32 timeout);
 
 int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
 			bool is_wc[3]);
@@ -1946,8 +2219,7 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
 		struct hl_inbound_pci_region *pci_region);
 int hl_pci_set_outbound_region(struct hl_device *hdev,
 		struct hl_outbound_pci_region *pci_region);
-int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg,
-		u32 boot_err0_reg, u32 preboot_ver_timeout);
+int hl_pci_init(struct hl_device *hdev);
 void hl_pci_fini(struct hl_device *hdev);
 
 long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index f9067d3ef43765b661ed0b31c0a044c958fa0148..6bbb6bca68600732fcae1ff0239a91afc6291a0f 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -29,6 +29,7 @@ static DEFINE_MUTEX(hl_devs_idr_lock);
 
 static int timeout_locked = 5;
 static int reset_on_lockup = 1;
+static int memory_scrub = 1;
 
 module_param(timeout_locked, int, 0444);
 MODULE_PARM_DESC(timeout_locked,
@@ -38,6 +39,10 @@ module_param(reset_on_lockup, int, 0444);
 MODULE_PARM_DESC(reset_on_lockup,
 	"Do device reset on lockup (0 = no, 1 = yes, default yes)");
 
+module_param(memory_scrub, int, 0444);
+MODULE_PARM_DESC(memory_scrub,
+	"Scrub device memory in various states (0 = no, 1 = yes, default yes)");
+
 #define PCI_VENDOR_ID_HABANALABS	0x1da3
 
 #define PCI_IDS_GOYA			0x0001
@@ -87,6 +92,7 @@ static enum hl_asic_type get_asic_type(u16 device)
  */
 int hl_device_open(struct inode *inode, struct file *filp)
 {
+	enum hl_device_status status;
 	struct hl_device *hdev;
 	struct hl_fpriv *hpriv;
 	int rc;
@@ -119,10 +125,10 @@ int hl_device_open(struct inode *inode, struct file *filp)
 
 	mutex_lock(&hdev->fpriv_list_lock);
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, &status)) {
 		dev_err_ratelimited(hdev->dev,
-			"Can't open %s because it is disabled or in reset\n",
-			dev_name(hdev->dev));
+			"Can't open %s because it is %s\n",
+			dev_name(hdev->dev), hdev->status[status]);
 		rc = -EPERM;
 		goto out_err;
 	}
@@ -199,7 +205,7 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
 
 	mutex_lock(&hdev->fpriv_list_lock);
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, NULL)) {
 		dev_err_ratelimited(hdev->dev_ctrl,
 			"Can't open %s because it is disabled or in reset\n",
 			dev_name(hdev->dev_ctrl));
@@ -228,19 +234,20 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
 
 static void set_driver_behavior_per_device(struct hl_device *hdev)
 {
-	hdev->mmu_enable = 1;
 	hdev->cpu_enable = 1;
-	hdev->fw_loading = 1;
+	hdev->fw_loading = FW_TYPE_ALL_TYPES;
 	hdev->cpu_queues_enable = 1;
 	hdev->heartbeat = 1;
+	hdev->mmu_enable = 1;
 	hdev->clock_gating_mask = ULONG_MAX;
-
-	hdev->reset_pcilink = 0;
-	hdev->axi_drain = 0;
 	hdev->sram_scrambler_enable = 1;
 	hdev->dram_scrambler_enable = 1;
 	hdev->bmc_enable = 1;
 	hdev->hard_reset_on_fw_events = 1;
+	hdev->reset_on_preboot_fail = 1;
+
+	hdev->reset_pcilink = 0;
+	hdev->axi_drain = 0;
 }
 
 /*
@@ -281,8 +288,17 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
 		hdev->asic_type = asic_type;
 	}
 
+	/* Assign status description string */
+	strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
+					"disabled", HL_STR_MAX);
+	strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET],
+					"in reset", HL_STR_MAX);
+	strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET],
+					"needs reset", HL_STR_MAX);
+
 	hdev->major = hl_major;
 	hdev->reset_on_lockup = reset_on_lockup;
+	hdev->memory_scrub = memory_scrub;
 	hdev->pldm = 0;
 
 	set_driver_behavior_per_device(hdev);
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 07317ea4912956e292f13b9e3f7f029bef8f0e96..32e6af1db4e35ed0f31360872e6b03c62ffc02dc 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -314,20 +314,45 @@ static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
 
 static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
 {
+	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+	struct hl_info_cs_counters cs_counters = {0};
 	struct hl_device *hdev = hpriv->hdev;
-	struct hl_info_cs_counters cs_counters = { {0} };
+	struct hl_cs_counters_atomic *cntr;
 	u32 max_size = args->return_size;
-	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+	cntr = &hdev->aggregated_cs_counters;
 
 	if ((!max_size) || (!out))
 		return -EINVAL;
 
-	memcpy(&cs_counters.cs_counters, &hdev->aggregated_cs_counters,
-			sizeof(struct hl_cs_counters));
-
-	if (hpriv->ctx)
-		memcpy(&cs_counters.ctx_cs_counters, &hpriv->ctx->cs_counters,
-				sizeof(struct hl_cs_counters));
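+	/* Read the device-wide aggregated counters */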
+	cs_counters.total_out_of_mem_drop_cnt =
+			atomic64_read(&cntr->out_of_mem_drop_cnt);
+	cs_counters.total_parsing_drop_cnt =
+			atomic64_read(&cntr->parsing_drop_cnt);
+	cs_counters.total_queue_full_drop_cnt =
+			atomic64_read(&cntr->queue_full_drop_cnt);
+	cs_counters.total_device_in_reset_drop_cnt =
+			atomic64_read(&cntr->device_in_reset_drop_cnt);
+	cs_counters.total_max_cs_in_flight_drop_cnt =
+			atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
+
+	if (hpriv->ctx) {
+		cs_counters.ctx_out_of_mem_drop_cnt =
+				atomic64_read(
+				&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+		cs_counters.ctx_parsing_drop_cnt =
+				atomic64_read(
+				&hpriv->ctx->cs_counters.parsing_drop_cnt);
+		cs_counters.ctx_queue_full_drop_cnt =
+				atomic64_read(
+				&hpriv->ctx->cs_counters.queue_full_drop_cnt);
+		cs_counters.ctx_device_in_reset_drop_cnt =
+				atomic64_read(
+			&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
+		cs_counters.ctx_max_cs_in_flight_drop_cnt =
+				atomic64_read(
+			&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
+	}
 
 	return copy_to_user(out, &cs_counters,
 		min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
@@ -378,11 +403,32 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv,
 		min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
 }
 
+static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+	struct hl_device *hdev = hpriv->hdev;
+	struct hl_pll_frequency_info freq_info = {0};
+	u32 max_size = args->return_size;
+	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+	int rc;
+
+	if ((!max_size) || (!out))
+		return -EINVAL;
+
+	rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
+	if (rc)
+		return rc;
+
+	return copy_to_user(out, &freq_info,
+		min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
+}
+
 static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
 				struct device *dev)
 {
+	enum hl_device_status status;
 	struct hl_info_args *args = data;
 	struct hl_device *hdev = hpriv->hdev;
+
 	int rc;
 
 	/*
@@ -403,10 +449,10 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
 		break;
 	}
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, &status)) {
 		dev_warn_ratelimited(dev,
 			"Device is %s. Can't execute INFO IOCTL\n",
-			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+			hdev->status[status]);
 		return -EBUSY;
 	}
 
@@ -453,6 +499,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
 	case HL_INFO_TOTAL_ENERGY:
 		return total_energy_consumption_info(hpriv, args);
 
+	case HL_INFO_PLL_FREQUENCY:
+		return pll_frequency_info(hpriv, args);
+
 	default:
 		dev_err(dev, "Invalid request %d\n", args->op);
 		rc = -ENOTTY;
@@ -476,12 +525,14 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
 {
 	struct hl_debug_args *args = data;
 	struct hl_device *hdev = hpriv->hdev;
+	enum hl_device_status status;
+
 	int rc = 0;
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, &status)) {
 		dev_warn_ratelimited(hdev->dev,
 			"Device is %s. Can't execute DEBUG IOCTL\n",
-			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+			hdev->status[status]);
 		return -EBUSY;
 	}
 
@@ -544,7 +595,7 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
 	int retcode;
 
 	if (hdev->hard_reset_pending) {
-		dev_crit_ratelimited(hdev->dev_ctrl,
+		dev_crit_ratelimited(dev,
 			"Device HARD reset pending! Please close FD\n");
 		return -ENODEV;
 	}
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 250cf9cefc060c505bd82b85585a2eec59d41ec3..7caf868d1585c2fbd1d5cb033cedde1ddbfb035e 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -48,6 +48,11 @@ void hl_int_hw_queue_update_ci(struct hl_cs *cs)
 		return;
 
 	q = &hdev->kernel_queues[0];
+
+	/* There are no internal queues if H/W queues are being used */
+	if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)
+		return;
+
 	for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
 		if (q->queue_type == QUEUE_TYPE_INT)
 			atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
@@ -333,7 +338,14 @@ static void int_queue_schedule_job(struct hl_cs_job *job)
 
 	bd.ctl = 0;
 	bd.len = cpu_to_le32(job->job_cb_size);
-	bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
+
+	if (job->is_kernel_allocated_cb)
+		/* bus_address is actually an MMU-mapped address
+		 * allocated from an internal pool
+		 */
+		bd.ptr = cpu_to_le64(job->user_cb->bus_address);
+	else
+		bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
 
 	pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd);
 
@@ -388,6 +400,91 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
 	ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
 }
 
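+/*
+ * init_signal_cs - initialize a signal CS job: generate a signal CB for the
+ * queue's currently used SOB and advance the next SOB value, handling
+ * wraparound of both the value and the reserved SOB offset.
+ */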
+static void init_signal_cs(struct hl_device *hdev,
+		struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
+{
+	struct hl_sync_stream_properties *prop;
+	struct hl_hw_sob *hw_sob;
+	u32 q_idx;
+
+	q_idx = job->hw_queue_id;
+	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
+	hw_sob = &prop->hw_sob[prop->curr_sob_offset];
+
+	cs_cmpl->hw_sob = hw_sob;
+	cs_cmpl->sob_val = prop->next_sob_val++;
+
+	dev_dbg(hdev->dev,
+		"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
+		cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+
+	hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
+				cs_cmpl->hw_sob->sob_id, 0);
+
+	kref_get(&hw_sob->kref);
+
+	/* check for wraparound */
+	if (prop->next_sob_val == HL_MAX_SOB_VAL) {
+		/*
+		 * Decrement as we reached the max value.
+		 * The release function won't be called here as we've
+		 * just incremented the refcount.
+		 */
+		kref_put(&hw_sob->kref, hl_sob_reset_error);
+		prop->next_sob_val = 1;
+		/* only two SOBs are currently in use */
+		prop->curr_sob_offset =
+			(prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
+
+		dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
+				prop->curr_sob_offset, q_idx);
+	}
+}
+
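+/*
+ * init_wait_cs - initialize a wait CS job: reuse the SOB id and value of the
+ * signal CS this CS depends on and generate a wait CB with the queue's base
+ * monitor.
+ */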
+static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
+		struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
+{
+	struct hl_cs_compl *signal_cs_cmpl;
+	struct hl_sync_stream_properties *prop;
+	struct hl_gen_wait_properties wait_prop;
+	u32 q_idx;
+
+	q_idx = job->hw_queue_id;
+	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
+
+	signal_cs_cmpl = container_of(cs->signal_fence,
+					struct hl_cs_compl,
+					base_fence);
+
+	/* copy the SOB id and value of the signal CS */
+	cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+	cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+
+	dev_dbg(hdev->dev,
+		"generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
+		cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
+		prop->base_mon_id, q_idx);
+
+	wait_prop.data = (void *) job->patched_cb;
+	wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
+	wait_prop.sob_mask = 0x1;
+	wait_prop.sob_val = cs_cmpl->sob_val;
+	wait_prop.mon_id = prop->base_mon_id;
+	wait_prop.q_idx = q_idx;
+	wait_prop.size = 0;
+	hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop);
+
+	kref_get(&cs_cmpl->hw_sob->kref);
+	/*
+	 * Must put the signal fence after the SOB refcnt increment so
+	 * the SOB refcnt won't turn 0 and reset the SOB before the
+	 * wait CS was submitted.
+	 */
+	mb();
+	hl_fence_put(cs->signal_fence);
+	cs->signal_fence = NULL;
+}
+
 /*
  * init_signal_wait_cs - initialize a signal/wait CS
  * @cs: pointer to the signal/wait CS
@@ -398,84 +495,18 @@ static void init_signal_wait_cs(struct hl_cs *cs)
 {
 	struct hl_ctx *ctx = cs->ctx;
 	struct hl_device *hdev = ctx->hdev;
-	struct hl_hw_queue *hw_queue;
+	struct hl_cs_job *job;
 	struct hl_cs_compl *cs_cmpl =
 			container_of(cs->fence, struct hl_cs_compl, base_fence);
 
-	struct hl_hw_sob *hw_sob;
-	struct hl_cs_job *job;
-	u32 q_idx;
-
 	/* There is only one job in a signal/wait CS */
 	job = list_first_entry(&cs->job_list, struct hl_cs_job,
 				cs_node);
-	q_idx = job->hw_queue_id;
-	hw_queue = &hdev->kernel_queues[q_idx];
-
-	if (cs->type & CS_TYPE_SIGNAL) {
-		hw_sob = &hw_queue->hw_sob[hw_queue->curr_sob_offset];
-
-		cs_cmpl->hw_sob = hw_sob;
-		cs_cmpl->sob_val = hw_queue->next_sob_val++;
-
-		dev_dbg(hdev->dev,
-			"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
-			cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
-
-		hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
-					cs_cmpl->hw_sob->sob_id);
-
-		kref_get(&hw_sob->kref);
-
-		/* check for wraparound */
-		if (hw_queue->next_sob_val == HL_MAX_SOB_VAL) {
-			/*
-			 * Decrement as we reached the max value.
-			 * The release function won't be called here as we've
-			 * just incremented the refcount.
-			 */
-			kref_put(&hw_sob->kref, hl_sob_reset_error);
-			hw_queue->next_sob_val = 1;
-			/* only two SOBs are currently in use */
-			hw_queue->curr_sob_offset =
-					(hw_queue->curr_sob_offset + 1) %
-						HL_RSVD_SOBS_IN_USE;
-
-			dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
-					hw_queue->curr_sob_offset, q_idx);
-		}
-	} else if (cs->type & CS_TYPE_WAIT) {
-		struct hl_cs_compl *signal_cs_cmpl;
-
-		signal_cs_cmpl = container_of(cs->signal_fence,
-						struct hl_cs_compl,
-						base_fence);
-
-		/* copy the the SOB id and value of the signal CS */
-		cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
-		cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
-
-		dev_dbg(hdev->dev,
-			"generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
-			cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
-			hw_queue->base_mon_id, q_idx);
 
-		hdev->asic_funcs->gen_wait_cb(hdev, job->patched_cb,
-						cs_cmpl->hw_sob->sob_id,
-						cs_cmpl->sob_val,
-						hw_queue->base_mon_id,
-						q_idx);
-
-		kref_get(&cs_cmpl->hw_sob->kref);
-		/*
-		 * Must put the signal fence after the SOB refcnt increment so
-		 * the SOB refcnt won't turn 0 and reset the SOB before the
-		 * wait CS was submitted.
-		 */
-		mb();
-		hl_fence_put(cs->signal_fence);
-		cs->signal_fence = NULL;
-	}
+	if (cs->type & CS_TYPE_SIGNAL)
+		init_signal_cs(hdev, job, cs_cmpl);
+	else if (cs->type & CS_TYPE_WAIT)
+		init_wait_cs(hdev, cs, job, cs_cmpl);
 }
 
 /*
@@ -484,19 +515,24 @@ static void init_signal_wait_cs(struct hl_cs *cs)
  */
 int hl_hw_queue_schedule_cs(struct hl_cs *cs)
 {
+	enum hl_device_status status;
+	struct hl_cs_counters_atomic *cntr;
 	struct hl_ctx *ctx = cs->ctx;
 	struct hl_device *hdev = ctx->hdev;
 	struct hl_cs_job *job, *tmp;
 	struct hl_hw_queue *q;
-	u32 max_queues;
 	int rc = 0, i, cq_cnt;
+	u32 max_queues;
+
+	cntr = &hdev->aggregated_cs_counters;
 
 	hdev->asic_funcs->hw_queues_lock(hdev);
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
-		ctx->cs_counters.device_in_reset_drop_cnt++;
+	if (!hl_device_operational(hdev, &status)) {
+		atomic64_inc(&cntr->device_in_reset_drop_cnt);
+		atomic64_inc(&ctx->cs_counters.device_in_reset_drop_cnt);
 		dev_err(hdev->dev,
-			"device is disabled or in reset, CS rejected!\n");
+			"device is %s, CS rejected!\n", hdev->status[status]);
 		rc = -EPERM;
 		goto out;
 	}
@@ -527,7 +563,9 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
 			}
 
 			if (rc) {
-				ctx->cs_counters.queue_full_drop_cnt++;
+				atomic64_inc(
+					&ctx->cs_counters.queue_full_drop_cnt);
+				atomic64_inc(&cntr->queue_full_drop_cnt);
 				goto unroll_cq_resv;
 			}
 
@@ -538,21 +576,23 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
 
 	if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT))
 		init_signal_wait_cs(cs);
+	else if (cs->type == CS_TYPE_COLLECTIVE_WAIT)
+		hdev->asic_funcs->collective_wait_init_cs(cs);
 
-	spin_lock(&hdev->hw_queues_mirror_lock);
-	list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
+	spin_lock(&hdev->cs_mirror_lock);
+	list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list);
 
 	/* Queue TDR if the CS is the first entry and if timeout is wanted */
 	if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
-			(list_first_entry(&hdev->hw_queues_mirror_list,
+			(list_first_entry(&hdev->cs_mirror_list,
 					struct hl_cs, mirror_node) == cs)) {
 		cs->tdr_active = true;
 		schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
-		spin_unlock(&hdev->hw_queues_mirror_lock);
-	} else {
-		spin_unlock(&hdev->hw_queues_mirror_lock);
 	}
 
+	spin_unlock(&hdev->cs_mirror_lock);
+
 	if (!hdev->cs_active_cnt++) {
 		struct hl_device_idle_busy_ts *ts;
 
@@ -714,22 +754,56 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
 
 static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
 {
-	struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+	struct hl_sync_stream_properties *sync_stream_prop;
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	struct hl_hw_sob *hw_sob;
-	int sob, queue_idx = hdev->sync_stream_queue_idx++;
+	int sob, reserved_mon_idx, queue_idx;
+
+	sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
+
+	/* We use 'collective_mon_idx' as a running index in order to reserve
+	 * monitors for collective master/slave queues.
+	 * A collective master queue gets 2 reserved monitors and a collective
+	 * slave queue gets 1 reserved monitor.
+	 */
+	if (hdev->kernel_queues[q_idx].collective_mode ==
+			HL_COLLECTIVE_MASTER) {
+		reserved_mon_idx = hdev->collective_mon_idx;
+
+		/* reserve the first monitor for collective master queue */
+		sync_stream_prop->collective_mstr_mon_id[0] =
+			prop->collective_first_mon + reserved_mon_idx;
+
+		/* reserve the second monitor for collective master queue */
+		sync_stream_prop->collective_mstr_mon_id[1] =
+			prop->collective_first_mon + reserved_mon_idx + 1;
+
+		hdev->collective_mon_idx += HL_COLLECTIVE_RSVD_MSTR_MONS;
+	} else if (hdev->kernel_queues[q_idx].collective_mode ==
+			HL_COLLECTIVE_SLAVE) {
+		reserved_mon_idx = hdev->collective_mon_idx++;
+
+		/* reserve a monitor for collective slave queue */
+		sync_stream_prop->collective_slave_mon_id =
+			prop->collective_first_mon + reserved_mon_idx;
+	}
+
+	if (!hdev->kernel_queues[q_idx].supports_sync_stream)
+		return;
+
+	queue_idx = hdev->sync_stream_queue_idx++;
 
-	hw_queue->base_sob_id =
-		prop->sync_stream_first_sob + queue_idx * HL_RSVD_SOBS;
-	hw_queue->base_mon_id =
-		prop->sync_stream_first_mon + queue_idx * HL_RSVD_MONS;
-	hw_queue->next_sob_val = 1;
-	hw_queue->curr_sob_offset = 0;
+	sync_stream_prop->base_sob_id = prop->sync_stream_first_sob +
+			(queue_idx * HL_RSVD_SOBS);
+	sync_stream_prop->base_mon_id = prop->sync_stream_first_mon +
+			(queue_idx * HL_RSVD_MONS);
+	sync_stream_prop->next_sob_val = 1;
+	sync_stream_prop->curr_sob_offset = 0;
 
 	for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
-		hw_sob = &hw_queue->hw_sob[sob];
+		hw_sob = &sync_stream_prop->hw_sob[sob];
 		hw_sob->hdev = hdev;
-		hw_sob->sob_id = hw_queue->base_sob_id + sob;
+		hw_sob->sob_id = sync_stream_prop->base_sob_id + sob;
 		hw_sob->q_idx = q_idx;
 		kref_init(&hw_sob->kref);
 	}
@@ -737,15 +811,16 @@ static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
 
 static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
 {
-	struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+	struct hl_sync_stream_properties *prop =
+			&hdev->kernel_queues[q_idx].sync_stream_prop;
 
 	/*
 	 * In case we got here due to a stuck CS, the refcnt might be bigger
 	 * than 1 and therefore we reset it.
 	 */
-	kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
-	hw_queue->curr_sob_offset = 0;
-	hw_queue->next_sob_val = 1;
+	kref_init(&prop->hw_sob[prop->curr_sob_offset].kref);
+	prop->curr_sob_offset = 0;
+	prop->next_sob_val = 1;
 }
 
 /*
@@ -788,8 +863,7 @@ static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
 		break;
 	}
 
-	if (q->supports_sync_stream)
-		sync_stream_queue_init(hdev, q->hw_queue_id);
+	sync_stream_queue_init(hdev, q->hw_queue_id);
 
 	if (rc)
 		return rc;
@@ -867,6 +941,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
 		q->queue_type = asic->hw_queues_props[i].type;
 		q->supports_sync_stream =
 				asic->hw_queues_props[i].supports_sync_stream;
+		q->collective_mode = asic->hw_queues_props[i].collective_mode;
 		rc = queue_init(hdev, q, i);
 		if (rc) {
 			dev_err(hdev->dev,
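sync_stream_queue_init() above reserves monitors from a single running index: two consecutive ids for a collective master queue, one for a slave. A small sketch of that allocation pattern, assuming a hypothetical mon_allocator in place of the device properties:

#include <stdio.h>

enum collective_mode { NONE, MASTER, SLAVE };

struct mon_allocator {
	unsigned int first_mon;   /* base monitor id, like prop->collective_first_mon */
	unsigned int running_idx; /* mirrors hdev->collective_mon_idx */
};

/* Reserve monitors for one queue; returns how many ids were handed out. */
static int reserve_monitors(struct mon_allocator *a, enum collective_mode mode,
			    unsigned int ids[2])
{
	if (mode == MASTER) {
		ids[0] = a->first_mon + a->running_idx;
		ids[1] = a->first_mon + a->running_idx + 1;
		a->running_idx += 2;   /* two monitors per master */
		return 2;
	}
	if (mode == SLAVE) {
		ids[0] = a->first_mon + a->running_idx++;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct mon_allocator a = { .first_mon = 100 };
	unsigned int ids[2];
	int n;

	n = reserve_monitors(&a, MASTER, ids);
	printf("master got %d monitor(s), first id %u\n", n, ids[0]);
	n = reserve_monitors(&a, SLAVE, ids);
	printf("slave got %d monitor(s), id %u\n", n, ids[0]);
	return 0;
}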
diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index 2ac29cb2fe612a52155d4533def4b52c8311fde3..6b421d76b3111dfac9edc4e44d16db5cf6a7c3ab 100644
--- a/drivers/misc/habanalabs/common/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
@@ -114,7 +114,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
 	struct hl_device *hdev = dev_get_drvdata(dev);
 	int rc;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	switch (type) {
@@ -192,7 +192,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
 {
 	struct hl_device *hdev = dev_get_drvdata(dev);
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	switch (type) {
@@ -312,6 +312,7 @@ int hl_get_temperature(struct hl_device *hdev,
 			int sensor_index, u32 attr, long *value)
 {
 	struct cpucp_packet pkt;
+	u64 result;
 	int rc;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -322,7 +323,9 @@ int hl_get_temperature(struct hl_device *hdev,
 	pkt.type = __cpu_to_le16(attr);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-						0, value);
+						0, &result);
+
+	*value = (long) result;
 
 	if (rc) {
 		dev_err(hdev->dev,
@@ -363,6 +366,7 @@ int hl_get_voltage(struct hl_device *hdev,
 			int sensor_index, u32 attr, long *value)
 {
 	struct cpucp_packet pkt;
+	u64 result;
 	int rc;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -373,7 +377,9 @@ int hl_get_voltage(struct hl_device *hdev,
 	pkt.type = __cpu_to_le16(attr);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-						0, value);
+						0, &result);
+
+	*value = (long) result;
 
 	if (rc) {
 		dev_err(hdev->dev,
@@ -389,6 +395,7 @@ int hl_get_current(struct hl_device *hdev,
 			int sensor_index, u32 attr, long *value)
 {
 	struct cpucp_packet pkt;
+	u64 result;
 	int rc;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -399,7 +406,9 @@ int hl_get_current(struct hl_device *hdev,
 	pkt.type = __cpu_to_le16(attr);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-						0, value);
+						0, &result);
+
+	*value = (long) result;
 
 	if (rc) {
 		dev_err(hdev->dev,
@@ -415,6 +424,7 @@ int hl_get_fan_speed(struct hl_device *hdev,
 			int sensor_index, u32 attr, long *value)
 {
 	struct cpucp_packet pkt;
+	u64 result;
 	int rc;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -425,7 +435,9 @@ int hl_get_fan_speed(struct hl_device *hdev,
 	pkt.type = __cpu_to_le16(attr);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-						0, value);
+						0, &result);
+
+	*value = (long) result;
 
 	if (rc) {
 		dev_err(hdev->dev,
@@ -441,6 +453,7 @@ int hl_get_pwm_info(struct hl_device *hdev,
 			int sensor_index, u32 attr, long *value)
 {
 	struct cpucp_packet pkt;
+	u64 result;
 	int rc;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -451,7 +464,9 @@ int hl_get_pwm_info(struct hl_device *hdev,
 	pkt.type = __cpu_to_le16(attr);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-						0, value);
+						0, &result);
+
+	*value = (long) result;
 
 	if (rc) {
 		dev_err(hdev->dev,
@@ -542,7 +557,7 @@ int hl_hwmon_init(struct hl_device *hdev)
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	int rc;
 
-	if ((hdev->hwmon_initialized) || !(hdev->fw_loading))
+	if ((hdev->hwmon_initialized) || !(hdev->cpu_queues_enable))
 		return 0;
 
 	if (hdev->hl_chip_info->info) {
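The hwmon getters above now read the firmware reply into a local u64 and cast to long at the call site, instead of passing a long pointer to a routine that stores a 64-bit result. A stripped-down illustration of the pattern, with send_msg() standing in for send_cpu_message():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the firmware call: it always writes a full 64-bit result. */
static int send_msg(uint64_t *result)
{
	*result = 42;
	return 0;
}

static int get_sensor(long *value)
{
	uint64_t result = 0;
	int rc = send_msg(&result);   /* callee always stores 8 bytes here */

	*value = (long)result;        /* narrow explicitly at the caller */
	return rc;
}

int main(void)
{
	long v;

	if (!get_sensor(&v))
		printf("sensor value %ld\n", v);
	return 0;
}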
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index bfe223abf142672de217cdb6eaaccaeeaaf82f74..cbe9da4e0211b5118b112b2bdf92f91cfef46360 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -11,7 +11,6 @@
 
 #include <linux/uaccess.h>
 #include <linux/slab.h>
-#include <linux/genalloc.h>
 
 #define HL_MMU_DEBUG	0
 
@@ -46,7 +45,7 @@
  * @ret_handle          : result handle
  *
  * This function does the following:
- * - Allocate the requested size rounded up to 2MB pages
+ * - Allocate the requested size rounded up to 'dram_page_size' pages
  * - Return unique handle
  */
 static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
@@ -81,6 +80,16 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 				num_pgs, total_size);
 			return -ENOMEM;
 		}
+
+		if (hdev->memory_scrub) {
+			rc = hdev->asic_funcs->scrub_device_mem(hdev, paddr,
+					total_size);
+			if (rc) {
+				dev_err(hdev->dev,
+					"Failed to scrub contiguous device memory\n");
+				goto pages_pack_err;
+			}
+		}
 	}
 
 	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
@@ -118,6 +127,17 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 				goto page_err;
 			}
 
+			if (hdev->memory_scrub) {
+				rc = hdev->asic_funcs->scrub_device_mem(hdev,
+						phys_pg_pack->pages[i],
+						page_size);
+				if (rc) {
+					dev_err(hdev->dev,
+						"Failed to scrub device memory\n");
+					goto page_err;
+				}
+			}
+
 			num_curr_pgs++;
 		}
 	}
@@ -600,6 +620,87 @@ static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
 	return res_valid_start;
 }
 
+/*
+ * hl_reserve_va_block() - reserve a virtual block of a given size.
+ * @hdev: pointer to the habanalabs device structure.
+ * @ctx: current context
+ * @type: virtual addresses range type.
+ * @size: requested block size.
+ * @alignment: required alignment in bytes of the virtual block start address,
+ *             0 means no alignment.
+ *
+ * This function does the following:
+ * - Iterate on the virtual block list to find a suitable virtual block for the
+ *   given size and alignment.
+ * - Reserve the requested block and update the list.
+ * - Return the start address of the virtual block.
+ */
+u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
+		enum hl_va_range_type type, u32 size, u32 alignment)
+{
+	return get_va_block(hdev, ctx->va_range[type], size, 0,
+			max(alignment, ctx->va_range[type]->page_size));
+}
+
+/**
+ * hl_get_va_range_type() - get va_range type for the given address and size.
+ * @address: The start address of the area we want to validate.
+ * @size: The size in bytes of the area we want to validate.
+ * @type: returned va_range type
+ *
+ * Return: 0 if the area is inside a valid range, -EINVAL otherwise.
+ */
+static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
+			enum hl_va_range_type *type)
+{
+	int i;
+
+	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
+		if (hl_mem_area_inside_range(address, size,
+				ctx->va_range[i]->start_addr,
+				ctx->va_range[i]->end_addr)) {
+			*type = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * hl_unreserve_va_block - wrapper for add_va_block for unreserving a va block
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @ctx: current context
+ * @start_addr: start virtual address of the block to unreserve
+ * @size: size in bytes of the block to unreserve
+ *
+ * This function does the following:
+ * - Takes the list lock and calls add_va_block_locked
+ */
+int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
+		u64 start_addr, u64 size)
+{
+	enum hl_va_range_type type;
+	int rc;
+
+	rc = hl_get_va_range_type(ctx, start_addr, size, &type);
+	if (rc) {
+		dev_err(hdev->dev,
+			"cannot find va_range for va %#llx size %llu\n",
+			start_addr, size);
+		return rc;
+	}
+
+	rc = add_va_block(hdev, ctx->va_range[type], start_addr,
+						start_addr + size - 1);
+	if (rc)
+		dev_warn(hdev->dev,
+			"add va block failed for vaddr: 0x%llx\n", start_addr);
+
+	return rc;
+}
+
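hl_get_va_range_type() above is a simple interval lookup: find which per-context VA range contains the block so its free list can be updated on unreserve. A compact sketch of the same check, with made-up range boundaries:

#include <stdint.h>
#include <stdio.h>

enum va_range_type { VA_HOST, VA_HOST_HUGE, VA_DRAM, VA_MAX };

struct va_range { uint64_t start, end; };

/* Return the range index containing [addr, addr + size), or -1. */
static int classify_va(const struct va_range ranges[VA_MAX],
		       uint64_t addr, uint64_t size)
{
	for (int i = 0; i < VA_MAX; i++)
		if (addr >= ranges[i].start && addr + size - 1 <= ranges[i].end)
			return i;
	return -1;
}

int main(void)
{
	const struct va_range ranges[VA_MAX] = {
		[VA_HOST]      = { 0x1000000,  0x1ffffff },
		[VA_HOST_HUGE] = { 0x2000000,  0x2ffffff },
		[VA_DRAM]      = { 0x10000000, 0x1fffffff },
	};

	printf("type %d\n", classify_va(ranges, 0x10004000, 0x2000));
	return 0;
}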
 /*
  * get_sg_info - get number of pages and the DMA address from SG list
  *
@@ -742,7 +843,7 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
 	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
 		paddr = phys_pg_pack->pages[i];
 
-		rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size,
+		rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
 				(i + 1) == phys_pg_pack->npages);
 		if (rc) {
 			dev_err(hdev->dev,
@@ -761,7 +862,7 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
 err:
 	next_vaddr = vaddr;
 	for (i = 0 ; i < mapped_pg_cnt ; i++) {
-		if (hl_mmu_unmap(ctx, next_vaddr, page_size,
+		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
 					(i + 1) == mapped_pg_cnt))
 			dev_warn_ratelimited(hdev->dev,
 				"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
@@ -791,7 +892,7 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
 	next_vaddr = vaddr;
 
 	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
-		if (hl_mmu_unmap(ctx, next_vaddr, page_size,
+		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
 				       (i + 1) == phys_pg_pack->npages))
 			dev_warn_ratelimited(hdev->dev,
 			"unmap failed for vaddr: 0x%llx\n", next_vaddr);
@@ -888,7 +989,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 
 		/* get required alignment */
 		if (phys_pg_pack->page_size == page_size) {
-			va_range = ctx->host_va_range;
+			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
 
 			/*
 			 * huge page alignment may be needed in case of regular
@@ -903,7 +1004,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 			 * huge page alignment is needed in case of huge page
 			 * mapping
 			 */
-			va_range = ctx->host_huge_va_range;
+			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
 			va_block_align = huge_page_size;
 		}
 	} else {
@@ -928,7 +1029,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
 		hint_addr = args->map_device.hint_addr;
 
 		/* DRAM VA alignment is the same as the DRAM page size */
-		va_range = ctx->dram_va_range;
+		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
 		va_block_align = hdev->asic_prop.dmmu.page_size;
 	}
 
@@ -1073,12 +1174,12 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
 
 		if (phys_pg_pack->page_size ==
 					hdev->asic_prop.pmmu.page_size)
-			va_range = ctx->host_va_range;
+			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
 		else
-			va_range = ctx->host_huge_va_range;
+			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
 	} else if (*vm_type == VM_TYPE_PHYS_PACK) {
 		is_userptr = false;
-		va_range = ctx->dram_va_range;
+		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
 		phys_pg_pack = hnode->ptr;
 	} else {
 		dev_warn(hdev->dev,
@@ -1217,6 +1318,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
 
 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 {
+	enum hl_device_status status;
 	union hl_mem_args *args = data;
 	struct hl_device *hdev = hpriv->hdev;
 	struct hl_ctx *ctx = hpriv->ctx;
@@ -1224,10 +1326,10 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 	u32 handle = 0;
 	int rc;
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, &status)) {
 		dev_warn_ratelimited(hdev->dev,
 			"Device is %s. Can't execute MEMORY IOCTL\n",
-			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+			hdev->status[status]);
 		return -EBUSY;
 	}
 
@@ -1236,18 +1338,35 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 
 	switch (args->in.op) {
 	case HL_MEM_OP_ALLOC:
-		if (!hdev->dram_supports_virtual_memory) {
-			dev_err(hdev->dev, "DRAM alloc is not supported\n");
-			rc = -EINVAL;
-			goto out;
-		}
-
 		if (args->in.alloc.mem_size == 0) {
 			dev_err(hdev->dev,
 				"alloc size must be larger than 0\n");
 			rc = -EINVAL;
 			goto out;
 		}
+
+		/* If DRAM does not support virtual memory the driver won't
+		 * handle the allocation/freeing of that memory. However, for
+		 * system administration/monitoring purposes, the driver will
+		 * keep track of the amount of DRAM memory that is allocated
+		 * and freed by the user. Because this code totally relies on
+		 * the user's input, the driver can't ensure the validity
+		 * of this accounting.
+		 */
+		if (!hdev->asic_prop.dram_supports_virtual_memory) {
+			atomic64_add(args->in.alloc.mem_size,
+					&ctx->dram_phys_mem);
+			atomic64_add(args->in.alloc.mem_size,
+					&hdev->dram_used_mem);
+
+			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
+			rc = 0;
+
+			memset(args, 0, sizeof(*args));
+			args->out.handle = 0;
+			goto out;
+		}
+
 		rc = alloc_device_memory(ctx, &args->in, &handle);
 
 		memset(args, 0, sizeof(*args));
@@ -1255,6 +1374,26 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 		break;
 
 	case HL_MEM_OP_FREE:
+		/* If DRAM does not support virtual memory the driver won't
+		 * handle the allocation/freeing of that memory. However, for
+		 * system administration/monitoring purposes, the driver will
+		 * keep track of the amount of DRAM memory that is allocated
+		 * and freed by the user. Because this code totally relies on
+		 * the user's input, the driver can't ensure the validity
+		 * of this accounting.
+		 */
+		if (!hdev->asic_prop.dram_supports_virtual_memory) {
+			atomic64_sub(args->in.alloc.mem_size,
+					&ctx->dram_phys_mem);
+			atomic64_sub(args->in.alloc.mem_size,
+					&hdev->dram_used_mem);
+
+			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
+			rc = 0;
+
+			goto out;
+		}
+
 		rc = free_device_memory(ctx, args->in.free.handle);
 		break;
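When the ASIC exposes no DRAM virtual memory, the HL_MEM_OP_ALLOC/FREE paths above only book-keep the sizes the user reports, via atomic counters. A toy version of that accounting using C11 atomics; the counter name mirrors hdev->dram_used_mem but nothing else here is the driver's code:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint_fast64_t dram_used_mem;  /* device-wide usage counter */

static void account_alloc(uint64_t size)
{
	atomic_fetch_add(&dram_used_mem, size);
}

static void account_free(uint64_t size)
{
	atomic_fetch_sub(&dram_used_mem, size);
}

int main(void)
{
	account_alloc(2 * 1024 * 1024);
	account_alloc(4 * 1024 * 1024);
	account_free(2 * 1024 * 1024);

	printf("user-reported DRAM in use: %llu bytes\n",
	       (unsigned long long)atomic_load(&dram_used_mem));
	return 0;
}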
 
@@ -1498,7 +1637,7 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
  *   addresses.
  */
 static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
-				u64 start, u64 end)
+				u64 start, u64 end, u32 page_size)
 {
 	int rc;
 
@@ -1528,6 +1667,7 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
 
 	va_range->start_addr = start;
 	va_range->end_addr = end;
+	va_range->page_size = page_size;
 
 	return 0;
 }
@@ -1540,8 +1680,7 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
  * This function does the following:
  * - Frees the virtual addresses block list and its lock
  */
-static void va_range_fini(struct hl_device *hdev,
-		struct hl_va_range *va_range)
+static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
 {
 	mutex_lock(&va_range->lock);
 	clear_va_list_locked(hdev, &va_range->list);
@@ -1571,102 +1710,97 @@ static void va_range_fini(struct hl_device *hdev,
 static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
 					u64 host_range_start,
 					u64 host_range_end,
+					u32 host_page_size,
 					u64 host_huge_range_start,
 					u64 host_huge_range_end,
+					u32 host_huge_page_size,
 					u64 dram_range_start,
-					u64 dram_range_end)
+					u64 dram_range_end,
+					u32 dram_page_size)
 {
 	struct hl_device *hdev = ctx->hdev;
-	int rc;
-
-	ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL);
-	if (!ctx->host_va_range)
-		return -ENOMEM;
-
-	ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range),
-						GFP_KERNEL);
-	if (!ctx->host_huge_va_range) {
-		rc =  -ENOMEM;
-		goto host_huge_va_range_err;
-	}
-
-	ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL);
-	if (!ctx->dram_va_range) {
-		rc = -ENOMEM;
-		goto dram_va_range_err;
+	int i, rc;
+
+	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
+		ctx->va_range[i] =
+			kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
+		if (!ctx->va_range[i]) {
+			rc = -ENOMEM;
+			goto free_va_range;
+		}
 	}
 
 	rc = hl_mmu_ctx_init(ctx);
 	if (rc) {
 		dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
-		goto mmu_ctx_err;
+		goto free_va_range;
 	}
 
 	mutex_init(&ctx->mem_hash_lock);
 	hash_init(ctx->mem_hash);
 
-	mutex_init(&ctx->host_va_range->lock);
+	mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
 
-	rc = va_range_init(hdev, ctx->host_va_range, host_range_start,
-				host_range_end);
+	rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST],
+			host_range_start, host_range_end, host_page_size);
 	if (rc) {
 		dev_err(hdev->dev, "failed to init host vm range\n");
-		goto host_page_range_err;
+		goto mmu_ctx_fini;
 	}
 
 	if (hdev->pmmu_huge_range) {
-		mutex_init(&ctx->host_huge_va_range->lock);
+		mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
 
-		rc = va_range_init(hdev, ctx->host_huge_va_range,
-					host_huge_range_start,
-					host_huge_range_end);
+		rc = va_range_init(hdev,
+			ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE],
+			host_huge_range_start, host_huge_range_end,
+			host_huge_page_size);
 		if (rc) {
 			dev_err(hdev->dev,
 				"failed to init host huge vm range\n");
-			goto host_hpage_range_err;
+			goto clear_host_va_range;
 		}
 	} else {
-		kfree(ctx->host_huge_va_range);
-		ctx->host_huge_va_range = ctx->host_va_range;
+		kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
+		ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
+				ctx->va_range[HL_VA_RANGE_TYPE_HOST];
 	}
 
-	mutex_init(&ctx->dram_va_range->lock);
+	mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
 
-	rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start,
-			dram_range_end);
+	rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM],
+			dram_range_start, dram_range_end, dram_page_size);
 	if (rc) {
 		dev_err(hdev->dev, "failed to init dram vm range\n");
-		goto dram_vm_err;
+		goto clear_host_huge_va_range;
 	}
 
 	hl_debugfs_add_ctx_mem_hash(hdev, ctx);
 
 	return 0;
 
-dram_vm_err:
-	mutex_destroy(&ctx->dram_va_range->lock);
+clear_host_huge_va_range:
+	mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
 
 	if (hdev->pmmu_huge_range) {
-		mutex_lock(&ctx->host_huge_va_range->lock);
-		clear_va_list_locked(hdev, &ctx->host_huge_va_range->list);
-		mutex_unlock(&ctx->host_huge_va_range->lock);
+		mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
+		clear_va_list_locked(hdev,
+			&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
+		mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
 	}
-host_hpage_range_err:
+clear_host_va_range:
 	if (hdev->pmmu_huge_range)
-		mutex_destroy(&ctx->host_huge_va_range->lock);
-	mutex_lock(&ctx->host_va_range->lock);
-	clear_va_list_locked(hdev, &ctx->host_va_range->list);
-	mutex_unlock(&ctx->host_va_range->lock);
-host_page_range_err:
-	mutex_destroy(&ctx->host_va_range->lock);
+		mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
+	mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
+	clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
+	mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
+mmu_ctx_fini:
+	mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
 	mutex_destroy(&ctx->mem_hash_lock);
 	hl_mmu_ctx_fini(ctx);
-mmu_ctx_err:
-	kfree(ctx->dram_va_range);
-dram_va_range_err:
-	kfree(ctx->host_huge_va_range);
-host_huge_va_range_err:
-	kfree(ctx->host_va_range);
+free_va_range:
+	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
+		kfree(ctx->va_range[i]);
 
 	return rc;
 }
@@ -1676,6 +1810,7 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
 	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
 	u64 host_range_start, host_range_end, host_huge_range_start,
 		host_huge_range_end, dram_range_start, dram_range_end;
+	u32 host_page_size, host_huge_page_size, dram_page_size;
 
 	atomic64_set(&ctx->dram_phys_mem, 0);
 
@@ -1686,27 +1821,23 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
 	 *   In case of DRAM mapping, the returned address is the physical
 	 *   address of the memory related to the given handle.
 	 */
-	if (ctx->hdev->mmu_enable) {
-		dram_range_start = prop->dmmu.start_addr;
-		dram_range_end = prop->dmmu.end_addr;
-		host_range_start = prop->pmmu.start_addr;
-		host_range_end = prop->pmmu.end_addr;
-		host_huge_range_start = prop->pmmu_huge.start_addr;
-		host_huge_range_end = prop->pmmu_huge.end_addr;
-	} else {
-		dram_range_start = prop->dram_user_base_address;
-		dram_range_end = prop->dram_end_address;
-		host_range_start = prop->dram_user_base_address;
-		host_range_end = prop->dram_end_address;
-		host_huge_range_start = prop->dram_user_base_address;
-		host_huge_range_end = prop->dram_end_address;
-	}
+	if (!ctx->hdev->mmu_enable)
+		return 0;
+
+	dram_range_start = prop->dmmu.start_addr;
+	dram_range_end = prop->dmmu.end_addr;
+	dram_page_size = prop->dmmu.page_size;
+	host_range_start = prop->pmmu.start_addr;
+	host_range_end = prop->pmmu.end_addr;
+	host_page_size = prop->pmmu.page_size;
+	host_huge_range_start = prop->pmmu_huge.start_addr;
+	host_huge_range_end = prop->pmmu_huge.end_addr;
+	host_huge_page_size = prop->pmmu_huge.page_size;
 
 	return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
-					host_huge_range_start,
-					host_huge_range_end,
-					dram_range_start,
-					dram_range_end);
+			host_page_size, host_huge_range_start,
+			host_huge_range_end, host_huge_page_size,
+			dram_range_start, dram_range_end, dram_page_size);
 }
 
 /*
@@ -1738,6 +1869,9 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
 	struct hlist_node *tmp_node;
 	int i;
 
+	if (!ctx->hdev->mmu_enable)
+		return;
+
 	hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
 
 	/*
@@ -1772,13 +1906,21 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
 		}
 	spin_unlock(&vm->idr_lock);
 
-	va_range_fini(hdev, ctx->dram_va_range);
+	va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
+	va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
+
 	if (hdev->pmmu_huge_range)
-		va_range_fini(hdev, ctx->host_huge_va_range);
-	va_range_fini(hdev, ctx->host_va_range);
+		va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
 
 	mutex_destroy(&ctx->mem_hash_lock);
 	hl_mmu_ctx_fini(ctx);
+
+	/* In this case we need to clear the global accounting of DRAM usage
+	 * because the user notifies us on allocations. If the user process is
+	 * gone, all DRAM is considered available again.
+	 */
+	if (!ctx->hdev->asic_prop.dram_supports_virtual_memory)
+		atomic64_set(&ctx->hdev->dram_used_mem, 0);
 }
 
 /*
diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c
index b5058798aeb9c829fbafd4e39dae395b1b1324b3..33ae953d3a3680126cbe42090ea677fb25b9f582 100644
--- a/drivers/misc/habanalabs/common/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu.c
@@ -22,18 +22,25 @@ static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
  * hl_mmu_init() - initialize the MMU module.
  * @hdev: habanalabs device structure.
  *
- * This function does the following:
- * - Create a pool of pages for pgt_infos.
- * - Create a shadow table for pgt
- *
  * Return: 0 for success, non-zero for failure.
  */
 int hl_mmu_init(struct hl_device *hdev)
 {
-	if (hdev->mmu_enable)
-		return hdev->mmu_func.init(hdev);
+	int rc = -EOPNOTSUPP;
 
-	return 0;
+	if (!hdev->mmu_enable)
+		return 0;
+
+	if (hdev->mmu_func[MMU_DR_PGT].init != NULL) {
+		rc = hdev->mmu_func[MMU_DR_PGT].init(hdev);
+		if (rc)
+			return rc;
+	}
+
+	if (hdev->mmu_func[MMU_HR_PGT].init != NULL)
+		rc = hdev->mmu_func[MMU_HR_PGT].init(hdev);
+
+	return rc;
 }
 
 /**
@@ -48,8 +55,14 @@ int hl_mmu_init(struct hl_device *hdev)
  */
 void hl_mmu_fini(struct hl_device *hdev)
 {
-	if (hdev->mmu_enable)
-		hdev->mmu_func.fini(hdev);
+	if (!hdev->mmu_enable)
+		return;
+
+	if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
+		hdev->mmu_func[MMU_DR_PGT].fini(hdev);
+
+	if (hdev->mmu_func[MMU_HR_PGT].fini != NULL)
+		hdev->mmu_func[MMU_HR_PGT].fini(hdev);
 }
 
 /**
@@ -63,11 +76,23 @@ void hl_mmu_fini(struct hl_device *hdev)
 int hl_mmu_ctx_init(struct hl_ctx *ctx)
 {
 	struct hl_device *hdev = ctx->hdev;
+	int rc = -EOPNOTSUPP;
 
-	if (hdev->mmu_enable)
-		return hdev->mmu_func.ctx_init(ctx);
+	if (!hdev->mmu_enable)
+		return 0;
 
-	return 0;
+	mutex_init(&ctx->mmu_lock);
+
+	if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) {
+		rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx);
+		if (rc)
+			return rc;
+	}
+
+	if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL)
+		rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx);
+
+	return rc;
 }
 
 /*
@@ -84,12 +109,20 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
 {
 	struct hl_device *hdev = ctx->hdev;
 
-	if (hdev->mmu_enable)
-		hdev->mmu_func.ctx_fini(ctx);
+	if (!hdev->mmu_enable)
+		return;
+
+	if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL)
+		hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx);
+
+	if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL)
+		hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx);
+
+	mutex_destroy(&ctx->mmu_lock);
 }
 
 /*
- * hl_mmu_unmap - unmaps a virtual addr
+ * hl_mmu_unmap_page - unmaps a virtual addr
  *
  * @ctx: pointer to the context structure
  * @virt_addr: virt addr to map from
@@ -109,7 +142,7 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
  * For optimization reasons PCI flush may be requested once after unmapping of
  * large area.
  */
-int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
+int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
 		bool flush_pte)
 {
 	struct hl_device *hdev = ctx->hdev;
@@ -117,7 +150,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
 	struct hl_mmu_properties *mmu_prop;
 	u64 real_virt_addr;
 	u32 real_page_size, npages;
-	int i, rc = 0;
+	int i, rc = 0, pgt_residency;
 	bool is_dram_addr;
 
 	if (!hdev->mmu_enable)
@@ -132,6 +165,8 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
 	else
 		mmu_prop = &prop->pmmu;
 
+	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
+
 	/*
 	 * The H/W handles mapping of specific page sizes. Hence if the page
 	 * size is bigger, we break it to sub-pages and unmap them separately.
@@ -150,7 +185,8 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
 	real_virt_addr = virt_addr;
 
 	for (i = 0 ; i < npages ; i++) {
-		rc = hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr);
+		rc = hdev->mmu_func[pgt_residency].unmap(ctx,
+						real_virt_addr, is_dram_addr);
 		if (rc)
 			break;
 
@@ -158,13 +194,13 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
 	}
 
 	if (flush_pte)
-		hdev->mmu_func.flush(ctx);
+		hdev->mmu_func[pgt_residency].flush(ctx);
 
 	return rc;
 }
 
 /*
- * hl_mmu_map - maps a virtual addr to physical addr
+ * hl_mmu_map_page - maps a virtual addr to physical addr
  *
  * @ctx: pointer to the context structure
  * @virt_addr: virt addr to map from
@@ -185,17 +221,18 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
  * For optimization reasons PCI flush may be requested once after mapping of
  * large area.
  */
-int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
-		bool flush_pte)
+int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+		u32 page_size, bool flush_pte)
 {
 	struct hl_device *hdev = ctx->hdev;
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	struct hl_mmu_properties *mmu_prop;
 	u64 real_virt_addr, real_phys_addr;
 	u32 real_page_size, npages;
-	int i, rc, mapped_cnt = 0;
+	int i, rc, pgt_residency, mapped_cnt = 0;
 	bool is_dram_addr;
 
 	if (!hdev->mmu_enable)
 		return 0;
 
@@ -208,6 +245,8 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
 	else
 		mmu_prop = &prop->pmmu;
 
+	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
+
 	/*
 	 * The H/W handles mapping of specific page sizes. Hence if the page
 	 * size is bigger, we break it to sub-pages and map them separately.
@@ -216,7 +255,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
 		real_page_size = mmu_prop->page_size;
 	} else {
 		dev_err(hdev->dev,
-			"page size of %u is not %uKB aligned, can't unmap\n",
+			"page size of %u is not %uKB aligned, can't map\n",
 			page_size, mmu_prop->page_size >> 10);
 
 		return -EFAULT;
@@ -231,8 +270,9 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
 	real_phys_addr = phys_addr;
 
 	for (i = 0 ; i < npages ; i++) {
-		rc = hdev->mmu_func.map(ctx, real_virt_addr, real_phys_addr,
-				real_page_size, is_dram_addr);
+		rc = hdev->mmu_func[pgt_residency].map(ctx,
+						real_virt_addr, real_phys_addr,
+						real_page_size, is_dram_addr);
 		if (rc)
 			goto err;
 
@@ -242,21 +282,124 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
 	}
 
 	if (flush_pte)
-		hdev->mmu_func.flush(ctx);
+		hdev->mmu_func[pgt_residency].flush(ctx);
 
 	return 0;
 
 err:
 	real_virt_addr = virt_addr;
 	for (i = 0 ; i < mapped_cnt ; i++) {
-		if (hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr))
+		if (hdev->mmu_func[pgt_residency].unmap(ctx,
+						real_virt_addr, is_dram_addr))
 			dev_warn_ratelimited(hdev->dev,
 				"failed to unmap va: 0x%llx\n", real_virt_addr);
 
 		real_virt_addr += real_page_size;
 	}
 
-	hdev->mmu_func.flush(ctx);
+	hdev->mmu_func[pgt_residency].flush(ctx);
+
+	return rc;
+}
+
+/*
+ * hl_mmu_map_contiguous - implements a wrapper for hl_mmu_map_page
+ *                         for mapping contiguous physical memory
+ *
+ * @ctx: pointer to the context structure
+ * @virt_addr: virt addr to map from
+ * @phys_addr: phys addr to map to
+ * @size: size to map
+ *
+ */
+int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
+					u64 phys_addr, u32 size)
+{
+	struct hl_device *hdev = ctx->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	u64 curr_va, curr_pa;
+	u32 page_size;
+	bool flush_pte;
+	int rc = 0, off;
+
+	if (hl_mem_area_inside_range(virt_addr, size,
+			prop->dmmu.start_addr, prop->dmmu.end_addr))
+		page_size = prop->dmmu.page_size;
+	else if (hl_mem_area_inside_range(virt_addr, size,
+			prop->pmmu.start_addr, prop->pmmu.end_addr))
+		page_size = prop->pmmu.page_size;
+	else if (hl_mem_area_inside_range(virt_addr, size,
+			prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
+		page_size = prop->pmmu_huge.page_size;
+	else
+		return -EINVAL;
+
+	for (off = 0 ; off < size ; off += page_size) {
+		curr_va = virt_addr + off;
+		curr_pa = phys_addr + off;
+		flush_pte = (off + page_size) >= size;
+		rc = hl_mmu_map_page(ctx, curr_va, curr_pa, page_size,
+								flush_pte);
+		if (rc) {
+			dev_err(hdev->dev,
+				"Map failed for va 0x%llx to pa 0x%llx\n",
+				curr_va, curr_pa);
+			goto unmap;
+		}
+	}
+
+	return rc;
+
+unmap:
+	for (; off >= 0 ; off -= page_size) {
+		curr_va = virt_addr + off;
+		flush_pte = (off - (s32) page_size) < 0;
+		if (hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte))
+			dev_warn_ratelimited(hdev->dev,
+				"failed to unmap va 0x%llx\n", curr_va);
+	}
+
+	return rc;
+}
+
+/*
+ * hl_mmu_unmap_contiguous - implements a wrapper for hl_mmu_unmap_page
+ *                           for unmapping contiguous physical memory
+ *
+ * @ctx: pointer to the context structure
+ * @virt_addr: virt addr to unmap
+ * @size: size to unmap
+ *
+ */
+int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size)
+{
+	struct hl_device *hdev = ctx->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	u64 curr_va;
+	u32 page_size;
+	bool flush_pte;
+	int rc = 0, off;
+
+	if (hl_mem_area_inside_range(virt_addr, size,
+			prop->dmmu.start_addr, prop->dmmu.end_addr))
+		page_size = prop->dmmu.page_size;
+	else if (hl_mem_area_inside_range(virt_addr, size,
+			prop->pmmu.start_addr, prop->pmmu.end_addr))
+		page_size = prop->pmmu.page_size;
+	else if (hl_mem_area_inside_range(virt_addr, size,
+			prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
+		page_size = prop->pmmu_huge.page_size;
+	else
+		return -EINVAL;
+
+	for (off = 0 ; off < size ; off += page_size) {
+		curr_va = virt_addr + off;
+		flush_pte = (off + page_size) >= size;
+		rc = hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte);
+		if (rc)
+			dev_warn_ratelimited(hdev->dev,
+				"Unmap failed for va 0x%llx\n", curr_va);
+	}
 
 	return rc;
 }
@@ -271,8 +414,14 @@ void hl_mmu_swap_out(struct hl_ctx *ctx)
 {
 	struct hl_device *hdev = ctx->hdev;
 
-	if (hdev->mmu_enable)
-		hdev->mmu_func.swap_out(ctx);
+	if (!hdev->mmu_enable)
+		return;
+
+	if (hdev->mmu_func[MMU_DR_PGT].swap_out != NULL)
+		hdev->mmu_func[MMU_DR_PGT].swap_out(ctx);
+
+	if (hdev->mmu_func[MMU_HR_PGT].swap_out != NULL)
+		hdev->mmu_func[MMU_HR_PGT].swap_out(ctx);
 }
 
 /*
@@ -285,8 +434,64 @@ void hl_mmu_swap_in(struct hl_ctx *ctx)
 {
 	struct hl_device *hdev = ctx->hdev;
 
-	if (hdev->mmu_enable)
-		hdev->mmu_func.swap_in(ctx);
+	if (!hdev->mmu_enable)
+		return;
+
+	if (hdev->mmu_func[MMU_DR_PGT].swap_in != NULL)
+		hdev->mmu_func[MMU_DR_PGT].swap_in(ctx);
+
+	if (hdev->mmu_func[MMU_HR_PGT].swap_in != NULL)
+		hdev->mmu_func[MMU_HR_PGT].swap_in(ctx);
+}
+
+int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr)
+{
+	struct hl_mmu_hop_info hops;
+	u64 tmp_addr;
+	int rc;
+
+	rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops);
+	if (rc)
+		return rc;
+
+	/* last hop holds the phys address and flags */
+	tmp_addr = hops.hop_info[hops.used_hops - 1].hop_pte_val;
+	*phys_addr = (tmp_addr & HOP_PHYS_ADDR_MASK) | (virt_addr & FLAGS_MASK);
+
+	return 0;
+}
+
+int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
+			struct hl_mmu_hop_info *hops)
+{
+	struct hl_device *hdev = ctx->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	struct hl_mmu_properties *mmu_prop;
+	int rc;
+	bool is_dram_addr;
+
+	if (!hdev->mmu_enable)
+		return -EOPNOTSUPP;
+
+	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+						prop->dmmu.start_addr,
+						prop->dmmu.end_addr);
+
+	/* host-residency is the same in PMMU and HPMMU, use one of them */
+	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
+	mutex_lock(&ctx->mmu_lock);
+
+	if (mmu_prop->host_resident)
+		rc = hdev->mmu_func[MMU_HR_PGT].get_tlb_info(ctx,
+							virt_addr, hops);
+	else
+		rc = hdev->mmu_func[MMU_DR_PGT].get_tlb_info(ctx,
+							virt_addr, hops);
+
+	mutex_unlock(&ctx->mmu_lock);
+
+	return rc;
 }
 
 int hl_mmu_if_set_funcs(struct hl_device *hdev)
@@ -297,7 +502,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
 	switch (hdev->asic_type) {
 	case ASIC_GOYA:
 	case ASIC_GAUDI:
-		hl_mmu_v1_set_funcs(hdev);
+		hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
 		break;
 	default:
 		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
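The mmu.c hunks above turn hdev->mmu_func into an array indexed by page-table residency (device- vs. host-resident) and NULL-check each hook before calling it. A self-contained sketch of that dispatch shape; the ops struct and names below are invented for illustration:

#include <stdio.h>

enum pgt_residency { PGT_DEVICE, PGT_HOST, PGT_MAX };

struct mmu_ops {
	int (*init)(void);
	void (*fini)(void);
};

static int dr_init(void)  { printf("device-resident PGT init\n"); return 0; }
static void dr_fini(void) { printf("device-resident PGT fini\n"); }

/* Only the device-resident flavour is wired up, like mmu_v1 today. */
static struct mmu_ops mmu_func[PGT_MAX] = {
	[PGT_DEVICE] = { .init = dr_init, .fini = dr_fini },
};

static int mmu_init_all(void)
{
	int rc = -1;   /* stays "not supported" if no flavour is registered */

	for (int i = 0; i < PGT_MAX; i++) {
		if (!mmu_func[i].init)
			continue;          /* unimplemented flavour: skip it */
		rc = mmu_func[i].init();
		if (rc)
			return rc;
	}
	return rc;
}

int main(void)
{
	if (!mmu_init_all())
		printf("MMU layers initialised\n");
	return 0;
}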
diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c
index 8d1eb5265419d9bda13159dd6cb332b6fb9a72ca..2ce6ea89d4fa22930aef8e325e2db9ce1b193d49 100644
--- a/drivers/misc/habanalabs/common/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu_v1.c
@@ -8,7 +8,6 @@
 #include "habanalabs.h"
 #include "../include/hw_ip/mmu/mmu_general.h"
 
-#include <linux/genalloc.h>
 #include <linux/slab.h>
 
 static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
@@ -29,7 +28,7 @@ static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
 {
 	struct hl_device *hdev = ctx->hdev;
 
-	gen_pool_free(hdev->mmu_priv.mmu_pgt_pool, pgt_info->phys_addr,
+	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
 			hdev->asic_prop.mmu_hop_table_size);
 	hash_del(&pgt_info->node);
 	kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
@@ -54,7 +53,7 @@ static u64 alloc_hop(struct hl_ctx *ctx)
 	if (!pgt_info)
 		return ULLONG_MAX;
 
-	phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.mmu_pgt_pool,
+	phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
 					prop->mmu_hop_table_size);
 	if (!phys_addr) {
 		dev_err(hdev->dev, "failed to allocate page\n");
@@ -75,7 +74,7 @@ static u64 alloc_hop(struct hl_ctx *ctx)
 	return shadow_addr;
 
 shadow_err:
-	gen_pool_free(hdev->mmu_priv.mmu_pgt_pool, phys_addr,
+	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
 			prop->mmu_hop_table_size);
 pool_add_err:
 	kfree(pgt_info);
@@ -91,7 +90,7 @@ static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
 
 static inline u64 get_hop0_addr(struct hl_ctx *ctx)
 {
-	return (u64) (uintptr_t) ctx->hdev->mmu_priv.mmu_shadow_hop0 +
+	return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
 			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
 }
 
@@ -263,7 +262,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
 		hop2_pte_addr, hop3_pte_addr, pte_val;
 	int rc, i, j, hop3_allocated = 0;
 
-	if ((!hdev->dram_supports_virtual_memory) ||
+	if ((!prop->dram_supports_virtual_memory) ||
 			(!hdev->dram_default_page_mapping) ||
 			(ctx->asid == HL_KERNEL_ASID_ID))
 		return 0;
@@ -363,7 +362,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
 		hop2_pte_addr, hop3_pte_addr;
 	int i, j;
 
-	if ((!hdev->dram_supports_virtual_memory) ||
+	if ((!prop->dram_supports_virtual_memory) ||
 			(!hdev->dram_default_page_mapping) ||
 			(ctx->asid == HL_KERNEL_ASID_ID))
 		return;
@@ -419,15 +418,15 @@ static int hl_mmu_v1_init(struct hl_device *hdev)
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	int rc;
 
-	hdev->mmu_priv.mmu_pgt_pool =
+	hdev->mmu_priv.dr.mmu_pgt_pool =
 			gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
 
-	if (!hdev->mmu_priv.mmu_pgt_pool) {
+	if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
 		dev_err(hdev->dev, "Failed to create page gen pool\n");
 		return -ENOMEM;
 	}
 
-	rc = gen_pool_add(hdev->mmu_priv.mmu_pgt_pool, prop->mmu_pgt_addr +
+	rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
 			prop->mmu_hop0_tables_total_size,
 			prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
 			-1);
@@ -436,10 +435,10 @@ static int hl_mmu_v1_init(struct hl_device *hdev)
 		goto err_pool_add;
 	}
 
-	hdev->mmu_priv.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
+	hdev->mmu_priv.dr.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
 						prop->mmu_hop_table_size,
 						GFP_KERNEL | __GFP_ZERO);
-	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.mmu_shadow_hop0)) {
+	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
 		rc = -ENOMEM;
 		goto err_pool_add;
 	}
@@ -449,7 +448,7 @@ static int hl_mmu_v1_init(struct hl_device *hdev)
 	return 0;
 
 err_pool_add:
-	gen_pool_destroy(hdev->mmu_priv.mmu_pgt_pool);
+	gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
 
 	return rc;
 }
@@ -468,8 +467,8 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
 {
 	/* MMU H/W fini was already done in device hw_fini() */
 
-	kvfree(hdev->mmu_priv.mmu_shadow_hop0);
-	gen_pool_destroy(hdev->mmu_priv.mmu_pgt_pool);
+	kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+	gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
 }
 
 /**
@@ -482,9 +481,7 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
  */
 static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx)
 {
-	mutex_init(&ctx->mmu_lock);
 	hash_init(ctx->mmu_shadow_hash);
-
 	return dram_default_mapping_init(ctx);
 }
 
@@ -517,8 +514,6 @@ static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
 			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
 		_free_hop(ctx, pgt_info);
 	}
-
-	mutex_destroy(&ctx->mmu_lock);
 }
 
 static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
@@ -842,15 +837,114 @@ static void hl_mmu_v1_swap_in(struct hl_ctx *ctx)
 
 }
 
+static inline u64 get_hop_pte_addr(struct hl_ctx *ctx,
+				struct hl_mmu_properties *mmu_prop,
+				int hop_num, u64 hop_addr, u64 virt_addr)
+{
+	switch (hop_num) {
+	case 0:
+		return get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
+	case 1:
+		return get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
+	case 2:
+		return get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
+	case 3:
+		return get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
+	case 4:
+		return get_hop4_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
+	default:
+		break;
+	}
+	return U64_MAX;
+}
+
+static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
+				struct hl_mmu_hop_info *hops)
+{
+	struct hl_device *hdev = ctx->hdev;
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	struct hl_mmu_properties *mmu_prop;
+	bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge;
+	int i, used_hops;
+
+	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+						prop->dmmu.start_addr,
+						prop->dmmu.end_addr);
+	is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
+						prop->pmmu.start_addr,
+						prop->pmmu.end_addr);
+	is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
+						prop->pmmu_huge.page_size,
+						prop->pmmu_huge.start_addr,
+						prop->pmmu_huge.end_addr);
+	if (is_dram_addr) {
+		mmu_prop = &prop->dmmu;
+		is_huge = true;
+	} else if (is_pmmu_addr) {
+		mmu_prop = &prop->pmmu;
+		is_huge = false;
+	} else if (is_pmmu_h_addr) {
+		mmu_prop = &prop->pmmu_huge;
+		is_huge = true;
+	} else {
+		return -EINVAL;
+	}
+
+	used_hops = mmu_prop->num_hops;
+
+	/* huge pages use fewer hops */
+	if (is_huge)
+		used_hops--;
+
+	hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
+	hops->hop_info[0].hop_pte_addr =
+			get_hop_pte_addr(ctx, mmu_prop, 0,
+					hops->hop_info[0].hop_addr, virt_addr);
+	hops->hop_info[0].hop_pte_val =
+			hdev->asic_funcs->read_pte(hdev,
+						hops->hop_info[0].hop_pte_addr);
+
+	for (i = 1 ; i < used_hops ; i++) {
+		hops->hop_info[i].hop_addr =
+			get_next_hop_addr(ctx,
+					hops->hop_info[i - 1].hop_pte_val);
+		if (hops->hop_info[i].hop_addr == ULLONG_MAX)
+			return -EFAULT;
+
+		hops->hop_info[i].hop_pte_addr =
+				get_hop_pte_addr(ctx, mmu_prop, i,
+						hops->hop_info[i].hop_addr,
+						virt_addr);
+		hops->hop_info[i].hop_pte_val =
+				hdev->asic_funcs->read_pte(hdev,
+						hops->hop_info[i].hop_pte_addr);
+
+		if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
+			return -EFAULT;
+
+		if (hops->hop_info[i].hop_pte_val & LAST_MASK)
+			break;
+	}
+
+	/* if we passed over all hops then no last hop was found */
+	if (i == mmu_prop->num_hops)
+		return -EFAULT;
+
+	if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
+		return -EFAULT;
+
+	hops->used_hops = i + 1;
+
+	return 0;
+}
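hl_mmu_v1_get_tlb_info() above replays the hardware hop walk in software: read the PTE for the address at hop 0, follow it to the next hop, and stop when a PTE carries the LAST bit, failing if any hop is not PRESENT. A deliberately tiny two-level walk in the same spirit; the masks and table layout are invented, not Gaudi's:

#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT  0x1ULL
#define PTE_LAST     0x2ULL
#define FRAME_MASK   ~0xfffULL            /* 4 KiB frames in this toy layout */

/* Two hop levels, 16 entries each. A non-LAST PTE stores the index of the
 * next hop table in its frame field instead of a real physical address.
 */
static uint64_t hops[2][16];

static int walk(uint64_t va, uint64_t *pa)
{
	uint64_t pte = hops[0][(va >> 16) & 0xf];   /* hop 0 lookup */

	if (!(pte & PTE_PRESENT))
		return -1;

	if (!(pte & PTE_LAST)) {
		uint64_t next = (pte & FRAME_MASK) >> 12;    /* next hop id */

		pte = hops[next][(va >> 12) & 0xf];          /* hop 1 lookup */
		if (!(pte & PTE_PRESENT) || !(pte & PTE_LAST))
			return -1;
	}

	*pa = (pte & FRAME_MASK) | (va & 0xfff);    /* keep the page offset */
	return 0;
}

int main(void)
{
	uint64_t pa;

	/* map VA 0x12345 -> PA 0xabc000: hop 0 points at hop table 1,
	 * hop 1 holds the terminal (LAST) entry.
	 */
	hops[0][(0x12345 >> 16) & 0xf] = (1ULL << 12) | PTE_PRESENT;
	hops[1][(0x12345 >> 12) & 0xf] = 0xabc000ULL | PTE_PRESENT | PTE_LAST;

	if (!walk(0x12345, &pa))
		printf("va 0x12345 -> pa 0x%llx\n", (unsigned long long)pa);
	return 0;
}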
+
 /*
  * hl_mmu_v1_prepare - prepare mmu  for working with mmu v1
  *
  * @hdev: pointer to the device structure
  */
-void hl_mmu_v1_set_funcs(struct hl_device *hdev)
+void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
 {
-	struct hl_mmu_funcs *mmu = &hdev->mmu_func;
-
 	mmu->init = hl_mmu_v1_init;
 	mmu->fini = hl_mmu_v1_fini;
 	mmu->ctx_init = hl_mmu_v1_ctx_init;
@@ -860,4 +954,5 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev)
 	mmu->flush = flush;
 	mmu->swap_out = hl_mmu_v1_swap_out;
 	mmu->swap_in = hl_mmu_v1_swap_in;
+	mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
 }
diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c
index 4327e5704ebb69f704e0e04b6fd0e94bed5fc8ce..923b2606e29fe607214aa1b0e91183b22f595531 100644
--- a/drivers/misc/habanalabs/common/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -338,17 +338,12 @@ static int hl_pci_set_dma_mask(struct hl_device *hdev)
 /**
  * hl_pci_init() - PCI initialization code.
  * @hdev: Pointer to hl_device structure.
- * @cpu_boot_status_reg: status register of the device's CPU
- * @boot_err0_reg: boot error register of the device's CPU
- * @preboot_ver_timeout: how much to wait before bailing out on reading
- *                       the preboot version
  *
  * Set DMA masks, initialize the PCI controller and map the PCI BARs.
  *
  * Return: 0 on success, non-zero for failure.
  */
-int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg,
-		u32 boot_err0_reg, u32 preboot_ver_timeout)
+int hl_pci_init(struct hl_device *hdev)
 {
 	struct pci_dev *pdev = hdev->pdev;
 	int rc;
@@ -380,15 +375,6 @@ int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg,
 	if (rc)
 		goto unmap_pci_bars;
 
-	/* Before continuing in the initialization, we need to read the preboot
-	 * version to determine whether we run with a security-enabled firmware
-	 * The check will be done in each ASIC's specific code
-	 */
-	rc = hl_fw_read_preboot_ver(hdev, cpu_boot_status_reg, boot_err0_reg,
-					preboot_ver_timeout);
-	if (rc)
-		goto unmap_pci_bars;
-
 	return 0;
 
 unmap_pci_bars:
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 3ceae87016b1686222d8ec24fa59a02c656bb19f..4366d8f9384290437e05fe23d06044f0bda9e50c 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -12,7 +12,7 @@
 long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
 {
 	struct cpucp_packet pkt;
-	long result;
+	u64 result;
 	int rc;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -32,10 +32,10 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
 		dev_err(hdev->dev,
 			"Failed to get frequency of PLL %d, error %d\n",
 			pll_index, rc);
-		result = rc;
+		return rc;
 	}
 
-	return result;
+	return (long) result;
 }
 
 void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
@@ -62,7 +62,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
 u64 hl_get_max_power(struct hl_device *hdev)
 {
 	struct cpucp_packet pkt;
-	long result;
+	u64 result;
 	int rc;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -75,7 +75,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
 
 	if (rc) {
 		dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
-		result = rc;
+		return (u64) rc;
 	}
 
 	return result;
@@ -276,6 +276,8 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
 		str = "In reset";
 	else if (hdev->disabled)
 		str = "Malfunction";
+	else if (hdev->needs_reset)
+		str = "Needs Reset";
 	else
 		str = "Operational";
 
@@ -304,7 +306,7 @@ static ssize_t max_power_show(struct device *dev, struct device_attribute *attr,
 	struct hl_device *hdev = dev_get_drvdata(dev);
 	long val;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	val = hl_get_max_power(hdev);
@@ -319,7 +321,7 @@ static ssize_t max_power_store(struct device *dev,
 	unsigned long value;
 	int rc;
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, NULL)) {
 		count = -ENODEV;
 		goto out;
 	}
@@ -347,7 +349,7 @@ static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj,
 	char *data;
 	int rc;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	if (!max_size)
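The sysfs hunks above fix an error path in hl_get_frequency()/hl_get_max_power(): the old code stored the negative return code into the result variable and fell through, so a caller could mistake an errno for a value. A minimal sketch of the corrected pattern, using a made-up query_pll():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the firmware query; pass fail=1 to exercise the error path. */
static int query_pll(int fail, uint64_t *result)
{
	if (fail)
		return -5;      /* e.g. -EIO */
	*result = 1600;         /* MHz */
	return 0;
}

static long get_frequency(int fail)
{
	uint64_t result = 0;
	int rc = query_pll(fail, &result);

	if (rc)
		return rc;      /* return the errno itself, don't launder it
				 * through the result variable
				 */
	return (long)result;
}

int main(void)
{
	printf("ok:   %ld\n", get_frequency(0));
	printf("fail: %ld\n", get_frequency(1));
	return 0;
}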
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 7ea6b4368a913319783be3dfd9094a5bdd576c86..1f1926607c5e7acf9eb2902fa6aecb0e0ebd38c7 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -17,8 +17,6 @@
 #include <linux/pci.h>
 #include <linux/firmware.h>
 #include <linux/hwmon.h>
-#include <linux/genalloc.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/iommu.h>
 #include <linux/seq_file.h>
 
@@ -38,7 +36,7 @@
  *
  * MMU is always enabled.
  *
- * QMAN DMA channels 0,1,5 (PCI DMAN):
+ * QMAN DMA channels 0,1 (PCI DMAN):
  *     - DMA is not secured.
  *     - PQ and CQ are secured.
  *     - CP is secured: The driver needs to parse CB but WREG should be allowed
@@ -55,7 +53,7 @@
  *       idle)
  *     - MMU page tables area clear (happens on init)
  *
- * QMAN DMA 2-4,6,7, TPC, MME, NIC:
+ * QMAN DMA 2-7, TPC, MME, NIC:
  * PQ is secured and is located on the Host (HBM CON TPC3 bug)
  * CQ, CP and the engine are not secured
  *
@@ -67,7 +65,7 @@
 
 #define GAUDI_DMA_POOL_BLK_SIZE		0x100 /* 256 bytes */
 
-#define GAUDI_RESET_TIMEOUT_MSEC	1000		/* 1000ms */
+#define GAUDI_RESET_TIMEOUT_MSEC	2000		/* 2000ms */
 #define GAUDI_RESET_WAIT_MSEC		1		/* 1ms */
 #define GAUDI_CPU_RESET_WAIT_MSEC	200		/* 200ms */
 #define GAUDI_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */
@@ -103,6 +101,10 @@
 		BIT(GAUDI_ENGINE_ID_MME_2) |\
 		GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0))
 
+#define HBM_SCRUBBING_TIMEOUT_US	1000000 /* 1s */
+
+#define GAUDI_PLL_MAX 10
+
 static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
 		"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
 		"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -113,12 +115,12 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
 static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
 	[GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0,
 	[GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1,
-	[GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5,
 	[GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2,
 	[GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3,
 	[GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4,
-	[GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6,
-	[GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7
+	[GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_5,
+	[GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_6,
+	[GAUDI_HBM_DMA_6] = GAUDI_ENGINE_ID_DMA_7
 };
 
 static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
@@ -130,10 +132,6 @@ static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
 	[5] = GAUDI_QUEUE_ID_DMA_1_1,
 	[6] = GAUDI_QUEUE_ID_DMA_1_2,
 	[7] = GAUDI_QUEUE_ID_DMA_1_3,
-	[8] = GAUDI_QUEUE_ID_DMA_5_0,
-	[9] = GAUDI_QUEUE_ID_DMA_5_1,
-	[10] = GAUDI_QUEUE_ID_DMA_5_2,
-	[11] = GAUDI_QUEUE_ID_DMA_5_3
 };
 
 static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
@@ -153,6 +151,19 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
 	[PACKET_LOAD_AND_EXE]	= sizeof(struct packet_load_and_exe)
 };
 
+static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = {
+	[CPU_PLL] = mmPSOC_CPU_PLL_NR,
+	[PCI_PLL] = mmPSOC_PCI_PLL_NR,
+	[SRAM_PLL] = mmSRAM_W_PLL_NR,
+	[HBM_PLL] = mmPSOC_HBM_PLL_NR,
+	[NIC_PLL] = mmNIC0_PLL_NR,
+	[DMA_PLL] = mmDMA_W_PLL_NR,
+	[MESH_PLL] = mmMESH_W_PLL_NR,
+	[MME_PLL] = mmPSOC_MME_PLL_NR,
+	[TPC_PLL] = mmPSOC_TPC_PLL_NR,
+	[IF_PLL] = mmIF_W_PLL_NR
+};
+
 static inline bool validate_packet_id(enum packet_id id)
 {
 	switch (id) {
@@ -249,10 +260,10 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
 	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_1 */
 	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_2 */
 	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_3 */
-	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_0 */
-	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_1 */
-	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_2 */
-	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_3 */
 	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_0 */
 	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_1 */
 	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_2 */
@@ -301,46 +312,46 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
 	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_1 */
 	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_2 */
 	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_3 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_0_0 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_0_1 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_0_2 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_0_3 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_1_0 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_1_1 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_1_2 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_1_3 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_2_0 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_2_1 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_2_2 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_2_3 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_3_0 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_3_1 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_3_2 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_3_3 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_4_0 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_4_1 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_4_2 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_4_3 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_5_0 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_5_1 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_5_2 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_5_3 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_6_0 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_6_1 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_6_2 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_6_3 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_7_0 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_7_1 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_7_2 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_7_3 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_8_0 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_8_1 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_8_2 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_8_3 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_9_0 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_9_1 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_9_2 */
-	QUEUE_TYPE_NA,  /* GAUDI_QUEUE_ID_NIC_9_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_3 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_0 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_1 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_2 */
+	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_3 */
 };
 
 struct ecc_info_extract_params {
@@ -362,6 +373,31 @@ static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
 static int gaudi_cpucp_info_get(struct hl_device *hdev);
 static void gaudi_disable_clock_gating(struct hl_device *hdev);
 static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
+static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
+				u32 size);
+static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
+				struct hl_gen_wait_properties *prop);
+
+static inline enum hl_collective_mode
+get_collective_mode(struct hl_device *hdev, u32 queue_id)
+{
+	if (gaudi_queue_type[queue_id] == QUEUE_TYPE_EXT)
+		return HL_COLLECTIVE_MASTER;
+
+	if (queue_id >= GAUDI_QUEUE_ID_DMA_5_0 &&
+			queue_id <= GAUDI_QUEUE_ID_DMA_5_3)
+		return HL_COLLECTIVE_SLAVE;
+
+	if (queue_id >= GAUDI_QUEUE_ID_TPC_7_0 &&
+			queue_id <= GAUDI_QUEUE_ID_TPC_7_3)
+		return HL_COLLECTIVE_SLAVE;
+
+	if (queue_id >= GAUDI_QUEUE_ID_NIC_0_0 &&
+			queue_id <= GAUDI_QUEUE_ID_NIC_9_3)
+		return HL_COLLECTIVE_SLAVE;
+
+	return HL_COLLECTIVE_NOT_SUPPORTED;
+}
 
 static int gaudi_get_fixed_properties(struct hl_device *hdev)
 {
@@ -381,29 +417,44 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
 		if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) {
 			prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
 			prop->hw_queues_props[i].driver_only = 0;
-			prop->hw_queues_props[i].requires_kernel_cb = 1;
 			prop->hw_queues_props[i].supports_sync_stream = 1;
+			prop->hw_queues_props[i].cb_alloc_flags =
+				CB_ALLOC_KERNEL;
 			num_sync_stream_queues++;
 		} else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) {
 			prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
 			prop->hw_queues_props[i].driver_only = 1;
-			prop->hw_queues_props[i].requires_kernel_cb = 0;
 			prop->hw_queues_props[i].supports_sync_stream = 0;
+			prop->hw_queues_props[i].cb_alloc_flags =
+				CB_ALLOC_KERNEL;
 		} else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) {
 			prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
 			prop->hw_queues_props[i].driver_only = 0;
-			prop->hw_queues_props[i].requires_kernel_cb = 0;
-		} else if (gaudi_queue_type[i] == QUEUE_TYPE_NA) {
-			prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
-			prop->hw_queues_props[i].driver_only = 0;
-			prop->hw_queues_props[i].requires_kernel_cb = 0;
 			prop->hw_queues_props[i].supports_sync_stream = 0;
+			prop->hw_queues_props[i].cb_alloc_flags =
+				CB_ALLOC_USER;
+
 		}
+		prop->hw_queues_props[i].collective_mode =
+						get_collective_mode(hdev, i);
 	}
 
 	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
-	prop->sync_stream_first_sob = 0;
-	prop->sync_stream_first_mon = 0;
+	prop->collective_first_sob = 0;
+	prop->collective_first_mon = 0;
+
+	/* 2 SOBs per internal queue stream are reserved for collective */
+	prop->sync_stream_first_sob =
+			ALIGN(NUMBER_OF_SOBS_IN_GRP, HL_MAX_SOBS_PER_MONITOR)
+			* QMAN_STREAMS * HL_RSVD_SOBS;
+
+	/* 1 monitor per internal queue stream is reserved for collective
+	 * 2 monitors per external queue stream are reserved for collective
+	 */
+	prop->sync_stream_first_mon =
+			(NUMBER_OF_COLLECTIVE_QUEUES * QMAN_STREAMS) +
+			(NUMBER_OF_EXT_HW_QUEUES * 2);
+
 	prop->dram_base_address = DRAM_PHYS_BASE;
 	prop->dram_size = GAUDI_HBM_SIZE_32GB;
 	prop->dram_end_address = prop->dram_base_address +
@@ -426,6 +477,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
 	prop->mmu_hop_table_size = HOP_TABLE_SIZE;
 	prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
 	prop->dram_page_size = PAGE_SIZE_2MB;
+	prop->dram_supports_virtual_memory = false;
 
 	prop->pmmu.hop0_shift = HOP0_SHIFT;
 	prop->pmmu.hop1_shift = HOP1_SHIFT;
@@ -472,9 +524,16 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
 	prop->max_pending_cs = GAUDI_MAX_PENDING_CS;
 
 	prop->first_available_user_sob[HL_GAUDI_WS_DCORE] =
-			num_sync_stream_queues * HL_RSVD_SOBS;
+			prop->sync_stream_first_sob +
+			(num_sync_stream_queues * HL_RSVD_SOBS);
 	prop->first_available_user_mon[HL_GAUDI_WS_DCORE] =
-			num_sync_stream_queues * HL_RSVD_MONS;
+			prop->sync_stream_first_mon +
+			(num_sync_stream_queues * HL_RSVD_MONS);
+
+	/* disable fw security for now, set it in a later stage */
+	prop->fw_security_disabled = true;
+	prop->fw_security_status_valid = false;
+	prop->hard_reset_done_by_fw = false;
 
 	return 0;
 }
@@ -562,6 +621,11 @@ static int gaudi_init_iatu(struct hl_device *hdev)
 	return rc;
 }
 
+static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev)
+{
+	return RREG32(mmHW_STATE);
+}
+
 static int gaudi_early_init(struct hl_device *hdev)
 {
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -599,17 +663,32 @@ static int gaudi_early_init(struct hl_device *hdev)
 
 	prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
 
-	rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
-			mmCPU_BOOT_ERR0, GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
+	rc = hl_pci_init(hdev);
 	if (rc)
 		goto free_queue_props;
 
-	/* GAUDI Firmware does not yet support security */
-	prop->fw_security_disabled = true;
-	dev_info(hdev->dev, "firmware-level security is disabled\n");
+	if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+		dev_info(hdev->dev,
+			"H/W state is dirty, must reset before initializing\n");
+		hdev->asic_funcs->hw_fini(hdev, true);
+	}
+
+	/* Before continuing with the initialization, read the preboot version
+	 * to determine whether we are running with security-enabled firmware
+	 */
+	rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
+			mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
+			GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
+	if (rc) {
+		if (hdev->reset_on_preboot_fail)
+			hdev->asic_funcs->hw_fini(hdev, true);
+		goto pci_fini;
+	}
 
 	return 0;
 
+pci_fini:
+	hl_pci_fini(hdev);
 free_queue_props:
 	kfree(hdev->asic_prop.hw_queues_props);
 	return rc;
@@ -624,44 +703,95 @@ static int gaudi_early_fini(struct hl_device *hdev)
 }
 
 /**
- * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
+ * gaudi_fetch_pll_frequency - Fetch PLL frequency values
  *
  * @hdev: pointer to hl_device structure
+ * @pll_index: index of the pll to fetch frequency from
+ * @pll_freq_arr: pointer to store the pll frequency in MHz in each available
+ *                output; if a certain output is not available, a 0 is set
  *
  */
-static void gaudi_fetch_psoc_frequency(struct hl_device *hdev)
+static int gaudi_fetch_pll_frequency(struct hl_device *hdev,
+				enum gaudi_pll_index pll_index,
+				u16 *pll_freq_arr)
 {
-	struct asic_fixed_properties *prop = &hdev->asic_prop;
-	u32 trace_freq = 0;
-	u32 pll_clk = 0;
-	u32 div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
-	u32 div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
-	u32 nr = RREG32(mmPSOC_CPU_PLL_NR);
-	u32 nf = RREG32(mmPSOC_CPU_PLL_NF);
-	u32 od = RREG32(mmPSOC_CPU_PLL_OD);
-
-	if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
-		if (div_sel == DIV_SEL_REF_CLK)
-			trace_freq = PLL_REF_CLK;
-		else
-			trace_freq = PLL_REF_CLK / (div_fctr + 1);
-	} else if (div_sel == DIV_SEL_PLL_CLK ||
+	u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel,
+			pll_base_addr = gaudi_pll_base_addresses[pll_index];
+	u16 freq = 0;
+	int i, rc;
+
+	if (hdev->asic_prop.fw_security_status_valid &&
+			(hdev->asic_prop.fw_app_security_map &
+					CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
+		rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr);
+
+		if (rc)
+			return rc;
+	} else if (hdev->asic_prop.fw_security_disabled) {
+		/* Backward compatibility */
+		nr = RREG32(pll_base_addr + PLL_NR_OFFSET);
+		nf = RREG32(pll_base_addr + PLL_NF_OFFSET);
+		od = RREG32(pll_base_addr + PLL_OD_OFFSET);
+
+		for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) {
+			div_fctr = RREG32(pll_base_addr +
+					PLL_DIV_FACTOR_0_OFFSET + i * 4);
+			div_sel = RREG32(pll_base_addr +
+					PLL_DIV_SEL_0_OFFSET + i * 4);
+
+			if (div_sel == DIV_SEL_REF_CLK ||
+				div_sel == DIV_SEL_DIVIDED_REF) {
+				if (div_sel == DIV_SEL_REF_CLK)
+					freq = PLL_REF_CLK;
+				else
+					freq = PLL_REF_CLK / (div_fctr + 1);
+			} else if (div_sel == DIV_SEL_PLL_CLK ||
 					div_sel == DIV_SEL_DIVIDED_PLL) {
-		pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
-		if (div_sel == DIV_SEL_PLL_CLK)
-			trace_freq = pll_clk;
-		else
-			trace_freq = pll_clk / (div_fctr + 1);
+				pll_clk = PLL_REF_CLK * (nf + 1) /
+						((nr + 1) * (od + 1));
+				if (div_sel == DIV_SEL_PLL_CLK)
+					freq = pll_clk;
+				else
+					freq = pll_clk / (div_fctr + 1);
+			} else {
+				dev_warn(hdev->dev,
+					"Received invalid div select value: %d",
+					div_sel);
+			}
+
+			pll_freq_arr[i] = freq;
+		}
 	} else {
-		dev_warn(hdev->dev,
-			"Received invalid div select value: %d", div_sel);
+		dev_err(hdev->dev, "Failed to fetch PLL frequency values\n");
+		return -EIO;
 	}
 
-	prop->psoc_timestamp_frequency = trace_freq;
-	prop->psoc_pci_pll_nr = nr;
-	prop->psoc_pci_pll_nf = nf;
-	prop->psoc_pci_pll_od = od;
-	prop->psoc_pci_pll_div_factor = div_fctr;
+	return 0;
+}
+
+/**
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
+{
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	u16 pll_freq[HL_PLL_NUM_OUTPUTS];
+	int rc;
+
+	rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq);
+	if (rc)
+		return rc;
+
+	prop->psoc_timestamp_frequency = pll_freq[2];
+	prop->psoc_pci_pll_nr = 0;
+	prop->psoc_pci_pll_nf = 0;
+	prop->psoc_pci_pll_od = 0;
+	prop->psoc_pci_pll_div_factor = 0;
+
+	return 0;
 }
 
 static int _gaudi_init_tpc_mem(struct hl_device *hdev,
@@ -708,7 +838,7 @@ static int _gaudi_init_tpc_mem(struct hl_device *hdev,
 
 	job->id = 0;
 	job->user_cb = cb;
-	job->user_cb->cs_cnt++;
+	atomic_inc(&job->user_cb->cs_cnt);
 	job->user_cb_size = cb_size;
 	job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
 	job->patched_cb = job->user_cb;
@@ -731,7 +861,7 @@ static int _gaudi_init_tpc_mem(struct hl_device *hdev,
 	hl_userptr_delete_list(hdev, &job->userptr_list);
 	hl_debugfs_remove_job(hdev, job);
 	kfree(job);
-	cb->cs_cnt--;
+	atomic_dec(&cb->cs_cnt);
 
 release_cb:
 	hl_cb_put(cb);
@@ -786,6 +916,451 @@ static int gaudi_init_tpc_mem(struct hl_device *hdev)
 	return rc;
 }
 
+static void gaudi_collective_map_sobs(struct hl_device *hdev, u32 stream)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	struct gaudi_collective_properties *prop = &gaudi->collective_props;
+	struct hl_hw_queue *q;
+	u32 i, sob_id, sob_group_id, queue_id;
+
+	/* Iterate through SOB groups and assign a SOB for each slave queue */
+	sob_group_id =
+		stream * HL_RSVD_SOBS + prop->curr_sob_group_idx[stream];
+	sob_id = prop->hw_sob_group[sob_group_id].base_sob_id;
+
+	queue_id = GAUDI_QUEUE_ID_NIC_0_0 + stream;
+	for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) {
+		q = &hdev->kernel_queues[queue_id + (4 * i)];
+		q->sync_stream_prop.collective_sob_id = sob_id + i;
+	}
+
+	/* Both DMA5 and TPC7 use the same resources since only a single
+	 * engine needs to participate in the reduction process
+	 */
+	queue_id = GAUDI_QUEUE_ID_DMA_5_0 + stream;
+	q = &hdev->kernel_queues[queue_id];
+	q->sync_stream_prop.collective_sob_id =
+			sob_id + NIC_NUMBER_OF_ENGINES;
+
+	queue_id = GAUDI_QUEUE_ID_TPC_7_0 + stream;
+	q = &hdev->kernel_queues[queue_id];
+	q->sync_stream_prop.collective_sob_id =
+			sob_id + NIC_NUMBER_OF_ENGINES;
+}
+
+static void gaudi_sob_group_hw_reset(struct kref *ref)
+{
+	struct gaudi_hw_sob_group *hw_sob_group =
+		container_of(ref, struct gaudi_hw_sob_group, kref);
+	struct hl_device *hdev = hw_sob_group->hdev;
+	int i;
+
+	for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++)
+		WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+				(hw_sob_group->base_sob_id + i) * 4, 0);
+
+	kref_init(&hw_sob_group->kref);
+}
+
+static void gaudi_sob_group_reset_error(struct kref *ref)
+{
+	struct gaudi_hw_sob_group *hw_sob_group =
+		container_of(ref, struct gaudi_hw_sob_group, kref);
+	struct hl_device *hdev = hw_sob_group->hdev;
+
+	dev_crit(hdev->dev,
+		"SOB release shouldn't be called here, base_sob_id: %d\n",
+		hw_sob_group->base_sob_id);
+}
+
+static int gaudi_collective_init(struct hl_device *hdev)
+{
+	u32 i, master_monitor_sobs, sob_id, reserved_sobs_per_group;
+	struct gaudi_collective_properties *prop;
+	struct gaudi_device *gaudi;
+
+	gaudi = hdev->asic_specific;
+	prop = &gaudi->collective_props;
+	sob_id = hdev->asic_prop.collective_first_sob;
+
+	/* First sob in group must be aligned to HL_MAX_SOBS_PER_MONITOR */
+	reserved_sobs_per_group =
+		ALIGN(NUMBER_OF_SOBS_IN_GRP, HL_MAX_SOBS_PER_MONITOR);
+
+	/* Init SOB groups */
+	for (i = 0 ; i < NUM_SOB_GROUPS; i++) {
+		prop->hw_sob_group[i].hdev = hdev;
+		prop->hw_sob_group[i].base_sob_id = sob_id;
+		sob_id += reserved_sobs_per_group;
+		gaudi_sob_group_hw_reset(&prop->hw_sob_group[i].kref);
+	}
+
+	for (i = 0 ; i < QMAN_STREAMS; i++) {
+		prop->next_sob_group_val[i] = 1;
+		prop->curr_sob_group_idx[i] = 0;
+		gaudi_collective_map_sobs(hdev, i);
+	}
+
+	prop->mstr_sob_mask[0] = 0;
+	master_monitor_sobs = HL_MAX_SOBS_PER_MONITOR;
+	for (i = 0 ; i < master_monitor_sobs ; i++)
+		if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i))
+			prop->mstr_sob_mask[0] |= BIT(i);
+
+	prop->mstr_sob_mask[1] = 0;
+	master_monitor_sobs =
+		NIC_NUMBER_OF_ENGINES - HL_MAX_SOBS_PER_MONITOR;
+	for (i = 0 ; i < master_monitor_sobs; i++) {
+		if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i))
+			prop->mstr_sob_mask[1] |= BIT(i);
+	}
+
+	/* Set collective engine bit */
+	prop->mstr_sob_mask[1] |= BIT(i);
+
+	return 0;
+}
+
+static void gaudi_reset_sob_group(struct hl_device *hdev, u16 sob_group)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	struct gaudi_collective_properties *cprop = &gaudi->collective_props;
+
+	kref_put(&cprop->hw_sob_group[sob_group].kref,
+					gaudi_sob_group_hw_reset);
+}
+
+static void gaudi_collective_master_init_job(struct hl_device *hdev,
+		struct hl_cs_job *job, u32 stream, u32 sob_group_offset)
+{
+	u32 master_sob_base, master_monitor, queue_id, cb_size = 0;
+	struct gaudi_collective_properties *cprop;
+	struct hl_gen_wait_properties wait_prop;
+	struct hl_sync_stream_properties *prop;
+	struct gaudi_device *gaudi;
+
+	gaudi = hdev->asic_specific;
+	cprop = &gaudi->collective_props;
+	queue_id = job->hw_queue_id;
+	prop = &hdev->kernel_queues[queue_id].sync_stream_prop;
+
+	master_sob_base =
+		cprop->hw_sob_group[sob_group_offset].base_sob_id;
+	master_monitor = prop->collective_mstr_mon_id[0];
+
+	dev_dbg(hdev->dev,
+		"Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n",
+		master_sob_base, cprop->mstr_sob_mask[0],
+		cprop->next_sob_group_val[stream],
+		master_monitor, queue_id);
+
+	wait_prop.data = (void *) job->patched_cb;
+	wait_prop.sob_base = master_sob_base;
+	wait_prop.sob_mask = cprop->mstr_sob_mask[0];
+	wait_prop.sob_val = cprop->next_sob_group_val[stream];
+	wait_prop.mon_id = master_monitor;
+	wait_prop.q_idx = queue_id;
+	wait_prop.size = cb_size;
+	cb_size += gaudi_gen_wait_cb(hdev, &wait_prop);
+
+	master_sob_base += HL_MAX_SOBS_PER_MONITOR;
+	master_monitor = prop->collective_mstr_mon_id[1];
+
+	dev_dbg(hdev->dev,
+		"Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n",
+		master_sob_base, cprop->mstr_sob_mask[1],
+		cprop->next_sob_group_val[stream],
+		master_monitor, queue_id);
+
+	wait_prop.sob_base = master_sob_base;
+	wait_prop.sob_mask = cprop->mstr_sob_mask[1];
+	wait_prop.mon_id = master_monitor;
+	wait_prop.size = cb_size;
+	cb_size += gaudi_gen_wait_cb(hdev, &wait_prop);
+}
+
+static void gaudi_collective_slave_init_job(struct hl_device *hdev,
+		struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
+{
+	struct hl_gen_wait_properties wait_prop;
+	struct hl_sync_stream_properties *prop;
+	u32 queue_id, cb_size = 0;
+
+	queue_id = job->hw_queue_id;
+	prop = &hdev->kernel_queues[queue_id].sync_stream_prop;
+
+	/* Add to wait CBs using slave monitor */
+	wait_prop.data = (void *) job->user_cb;
+	wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
+	wait_prop.sob_mask = 0x1;
+	wait_prop.sob_val = cs_cmpl->sob_val;
+	wait_prop.mon_id = prop->collective_slave_mon_id;
+	wait_prop.q_idx = queue_id;
+	wait_prop.size = cb_size;
+
+	dev_dbg(hdev->dev,
+		"Generate slave wait CB, sob %d, val:0x%x, mon %d, q %d\n",
+		cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
+		prop->collective_slave_mon_id, queue_id);
+
+	cb_size += gaudi_gen_wait_cb(hdev, &wait_prop);
+
+	dev_dbg(hdev->dev,
+		"generate signal CB, sob_id: %d, sob val: 1, q_idx: %d\n",
+		prop->collective_sob_id, queue_id);
+
+	cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
+			prop->collective_sob_id, cb_size);
+}
+
+static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
+{
+	struct hl_cs_compl *signal_cs_cmpl =
+		container_of(cs->signal_fence, struct hl_cs_compl, base_fence);
+	struct hl_cs_compl *cs_cmpl =
+		container_of(cs->fence, struct hl_cs_compl, base_fence);
+	struct gaudi_collective_properties *cprop;
+	u32 stream, queue_id, sob_group_offset;
+	struct gaudi_device *gaudi;
+	struct hl_device *hdev;
+	struct hl_cs_job *job;
+	struct hl_ctx *ctx;
+
+	ctx = cs->ctx;
+	hdev = ctx->hdev;
+	gaudi = hdev->asic_specific;
+	cprop = &gaudi->collective_props;
+
+	/* copy the SOB id and value of the signal CS */
+	cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+	cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+
+	/* Calculate the stream from collective master queue (1st job) */
+	job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node);
+	stream = job->hw_queue_id % 4;
+	sob_group_offset =
+		stream * HL_RSVD_SOBS + cprop->curr_sob_group_idx[stream];
+
+	list_for_each_entry(job, &cs->job_list, cs_node) {
+		queue_id = job->hw_queue_id;
+
+		if (hdev->kernel_queues[queue_id].collective_mode ==
+				HL_COLLECTIVE_MASTER)
+			gaudi_collective_master_init_job(hdev, job, stream,
+						sob_group_offset);
+		else
+			gaudi_collective_slave_init_job(hdev, job, cs_cmpl);
+	}
+
+	cs_cmpl->sob_group = sob_group_offset;
+
+	/* Handle sob group kref and wraparound */
+	kref_get(&cprop->hw_sob_group[sob_group_offset].kref);
+	cprop->next_sob_group_val[stream]++;
+
+	if (cprop->next_sob_group_val[stream] == HL_MAX_SOB_VAL) {
+		/*
+		 * Decrement as we reached the max value.
+		 * The release function won't be called here as we've
+		 * just incremented the refcount.
+		 */
+		kref_put(&cprop->hw_sob_group[sob_group_offset].kref,
+				gaudi_sob_group_reset_error);
+		cprop->next_sob_group_val[stream] = 1;
+		/* only two SOBs are currently in use */
+		cprop->curr_sob_group_idx[stream] =
+			(cprop->curr_sob_group_idx[stream] + 1) &
+							(HL_RSVD_SOBS - 1);
+
+		gaudi_collective_map_sobs(hdev, stream);
+
+		dev_dbg(hdev->dev, "switched to SOB group %d, stream: %d\n",
+				cprop->curr_sob_group_idx[stream], stream);
+	}
+
+	/* Increment kref since all slave queues are now waiting on it */
+	kref_get(&cs_cmpl->hw_sob->kref);
+	/*
+	 * Must put the signal fence after the SOB refcnt increment so
+	 * the SOB refcnt won't turn 0 and reset the SOB before the
+	 * wait CS was submitted.
+	 */
+	mb();
+	hl_fence_put(cs->signal_fence);
+	cs->signal_fence = NULL;
+}
+
+static int gaudi_collective_wait_create_job(struct hl_device *hdev,
+		struct hl_ctx *ctx, struct hl_cs *cs,
+		enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id)
+{
+	struct hw_queue_properties *hw_queue_prop;
+	struct hl_cs_counters_atomic *cntr;
+	struct hl_cs_job *job;
+	struct hl_cb *cb;
+	u32 cb_size;
+	bool patched_cb;
+
+	cntr = &hdev->aggregated_cs_counters;
+
+	if (mode == HL_COLLECTIVE_MASTER) {
+		/* CB size of collective master queue contains
+		 * 4 msg short packets for monitor 1 configuration
+		 * 1 fence packet
+		 * 4 msg short packets for monitor 2 configuration
+		 * 1 fence packet
+		 * 2 msg prot packets for completion and MSI-X
+		 */
+		cb_size = sizeof(struct packet_msg_short) * 8 +
+				sizeof(struct packet_fence) * 2 +
+				sizeof(struct packet_msg_prot) * 2;
+		patched_cb = true;
+	} else {
+		/* CB size of collective slave queues contains
+		 * 4 msg short packets for monitor configuration
+		 * 1 fence packet
+		 * 1 additional msg short packet for sob signal
+		 */
+		cb_size = sizeof(struct packet_msg_short) * 5 +
+				sizeof(struct packet_fence);
+		patched_cb = false;
+	}
+
+	hw_queue_prop = &hdev->asic_prop.hw_queues_props[queue_id];
+	job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true);
+	if (!job) {
+		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+		atomic64_inc(&cntr->out_of_mem_drop_cnt);
+		dev_err(hdev->dev, "Failed to allocate a new job\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate internal mapped CB for non patched CBs */
+	cb = hl_cb_kernel_create(hdev, cb_size,
+			hdev->mmu_enable && !patched_cb);
+	if (!cb) {
+		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+		atomic64_inc(&cntr->out_of_mem_drop_cnt);
+		kfree(job);
+		return -EFAULT;
+	}
+
+	job->id = 0;
+	job->cs = cs;
+	job->user_cb = cb;
+	atomic_inc(&job->user_cb->cs_cnt);
+	job->user_cb_size = cb_size;
+	job->hw_queue_id = queue_id;
+
+	/*
+	 * No need for parsing, the user CB is the patched CB.
+	 * We call hl_cb_destroy() for two reasons - we don't need the
+	 * CB in the CB idr anymore, and we want to decrement its refcount
+	 * as it was incremented inside hl_cb_kernel_create().
+	 */
+	if (patched_cb)
+		job->patched_cb = job->user_cb;
+	else
+		job->patched_cb = NULL;
+
+	job->job_cb_size = job->user_cb_size;
+	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+	/* increment refcount as for external queues we get completion */
+	if (hw_queue_prop->type == QUEUE_TYPE_EXT)
+		cs_get(cs);
+
+	cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+
+	list_add_tail(&job->cs_node, &cs->job_list);
+
+	hl_debugfs_add_job(hdev, job);
+
+	return 0;
+}
+
+static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
+		struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
+		u32 collective_engine_id)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	struct hw_queue_properties *hw_queue_prop;
+	u32 queue_id, collective_queue, num_jobs;
+	u32 stream, nic_queue, nic_idx = 0;
+	bool skip;
+	int i, rc;
+
+	/* Verify wait queue id is configured as master */
+	hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id];
+	if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
+		dev_err(hdev->dev,
+			"Queue %d is not configured as collective master\n",
+			wait_queue_id);
+		return -EINVAL;
+	}
+
+	/* Verify engine id is supported */
+	if (collective_engine_id != GAUDI_ENGINE_ID_DMA_5 &&
+			collective_engine_id != GAUDI_ENGINE_ID_TPC_7) {
+		dev_err(hdev->dev,
+			"Collective wait does not support engine %u\n",
+			collective_engine_id);
+		return -EINVAL;
+	}
+
+	stream = wait_queue_id % 4;
+
+	if (collective_engine_id == GAUDI_ENGINE_ID_DMA_5)
+		collective_queue = GAUDI_QUEUE_ID_DMA_5_0 + stream;
+	else
+		collective_queue = GAUDI_QUEUE_ID_TPC_7_0 + stream;
+
+	num_jobs = NUMBER_OF_SOBS_IN_GRP + 1;
+	nic_queue = GAUDI_QUEUE_ID_NIC_0_0 + stream;
+
+	/* The first job goes to the collective master queue; it will wait
+	 * for the collective slave queues to finish execution.
+	 * The synchronization is done using two monitors:
+	 * First monitor for NICs 0-7, second monitor for NICs 8-9 and the
+	 * reduction engine (DMA5/TPC7).
+	 *
+	 * The rest of the jobs go to the collective slave queues, which will
+	 * all wait for the user to signal sob 'cs_cmpl->sob_val'.
+	 */
+	for (i = 0 ; i < num_jobs ; i++) {
+		if (i == 0) {
+			queue_id = wait_queue_id;
+			rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
+				HL_COLLECTIVE_MASTER, queue_id, wait_queue_id);
+		} else {
+			if (nic_idx < NIC_NUMBER_OF_ENGINES) {
+				if (gaudi->hw_cap_initialized &
+					BIT(HW_CAP_NIC_SHIFT + nic_idx))
+					skip = false;
+				else
+					skip = true;
+
+				queue_id = nic_queue;
+				nic_queue += 4;
+				nic_idx++;
+
+				if (skip)
+					continue;
+			} else {
+				queue_id = collective_queue;
+			}
+
+			rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
+				HL_COLLECTIVE_SLAVE, queue_id, wait_queue_id);
+		}
+
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
 static int gaudi_late_init(struct hl_device *hdev)
 {
 	struct gaudi_device *gaudi = hdev->asic_specific;
@@ -797,6 +1372,27 @@ static int gaudi_late_init(struct hl_device *hdev)
 		return rc;
 	}
 
+	if ((hdev->card_type == cpucp_card_type_pci) &&
+			(hdev->nic_ports_mask & 0x3)) {
+		dev_info(hdev->dev,
+			"PCI card detected, only 8 ports are enabled\n");
+		hdev->nic_ports_mask &= ~0x3;
+
+		/* Stop and disable unused NIC QMANs */
+		WREG32(mmNIC0_QM0_GLBL_CFG1, NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+					NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+					NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+		WREG32(mmNIC0_QM1_GLBL_CFG1, NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+					NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+					NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+		WREG32(mmNIC0_QM0_GLBL_CFG0, 0);
+		WREG32(mmNIC0_QM1_GLBL_CFG0, 0);
+
+		gaudi->hw_cap_initialized &= ~(HW_CAP_NIC0 | HW_CAP_NIC1);
+	}
+
 	rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS);
 	if (rc) {
 		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
@@ -805,7 +1401,11 @@ static int gaudi_late_init(struct hl_device *hdev)
 
 	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
 
-	gaudi_fetch_psoc_frequency(hdev);
+	rc = gaudi_fetch_psoc_frequency(hdev);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
+		goto disable_pci_access;
+	}
 
 	rc = gaudi_mmu_clear_pgt_range(hdev);
 	if (rc) {
@@ -819,6 +1419,12 @@ static int gaudi_late_init(struct hl_device *hdev)
 		goto disable_pci_access;
 	}
 
+	rc = gaudi_collective_init(hdev);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to init collective\n");
+		goto disable_pci_access;
+	}
+
 	return 0;
 
 disable_pci_access:
@@ -892,7 +1498,8 @@ static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
 	hdev->cpu_pci_msb_addr =
 		GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address);
 
-	GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address);
+	if (hdev->asic_prop.fw_security_disabled)
+		GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address);
 
 free_dma_mem_arr:
 	for (j = 0 ; j < i ; j++)
@@ -933,8 +1540,7 @@ static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev)
 		q = &gaudi->internal_qmans[i];
 
 		switch (i) {
-		case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_4_3:
-		case GAUDI_QUEUE_ID_DMA_6_0 ... GAUDI_QUEUE_ID_DMA_7_3:
+		case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_7_3:
 			q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES;
 			break;
 		case GAUDI_QUEUE_ID_MME_0_0 ... GAUDI_QUEUE_ID_MME_1_3:
@@ -943,6 +1549,9 @@ static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev)
 		case GAUDI_QUEUE_ID_TPC_0_0 ... GAUDI_QUEUE_ID_TPC_7_3:
 			q->pq_size = TPC_QMAN_SIZE_IN_BYTES;
 			break;
+		case GAUDI_QUEUE_ID_NIC_0_0 ... GAUDI_QUEUE_ID_NIC_9_3:
+			q->pq_size = NIC_QMAN_SIZE_IN_BYTES;
+			break;
 		default:
 			dev_err(hdev->dev, "Bad internal queue index %d", i);
 			rc = -EINVAL;
@@ -1044,8 +1653,9 @@ static int gaudi_sw_init(struct hl_device *hdev)
 free_cpu_accessible_dma_pool:
 	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
 free_cpu_dma_mem:
-	GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
-				hdev->cpu_pci_msb_addr);
+	if (hdev->asic_prop.fw_security_disabled)
+		GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
+					hdev->cpu_pci_msb_addr);
 	hdev->asic_funcs->asic_dma_free_coherent(hdev,
 			HL_CPU_ACCESSIBLE_MEM_SIZE,
 			hdev->cpu_accessible_dma_mem,
@@ -1065,8 +1675,10 @@ static int gaudi_sw_fini(struct hl_device *hdev)
 
 	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
 
-	GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
+	if (hdev->asic_prop.fw_security_disabled)
+		GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
 					hdev->cpu_pci_msb_addr);
+
 	hdev->asic_funcs->asic_dma_free_coherent(hdev,
 			HL_CPU_ACCESSIBLE_MEM_SIZE,
 			hdev->cpu_accessible_dma_mem,
@@ -1120,7 +1732,7 @@ static int gaudi_enable_msi_single(struct hl_device *hdev)
 {
 	int rc, irq;
 
-	dev_info(hdev->dev, "Working in single MSI IRQ mode\n");
+	dev_dbg(hdev->dev, "Working in single MSI IRQ mode\n");
 
 	irq = gaudi_pci_irq_vector(hdev, 0, false);
 	rc = request_irq(irq, gaudi_irq_handler_single, 0,
@@ -1252,6 +1864,14 @@ static void gaudi_init_scrambler_sram(struct hl_device *hdev)
 {
 	struct gaudi_device *gaudi = hdev->asic_specific;
 
+	if (!hdev->asic_prop.fw_security_disabled)
+		return;
+
+	if (hdev->asic_prop.fw_security_status_valid &&
+			(hdev->asic_prop.fw_app_security_map &
+					CPU_BOOT_DEV_STS0_SRAM_SCR_EN))
+		return;
+
 	if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER)
 		return;
 
@@ -1316,6 +1936,14 @@ static void gaudi_init_scrambler_hbm(struct hl_device *hdev)
 {
 	struct gaudi_device *gaudi = hdev->asic_specific;
 
+	if (!hdev->asic_prop.fw_security_disabled)
+		return;
+
+	if (hdev->asic_prop.fw_security_status_valid &&
+			(hdev->asic_prop.fw_boot_cpu_security_map &
+					CPU_BOOT_DEV_STS0_DRAM_SCR_EN))
+		return;
+
 	if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER)
 		return;
 
@@ -1378,6 +2006,14 @@ static void gaudi_init_scrambler_hbm(struct hl_device *hdev)
 
 static void gaudi_init_e2e(struct hl_device *hdev)
 {
+	if (!hdev->asic_prop.fw_security_disabled)
+		return;
+
+	if (hdev->asic_prop.fw_security_status_valid &&
+			(hdev->asic_prop.fw_boot_cpu_security_map &
+					CPU_BOOT_DEV_STS0_E2E_CRED_EN))
+		return;
+
 	WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 247 >> 3);
 	WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 785 >> 3);
 	WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 49);
@@ -1745,6 +2381,14 @@ static void gaudi_init_hbm_cred(struct hl_device *hdev)
 {
 	uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
 
+	if (!hdev->asic_prop.fw_security_disabled)
+		return;
+
+	if (hdev->asic_prop.fw_security_status_valid &&
+			(hdev->asic_prop.fw_boot_cpu_security_map &
+					CPU_BOOT_DEV_STS0_HBM_CRED_EN))
+		return;
+
 	hbm0_wr = 0x33333333;
 	hbm0_rd = 0x77777777;
 	hbm1_wr = 0x55555555;
@@ -1803,7 +2447,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
 	int tpc_id, i;
 
 	gaudi_init_e2e(hdev);
-
 	gaudi_init_hbm_cred(hdev);
 
 	hdev->asic_funcs->disable_clock_gating(hdev);
@@ -1998,21 +2641,29 @@ static void gaudi_init_pci_dma_qmans(struct hl_device *hdev)
 static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
 					int qman_id, u64 qman_base_addr)
 {
-	u32 mtr_base_lo, mtr_base_hi;
-	u32 so_base_lo, so_base_hi;
+	u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
+	u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
 	u32 q_off, dma_qm_offset;
 	u32 dma_qm_err_cfg;
 
 	dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
 
-	mtr_base_lo = lower_32_bits(CFG_BASE +
-				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
-	mtr_base_hi = upper_32_bits(CFG_BASE +
+	mtr_base_en_lo = lower_32_bits(CFG_BASE +
+			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+	mtr_base_en_hi = upper_32_bits(CFG_BASE +
 				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
-	so_base_lo = lower_32_bits(CFG_BASE +
+	so_base_en_lo = lower_32_bits(CFG_BASE +
 				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
-	so_base_hi = upper_32_bits(CFG_BASE +
+	so_base_en_hi = upper_32_bits(CFG_BASE +
 				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+	mtr_base_ws_lo = lower_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+	mtr_base_ws_hi = upper_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+	so_base_ws_lo = lower_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
+	so_base_ws_hi = upper_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
 
 	q_off = dma_qm_offset + qman_id * 4;
 
@@ -2070,10 +2721,22 @@ static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
 				QMAN_INTERNAL_MAKE_TRUSTED);
 	}
 
-	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
-	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
-	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
-	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
+	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
+	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
+	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
+	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
+
+	/* Configure DMA5 CP_MSG_BASE 2/3 for sync stream collective */
+	if (gaudi_dma_assignment[dma_id] == GAUDI_ENGINE_ID_DMA_5) {
+		WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off,
+				mtr_base_ws_lo);
+		WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off,
+				mtr_base_ws_hi);
+		WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off,
+				so_base_ws_lo);
+		WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off,
+				so_base_ws_hi);
+	}
 }
 
 static void gaudi_init_hbm_dma_qmans(struct hl_device *hdev)
@@ -2236,22 +2899,33 @@ static void gaudi_init_mme_qmans(struct hl_device *hdev)
 static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
 				int qman_id, u64 qman_base_addr)
 {
-	u32 mtr_base_lo, mtr_base_hi;
-	u32 so_base_lo, so_base_hi;
+	u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
+	u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
 	u32 q_off, tpc_id;
 	u32 tpc_qm_err_cfg;
 
-	mtr_base_lo = lower_32_bits(CFG_BASE +
-				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
-	mtr_base_hi = upper_32_bits(CFG_BASE +
+	mtr_base_en_lo = lower_32_bits(CFG_BASE +
+			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+	mtr_base_en_hi = upper_32_bits(CFG_BASE +
 				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
-	so_base_lo = lower_32_bits(CFG_BASE +
+	so_base_en_lo = lower_32_bits(CFG_BASE +
 				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
-	so_base_hi = upper_32_bits(CFG_BASE +
+	so_base_en_hi = upper_32_bits(CFG_BASE +
 				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+	mtr_base_ws_lo = lower_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+	mtr_base_ws_hi = upper_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+	so_base_ws_lo = lower_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
+	so_base_ws_hi = upper_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+	q_off = tpc_offset + qman_id * 4;
+
+	tpc_id = tpc_offset /
+			(mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0);
 
-	q_off = tpc_offset + qman_id * 4;
-
 	if (qman_id < 4) {
 		WREG32(mmTPC0_QM_PQ_BASE_LO_0 + q_off,
 					lower_32_bits(qman_base_addr));
@@ -2277,9 +2951,6 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
 							QMAN_LDMA_DST_OFFSET);
 
 		/* Configure RAZWI IRQ */
-		tpc_id = tpc_offset /
-				(mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0);
-
 		tpc_qm_err_cfg = TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
 		if (hdev->stop_on_err) {
 			tpc_qm_err_cfg |=
@@ -2309,10 +2980,22 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
 				QMAN_INTERNAL_MAKE_TRUSTED);
 	}
 
-	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
-	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
-	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
-	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
+	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
+	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
+	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
+	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
+
+	/* Configure TPC7 CP_MSG_BASE 2/3 for sync stream collective */
+	if (tpc_id == 6) {
+		WREG32(mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off,
+				mtr_base_ws_lo);
+		WREG32(mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off,
+				mtr_base_ws_hi);
+		WREG32(mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off,
+				so_base_ws_lo);
+		WREG32(mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off,
+				so_base_ws_hi);
+	}
 }
 
 static void gaudi_init_tpc_qmans(struct hl_device *hdev)
@@ -2360,6 +3043,142 @@ static void gaudi_init_tpc_qmans(struct hl_device *hdev)
 	}
 }
 
+static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset,
+				int qman_id, u64 qman_base_addr, int nic_id)
+{
+	u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
+	u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
+	u32 q_off;
+	u32 nic_qm_err_cfg;
+
+	mtr_base_en_lo = lower_32_bits(CFG_BASE +
+			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+	mtr_base_en_hi = upper_32_bits(CFG_BASE +
+				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+	so_base_en_lo = lower_32_bits(CFG_BASE +
+				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+	so_base_en_hi = upper_32_bits(CFG_BASE +
+				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+	mtr_base_ws_lo = lower_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+	mtr_base_ws_hi = upper_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+	so_base_ws_lo = lower_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
+	so_base_ws_hi = upper_32_bits(CFG_BASE +
+				mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+	q_off = nic_offset + qman_id * 4;
+
+	WREG32(mmNIC0_QM0_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_base_addr));
+	WREG32(mmNIC0_QM0_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_base_addr));
+
+	WREG32(mmNIC0_QM0_PQ_SIZE_0 + q_off, ilog2(NIC_QMAN_LENGTH));
+	WREG32(mmNIC0_QM0_PQ_PI_0 + q_off, 0);
+	WREG32(mmNIC0_QM0_PQ_CI_0 + q_off, 0);
+
+	WREG32(mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 + q_off,
+							QMAN_LDMA_SIZE_OFFSET);
+	WREG32(mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
+							QMAN_LDMA_SRC_OFFSET);
+	WREG32(mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
+							QMAN_LDMA_DST_OFFSET);
+
+	WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
+	WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
+	WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
+	WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
+
+	/* Configure NIC CP_MSG_BASE 2/3 for sync stream collective */
+	WREG32(mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo);
+	WREG32(mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi);
+	WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
+	WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
+
+	if (qman_id == 0) {
+		/* Configure RAZWI IRQ */
+		nic_qm_err_cfg = NIC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
+		if (hdev->stop_on_err) {
+			nic_qm_err_cfg |=
+				NIC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
+		}
+
+		WREG32(mmNIC0_QM0_GLBL_ERR_CFG + nic_offset, nic_qm_err_cfg);
+		WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_LO + nic_offset,
+			lower_32_bits(CFG_BASE +
+				mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+		WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_HI + nic_offset,
+			upper_32_bits(CFG_BASE +
+				mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+		WREG32(mmNIC0_QM0_GLBL_ERR_WDATA + nic_offset,
+			gaudi_irq_map_table[GAUDI_EVENT_NIC0_QM0].cpu_id +
+									nic_id);
+
+		WREG32(mmNIC0_QM0_ARB_ERR_MSG_EN + nic_offset,
+				QM_ARB_ERR_MSG_EN_MASK);
+
+		/* Increase ARB WDT to support streams architecture */
+		WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset,
+				GAUDI_ARB_WDT_TIMEOUT);
+
+		WREG32(mmNIC0_QM0_GLBL_CFG1 + nic_offset, 0);
+		WREG32(mmNIC0_QM0_GLBL_PROT + nic_offset,
+				QMAN_INTERNAL_MAKE_TRUSTED);
+	}
+}
+
+static void gaudi_init_nic_qmans(struct hl_device *hdev)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	struct gaudi_internal_qman_info *q;
+	u64 qman_base_addr;
+	u32 nic_offset = 0;
+	u32 nic_delta_between_qmans =
+			mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
+	u32 nic_delta_between_nics =
+			mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
+	int i, nic_id, internal_q_index;
+
+	if (!hdev->nic_ports_mask)
+		return;
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC_MASK)
+		return;
+
+	dev_dbg(hdev->dev, "Initializing NIC QMANs\n");
+
+	for (nic_id = 0 ; nic_id < NIC_NUMBER_OF_ENGINES ; nic_id++) {
+		if (!(hdev->nic_ports_mask & (1 << nic_id))) {
+			nic_offset += nic_delta_between_qmans;
+			if (nic_id & 1) {
+				nic_offset -= (nic_delta_between_qmans * 2);
+				nic_offset += nic_delta_between_nics;
+			}
+			continue;
+		}
+
+		for (i = 0 ; i < QMAN_STREAMS ; i++) {
+			internal_q_index = GAUDI_QUEUE_ID_NIC_0_0 +
+						nic_id * QMAN_STREAMS + i;
+			q = &gaudi->internal_qmans[internal_q_index];
+			qman_base_addr = (u64) q->pq_dma_addr;
+			gaudi_init_nic_qman(hdev, nic_offset, (i & 0x3),
+						qman_base_addr, nic_id);
+		}
+
+		/* Enable the QMAN */
+		WREG32(mmNIC0_QM0_GLBL_CFG0 + nic_offset, NIC_QMAN_ENABLE);
+
+		nic_offset += nic_delta_between_qmans;
+		if (nic_id & 1) {
+			nic_offset -= (nic_delta_between_qmans * 2);
+			nic_offset += nic_delta_between_nics;
+		}
+
+		gaudi->hw_cap_initialized |= 1 << (HW_CAP_NIC_SHIFT + nic_id);
+	}
+}
+
 static void gaudi_disable_pci_dma_qmans(struct hl_device *hdev)
 {
 	struct gaudi_device *gaudi = hdev->asic_specific;
@@ -2412,6 +3231,30 @@ static void gaudi_disable_tpc_qmans(struct hl_device *hdev)
 	}
 }
 
+static void gaudi_disable_nic_qmans(struct hl_device *hdev)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	u32 nic_mask, nic_offset = 0;
+	u32 nic_delta_between_qmans =
+			mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
+	u32 nic_delta_between_nics =
+			mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
+	int nic_id;
+
+	for (nic_id = 0 ; nic_id < NIC_NUMBER_OF_ENGINES ; nic_id++) {
+		nic_mask = 1 << (HW_CAP_NIC_SHIFT + nic_id);
+
+		if (gaudi->hw_cap_initialized & nic_mask)
+			WREG32(mmNIC0_QM0_GLBL_CFG0 + nic_offset, 0);
+
+		nic_offset += nic_delta_between_qmans;
+		if (nic_id & 1) {
+			nic_offset -= (nic_delta_between_qmans * 2);
+			nic_offset += nic_delta_between_nics;
+		}
+	}
+}
+
 static void gaudi_stop_pci_dma_qmans(struct hl_device *hdev)
 {
 	struct gaudi_device *gaudi = hdev->asic_specific;
@@ -2470,6 +3313,73 @@ static void gaudi_stop_tpc_qmans(struct hl_device *hdev)
 	WREG32(mmTPC7_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
 }
 
+static void gaudi_stop_nic_qmans(struct hl_device *hdev)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+
+	/* Stop upper CPs of QMANs */
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC0)
+		WREG32(mmNIC0_QM0_GLBL_CFG1,
+				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC1)
+		WREG32(mmNIC0_QM1_GLBL_CFG1,
+				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC2)
+		WREG32(mmNIC1_QM0_GLBL_CFG1,
+				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC3)
+		WREG32(mmNIC1_QM1_GLBL_CFG1,
+				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC4)
+		WREG32(mmNIC2_QM0_GLBL_CFG1,
+				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC5)
+		WREG32(mmNIC2_QM1_GLBL_CFG1,
+				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC6)
+		WREG32(mmNIC3_QM0_GLBL_CFG1,
+				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC7)
+		WREG32(mmNIC3_QM1_GLBL_CFG1,
+				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC8)
+		WREG32(mmNIC4_QM0_GLBL_CFG1,
+				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+
+	if (gaudi->hw_cap_initialized & HW_CAP_NIC9)
+		WREG32(mmNIC4_QM1_GLBL_CFG1,
+				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
+				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
+}
+
 static void gaudi_pci_dma_stall(struct hl_device *hdev)
 {
 	struct gaudi_device *gaudi = hdev->asic_specific;
@@ -2659,7 +3569,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
 	else
 		wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;
 
-
+	gaudi_stop_nic_qmans(hdev);
 	gaudi_stop_mme_qmans(hdev);
 	gaudi_stop_tpc_qmans(hdev);
 	gaudi_stop_hbm_dma_qmans(hdev);
@@ -2676,6 +3586,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
 
 	msleep(wait_timeout_ms);
 
+	gaudi_disable_nic_qmans(hdev);
 	gaudi_disable_mme_qmans(hdev);
 	gaudi_disable_tpc_qmans(hdev);
 	gaudi_disable_hbm_dma_qmans(hdev);
@@ -2699,8 +3610,6 @@ static int gaudi_mmu_init(struct hl_device *hdev)
 	if (gaudi->hw_cap_initialized & HW_CAP_MMU)
 		return 0;
 
-	hdev->dram_supports_virtual_memory = false;
-
 	for (i = 0 ; i < prop->max_asid ; i++) {
 		hop0_addr = prop->mmu_pgt_addr +
 				(i * prop->mmu_hop_table_size);
@@ -2748,7 +3657,7 @@ static int gaudi_load_firmware_to_device(struct hl_device *hdev)
 
 	dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET;
 
-	return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst);
+	return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst, 0, 0);
 }
 
 static int gaudi_load_boot_fit_to_device(struct hl_device *hdev)
@@ -2757,10 +3666,10 @@ static int gaudi_load_boot_fit_to_device(struct hl_device *hdev)
 
 	dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
 
-	return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst);
+	return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst, 0, 0);
 }
 
-static void gaudi_read_device_fw_version(struct hl_device *hdev,
+static int gaudi_read_device_fw_version(struct hl_device *hdev,
 					enum hl_fw_component fwc)
 {
 	const char *name;
@@ -2780,7 +3689,7 @@ static void gaudi_read_device_fw_version(struct hl_device *hdev,
 		break;
 	default:
 		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
-		return;
+		return -EIO;
 	}
 
 	ver_off &= ~((u32)SRAM_BASE_ADDR);
@@ -2792,7 +3701,10 @@ static void gaudi_read_device_fw_version(struct hl_device *hdev,
 		dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
 								name, ver_off);
 		strcpy(dest, "unavailable");
+		return -EIO;
 	}
+
+	return 0;
 }
 
 static int gaudi_init_cpu(struct hl_device *hdev)
@@ -2810,12 +3722,13 @@ static int gaudi_init_cpu(struct hl_device *hdev)
 	 * The device CPU works with 40 bits addresses.
 	 * This register sets the extension to 50 bits.
 	 */
-	WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr);
+	if (hdev->asic_prop.fw_security_disabled)
+		WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr);
 
 	rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
 			mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU,
 			mmCPU_CMD_STATUS_TO_HOST,
-			mmCPU_BOOT_ERR0,
+			mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
 			!hdev->bmc_enable, GAUDI_CPU_TIMEOUT_USEC,
 			GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
 
@@ -2895,17 +3808,19 @@ static void gaudi_pre_hw_init(struct hl_device *hdev)
 	/* Perform read from the device to make sure device is up */
 	RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
 
-	/* Set the access through PCI bars (Linux driver only) as
-	 * secured
-	 */
-	WREG32(mmPCIE_WRAP_LBW_PROT_OVR,
-			(PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK |
-			PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK));
+	if (hdev->asic_prop.fw_security_disabled) {
+		/* Set the access through PCI bars (Linux driver only) as
+		 * secured
+		 */
+		WREG32(mmPCIE_WRAP_LBW_PROT_OVR,
+				(PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK |
+				PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK));
 
-	/* Perform read to flush the waiting writes to ensure
-	 * configuration was set in the device
-	 */
-	RREG32(mmPCIE_WRAP_LBW_PROT_OVR);
+		/* Perform read to flush the waiting writes to ensure
+		 * configuration was set in the device
+		 */
+		RREG32(mmPCIE_WRAP_LBW_PROT_OVR);
+	}
 
 	/*
 	 * Let's mark in the H/W that we have reached this point. We check
@@ -2914,40 +3829,12 @@ static void gaudi_pre_hw_init(struct hl_device *hdev)
 	 * cleared by the H/W upon H/W reset
 	 */
 	WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
-
-	/* Configure the reset registers. Must be done as early as possible
-	 * in case we fail during H/W initialization
-	 */
-	WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H,
-					(CFG_RST_H_DMA_MASK |
-					CFG_RST_H_MME_MASK |
-					CFG_RST_H_SM_MASK |
-					CFG_RST_H_TPC_7_MASK));
-
-	WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK);
-
-	WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H,
-					(CFG_RST_H_HBM_MASK |
-					CFG_RST_H_TPC_7_MASK |
-					CFG_RST_H_NIC_MASK |
-					CFG_RST_H_SM_MASK |
-					CFG_RST_H_DMA_MASK |
-					CFG_RST_H_MME_MASK |
-					CFG_RST_H_CPU_MASK |
-					CFG_RST_H_MMU_MASK));
-
-	WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L,
-					(CFG_RST_L_IF_MASK |
-					CFG_RST_L_PSOC_MASK |
-					CFG_RST_L_TPC_MASK));
 }
 
 static int gaudi_hw_init(struct hl_device *hdev)
 {
 	int rc;
 
-	dev_info(hdev->dev, "Starting initialization of H/W\n");
-
 	gaudi_pre_hw_init(hdev);
 
 	gaudi_init_pci_dma_qmans(hdev);
@@ -2978,11 +3865,13 @@ static int gaudi_hw_init(struct hl_device *hdev)
 
 	gaudi_init_tpc_qmans(hdev);
 
+	gaudi_init_nic_qmans(hdev);
+
 	hdev->asic_funcs->set_clock_gating(hdev);
 
 	gaudi_enable_timestamp(hdev);
 
-	/* MSI must be enabled before CPU queues are initialized */
+	/* MSI must be enabled before CPU queues and NICs are initialized */
 	rc = gaudi_enable_msi(hdev);
 	if (rc)
 		goto disable_queues;
@@ -3012,7 +3901,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
 static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
 {
 	struct gaudi_device *gaudi = hdev->asic_specific;
-	u32 status, reset_timeout_ms, cpu_timeout_ms, boot_strap = 0;
+	u32 status, reset_timeout_ms, cpu_timeout_ms;
 
 	if (!hard_reset) {
 		dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
@@ -3030,35 +3919,60 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
 	/* Set device to handle FLR by H/W as we will put the device CPU to
 	 * halt mode
 	 */
-	WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK |
+	if (hdev->asic_prop.fw_security_disabled &&
+				!hdev->asic_prop.hard_reset_done_by_fw)
+		WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK |
 					PCIE_AUX_FLR_CTRL_INT_MASK_MASK));
 
 	/* I don't know what is the state of the CPU so make sure it is
 	 * stopped in any means necessary
 	 */
 	WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
-	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
 
-	msleep(cpu_timeout_ms);
-
-	/* Tell ASIC not to re-initialize PCIe */
-	WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
-
-	boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
-
-	/* H/W bug WA:
-	 * rdata[31:0] = strap_read_val;
-	 * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0]
-	 */
-	boot_strap = (((boot_strap & 0x7FE00000) << 1) |
-			(boot_strap & 0x001FFFFF));
-	WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2);
+	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
 
-	/* Restart BTL/BLR upon hard-reset */
-	WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
+	if (hdev->asic_prop.fw_security_disabled &&
+				!hdev->asic_prop.hard_reset_done_by_fw) {
 
-	WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
+		/* Configure the reset registers. Must be done as early as
+		 * possible in case we fail during H/W initialization
+		 */
+		WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H,
+						(CFG_RST_H_DMA_MASK |
+						CFG_RST_H_MME_MASK |
+						CFG_RST_H_SM_MASK |
+						CFG_RST_H_TPC_7_MASK));
+
+		WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK);
+
+		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H,
+						(CFG_RST_H_HBM_MASK |
+						CFG_RST_H_TPC_7_MASK |
+						CFG_RST_H_NIC_MASK |
+						CFG_RST_H_SM_MASK |
+						CFG_RST_H_DMA_MASK |
+						CFG_RST_H_MME_MASK |
+						CFG_RST_H_CPU_MASK |
+						CFG_RST_H_MMU_MASK));
+
+		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L,
+						(CFG_RST_L_IF_MASK |
+						CFG_RST_L_PSOC_MASK |
+						CFG_RST_L_TPC_MASK));
+
+		msleep(cpu_timeout_ms);
+
+		/* Tell ASIC not to re-initialize PCIe */
+		WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
+
+		/* Restart BTL/BLR upon hard-reset */
+		if (hdev->asic_prop.fw_security_disabled)
+			WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
+
+		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
 			1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
+	}
+
 	dev_info(hdev->dev,
 		"Issued HARD reset command, going to wait %dms\n",
 		reset_timeout_ms);
@@ -3075,18 +3989,18 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
 			"Timeout while waiting for device to reset 0x%x\n",
 			status);
 
-	WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap);
-
-	gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
-					HW_CAP_HBM | HW_CAP_PCI_DMA |
-					HW_CAP_MME | HW_CAP_TPC_MASK |
-					HW_CAP_HBM_DMA | HW_CAP_PLL |
-					HW_CAP_MMU |
-					HW_CAP_SRAM_SCRAMBLER |
-					HW_CAP_HBM_SCRAMBLER |
-					HW_CAP_CLK_GATE);
+	if (gaudi) {
+		gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
+				HW_CAP_HBM | HW_CAP_PCI_DMA |
+				HW_CAP_MME | HW_CAP_TPC_MASK |
+				HW_CAP_HBM_DMA | HW_CAP_PLL |
+				HW_CAP_NIC_MASK | HW_CAP_MMU |
+				HW_CAP_SRAM_SCRAMBLER |
+				HW_CAP_HBM_SCRAMBLER |
+				HW_CAP_CLK_GATE);
 
-	memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
+		memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
+	}
 }
 
 static int gaudi_suspend(struct hl_device *hdev)
@@ -3164,21 +4078,21 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
 		break;
 
 	case GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3:
-		dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_3];
+		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4];
 		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
 		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
 		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
 		break;
 
 	case GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3:
-		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4];
+		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5];
 		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
 		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
 		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
 		break;
 
 	case GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3:
-		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5];
+		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_6];
 		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
 		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
 		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
@@ -3231,124 +4145,284 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
 		db_reg_offset = mmTPC0_QM_PQ_PI_1;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_0_2:
-		db_reg_offset = mmTPC0_QM_PQ_PI_2;
+	case GAUDI_QUEUE_ID_TPC_0_2:
+		db_reg_offset = mmTPC0_QM_PQ_PI_2;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_0_3:
+		db_reg_offset = mmTPC0_QM_PQ_PI_3;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_1_0:
+		db_reg_offset = mmTPC1_QM_PQ_PI_0;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_1_1:
+		db_reg_offset = mmTPC1_QM_PQ_PI_1;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_1_2:
+		db_reg_offset = mmTPC1_QM_PQ_PI_2;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_1_3:
+		db_reg_offset = mmTPC1_QM_PQ_PI_3;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_2_0:
+		db_reg_offset = mmTPC2_QM_PQ_PI_0;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_2_1:
+		db_reg_offset = mmTPC2_QM_PQ_PI_1;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_2_2:
+		db_reg_offset = mmTPC2_QM_PQ_PI_2;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_2_3:
+		db_reg_offset = mmTPC2_QM_PQ_PI_3;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_3_0:
+		db_reg_offset = mmTPC3_QM_PQ_PI_0;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_3_1:
+		db_reg_offset = mmTPC3_QM_PQ_PI_1;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_3_2:
+		db_reg_offset = mmTPC3_QM_PQ_PI_2;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_3_3:
+		db_reg_offset = mmTPC3_QM_PQ_PI_3;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_4_0:
+		db_reg_offset = mmTPC4_QM_PQ_PI_0;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_4_1:
+		db_reg_offset = mmTPC4_QM_PQ_PI_1;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_4_2:
+		db_reg_offset = mmTPC4_QM_PQ_PI_2;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_4_3:
+		db_reg_offset = mmTPC4_QM_PQ_PI_3;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_5_0:
+		db_reg_offset = mmTPC5_QM_PQ_PI_0;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_5_1:
+		db_reg_offset = mmTPC5_QM_PQ_PI_1;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_5_2:
+		db_reg_offset = mmTPC5_QM_PQ_PI_2;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_5_3:
+		db_reg_offset = mmTPC5_QM_PQ_PI_3;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_6_0:
+		db_reg_offset = mmTPC6_QM_PQ_PI_0;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_6_1:
+		db_reg_offset = mmTPC6_QM_PQ_PI_1;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_6_2:
+		db_reg_offset = mmTPC6_QM_PQ_PI_2;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_6_3:
+		db_reg_offset = mmTPC6_QM_PQ_PI_3;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_7_0:
+		db_reg_offset = mmTPC7_QM_PQ_PI_0;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_7_1:
+		db_reg_offset = mmTPC7_QM_PQ_PI_1;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_7_2:
+		db_reg_offset = mmTPC7_QM_PQ_PI_2;
+		break;
+
+	case GAUDI_QUEUE_ID_TPC_7_3:
+		db_reg_offset = mmTPC7_QM_PQ_PI_3;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_0_0:
+		db_reg_offset = mmNIC0_QM0_PQ_PI_0;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_0_1:
+		db_reg_offset = mmNIC0_QM0_PQ_PI_1;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_0_2:
+		db_reg_offset = mmNIC0_QM0_PQ_PI_2;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_0_3:
+		db_reg_offset = mmNIC0_QM0_PQ_PI_3;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_1_0:
+		db_reg_offset = mmNIC0_QM1_PQ_PI_0;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_1_1:
+		db_reg_offset = mmNIC0_QM1_PQ_PI_1;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_1_2:
+		db_reg_offset = mmNIC0_QM1_PQ_PI_2;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_1_3:
+		db_reg_offset = mmNIC0_QM1_PQ_PI_3;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_2_0:
+		db_reg_offset = mmNIC1_QM0_PQ_PI_0;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_2_1:
+		db_reg_offset = mmNIC1_QM0_PQ_PI_1;
+		break;
+
+	case GAUDI_QUEUE_ID_NIC_2_2:
+		db_reg_offset = mmNIC1_QM0_PQ_PI_2;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_0_3:
-		db_reg_offset = mmTPC0_QM_PQ_PI_3;
+	case GAUDI_QUEUE_ID_NIC_2_3:
+		db_reg_offset = mmNIC1_QM0_PQ_PI_3;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_1_0:
-		db_reg_offset = mmTPC1_QM_PQ_PI_0;
+	case GAUDI_QUEUE_ID_NIC_3_0:
+		db_reg_offset = mmNIC1_QM1_PQ_PI_0;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_1_1:
-		db_reg_offset = mmTPC1_QM_PQ_PI_1;
+	case GAUDI_QUEUE_ID_NIC_3_1:
+		db_reg_offset = mmNIC1_QM1_PQ_PI_1;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_1_2:
-		db_reg_offset = mmTPC1_QM_PQ_PI_2;
+	case GAUDI_QUEUE_ID_NIC_3_2:
+		db_reg_offset = mmNIC1_QM1_PQ_PI_2;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_1_3:
-		db_reg_offset = mmTPC1_QM_PQ_PI_3;
+	case GAUDI_QUEUE_ID_NIC_3_3:
+		db_reg_offset = mmNIC1_QM1_PQ_PI_3;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_2_0:
-		db_reg_offset = mmTPC2_QM_PQ_PI_0;
+	case GAUDI_QUEUE_ID_NIC_4_0:
+		db_reg_offset = mmNIC2_QM0_PQ_PI_0;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_2_1:
-		db_reg_offset = mmTPC2_QM_PQ_PI_1;
+	case GAUDI_QUEUE_ID_NIC_4_1:
+		db_reg_offset = mmNIC2_QM0_PQ_PI_1;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_2_2:
-		db_reg_offset = mmTPC2_QM_PQ_PI_2;
+	case GAUDI_QUEUE_ID_NIC_4_2:
+		db_reg_offset = mmNIC2_QM0_PQ_PI_2;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_2_3:
-		db_reg_offset = mmTPC2_QM_PQ_PI_3;
+	case GAUDI_QUEUE_ID_NIC_4_3:
+		db_reg_offset = mmNIC2_QM0_PQ_PI_3;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_3_0:
-		db_reg_offset = mmTPC3_QM_PQ_PI_0;
+	case GAUDI_QUEUE_ID_NIC_5_0:
+		db_reg_offset = mmNIC2_QM1_PQ_PI_0;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_3_1:
-		db_reg_offset = mmTPC3_QM_PQ_PI_1;
+	case GAUDI_QUEUE_ID_NIC_5_1:
+		db_reg_offset = mmNIC2_QM1_PQ_PI_1;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_3_2:
-		db_reg_offset = mmTPC3_QM_PQ_PI_2;
+	case GAUDI_QUEUE_ID_NIC_5_2:
+		db_reg_offset = mmNIC2_QM1_PQ_PI_2;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_3_3:
-		db_reg_offset = mmTPC3_QM_PQ_PI_3;
+	case GAUDI_QUEUE_ID_NIC_5_3:
+		db_reg_offset = mmNIC2_QM1_PQ_PI_3;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_4_0:
-		db_reg_offset = mmTPC4_QM_PQ_PI_0;
+	case GAUDI_QUEUE_ID_NIC_6_0:
+		db_reg_offset = mmNIC3_QM0_PQ_PI_0;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_4_1:
-		db_reg_offset = mmTPC4_QM_PQ_PI_1;
+	case GAUDI_QUEUE_ID_NIC_6_1:
+		db_reg_offset = mmNIC3_QM0_PQ_PI_1;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_4_2:
-		db_reg_offset = mmTPC4_QM_PQ_PI_2;
+	case GAUDI_QUEUE_ID_NIC_6_2:
+		db_reg_offset = mmNIC3_QM0_PQ_PI_2;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_4_3:
-		db_reg_offset = mmTPC4_QM_PQ_PI_3;
+	case GAUDI_QUEUE_ID_NIC_6_3:
+		db_reg_offset = mmNIC3_QM0_PQ_PI_3;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_5_0:
-		db_reg_offset = mmTPC5_QM_PQ_PI_0;
+	case GAUDI_QUEUE_ID_NIC_7_0:
+		db_reg_offset = mmNIC3_QM1_PQ_PI_0;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_5_1:
-		db_reg_offset = mmTPC5_QM_PQ_PI_1;
+	case GAUDI_QUEUE_ID_NIC_7_1:
+		db_reg_offset = mmNIC3_QM1_PQ_PI_1;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_5_2:
-		db_reg_offset = mmTPC5_QM_PQ_PI_2;
+	case GAUDI_QUEUE_ID_NIC_7_2:
+		db_reg_offset = mmNIC3_QM1_PQ_PI_2;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_5_3:
-		db_reg_offset = mmTPC5_QM_PQ_PI_3;
+	case GAUDI_QUEUE_ID_NIC_7_3:
+		db_reg_offset = mmNIC3_QM1_PQ_PI_3;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_6_0:
-		db_reg_offset = mmTPC6_QM_PQ_PI_0;
+	case GAUDI_QUEUE_ID_NIC_8_0:
+		db_reg_offset = mmNIC4_QM0_PQ_PI_0;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_6_1:
-		db_reg_offset = mmTPC6_QM_PQ_PI_1;
+	case GAUDI_QUEUE_ID_NIC_8_1:
+		db_reg_offset = mmNIC4_QM0_PQ_PI_1;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_6_2:
-		db_reg_offset = mmTPC6_QM_PQ_PI_2;
+	case GAUDI_QUEUE_ID_NIC_8_2:
+		db_reg_offset = mmNIC4_QM0_PQ_PI_2;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_6_3:
-		db_reg_offset = mmTPC6_QM_PQ_PI_3;
+	case GAUDI_QUEUE_ID_NIC_8_3:
+		db_reg_offset = mmNIC4_QM0_PQ_PI_3;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_7_0:
-		db_reg_offset = mmTPC7_QM_PQ_PI_0;
+	case GAUDI_QUEUE_ID_NIC_9_0:
+		db_reg_offset = mmNIC4_QM1_PQ_PI_0;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_7_1:
-		db_reg_offset = mmTPC7_QM_PQ_PI_1;
+	case GAUDI_QUEUE_ID_NIC_9_1:
+		db_reg_offset = mmNIC4_QM1_PQ_PI_1;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_7_2:
-		db_reg_offset = mmTPC7_QM_PQ_PI_2;
+	case GAUDI_QUEUE_ID_NIC_9_2:
+		db_reg_offset = mmNIC4_QM1_PQ_PI_2;
 		break;
 
-	case GAUDI_QUEUE_ID_TPC_7_3:
-		db_reg_offset = mmTPC7_QM_PQ_PI_3;
+	case GAUDI_QUEUE_ID_NIC_9_3:
+		db_reg_offset = mmNIC4_QM1_PQ_PI_3;
 		break;
 
 	default:
@@ -3404,6 +4478,121 @@ static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size,
 	dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
 }
 
+static int gaudi_hbm_scrubbing(struct hl_device *hdev)
+{
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	u64  cur_addr = DRAM_BASE_ADDR_USER;
+	u32 val;
+	u32 chunk_size;
+	int rc, dma_id;
+
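+	/*
+	 * Walk the user DRAM in chunks of up to 2GB, handing each chunk to a
+	 * different DMA core: program a zero source, the chunk as destination
+	 * and a MEM_SET commit, then poll every core's STS0 busy bit before
+	 * issuing the next batch of chunks.
+	 */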
+	while (cur_addr < prop->dram_end_address) {
+		for (dma_id = 0 ; dma_id < DMA_NUMBER_OF_CHANNELS ; dma_id++) {
+			u32 dma_offset = dma_id * DMA_CORE_OFFSET;
+
+			chunk_size =
+			min((u64)SZ_2G, prop->dram_end_address - cur_addr);
+
+			dev_dbg(hdev->dev,
+				"Doing HBM scrubbing for 0x%09llx - 0x%09llx\n",
+				cur_addr, cur_addr + chunk_size);
+
+			WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0);
+			WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0);
+			WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset,
+						lower_32_bits(cur_addr));
+			WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset,
+						upper_32_bits(cur_addr));
+			WREG32(mmDMA0_CORE_DST_TSIZE_0 + dma_offset,
+					chunk_size);
+			WREG32(mmDMA0_CORE_COMMIT + dma_offset,
+					((1 << DMA0_CORE_COMMIT_LIN_SHIFT) |
+					(1 << DMA0_CORE_COMMIT_MEM_SET_SHIFT)));
+
+			cur_addr += chunk_size;
+
+			if (cur_addr == prop->dram_end_address)
+				break;
+		}
+
+		for (dma_id = 0 ; dma_id < DMA_NUMBER_OF_CHANNELS ; dma_id++) {
+			u32 dma_offset = dma_id * DMA_CORE_OFFSET;
+
+			rc = hl_poll_timeout(
+				hdev,
+				mmDMA0_CORE_STS0 + dma_offset,
+				val,
+				((val & DMA0_CORE_STS0_BUSY_MASK) == 0),
+				1000,
+				HBM_SCRUBBING_TIMEOUT_US);
+
+			if (rc) {
+				dev_err(hdev->dev,
+					"DMA Timeout during HBM scrubbing of DMA #%d\n",
+					dma_id);
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
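+/*
+ * When both addr and size are zero the whole device memory is scrubbed:
+ * the driver waits for the engines to go idle, fills the user SRAM with
+ * the 0x77 pattern and then clears the HBM through gaudi_hbm_scrubbing().
+ */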
+static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
+{
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	u64 idle_mask = 0;
+	int rc = 0;
+	u64 val = 0;
+
+	if (!hdev->memory_scrub)
+		return 0;
+
+	if (!addr && !size) {
+		/* Wait till device is idle */
+		rc = hl_poll_timeout(
+				hdev,
+				mmDMA0_CORE_STS0/* dummy */,
+				val/* dummy */,
+				(hdev->asic_funcs->is_device_idle(hdev,
+						&idle_mask, NULL)),
+						1000,
+						HBM_SCRUBBING_TIMEOUT_US);
+		if (rc) {
+			dev_err(hdev->dev, "Timeout while waiting for device to idle\n");
+			return -EIO;
+		}
+
+		/* Scrub SRAM */
+		addr = prop->sram_user_base_address;
+		size = hdev->pldm ? 0x10000 :
+				(prop->sram_size - SRAM_USER_BASE_OFFSET);
+		val = 0x7777777777777777ull;
+
+		rc = gaudi_memset_device_memory(hdev, addr, size, val);
+		if (rc) {
+			dev_err(hdev->dev,
+				"Failed to clear SRAM in mem scrub all\n");
+			return rc;
+		}
+
+		mutex_lock(&gaudi->clk_gate_mutex);
+		hdev->asic_funcs->disable_clock_gating(hdev);
+
+		/* Scrub HBM using all DMA channels in parallel */
+		rc = gaudi_hbm_scrubbing(hdev);
+		if (rc)
+			dev_err(hdev->dev,
+				"Failed to clear HBM in mem scrub all\n");
+
+		hdev->asic_funcs->set_clock_gating(hdev);
+		mutex_unlock(&gaudi->clk_gate_mutex);
+	}
+
+	return rc;
+}
+
 static void *gaudi_get_int_queue_base(struct hl_device *hdev,
 				u32 queue_id, dma_addr_t *dma_handle,
 				u16 *queue_len)
@@ -3425,7 +4614,7 @@ static void *gaudi_get_int_queue_base(struct hl_device *hdev,
 }
 
 static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
-				u16 len, u32 timeout, long *result)
+				u16 len, u32 timeout, u64 *result)
 {
 	struct gaudi_device *gaudi = hdev->asic_specific;
 
@@ -4244,6 +5433,17 @@ static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev,
 					struct hl_cs_parser *parser)
 {
 	struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	u32 nic_mask_q_id = 1 << (HW_CAP_NIC_SHIFT +
+		((parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2));
+
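+	/* Reject jobs for NIC queues whose QMAN was not brought up */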
+	if ((parser->hw_queue_id >= GAUDI_QUEUE_ID_NIC_0_0) &&
+			(parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3) &&
+			(!(gaudi->hw_cap_initialized & nic_mask_q_id))) {
+		dev_err(hdev->dev, "h/w queue %d is disabled\n",
+				parser->hw_queue_id);
+		return -EINVAL;
+	}
 
 	/* For internal queue jobs just check if CB address is valid */
 	if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
@@ -4370,7 +5570,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
 
 	job->id = 0;
 	job->user_cb = cb;
-	job->user_cb->cs_cnt++;
+	atomic_inc(&job->user_cb->cs_cnt);
 	job->user_cb_size = cb_size;
 	job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
 	job->patched_cb = job->user_cb;
@@ -4381,7 +5581,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
 	rc = gaudi_send_job_on_qman0(hdev, job);
 	hl_debugfs_remove_job(hdev, job);
 	kfree(job);
-	cb->cs_cnt--;
+	atomic_dec(&cb->cs_cnt);
 
 	/* Verify DMA is OK */
 	err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
@@ -4476,6 +5676,12 @@ static void gaudi_restore_qm_registers(struct hl_device *hdev)
 		qman_offset = i * TPC_QMAN_OFFSET;
 		WREG32(mmTPC0_QM_ARB_CFG_0 + qman_offset, 0);
 	}
+
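+	/*
+	 * NIC engines come in pairs per macro: even engines sit on QM0 and
+	 * odd engines on QM1, hence the macro stride for (i >> 1) and the
+	 * engine stride for (i & 0x1).
+	 */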
+	for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) {
+		qman_offset = (i >> 1) * NIC_MACRO_QMAN_OFFSET +
+				(i & 0x1) * NIC_ENGINE_QMAN_OFFSET;
+		WREG32(mmNIC0_QM0_ARB_CFG_0 + qman_offset, 0);
+	}
 }
 
 static void gaudi_restore_user_registers(struct hl_device *hdev)
@@ -4487,21 +5693,6 @@ static void gaudi_restore_user_registers(struct hl_device *hdev)
 
 static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
 {
-	struct asic_fixed_properties *prop = &hdev->asic_prop;
-	u64 addr = prop->sram_user_base_address;
-	u32 size = hdev->pldm ? 0x10000 :
-			(prop->sram_size - SRAM_USER_BASE_OFFSET);
-	u64 val = 0x7777777777777777ull;
-	int rc;
-
-	rc = gaudi_memset_device_memory(hdev, addr, size, val);
-	if (rc) {
-		dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
-		return rc;
-	}
-
-	gaudi_mmu_prepare(hdev, asid);
-
 	gaudi_restore_user_registers(hdev);
 
 	return 0;
@@ -4910,6 +6101,136 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
 	gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid);
 	gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid);
 
+	if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC0) {
+		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4,
+				asid);
+	}
+
+	if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC1) {
+		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4,
+				asid);
+	}
+
+	if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC2) {
+		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4,
+				asid);
+	}
+
+	if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC3) {
+		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4,
+				asid);
+	}
+
+	if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC4) {
+		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4,
+				asid);
+	}
+
+	if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC5) {
+		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4,
+				asid);
+	}
+
+	if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC6) {
+		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4,
+				asid);
+	}
+
+	if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC7) {
+		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4,
+				asid);
+	}
+
+	if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC8) {
+		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4,
+				asid);
+	}
+
+	if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC9) {
+		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3,
+				asid);
+		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4,
+				asid);
+	}
+
 	hdev->asic_funcs->set_clock_gating(hdev);
 
 	mutex_unlock(&gaudi->clk_gate_mutex);
@@ -5489,6 +6810,56 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
 			mmDMA0_QM_ARB_ERR_CAUSE + index * DMA_QMAN_OFFSET;
 		snprintf(desc, ARRAY_SIZE(desc), "%s%d", "DMA_QM", index);
 		break;
+	case GAUDI_EVENT_NIC0_QM0:
+		glbl_sts_addr = mmNIC0_QM0_GLBL_STS1_0;
+		arb_err_addr = mmNIC0_QM0_ARB_ERR_CAUSE;
+		snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM0");
+		break;
+	case GAUDI_EVENT_NIC0_QM1:
+		glbl_sts_addr = mmNIC0_QM1_GLBL_STS1_0;
+		arb_err_addr = mmNIC0_QM1_ARB_ERR_CAUSE;
+		snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM1");
+		break;
+	case GAUDI_EVENT_NIC1_QM0:
+		glbl_sts_addr = mmNIC1_QM0_GLBL_STS1_0;
+		arb_err_addr = mmNIC1_QM0_ARB_ERR_CAUSE;
+		snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM0");
+		break;
+	case GAUDI_EVENT_NIC1_QM1:
+		glbl_sts_addr = mmNIC1_QM1_GLBL_STS1_0;
+		arb_err_addr = mmNIC1_QM1_ARB_ERR_CAUSE;
+		snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM1");
+		break;
+	case GAUDI_EVENT_NIC2_QM0:
+		glbl_sts_addr = mmNIC2_QM0_GLBL_STS1_0;
+		arb_err_addr = mmNIC2_QM0_ARB_ERR_CAUSE;
+		snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM0");
+		break;
+	case GAUDI_EVENT_NIC2_QM1:
+		glbl_sts_addr = mmNIC2_QM1_GLBL_STS1_0;
+		arb_err_addr = mmNIC2_QM1_ARB_ERR_CAUSE;
+		snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM1");
+		break;
+	case GAUDI_EVENT_NIC3_QM0:
+		glbl_sts_addr = mmNIC3_QM0_GLBL_STS1_0;
+		arb_err_addr = mmNIC3_QM0_ARB_ERR_CAUSE;
+		snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM0");
+		break;
+	case GAUDI_EVENT_NIC3_QM1:
+		glbl_sts_addr = mmNIC3_QM1_GLBL_STS1_0;
+		arb_err_addr = mmNIC3_QM1_ARB_ERR_CAUSE;
+		snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM1");
+		break;
+	case GAUDI_EVENT_NIC4_QM0:
+		glbl_sts_addr = mmNIC4_QM0_GLBL_STS1_0;
+		arb_err_addr = mmNIC4_QM0_ARB_ERR_CAUSE;
+		snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM0");
+		break;
+	case GAUDI_EVENT_NIC4_QM1:
+		glbl_sts_addr = mmNIC4_QM1_GLBL_STS1_0;
+		arb_err_addr = mmNIC4_QM1_ARB_ERR_CAUSE;
+		snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM1");
+		break;
 	default:
 		return;
 	}
@@ -5521,10 +6892,41 @@ static int gaudi_soft_reset_late_init(struct hl_device *hdev)
 	return hl_fw_unmask_irq_arr(hdev, gaudi->events, sizeof(gaudi->events));
 }
 
-static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device)
+static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
+			struct hl_eq_hbm_ecc_data *hbm_ecc_data)
 {
-	int ch, err = 0;
-	u32 base, val, val2;
+	u32 base, val, val2, wr_par, rd_par, ca_par, derr, serr, type, ch;
+	int err = 0;
+
+	if (!hdev->asic_prop.fw_security_disabled) {
+		if (!hbm_ecc_data) {
+			dev_err(hdev->dev, "No FW ECC data");
+			dev_err(hdev->dev, "No FW ECC data\n");
+		}
+
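+		/*
+		 * When F/W security is enabled, the ECC details arrive in the
+		 * event queue entry instead of being read directly from the
+		 * HBM configuration registers.
+		 */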
+		wr_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK,
+				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
+		rd_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK,
+				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
+		ca_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK,
+				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
+		derr = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_DERR_MASK,
+				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
+		serr = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_SERR_MASK,
+				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
+		type = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK,
+				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
+		ch = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK,
+				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
+
+		dev_err(hdev->dev,
+			"HBM%d pc%d ECC: TYPE=%d, WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
+			device, ch, type, wr_par, rd_par, ca_par, serr, derr);
+
+		err = 1;
+
+		return 0;
+	}
 
 	base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET;
 	for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) {
@@ -5540,7 +6942,7 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device)
 
 			val2 = RREG32(base + ch * 0x1000 + 0x060);
 			dev_err(hdev->dev,
-				"HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n",
+				"HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DEC_CNT=%d\n",
 				device, ch * 2,
 				RREG32(base + ch * 0x1000 + 0x064),
 				(val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
@@ -5560,7 +6962,7 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device)
 
 			val2 = RREG32(base + ch * 0x1000 + 0x070);
 			dev_err(hdev->dev,
-				"HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n",
+				"HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DEC_CNT=%d\n",
 				device, ch * 2 + 1,
 				RREG32(base + ch * 0x1000 + 0x074),
 				(val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
@@ -5761,7 +7163,8 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
 	case GAUDI_EVENT_HBM3_SPI_0:
 		gaudi_print_irq_info(hdev, event_type, false);
 		gaudi_hbm_read_interrupts(hdev,
-					  gaudi_hbm_event_to_dev(event_type));
+				gaudi_hbm_event_to_dev(event_type),
+				&eq_entry->hbm_ecc_data);
 		if (hdev->hard_reset_on_fw_events)
 			hl_device_reset(hdev, true, false);
 		break;
@@ -5772,7 +7175,8 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
 	case GAUDI_EVENT_HBM3_SPI_1:
 		gaudi_print_irq_info(hdev, event_type, false);
 		gaudi_hbm_read_interrupts(hdev,
-					  gaudi_hbm_event_to_dev(event_type));
+				gaudi_hbm_event_to_dev(event_type),
+				&eq_entry->hbm_ecc_data);
 		break;
 
 	case GAUDI_EVENT_TPC0_DEC:
@@ -5866,6 +7270,16 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
 	case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
 	case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
 		fallthrough;
+	case GAUDI_EVENT_NIC0_QM0:
+	case GAUDI_EVENT_NIC0_QM1:
+	case GAUDI_EVENT_NIC1_QM0:
+	case GAUDI_EVENT_NIC1_QM1:
+	case GAUDI_EVENT_NIC2_QM0:
+	case GAUDI_EVENT_NIC2_QM1:
+	case GAUDI_EVENT_NIC3_QM0:
+	case GAUDI_EVENT_NIC3_QM1:
+	case GAUDI_EVENT_NIC4_QM0:
+	case GAUDI_EVENT_NIC4_QM1:
 	case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE:
 		gaudi_print_irq_info(hdev, event_type, true);
 		gaudi_handle_qman_err(hdev, event_type);
@@ -6073,7 +7487,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
 	if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
 		return 0;
 
-	rc = hl_fw_cpucp_info_get(hdev);
+	rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0);
 	if (rc)
 		return rc;
 
@@ -6099,10 +7513,11 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
 	struct gaudi_device *gaudi = hdev->asic_specific;
 	const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n";
 	const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n";
+	const char *nic_fmt = "%-5d%-9s%#-14x%#x\n";
 	u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts;
 	bool is_idle = true, is_eng_idle, is_slave;
 	u64 offset;
-	int i, dma_id;
+	int i, dma_id, port;
 
 	mutex_lock(&gaudi->clk_gate_mutex);
 
@@ -6191,6 +7606,45 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
 		}
 	}
 
+	if (s)
+		seq_puts(s, "\nNIC  is_idle  QM_GLBL_STS0  QM_CGM_STS\n"
+				"---  -------  ------------  ----------\n");
+
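+	/* Each iteration covers one NIC macro, i.e. two engines/ports */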
+	for (i = 0 ; i < (NIC_NUMBER_OF_ENGINES / 2) ; i++) {
+		offset = i * NIC_MACRO_QMAN_OFFSET;
+		port = 2 * i;
+		if (hdev->nic_ports_mask & BIT(port)) {
+			qm_glbl_sts0 = RREG32(mmNIC0_QM0_GLBL_STS0 + offset);
+			qm_cgm_sts = RREG32(mmNIC0_QM0_CGM_STS + offset);
+			is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
+			is_idle &= is_eng_idle;
+
+			if (mask)
+				*mask |= ((u64) !is_eng_idle) <<
+						(GAUDI_ENGINE_ID_NIC_0 + port);
+			if (s)
+				seq_printf(s, nic_fmt, port,
+						is_eng_idle ? "Y" : "N",
+						qm_glbl_sts0, qm_cgm_sts);
+		}
+
+		port = 2 * i + 1;
+		if (hdev->nic_ports_mask & BIT(port)) {
+			qm_glbl_sts0 = RREG32(mmNIC0_QM1_GLBL_STS0 + offset);
+			qm_cgm_sts = RREG32(mmNIC0_QM1_CGM_STS + offset);
+			is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
+			is_idle &= is_eng_idle;
+
+			if (mask)
+				*mask |= ((u64) !is_eng_idle) <<
+						(GAUDI_ENGINE_ID_NIC_0 + port);
+			if (s)
+				seq_printf(s, nic_fmt, port,
+						is_eng_idle ? "Y" : "N",
+						qm_glbl_sts0, qm_cgm_sts);
+		}
+	}
+
 	if (s)
 		seq_puts(s, "\n");
 
@@ -6346,14 +7800,121 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
 	return 0;
 }
 
-static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev)
+static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
+		struct hl_ctx *ctx)
 {
-	return RREG32(mmHW_STATE);
+	struct gaudi_device *gaudi = hdev->asic_specific;
+	int min_alloc_order, rc, collective_cb_size;
+
+	if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
+		return 0;
+
+	hdev->internal_cb_pool_virt_addr =
+			hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+					HOST_SPACE_INTERNAL_CB_SZ,
+					&hdev->internal_cb_pool_dma_addr,
+					GFP_KERNEL | __GFP_ZERO);
+
+	if (!hdev->internal_cb_pool_virt_addr)
+		return -ENOMEM;
+
+	collective_cb_size = sizeof(struct packet_msg_short) * 5 +
+			sizeof(struct packet_fence);
+	min_alloc_order = ilog2(collective_cb_size);
+
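+	/*
+	 * The pool's minimal allocation granularity is derived from the size
+	 * of a single collective wait CB: five short messages plus one fence
+	 * packet.
+	 */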
+	hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1);
+	if (!hdev->internal_cb_pool) {
+		dev_err(hdev->dev,
+			"Failed to create internal CB pool\n");
+		rc = -ENOMEM;
+		goto free_internal_cb_pool;
+	}
+
+	rc = gen_pool_add(hdev->internal_cb_pool,
+				(uintptr_t) hdev->internal_cb_pool_virt_addr,
+				HOST_SPACE_INTERNAL_CB_SZ, -1);
+	if (rc) {
+		dev_err(hdev->dev,
+			"Failed to add memory to internal CB pool\n");
+		rc = -EFAULT;
+		goto destroy_internal_cb_pool;
+	}
+
+	hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx,
+			HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ,
+			HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
+
+	if (!hdev->internal_cb_va_base) {
+		rc = -ENOMEM;
+		goto destroy_internal_cb_pool;
+	}
+
+	mutex_lock(&ctx->mmu_lock);
+	rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base,
+			hdev->internal_cb_pool_dma_addr,
+			HOST_SPACE_INTERNAL_CB_SZ);
+
+	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);
+	mutex_unlock(&ctx->mmu_lock);
+
+	if (rc)
+		goto unreserve_internal_cb_pool;
+
+	return 0;
+
+unreserve_internal_cb_pool:
+	hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
+			HOST_SPACE_INTERNAL_CB_SZ);
+destroy_internal_cb_pool:
+	gen_pool_destroy(hdev->internal_cb_pool);
+free_internal_cb_pool:
+	hdev->asic_funcs->asic_dma_free_coherent(hdev,
+			HOST_SPACE_INTERNAL_CB_SZ,
+			hdev->internal_cb_pool_virt_addr,
+			hdev->internal_cb_pool_dma_addr);
+
+	return rc;
+}
+
+static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
+		struct hl_ctx *ctx)
+{
+	struct gaudi_device *gaudi = hdev->asic_specific;
+
+	if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
+		return;
+
+	mutex_lock(&ctx->mmu_lock);
+	hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base,
+			HOST_SPACE_INTERNAL_CB_SZ);
+	hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
+			HOST_SPACE_INTERNAL_CB_SZ);
+	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+	mutex_unlock(&ctx->mmu_lock);
+
+	gen_pool_destroy(hdev->internal_cb_pool);
+
+	hdev->asic_funcs->asic_dma_free_coherent(hdev,
+			HOST_SPACE_INTERNAL_CB_SZ,
+			hdev->internal_cb_pool_virt_addr,
+			hdev->internal_cb_pool_dma_addr);
 }
 
 static int gaudi_ctx_init(struct hl_ctx *ctx)
 {
-	return 0;
+	gaudi_mmu_prepare(ctx->hdev, ctx->asid);
+	return gaudi_internal_cb_pool_init(ctx->hdev, ctx);
+}
+
+static void gaudi_ctx_fini(struct hl_ctx *ctx)
+{
+	struct hl_device *hdev = ctx->hdev;
+
+	/* Gaudi will NEVER support more than a single compute context.
+	 * Therefore, don't clear anything unless it is the compute context
+	 */
+	if (hdev->compute_ctx != ctx)
+		return;
+
+	gaudi_internal_cb_pool_fini(ctx->hdev, ctx);
 }
 
 static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
@@ -6374,14 +7935,15 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
 			sizeof(struct packet_msg_prot) * 2;
 }
 
-static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
+static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
+				u32 size)
 {
 	struct hl_cb *cb = (struct hl_cb *) data;
 	struct packet_msg_short *pkt;
-	u32 value, ctl;
+	u32 value, ctl, pkt_size = sizeof(*pkt);
 
-	pkt = cb->kernel_address;
-	memset(pkt, 0, sizeof(*pkt));
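+	/* The packet is written at the given offset inside the CB and the
+	 * updated offset is returned, so callers can append further packets.
+	 */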
+	pkt = cb->kernel_address + size;
+	memset(pkt, 0, pkt_size);
 
 	/* Inc by 1, Mode ADD */
 	value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1);
@@ -6397,6 +7959,8 @@ static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
 
 	pkt->value = cpu_to_le32(value);
 	pkt->ctl = cpu_to_le32(ctl);
+
+	return size + pkt_size;
 }
 
 static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
@@ -6419,21 +7983,42 @@ static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
 	return pkt_size;
 }
 
-static u32 gaudi_add_arm_monitor_pkt(struct packet_msg_short *pkt, u16 sob_id,
-					u16 sob_val, u16 addr)
+static u32 gaudi_add_arm_monitor_pkt(struct hl_device *hdev,
+		struct packet_msg_short *pkt, u16 sob_base, u8 sob_mask,
+		u16 sob_val, u16 mon_id)
 {
+	u64 monitor_base;
 	u32 ctl, value, pkt_size = sizeof(*pkt);
-	u8 mask = ~(1 << (sob_id & 0x7));
+	u16 msg_addr_offset;
+	u8 mask;
+
+	if (hl_gen_sob_mask(sob_base, sob_mask, &mask)) {
+		dev_err(hdev->dev,
+			"sob_base %u (mask %#x) is not valid\n",
+			sob_base, sob_mask);
+		return 0;
+	}
+
+	/*
+	 * monitor_base should be the content of the base0 address registers,
+	 * so it will be added to the msg short offsets
+	 */
+	monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0;
+
+	msg_addr_offset =
+		(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) -
+				monitor_base;
 
 	memset(pkt, 0, pkt_size);
 
-	value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_id / 8);
+	/* Monitor config packet: bind the monitor to a sync object */
+	value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_base / 8);
 	value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val);
 	value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MODE_MASK,
 			0); /* GREATER OR EQUAL*/
 	value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MASK_MASK, mask);
 
-	ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
+	ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, msg_addr_offset);
 	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
 	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
 	ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
@@ -6468,60 +8053,133 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
 	return pkt_size;
 }
 
-static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id,
-			u16 sob_val, u16 mon_id, u32 q_idx)
+static int gaudi_get_fence_addr(struct hl_device *hdev, u32 queue_id, u64 *addr)
 {
-	struct hl_cb *cb = (struct hl_cb *) data;
-	void *buf = cb->kernel_address;
-	u64 monitor_base, fence_addr = 0;
-	u32 size = 0;
-	u16 msg_addr_offset;
+	u32 offset, nic_index;
 
-	switch (q_idx) {
+	switch (queue_id) {
 	case GAUDI_QUEUE_ID_DMA_0_0:
-		fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_0;
+		offset = mmDMA0_QM_CP_FENCE2_RDATA_0;
 		break;
 	case GAUDI_QUEUE_ID_DMA_0_1:
-		fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_1;
+		offset = mmDMA0_QM_CP_FENCE2_RDATA_1;
 		break;
 	case GAUDI_QUEUE_ID_DMA_0_2:
-		fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_2;
+		offset = mmDMA0_QM_CP_FENCE2_RDATA_2;
 		break;
 	case GAUDI_QUEUE_ID_DMA_0_3:
-		fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_3;
+		offset = mmDMA0_QM_CP_FENCE2_RDATA_3;
 		break;
 	case GAUDI_QUEUE_ID_DMA_1_0:
-		fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_0;
+		offset = mmDMA1_QM_CP_FENCE2_RDATA_0;
 		break;
 	case GAUDI_QUEUE_ID_DMA_1_1:
-		fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_1;
+		offset = mmDMA1_QM_CP_FENCE2_RDATA_1;
 		break;
 	case GAUDI_QUEUE_ID_DMA_1_2:
-		fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_2;
+		offset = mmDMA1_QM_CP_FENCE2_RDATA_2;
 		break;
 	case GAUDI_QUEUE_ID_DMA_1_3:
-		fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_3;
+		offset = mmDMA1_QM_CP_FENCE2_RDATA_3;
 		break;
 	case GAUDI_QUEUE_ID_DMA_5_0:
-		fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_0;
+		offset = mmDMA5_QM_CP_FENCE2_RDATA_0;
 		break;
 	case GAUDI_QUEUE_ID_DMA_5_1:
-		fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_1;
+		offset = mmDMA5_QM_CP_FENCE2_RDATA_1;
 		break;
 	case GAUDI_QUEUE_ID_DMA_5_2:
-		fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_2;
+		offset = mmDMA5_QM_CP_FENCE2_RDATA_2;
 		break;
 	case GAUDI_QUEUE_ID_DMA_5_3:
-		fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_3;
+		offset = mmDMA5_QM_CP_FENCE2_RDATA_3;
+		break;
+	case GAUDI_QUEUE_ID_TPC_7_0:
+		offset = mmTPC7_QM_CP_FENCE2_RDATA_0;
+		break;
+	case GAUDI_QUEUE_ID_TPC_7_1:
+		offset = mmTPC7_QM_CP_FENCE2_RDATA_1;
+		break;
+	case GAUDI_QUEUE_ID_TPC_7_2:
+		offset = mmTPC7_QM_CP_FENCE2_RDATA_2;
+		break;
+	case GAUDI_QUEUE_ID_TPC_7_3:
+		offset = mmTPC7_QM_CP_FENCE2_RDATA_3;
+		break;
+	case GAUDI_QUEUE_ID_NIC_0_0:
+	case GAUDI_QUEUE_ID_NIC_1_0:
+	case GAUDI_QUEUE_ID_NIC_2_0:
+	case GAUDI_QUEUE_ID_NIC_3_0:
+	case GAUDI_QUEUE_ID_NIC_4_0:
+	case GAUDI_QUEUE_ID_NIC_5_0:
+	case GAUDI_QUEUE_ID_NIC_6_0:
+	case GAUDI_QUEUE_ID_NIC_7_0:
+	case GAUDI_QUEUE_ID_NIC_8_0:
+	case GAUDI_QUEUE_ID_NIC_9_0:
+		nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2;
+		offset = mmNIC0_QM0_CP_FENCE2_RDATA_0 +
+				(nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
+				(nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
+		break;
+	case GAUDI_QUEUE_ID_NIC_0_1:
+	case GAUDI_QUEUE_ID_NIC_1_1:
+	case GAUDI_QUEUE_ID_NIC_2_1:
+	case GAUDI_QUEUE_ID_NIC_3_1:
+	case GAUDI_QUEUE_ID_NIC_4_1:
+	case GAUDI_QUEUE_ID_NIC_5_1:
+	case GAUDI_QUEUE_ID_NIC_6_1:
+	case GAUDI_QUEUE_ID_NIC_7_1:
+	case GAUDI_QUEUE_ID_NIC_8_1:
+	case GAUDI_QUEUE_ID_NIC_9_1:
+		nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_1) >> 2;
+		offset = mmNIC0_QM0_CP_FENCE2_RDATA_1 +
+				(nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
+				(nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
+		break;
+	case GAUDI_QUEUE_ID_NIC_0_2:
+	case GAUDI_QUEUE_ID_NIC_1_2:
+	case GAUDI_QUEUE_ID_NIC_2_2:
+	case GAUDI_QUEUE_ID_NIC_3_2:
+	case GAUDI_QUEUE_ID_NIC_4_2:
+	case GAUDI_QUEUE_ID_NIC_5_2:
+	case GAUDI_QUEUE_ID_NIC_6_2:
+	case GAUDI_QUEUE_ID_NIC_7_2:
+	case GAUDI_QUEUE_ID_NIC_8_2:
+	case GAUDI_QUEUE_ID_NIC_9_2:
+		nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_2) >> 2;
+		offset = mmNIC0_QM0_CP_FENCE2_RDATA_2 +
+				(nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
+				(nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
+		break;
+	case GAUDI_QUEUE_ID_NIC_0_3:
+	case GAUDI_QUEUE_ID_NIC_1_3:
+	case GAUDI_QUEUE_ID_NIC_2_3:
+	case GAUDI_QUEUE_ID_NIC_3_3:
+	case GAUDI_QUEUE_ID_NIC_4_3:
+	case GAUDI_QUEUE_ID_NIC_5_3:
+	case GAUDI_QUEUE_ID_NIC_6_3:
+	case GAUDI_QUEUE_ID_NIC_7_3:
+	case GAUDI_QUEUE_ID_NIC_8_3:
+	case GAUDI_QUEUE_ID_NIC_9_3:
+		nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_3) >> 2;
+		offset = mmNIC0_QM0_CP_FENCE2_RDATA_3 +
+				(nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
+				(nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
 		break;
 	default:
-		/* queue index should be valid here */
-		dev_crit(hdev->dev, "wrong queue id %d for wait packet\n",
-				q_idx);
-		return;
+		return -EINVAL;
 	}
 
-	fence_addr += CFG_BASE;
+	*addr = CFG_BASE + offset;
+
+	return 0;
+}
+
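+/*
+ * Emit the three monitor configuration packets: payload address low and
+ * high (pointing at the queue's fence register) and payload data (1),
+ * which the monitor writes out once it is armed and its sync objects
+ * reach the target value.
+ */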
+static u32 gaudi_add_mon_pkts(void *buf, u16 mon_id, u64 fence_addr)
+{
+	u64 monitor_base;
+	u32 size = 0;
+	u16 msg_addr_offset;
 
 	/*
 	 * monitor_base should be the content of the base0 address registers,
@@ -6555,15 +8213,29 @@ static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id,
 
 	size += gaudi_add_mon_msg_short(buf + size, 1, msg_addr_offset);
 
-	/* Fourth monitor config packet: bind the monitor to a sync object */
-	msg_addr_offset =
-		(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) -
-				monitor_base;
-	size += gaudi_add_arm_monitor_pkt(buf + size, sob_id, sob_val,
-						msg_addr_offset);
+	return size;
+}
 
-	/* Fence packet */
+static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
+				struct hl_gen_wait_properties *prop)
+{
+	struct hl_cb *cb = (struct hl_cb *) prop->data;
+	void *buf = cb->kernel_address;
+	u64 fence_addr = 0;
+	u32 size = prop->size;
+
+	if (gaudi_get_fence_addr(hdev, prop->q_idx, &fence_addr)) {
+		dev_crit(hdev->dev, "wrong queue id %d for wait packet\n",
+				prop->q_idx);
+		return 0;
+	}
+
+	size += gaudi_add_mon_pkts(buf + size, prop->mon_id, fence_addr);
+	size += gaudi_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base,
+			prop->sob_mask, prop->sob_val, prop->mon_id);
 	size += gaudi_add_fence_pkt(buf + size);
+
+	return size;
 }
 
 static void gaudi_reset_sob(struct hl_device *hdev, void *data)
@@ -6615,6 +8287,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
 	.pqe_write = gaudi_pqe_write,
 	.asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
 	.asic_dma_free_coherent = gaudi_dma_free_coherent,
+	.scrub_device_mem = gaudi_scrub_device_mem,
 	.get_int_queue_base = gaudi_get_int_queue_base,
 	.test_queues = gaudi_test_queues,
 	.asic_dma_pool_zalloc = gaudi_dma_pool_zalloc,
@@ -6652,13 +8325,13 @@ static const struct hl_asic_funcs gaudi_funcs = {
 	.get_pci_id = gaudi_get_pci_id,
 	.get_eeprom_data = gaudi_get_eeprom_data,
 	.send_cpu_message = gaudi_send_cpu_message,
-	.get_hw_state = gaudi_get_hw_state,
 	.pci_bars_map = gaudi_pci_bars_map,
 	.init_iatu = gaudi_init_iatu,
 	.rreg = hl_rreg,
 	.wreg = hl_wreg,
 	.halt_coresight = gaudi_halt_coresight,
 	.ctx_init = gaudi_ctx_init,
+	.ctx_fini = gaudi_ctx_fini,
 	.get_clk_rate = gaudi_get_clk_rate,
 	.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
 	.read_device_fw_version = gaudi_read_device_fw_version,
@@ -6669,8 +8342,11 @@ static const struct hl_asic_funcs gaudi_funcs = {
 	.gen_signal_cb = gaudi_gen_signal_cb,
 	.gen_wait_cb = gaudi_gen_wait_cb,
 	.reset_sob = gaudi_reset_sob,
+	.reset_sob_group = gaudi_reset_sob_group,
 	.set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
-	.get_device_time = gaudi_get_device_time
+	.get_device_time = gaudi_get_device_time,
+	.collective_wait_init_cs = gaudi_collective_wait_init_cs,
+	.collective_wait_create_jobs = gaudi_collective_wait_create_jobs
 };
 
 /**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 8eb598db81b298e745a96756feb9e97ab79c3339..f2d91f4fcffea3f19e9511435a78ce700e4a8cad 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -14,8 +14,9 @@
 #include "../include/gaudi/gaudi_packets.h"
 #include "../include/gaudi/gaudi.h"
 #include "../include/gaudi/gaudi_async_events.h"
+#include "../include/gaudi/gaudi_fw_if.h"
 
-#define NUMBER_OF_EXT_HW_QUEUES		12
+#define NUMBER_OF_EXT_HW_QUEUES		8
 #define NUMBER_OF_CMPLT_QUEUES		NUMBER_OF_EXT_HW_QUEUES
 #define NUMBER_OF_CPU_HW_QUEUES		1
 #define NUMBER_OF_INT_HW_QUEUES		100
@@ -23,6 +24,10 @@
 					NUMBER_OF_CPU_HW_QUEUES + \
 					NUMBER_OF_INT_HW_QUEUES)
 
+/* 10 NIC QMANs, DMA5 QMAN, TPC7 QMAN */
+#define NUMBER_OF_COLLECTIVE_QUEUES	12
+#define NUMBER_OF_SOBS_IN_GRP		11
+
 /*
  * Number of MSI interrupts IDS:
  * Each completion queue has 1 ID
@@ -56,14 +61,14 @@
 
 #define GAUDI_DEFAULT_CARD_NAME		"HL2000"
 
-#define GAUDI_MAX_PENDING_CS		1024
+#define GAUDI_MAX_PENDING_CS		SZ_16K
 
 #if !IS_MAX_PENDING_CS_VALID(GAUDI_MAX_PENDING_CS)
 #error "GAUDI_MAX_PENDING_CS must be power of 2 and greater than 1"
 #endif
 
-#define PCI_DMA_NUMBER_OF_CHNLS		3
-#define HBM_DMA_NUMBER_OF_CHNLS		5
+#define PCI_DMA_NUMBER_OF_CHNLS		2
+#define HBM_DMA_NUMBER_OF_CHNLS		6
 #define DMA_NUMBER_OF_CHNLS		(PCI_DMA_NUMBER_OF_CHNLS + \
 						HBM_DMA_NUMBER_OF_CHNLS)
 
@@ -79,6 +84,7 @@
 #define TPC_QMAN_OFFSET		(mmTPC1_QM_BASE - mmTPC0_QM_BASE)
 #define MME_QMAN_OFFSET		(mmMME1_QM_BASE - mmMME0_QM_BASE)
 #define NIC_MACRO_QMAN_OFFSET	(mmNIC1_QM0_BASE - mmNIC0_QM0_BASE)
+#define NIC_ENGINE_QMAN_OFFSET	(mmNIC0_QM1_BASE - mmNIC0_QM0_BASE)
 
 #define TPC_CFG_OFFSET		(mmTPC1_CFG_BASE - mmTPC0_CFG_BASE)
 
@@ -99,6 +105,13 @@
 #define MME_ACC_OFFSET		(mmMME1_ACC_BASE - mmMME0_ACC_BASE)
 #define SRAM_BANK_OFFSET	(mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
 
+#define PLL_NR_OFFSET		0
+#define PLL_NF_OFFSET		(mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR)
+#define PLL_OD_OFFSET		(mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR)
+#define PLL_DIV_FACTOR_0_OFFSET	(mmPSOC_CPU_PLL_DIV_FACTOR_0 - \
+				mmPSOC_CPU_PLL_NR)
+#define PLL_DIV_SEL_0_OFFSET	(mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR)
+
 #define NUM_OF_SOB_IN_BLOCK		\
 	(((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
 	mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
@@ -140,13 +153,18 @@
 #define TPC_QMAN_LENGTH			1024
 #define TPC_QMAN_SIZE_IN_BYTES		(TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
 
+#define NIC_QMAN_LENGTH			1024
+#define NIC_QMAN_SIZE_IN_BYTES		(NIC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
+
 #define SRAM_USER_BASE_OFFSET  GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START
 
 /* Virtual address space */
 #define VA_HOST_SPACE_START	0x1000000000000ull	/* 256TB */
-#define VA_HOST_SPACE_END	0x3FF8000000000ull	/* 1PB - 1TB */
+#define VA_HOST_SPACE_END	0x3FF8000000000ull	/* 1PB - 512GB */
 #define VA_HOST_SPACE_SIZE	(VA_HOST_SPACE_END - \
 					VA_HOST_SPACE_START) /* 767TB */
+#define HOST_SPACE_INTERNAL_CB_SZ	SZ_2M
 
 #define HW_CAP_PLL		BIT(0)
 #define HW_CAP_HBM		BIT(1)
@@ -161,6 +179,19 @@
 #define HW_CAP_SRAM_SCRAMBLER	BIT(10)
 #define HW_CAP_HBM_SCRAMBLER	BIT(11)
 
+#define HW_CAP_NIC0		BIT(14)
+#define HW_CAP_NIC1		BIT(15)
+#define HW_CAP_NIC2		BIT(16)
+#define HW_CAP_NIC3		BIT(17)
+#define HW_CAP_NIC4		BIT(18)
+#define HW_CAP_NIC5		BIT(19)
+#define HW_CAP_NIC6		BIT(20)
+#define HW_CAP_NIC7		BIT(21)
+#define HW_CAP_NIC8		BIT(22)
+#define HW_CAP_NIC9		BIT(23)
+#define HW_CAP_NIC_MASK		GENMASK(23, 14)
+#define HW_CAP_NIC_SHIFT	14
+
 #define HW_CAP_TPC0		BIT(24)
 #define HW_CAP_TPC1		BIT(25)
 #define HW_CAP_TPC2		BIT(26)
@@ -187,12 +218,12 @@
 enum gaudi_dma_channels {
 	GAUDI_PCI_DMA_1,
 	GAUDI_PCI_DMA_2,
-	GAUDI_PCI_DMA_3,
 	GAUDI_HBM_DMA_1,
 	GAUDI_HBM_DMA_2,
 	GAUDI_HBM_DMA_3,
 	GAUDI_HBM_DMA_4,
 	GAUDI_HBM_DMA_5,
+	GAUDI_HBM_DMA_6,
 	GAUDI_DMA_MAX
 };
 
@@ -208,6 +239,48 @@ enum gaudi_tpc_mask {
 	GAUDI_TPC_MASK_ALL = 0xFF
 };
 
+enum gaudi_nic_mask {
+	GAUDI_NIC_MASK_NIC0 = 0x01,
+	GAUDI_NIC_MASK_NIC1 = 0x02,
+	GAUDI_NIC_MASK_NIC2 = 0x04,
+	GAUDI_NIC_MASK_NIC3 = 0x08,
+	GAUDI_NIC_MASK_NIC4 = 0x10,
+	GAUDI_NIC_MASK_NIC5 = 0x20,
+	GAUDI_NIC_MASK_NIC6 = 0x40,
+	GAUDI_NIC_MASK_NIC7 = 0x80,
+	GAUDI_NIC_MASK_NIC8 = 0x100,
+	GAUDI_NIC_MASK_NIC9 = 0x200,
+	GAUDI_NIC_MASK_ALL = 0x3FF
+};
+
+/**
+ * struct gaudi_hw_sob_group - H/W SOB group info.
+ * @hdev: habanalabs device structure.
+ * @kref: refcount of this SOB group. The group is reset once the refcount
+ *        drops to zero.
+ * @base_sob_id: base sob id of this SOB group.
+ */
+struct gaudi_hw_sob_group {
+	struct hl_device	*hdev;
+	struct kref		kref;
+	u32			base_sob_id;
+};
+
+#define NUM_SOB_GROUPS (HL_RSVD_SOBS * QMAN_STREAMS)
+/**
+ * struct gaudi_collective_properties -
+ *     holds all SOB groups and queues info reserved for the collective
+ * @hw_sob_group: H/W SOB groups.
+ * @next_sob_group_val: the next value to use for the currently used SOB group.
+ * @curr_sob_group_idx: the index of the currently used SOB group.
+ * @mstr_sob_mask: pre-defined masks for collective master monitors
+ */
+struct gaudi_collective_properties {
+	struct gaudi_hw_sob_group hw_sob_group[NUM_SOB_GROUPS];
+	u16			next_sob_group_val[QMAN_STREAMS];
+	u8			curr_sob_group_idx[QMAN_STREAMS];
+	u8			mstr_sob_mask[HL_COLLECTIVE_RSVD_MSTR_MONS];
+};
+
 /**
  * struct gaudi_internal_qman_info - Internal QMAN information.
  * @pq_kernel_addr: Kernel address of the PQ memory area in the host.
@@ -253,6 +326,8 @@ struct gaudi_device {
 
 	struct gaudi_internal_qman_info	internal_qmans[GAUDI_QUEUE_ID_SIZE];
 
+	struct gaudi_collective_properties collective_props;
+
 	u64				hbm_bar_cur_addr;
 	u64				max_freq_value;
 
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 3d2b0f0f46507bc39560bc75ffa7ccabb26b6171..2e3612e1ee28d7620a85dac854def1628e90fb7e 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -11,8 +11,6 @@
 #include "../include/gaudi/gaudi_masks.h"
 
 #include <uapi/misc/habanalabs.h>
-#include <linux/coresight.h>
-
 #define SPMU_SECTION_SIZE		MME0_ACC_SPMU_MAX_OFFSET
 #define SPMU_EVENT_TYPES_OFFSET		0x400
 #define SPMU_MAX_COUNTERS		6
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
index 1076b4932ce2be87b83a5924ab850a5a61355e2c..8c49da4bcbd58a1ad07f024e5430dee72b5cde7c 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
@@ -20,7 +20,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
 {
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	value = hl_get_frequency(hdev, MME_PLL, false);
@@ -54,7 +54,7 @@ static ssize_t clk_max_freq_mhz_show(struct device *dev,
 	struct gaudi_device *gaudi = hdev->asic_specific;
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	value = hl_get_frequency(hdev, MME_PLL, false);
@@ -72,7 +72,7 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev,
 	int rc;
 	u64 value;
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, NULL)) {
 		count = -ENODEV;
 		goto fail;
 	}
@@ -97,7 +97,7 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev,
 	struct hl_device *hdev = dev_get_drvdata(dev);
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	value = hl_get_frequency(hdev, MME_PLL, true);
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 2d7add0e5bcc0a4f406f02578ed9ae5338d6ba37..e10181692d0bbab8896a9e52061f90f07e80c35e 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -1448,21 +1448,23 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
 	u32 pb_addr, mask;
 	u8 word_offset;
 
-	gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE);
-	gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE);
-	gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE);
-	gaudi_pb_set_block(hdev, mmDMA_E_PLL_BASE);
-	gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_BASE);
-
-	gaudi_pb_set_block(hdev, mmDMA_IF_W_N_BASE);
-	gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH0_BASE);
-	gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH1_BASE);
-	gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_BASE);
-
-	gaudi_pb_set_block(hdev, mmDMA_IF_E_N_BASE);
-	gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH0_BASE);
-	gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH1_BASE);
-	gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_BASE);
+	if (hdev->asic_prop.fw_security_disabled) {
+		gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE);
+		gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE);
+		gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE);
+		gaudi_pb_set_block(hdev, mmDMA_E_PLL_BASE);
+		gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_BASE);
+
+		gaudi_pb_set_block(hdev, mmDMA_IF_W_N_BASE);
+		gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH0_BASE);
+		gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH1_BASE);
+		gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_BASE);
+
+		gaudi_pb_set_block(hdev, mmDMA_IF_E_N_BASE);
+		gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH0_BASE);
+		gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH1_BASE);
+		gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_BASE);
+	}
 
 	WREG32(mmDMA0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
 	WREG32(mmDMA1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
@@ -5157,19 +5159,3992 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
 	WREG32(pb_addr + word_offset, ~mask);
 }
 
+static void gaudi_init_nic_protection_bits(struct hl_device *hdev)
+{
+	u32 pb_addr, mask;
+	u8 word_offset;
+
+	WREG32(mmNIC0_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmNIC0_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
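+	/*
+	 * Every 0x80-byte window of the QMAN register file has a matching
+	 * 32-bit word in the protection-bits region, one bit per register.
+	 * The masks below select the registers in each window whose
+	 * protection bit is cleared by writing back the inverted mask.
+	 */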
+	pb_addr = (mmNIC0_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC0_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC0_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC0_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC0_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
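+	/* NIC1 QMANs: same protection-word programming as NIC0 above */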
+	WREG32(mmNIC1_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmNIC1_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+	pb_addr = (mmNIC1_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC1_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
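+	/*
+	 * Each block below follows the same pattern: pb_addr points at the
+	 * protection-bits words of the surrounding 4KB register block (at
+	 * PROT_BITS_OFFS), word_offset selects the 32-bit word that covers a
+	 * 0x80-byte group of registers, and every listed register contributes
+	 * bit (reg & 0x7F) >> 2 to mask. Writing ~mask clears the bits of the
+	 * listed registers and leaves all other bits in that word set.
+	 */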
+	pb_addr = (mmNIC1_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC1_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC1_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC1_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	WREG32(mmNIC2_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmNIC2_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+	pb_addr = (mmNIC2_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	WREG32(mmNIC3_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmNIC3_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
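+	/*
+	 * Same flow for the NIC3 QMANs: each 32-bit word at PROT_BITS_OFFS
+	 * covers a 128-byte register window, (reg & 0x7F) >> 2 is the bit
+	 * index of a register inside that window, and writing ~mask clears
+	 * the bits of the registers gathered in mask while setting the bits
+	 * of every other register in the window.
+	 */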
+	pb_addr = (mmNIC3_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC3_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	WREG32(mmNIC4_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+	WREG32(mmNIC4_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+	pb_addr = (mmNIC4_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC4_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_STS0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_PQ_BASE_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_PI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_PI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_PI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_PI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_STS0_3 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_PQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_PQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_STS0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_STS0_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_STS0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_STS0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_STS1_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_STS1_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_STS1_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_STS1_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_0 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_CQ_CTL_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_CTL_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_CTL_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_CTL_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_CQ_CTL_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_CP_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_CP_DBG_0_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC4_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC4_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC4_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+}
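
All of the generated NIC0-NIC4 QM blocks above repeat one addressing scheme: each 4 KB configuration block keeps its protection bits in a small window at PROT_BITS_OFFS (0xF80, as the mask arithmetic implies), with one 32-bit protection word per 128 bytes of registers and one bit per 32-bit register. The standalone sketch below is not part of the patch and only isolates that math; the helper name pb_decompose() and the example register offset are illustrative, and whether the driver's WREG32(pb_addr + word_offset, ~mask) leaves the listed registers secured or accessible depends on the PB polarity defined elsewhere in gaudi_security.c.

	#include <stdint.h>
	#include <stdio.h>

	#define PROT_BITS_OFFS 0xF80	/* protection-bits window inside each 4 KB block */

	/* Decompose a register offset into its protection-word address and bit,
	 * mirroring the pb_addr/word_offset/mask pattern used above.
	 */
	static void pb_decompose(uint32_t reg, uint32_t *pb_addr,
				 uint32_t *word_offset, uint32_t *bit_mask)
	{
		*pb_addr = (reg & ~0xFFFu) + PROT_BITS_OFFS;       /* PB window of the register's 4 KB block */
		*word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2; /* one 32-bit PB word per 128 bytes of registers */
		*bit_mask = 1u << ((reg & 0x7Fu) >> 2);            /* one bit per 4-byte register in that word */
	}

	int main(void)
	{
		uint32_t pb_addr, word_offset, bit_mask;
		uint32_t example_reg = 0x00501234;	/* illustrative offset, not a real mmNIC define */

		pb_decompose(example_reg, &pb_addr, &word_offset, &bit_mask);
		printf("reg 0x%08x -> pb word 0x%08x + 0x%02x, bit mask 0x%08x\n",
		       example_reg, pb_addr, word_offset, bit_mask);
		return 0;
	}

For example_reg = 0x00501234 this prints pb word 0x00501f80 + 0x10 and bit mask 0x00002000, i.e. bit 13 of the fifth protection word of that block, which matches what the generated sequences above compute per register before the final ~mask write.
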
+
 static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
 {
 	u32 pb_addr, mask;
 	u8 word_offset;
 
-	gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE);
-	gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE);
-	gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE);
-	gaudi_pb_set_block(hdev, mmTPC3_E2E_CRED_BASE);
-	gaudi_pb_set_block(hdev, mmTPC4_E2E_CRED_BASE);
-	gaudi_pb_set_block(hdev, mmTPC5_E2E_CRED_BASE);
-	gaudi_pb_set_block(hdev, mmTPC6_E2E_CRED_BASE);
-	gaudi_pb_set_block(hdev, mmTPC7_E2E_CRED_BASE);
+	if (hdev->asic_prop.fw_security_disabled) {
+		gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE);
+		gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE);
+		gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE);
+		gaudi_pb_set_block(hdev, mmTPC3_E2E_CRED_BASE);
+		gaudi_pb_set_block(hdev, mmTPC4_E2E_CRED_BASE);
+		gaudi_pb_set_block(hdev, mmTPC5_E2E_CRED_BASE);
+		gaudi_pb_set_block(hdev, mmTPC6_E2E_CRED_BASE);
+		gaudi_pb_set_block(hdev, mmTPC7_E2E_CRED_BASE);
+	}
 
 	WREG32(mmTPC0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
 	WREG32(mmTPC0_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
@@ -8851,16 +12826,20 @@ static void gaudi_init_protection_bits(struct hl_device *hdev)
 	 * secured
 	 */
 
-	gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE);
-	gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE);
-	gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE);
-	gaudi_pb_set_block(hdev, mmMESH_E_PLL_BASE);
-	gaudi_pb_set_block(hdev, mmSRAM_E_PLL_BASE);
+	if (hdev->asic_prop.fw_security_disabled) {
+		gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE);
+		gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE);
+		gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE);
+		gaudi_pb_set_block(hdev, mmMESH_E_PLL_BASE);
+		gaudi_pb_set_block(hdev, mmSRAM_E_PLL_BASE);
+	}
 
 	gaudi_init_dma_protection_bits(hdev);
 
 	gaudi_init_mme_protection_bits(hdev);
 
+	gaudi_init_nic_protection_bits(hdev);
+
 	gaudi_init_tpc_protection_bits(hdev);
 }
 
@@ -9052,17 +13031,20 @@ void gaudi_init_security(struct hl_device *hdev)
 	 * property configuration of MME SBAB and ACC to be non-privileged and
 	 * non-secured
 	 */
-	WREG32(mmMME0_SBAB_PROT, 0x2);
-	WREG32(mmMME0_ACC_PROT, 0x2);
-	WREG32(mmMME1_SBAB_PROT, 0x2);
-	WREG32(mmMME1_ACC_PROT, 0x2);
-	WREG32(mmMME2_SBAB_PROT, 0x2);
-	WREG32(mmMME2_ACC_PROT, 0x2);
-	WREG32(mmMME3_SBAB_PROT, 0x2);
-	WREG32(mmMME3_ACC_PROT, 0x2);
+	if (hdev->asic_prop.fw_security_disabled) {
+		WREG32(mmMME0_SBAB_PROT, 0x2);
+		WREG32(mmMME0_ACC_PROT, 0x2);
+		WREG32(mmMME1_SBAB_PROT, 0x2);
+		WREG32(mmMME1_ACC_PROT, 0x2);
+		WREG32(mmMME2_SBAB_PROT, 0x2);
+		WREG32(mmMME2_ACC_PROT, 0x2);
+		WREG32(mmMME3_SBAB_PROT, 0x2);
+		WREG32(mmMME3_ACC_PROT, 0x2);
+	}
 
 	/* On RAZWI, 0 will be returned from RR and 0xBABA0BAD from PB */
-	WREG32(0xC01B28, 0x1);
+	if (hdev->asic_prop.fw_security_disabled)
+		WREG32(0xC01B28, 0x1);
 
 	gaudi_init_range_registers_lbw(hdev);
 
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 235d47b2420f5beb7f567d040abd1aad375f33bc..3e5eb9e3d7bd84d4434a744903c231eb6ee538ce 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -12,9 +12,7 @@
 #include "../include/goya/goya_reg_map.h"
 
 #include <linux/pci.h>
-#include <linux/genalloc.h>
 #include <linux/hwmon.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/iommu.h>
 #include <linux/seq_file.h>
 
@@ -373,20 +371,20 @@ int goya_get_fixed_properties(struct hl_device *hdev)
 	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
 		prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
 		prop->hw_queues_props[i].driver_only = 0;
-		prop->hw_queues_props[i].requires_kernel_cb = 1;
+		prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
 	}
 
 	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
 		prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
 		prop->hw_queues_props[i].driver_only = 1;
-		prop->hw_queues_props[i].requires_kernel_cb = 0;
+		prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
 	}
 
 	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
 			NUMBER_OF_INT_HW_QUEUES; i++) {
 		prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
 		prop->hw_queues_props[i].driver_only = 0;
-		prop->hw_queues_props[i].requires_kernel_cb = 0;
+		prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER;
 	}
 
 	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
@@ -412,6 +410,7 @@ int goya_get_fixed_properties(struct hl_device *hdev)
 	prop->mmu_hop_table_size = HOP_TABLE_SIZE;
 	prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
 	prop->dram_page_size = PAGE_SIZE_2MB;
+	prop->dram_supports_virtual_memory = true;
 
 	prop->dmmu.hop0_shift = HOP0_SHIFT;
 	prop->dmmu.hop1_shift = HOP1_SHIFT;
@@ -456,6 +455,11 @@ int goya_get_fixed_properties(struct hl_device *hdev)
 
 	prop->max_pending_cs = GOYA_MAX_PENDING_CS;
 
+	/* disable fw security for now, set it in a later stage */
+	prop->fw_security_disabled = true;
+	prop->fw_security_status_valid = false;
+	prop->hard_reset_done_by_fw = false;
+
 	return 0;
 }
 
@@ -551,6 +555,11 @@ static int goya_init_iatu(struct hl_device *hdev)
 	return rc;
 }
 
+static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
+{
+	return RREG32(mmHW_STATE);
+}
+
 /*
  * goya_early_init - GOYA early initialization code
  *
@@ -600,14 +609,27 @@ static int goya_early_init(struct hl_device *hdev)
 
 	prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
 
-	rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
-			mmCPU_BOOT_ERR0, GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
+	rc = hl_pci_init(hdev);
 	if (rc)
 		goto free_queue_props;
 
-	/* Goya Firmware does not support security */
-	prop->fw_security_disabled = true;
-	dev_info(hdev->dev, "firmware-level security is disabled\n");
+	if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+		dev_info(hdev->dev,
+			"H/W state is dirty, must reset before initializing\n");
+		hdev->asic_funcs->hw_fini(hdev, true);
+	}
+
+	/* Before continuing in the initialization, we need to read the preboot
+	 * version to determine whether we run with a security-enabled firmware
+	 */
+	rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
+			mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
+			GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
+	if (rc) {
+		if (hdev->reset_on_preboot_fail)
+			hdev->asic_funcs->hw_fini(hdev, true);
+		goto pci_fini;
+	}
 
 	if (!hdev->pldm) {
 		val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
@@ -618,6 +640,8 @@ static int goya_early_init(struct hl_device *hdev)
 
 	return 0;
 
+pci_fini:
+	hl_pci_fini(hdev);
 free_queue_props:
 	kfree(hdev->asic_prop.hw_queues_props);
 	return rc;
@@ -2315,7 +2339,7 @@ static int goya_load_firmware_to_device(struct hl_device *hdev)
 
 	dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
 
-	return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst);
+	return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst, 0, 0);
 }
 
 /*
@@ -2332,14 +2356,14 @@ static int goya_load_boot_fit_to_device(struct hl_device *hdev)
 
 	dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
 
-	return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst);
+	return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst, 0, 0);
 }
 
 /*
  * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
  * The version string should be located by that offset.
  */
-static void goya_read_device_fw_version(struct hl_device *hdev,
+static int goya_read_device_fw_version(struct hl_device *hdev,
 					enum hl_fw_component fwc)
 {
 	const char *name;
@@ -2359,7 +2383,7 @@ static void goya_read_device_fw_version(struct hl_device *hdev,
 		break;
 	default:
 		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
-		return;
+		return -EIO;
 	}
 
 	ver_off &= ~((u32)SRAM_BASE_ADDR);
@@ -2371,7 +2395,11 @@ static void goya_read_device_fw_version(struct hl_device *hdev,
 		dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
 								name, ver_off);
 		strcpy(dest, "unavailable");
+
+		return -EIO;
 	}
+
+	return 0;
 }
 
 static int goya_init_cpu(struct hl_device *hdev)
@@ -2397,7 +2425,8 @@ static int goya_init_cpu(struct hl_device *hdev)
 
 	rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
 			mmPSOC_GLOBAL_CONF_UBOOT_MAGIC,
-			mmCPU_CMD_STATUS_TO_HOST, mmCPU_BOOT_ERR0,
+			mmCPU_CMD_STATUS_TO_HOST,
+			mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
 			false, GOYA_CPU_TIMEOUT_USEC,
 			GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
 
@@ -2454,7 +2483,6 @@ int goya_mmu_init(struct hl_device *hdev)
 	if (goya->hw_cap_initialized & HW_CAP_MMU)
 		return 0;
 
-	hdev->dram_supports_virtual_memory = true;
 	hdev->dram_default_page_mapping = true;
 
 	for (i = 0 ; i < prop->max_asid ; i++) {
@@ -2505,8 +2533,6 @@ static int goya_hw_init(struct hl_device *hdev)
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
 	int rc;
 
-	dev_info(hdev->dev, "Starting initialization of H/W\n");
-
 	/* Perform read from the device to make sure device is up */
 	RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
 
@@ -2628,7 +2654,7 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
 			"Timeout while waiting for device to reset 0x%x\n",
 			status);
 
-	if (!hard_reset) {
+	if (!hard_reset && goya) {
 		goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
 						HW_CAP_GOLDEN | HW_CAP_TPC);
 		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
@@ -2643,12 +2669,15 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
 	WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
 			0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
 
-	goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
-					HW_CAP_DDR_0 | HW_CAP_DDR_1 |
-					HW_CAP_DMA | HW_CAP_MME |
-					HW_CAP_MMU | HW_CAP_TPC_MBIST |
-					HW_CAP_GOLDEN | HW_CAP_TPC);
-	memset(goya->events_stat, 0, sizeof(goya->events_stat));
+	if (goya) {
+		goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
+				HW_CAP_DDR_0 | HW_CAP_DDR_1 |
+				HW_CAP_DMA | HW_CAP_MME |
+				HW_CAP_MMU | HW_CAP_TPC_MBIST |
+				HW_CAP_GOLDEN | HW_CAP_TPC);
+
+		memset(goya->events_stat, 0, sizeof(goya->events_stat));
+	}
 }
 
 int goya_suspend(struct hl_device *hdev)
@@ -2792,6 +2821,11 @@ static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
 	dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
 }
 
+int goya_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
+{
+	return 0;
+}
+
 void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
 				dma_addr_t *dma_handle,	u16 *queue_len)
 {
@@ -2920,7 +2954,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
 }
 
 int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
-				u32 timeout, long *result)
+				u32 timeout, u64 *result)
 {
 	struct goya_device *goya = hdev->asic_specific;
 
@@ -4506,7 +4540,7 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
 {
 	struct cpucp_unmask_irq_arr_packet *pkt;
 	size_t total_pkt_size;
-	long result;
+	u64 result;
 	int rc;
 	int irq_num_entries, irq_arr_index;
 	__le32 *goya_irq_arr;
@@ -4565,7 +4599,7 @@ static int goya_soft_reset_late_init(struct hl_device *hdev)
 static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
 {
 	struct cpucp_packet pkt;
-	long result;
+	u64 result;
 	int rc;
 
 	memset(&pkt, 0, sizeof(pkt));
@@ -4777,7 +4811,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
 
 	job->id = 0;
 	job->user_cb = cb;
-	job->user_cb->cs_cnt++;
+	atomic_inc(&job->user_cb->cs_cnt);
 	job->user_cb_size = cb_size;
 	job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
 	job->patched_cb = job->user_cb;
@@ -4789,7 +4823,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
 
 	hl_debugfs_remove_job(hdev, job);
 	kfree(job);
-	cb->cs_cnt--;
+	atomic_dec(&cb->cs_cnt);
 
 release_cb:
 	hl_cb_put(cb);
@@ -4872,9 +4906,10 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
 		return 0;
 
 	for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
-		rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off,
-				prop->dram_base_address + off, PAGE_SIZE_2MB,
-				(off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE);
+		rc = hl_mmu_map_page(hdev->kernel_ctx,
+			prop->dram_base_address + off,
+			prop->dram_base_address + off, PAGE_SIZE_2MB,
+			(off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE);
 		if (rc) {
 			dev_err(hdev->dev, "Map failed for address 0x%llx\n",
 				prop->dram_base_address + off);
@@ -4883,8 +4918,10 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
 	}
 
 	if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
-		rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
-			hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB, true);
+		rc = hl_mmu_map_page(hdev->kernel_ctx,
+			VA_CPU_ACCESSIBLE_MEM_ADDR,
+			hdev->cpu_accessible_dma_address,
+			PAGE_SIZE_2MB, true);
 
 		if (rc) {
 			dev_err(hdev->dev,
@@ -4894,7 +4931,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
 		}
 	} else {
 		for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) {
-			rc = hl_mmu_map(hdev->kernel_ctx,
+			rc = hl_mmu_map_page(hdev->kernel_ctx,
 				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
 				hdev->cpu_accessible_dma_address + cpu_off,
 				PAGE_SIZE_4KB, true);
@@ -4921,7 +4958,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
 
 unmap_cpu:
 	for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
-		if (hl_mmu_unmap(hdev->kernel_ctx,
+		if (hl_mmu_unmap_page(hdev->kernel_ctx,
 				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
 				PAGE_SIZE_4KB, true))
 			dev_warn_ratelimited(hdev->dev,
@@ -4929,7 +4966,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
 				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
 unmap:
 	for (; off >= 0 ; off -= PAGE_SIZE_2MB)
-		if (hl_mmu_unmap(hdev->kernel_ctx,
+		if (hl_mmu_unmap_page(hdev->kernel_ctx,
 				prop->dram_base_address + off, PAGE_SIZE_2MB,
 				true))
 			dev_warn_ratelimited(hdev->dev,
@@ -4955,13 +4992,14 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
 	WREG32(mmCPU_IF_AWUSER_OVR_EN, 0);
 
 	if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
-		if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
+		if (hl_mmu_unmap_page(hdev->kernel_ctx,
+				VA_CPU_ACCESSIBLE_MEM_ADDR,
 				PAGE_SIZE_2MB, true))
 			dev_warn(hdev->dev,
 				"Failed to unmap CPU accessible memory\n");
 	} else {
 		for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
-			if (hl_mmu_unmap(hdev->kernel_ctx,
+			if (hl_mmu_unmap_page(hdev->kernel_ctx,
 					VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
 					PAGE_SIZE_4KB,
 					(cpu_off + PAGE_SIZE_4KB) >= SZ_2M))
@@ -4971,7 +5009,7 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
 	}
 
 	for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
-		if (hl_mmu_unmap(hdev->kernel_ctx,
+		if (hl_mmu_unmap_page(hdev->kernel_ctx,
 				prop->dram_base_address + off, PAGE_SIZE_2MB,
 				(off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE))
 			dev_warn_ratelimited(hdev->dev,
@@ -5118,7 +5156,7 @@ int goya_cpucp_info_get(struct hl_device *hdev)
 	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
 		return 0;
 
-	rc = hl_fw_cpucp_info_get(hdev);
+	rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0);
 	if (rc)
 		return rc;
 
@@ -5265,11 +5303,6 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
 	return hl_fw_get_eeprom_data(hdev, data, max_size);
 }
 
-static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
-{
-	return RREG32(mmHW_STATE);
-}
-
 static int goya_ctx_init(struct hl_ctx *ctx)
 {
 	return 0;
@@ -5290,18 +5323,24 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev)
 	return 0;
 }
 
-static void goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
+static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
+		u32 size)
 {
+	return 0;
+}
 
+static u32 goya_gen_wait_cb(struct hl_device *hdev,
+		struct hl_gen_wait_properties *prop)
+{
+	return 0;
 }
 
-static void goya_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id,
-			u16 sob_val, u16 mon_id, u32 q_idx)
+static void goya_reset_sob(struct hl_device *hdev, void *data)
 {
 
 }
 
-static void goya_reset_sob(struct hl_device *hdev, void *data)
+static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group)
 {
 
 }
@@ -5327,6 +5366,23 @@ u64 goya_get_device_time(struct hl_device *hdev)
 	return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
 }
 
+static void goya_collective_wait_init_cs(struct hl_cs *cs)
+{
+
+}
+
+static int goya_collective_wait_create_jobs(struct hl_device *hdev,
+		struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
+		u32 collective_engine_id)
+{
+	return -EINVAL;
+}
+
+static void goya_ctx_fini(struct hl_ctx *ctx)
+{
+
+}
+
 static const struct hl_asic_funcs goya_funcs = {
 	.early_init = goya_early_init,
 	.early_fini = goya_early_fini,
@@ -5344,6 +5400,7 @@ static const struct hl_asic_funcs goya_funcs = {
 	.pqe_write = goya_pqe_write,
 	.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
 	.asic_dma_free_coherent = goya_dma_free_coherent,
+	.scrub_device_mem = goya_scrub_device_mem,
 	.get_int_queue_base = goya_get_int_queue_base,
 	.test_queues = goya_test_queues,
 	.asic_dma_pool_zalloc = goya_dma_pool_zalloc,
@@ -5381,13 +5438,13 @@ static const struct hl_asic_funcs goya_funcs = {
 	.get_pci_id = goya_get_pci_id,
 	.get_eeprom_data = goya_get_eeprom_data,
 	.send_cpu_message = goya_send_cpu_message,
-	.get_hw_state = goya_get_hw_state,
 	.pci_bars_map = goya_pci_bars_map,
 	.init_iatu = goya_init_iatu,
 	.rreg = hl_rreg,
 	.wreg = hl_wreg,
 	.halt_coresight = goya_halt_coresight,
 	.ctx_init = goya_ctx_init,
+	.ctx_fini = goya_ctx_fini,
 	.get_clk_rate = goya_get_clk_rate,
 	.get_queue_id_for_cq = goya_get_queue_id_for_cq,
 	.read_device_fw_version = goya_read_device_fw_version,
@@ -5398,8 +5455,11 @@ static const struct hl_asic_funcs goya_funcs = {
 	.gen_signal_cb = goya_gen_signal_cb,
 	.gen_wait_cb = goya_gen_wait_cb,
 	.reset_sob = goya_reset_sob,
+	.reset_sob_group = goya_reset_sob_group,
 	.set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
-	.get_device_time = goya_get_device_time
+	.get_device_time = goya_get_device_time,
+	.collective_wait_init_cs = goya_collective_wait_init_cs,
+	.collective_wait_create_jobs = goya_collective_wait_create_jobs
 };
 
 /*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index def86c75e0350b5903892b83cac8414bbb97628b..8b3408211af62c88d7423ab23ba1fa42cd4b658a 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -192,7 +192,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id);
 int goya_test_queues(struct hl_device *hdev);
 int goya_test_cpu_queue(struct hl_device *hdev);
 int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
-				u32 timeout, long *result);
+				u32 timeout, u64 *result);
 
 long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
 long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
@@ -221,6 +221,7 @@ void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
 				u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
 				bool eb);
 int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
+int goya_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size);
 void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
 				dma_addr_t *dma_handle,	u16 *queue_len);
 u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index 4027a6a334d7a61bb5df9e7a64912df6a4b60913..6fa03933b438c2e0961584b522c811580785021e 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -12,8 +12,6 @@
 
 #include <uapi/misc/habanalabs.h>
 
-#include <linux/coresight.h>
-
 #define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC	(CORESIGHT_TIMEOUT_USEC * 100)
 
 #define SPMU_SECTION_SIZE		DMA_CH_0_CS_SPMU_MAX_OFFSET
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index cdd4903e48fa74633a26686d66e31e829a56a0ff..3acb36a1a902ea7129b23b5de379de88430c554b 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -36,7 +36,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
 {
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	value = hl_get_frequency(hdev, MME_PLL, false);
@@ -69,7 +69,7 @@ static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
 	struct hl_device *hdev = dev_get_drvdata(dev);
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	value = hl_get_frequency(hdev, MME_PLL, false);
@@ -88,7 +88,7 @@ static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
 	int rc;
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, NULL)) {
 		count = -ENODEV;
 		goto fail;
 	}
@@ -118,7 +118,7 @@ static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr,
 	struct hl_device *hdev = dev_get_drvdata(dev);
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	value = hl_get_frequency(hdev, TPC_PLL, false);
@@ -137,7 +137,7 @@ static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
 	int rc;
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, NULL)) {
 		count = -ENODEV;
 		goto fail;
 	}
@@ -167,7 +167,7 @@ static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr,
 	struct hl_device *hdev = dev_get_drvdata(dev);
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	value = hl_get_frequency(hdev, IC_PLL, false);
@@ -186,7 +186,7 @@ static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
 	int rc;
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, NULL)) {
 		count = -ENODEV;
 		goto fail;
 	}
@@ -216,7 +216,7 @@ static ssize_t mme_clk_curr_show(struct device *dev,
 	struct hl_device *hdev = dev_get_drvdata(dev);
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	value = hl_get_frequency(hdev, MME_PLL, true);
@@ -233,7 +233,7 @@ static ssize_t tpc_clk_curr_show(struct device *dev,
 	struct hl_device *hdev = dev_get_drvdata(dev);
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	value = hl_get_frequency(hdev, TPC_PLL, true);
@@ -250,7 +250,7 @@ static ssize_t ic_clk_curr_show(struct device *dev,
 	struct hl_device *hdev = dev_get_drvdata(dev);
 	long value;
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	value = hl_get_frequency(hdev, IC_PLL, true);
@@ -266,7 +266,7 @@ static ssize_t pm_mng_profile_show(struct device *dev,
 {
 	struct hl_device *hdev = dev_get_drvdata(dev);
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	return sprintf(buf, "%s\n",
@@ -280,7 +280,7 @@ static ssize_t pm_mng_profile_store(struct device *dev,
 {
 	struct hl_device *hdev = dev_get_drvdata(dev);
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, NULL)) {
 		count = -ENODEV;
 		goto out;
 	}
@@ -335,7 +335,7 @@ static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr,
 {
 	struct hl_device *hdev = dev_get_drvdata(dev);
 
-	if (hl_device_disabled_or_in_reset(hdev))
+	if (!hl_device_operational(hdev, NULL))
 		return -ENODEV;
 
 	return sprintf(buf, "%u\n", hdev->high_pll);
@@ -348,7 +348,7 @@ static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr,
 	long value;
 	int rc;
 
-	if (hl_device_disabled_or_in_reset(hdev)) {
+	if (!hl_device_operational(hdev, NULL)) {
 		count = -ENODEV;
 		goto out;
 	}
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 2a5c9cb3d5058e06964d9fbe30a8ada1f976121c..00bd9b392f936061d78b40ed6e80e11dee806c74 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -9,6 +9,38 @@
 #define CPUCP_IF_H
 
 #include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define NUM_HBM_PSEUDO_CH				2
+#define NUM_HBM_CH_PER_DEV				8
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT		0
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK		0x00000001
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_SHIFT		1
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK		0x00000002
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_SHIFT		2
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK		0x00000004
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_SHIFT		3
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_MASK		0x00000008
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_SHIFT		4
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_MASK		0x00000010
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_SHIFT		5
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK		0x00000020
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT		6
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK		0x000007C0
+
+struct hl_eq_hbm_ecc_data {
+	/* SERR counter */
+	__le32 sec_cnt;
+	/* DERR counter */
+	__le32 dec_cnt;
+	/* Supplemental Information according to the mask bits */
+	__le32 hbm_ecc_info;
+	/* Address in HBM where the ECC error occurred */
+	__le32 first_addr;
+	/* SERR continuous address counter */
+	__le32 sec_cont_cnt;
+	__le32 pad;
+};
 
 /*
  * EVENT QUEUE
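
The hbm_ecc_info word in struct hl_eq_hbm_ecc_data above packs the parity/ECC flags and the HBM channel index according to the CPUCP_PKT_HBM_ECC_INFO_* shift/mask pairs. As a minimal sketch of how a consumer might decode it (the helper below is illustrative and not part of this patch):

/* Illustrative helper: decode the packed hbm_ecc_info word using the
 * CPUCP_PKT_HBM_ECC_INFO_* masks defined above. Assumes the structure has
 * already been copied out of the event queue entry.
 */
static void decode_hbm_ecc_info(const struct hl_eq_hbm_ecc_data *d)
{
	u32 info = le32_to_cpu(d->hbm_ecc_info);
	bool wr_par = !!(info & CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK);
	bool rd_par = !!(info & CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK);
	bool derr = !!(info & CPUCP_PKT_HBM_ECC_INFO_DERR_MASK);
	bool serr = !!(info & CPUCP_PKT_HBM_ECC_INFO_SERR_MASK);
	u32 ch = (info & CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK) >>
			CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT;

	pr_info("HBM ECC: ch=%u wr_par=%d rd_par=%d derr=%d serr=%d sec=%u dec=%u\n",
		ch, wr_par, rd_par, derr, serr,
		le32_to_cpu(d->sec_cnt), le32_to_cpu(d->dec_cnt));
}
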
@@ -30,6 +62,7 @@ struct hl_eq_entry {
 	struct hl_eq_header hdr;
 	union {
 		struct hl_eq_ecc_data ecc_data;
+		struct hl_eq_hbm_ecc_data hbm_ecc_data;
 		__le64 data[7];
 	};
 };
@@ -199,6 +232,11 @@ enum pq_init_status {
  *       CpuCP to write to the structure, to prevent data corruption in case of
  *       mismatched driver/FW versions.
  *
+ * CPUCP_PACKET_NIC_INFO_GET -
+ *       Fetch information from the device regarding the NIC. The host's driver
+ *       passes the max size it allows the CpuCP to write to the structure, to
+ *       prevent data corruption in case of mismatched driver/FW versions.
+ *
  * CPUCP_PACKET_TEMPERATURE_SET -
  *       Set the value of the offset property of a specified thermal sensor.
  *       The packet's arguments specify the desired sensor and the field to
@@ -214,10 +252,26 @@ enum pq_init_status {
  *       The packet's arguments specify the desired sensor and the field to
  *       set.
  *
- * CPUCP_PACKET_PLL_REG_GET
- *       Fetch register of PLL from the required PLL IP.
- *       The packet's arguments specify the PLL IP and the register to get.
- *       Each register is 32-bit value which is returned in result field.
+ * CPUCP_PACKET_PCIE_THROUGHPUT_GET
+ *       Get throughput of PCIe.
+ *       The packet's arguments specify the transaction direction (TX/RX).
+ *       The window measurement is 10[msec], and the return value is in KB/sec.
+ *
+ * CPUCP_PACKET_PCIE_REPLAY_CNT_GET
+ *       Replay count measures the number of "replay" events, which is
+ *       basically the number of retries done by PCIe.
+ *
+ * CPUCP_PACKET_TOTAL_ENERGY_GET
+ *       Total Energy is a measurement of energy from the time FW Linux
+ *       is loaded. It is calculated by multiplying the average power by the
+ *       time passed since armcp start. The units are millijoules.
+ *
+ * CPUCP_PACKET_PLL_INFO_GET
+ *       Fetch frequencies of PLL from the required PLL IP.
+ *       The packet's arguments specify the device PLL type, which is
+ *       taken from the device's pll_index enum.
+ *       The result is composed of 4 outputs, each a 16-bit frequency
+ *       in MHz.
  *
  */
 
@@ -244,14 +298,14 @@ enum cpucp_packet_id {
 	CPUCP_PACKET_MAX_POWER_GET,		/* sysfs */
 	CPUCP_PACKET_MAX_POWER_SET,		/* sysfs */
 	CPUCP_PACKET_EEPROM_DATA_GET,		/* sysfs */
-	CPUCP_RESERVED,
+	CPUCP_PACKET_NIC_INFO_GET,		/* internal */
 	CPUCP_PACKET_TEMPERATURE_SET,		/* sysfs */
 	CPUCP_PACKET_VOLTAGE_SET,		/* sysfs */
 	CPUCP_PACKET_CURRENT_SET,		/* sysfs */
-	CPUCP_PACKET_PCIE_THROUGHPUT_GET,		/* internal */
-	CPUCP_PACKET_PCIE_REPLAY_CNT_GET,		/* internal */
+	CPUCP_PACKET_PCIE_THROUGHPUT_GET,	/* internal */
+	CPUCP_PACKET_PCIE_REPLAY_CNT_GET,	/* internal */
 	CPUCP_PACKET_TOTAL_ENERGY_GET,		/* internal */
-	CPUCP_PACKET_PLL_REG_GET,		/* internal */
+	CPUCP_PACKET_PLL_INFO_GET,		/* internal */
 };
 
 #define CPUCP_PACKET_FENCE_VAL	0xFE8CE7A5
@@ -262,6 +316,15 @@ enum cpucp_packet_id {
 #define CPUCP_PKT_CTL_OPCODE_SHIFT	16
 #define CPUCP_PKT_CTL_OPCODE_MASK	0x1FFF0000
 
+#define CPUCP_PKT_RES_PLL_OUT0_SHIFT	0
+#define CPUCP_PKT_RES_PLL_OUT0_MASK	0x000000000000FFFFull
+#define CPUCP_PKT_RES_PLL_OUT1_SHIFT	16
+#define CPUCP_PKT_RES_PLL_OUT1_MASK	0x00000000FFFF0000ull
+#define CPUCP_PKT_RES_PLL_OUT2_SHIFT	32
+#define CPUCP_PKT_RES_PLL_OUT2_MASK	0x0000FFFF00000000ull
+#define CPUCP_PKT_RES_PLL_OUT3_SHIFT	48
+#define CPUCP_PKT_RES_PLL_OUT3_MASK	0xFFFF000000000000ull
+
 struct cpucp_packet {
 	union {
 		__le64 value;	/* For SET packets */
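
For CPUCP_PACKET_PLL_INFO_GET, the four 16-bit frequencies (in MHz) are packed into the single 64-bit result field according to the CPUCP_PKT_RES_PLL_OUT* masks just defined. A hedged sketch of unpacking them (the helper name and freq[] array are illustrative only):

/* Illustrative only: split the 64-bit PLL_INFO_GET result into the four
 * 16-bit frequencies using the CPUCP_PKT_RES_PLL_OUT* masks above.
 */
static void unpack_pll_freqs(u64 result, u16 freq[4])
{
	freq[0] = (result & CPUCP_PKT_RES_PLL_OUT0_MASK) >>
			CPUCP_PKT_RES_PLL_OUT0_SHIFT;
	freq[1] = (result & CPUCP_PKT_RES_PLL_OUT1_MASK) >>
			CPUCP_PKT_RES_PLL_OUT1_SHIFT;
	freq[2] = (result & CPUCP_PKT_RES_PLL_OUT2_MASK) >>
			CPUCP_PKT_RES_PLL_OUT2_SHIFT;
	freq[3] = (result & CPUCP_PKT_RES_PLL_OUT3_MASK) >>
			CPUCP_PKT_RES_PLL_OUT3_SHIFT;
}
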
@@ -286,8 +349,9 @@ struct cpucp_packet {
 			__u8 pad; /* unused */
 		};
 
-		struct {/* For PLL register fetch */
+		struct {/* For PLL info fetch */
 			__le16 pll_type;
+			/* TODO pll_reg is kept temporarily before removal */
 			__le16 pll_reg;
 		};
 
@@ -300,7 +364,7 @@ struct cpucp_packet {
 		/* For led set */
 		__le32 led_index;
 
-		/* For get CpuCP info/EEPROM data */
+		/* For get CpuCP info/EEPROM data/NIC info */
 		__le32 data_max_size;
 	};
 
@@ -366,6 +430,7 @@ enum cpucp_pcie_throughput_attributes {
 	cpucp_pcie_throughput_rx
 };
 
+/* TODO temporarily kept before removal */
 enum cpucp_pll_reg_attributes {
 	cpucp_pll_nr_reg,
 	cpucp_pll_nf_reg,
@@ -374,6 +439,7 @@ enum cpucp_pll_reg_attributes {
 	cpucp_pll_div_sel_reg
 };
 
+/* TODO temporarily kept before removal */
 enum cpucp_pll_type_attributes {
 	cpucp_pll_cpu,
 	cpucp_pll_pci,
@@ -392,6 +458,12 @@ struct eq_generic_event {
 #define CARD_NAME_MAX_LEN		16
 #define VERSION_MAX_LEN			128
 #define CPUCP_MAX_SENSORS		128
+#define CPUCP_MAX_NICS			128
+#define CPUCP_LANES_PER_NIC		4
+#define CPUCP_NIC_QSFP_EEPROM_MAX_LEN	1024
+#define CPUCP_MAX_NIC_LANES		(CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC)
+#define CPUCP_NIC_MASK_ARR_LEN		((CPUCP_MAX_NICS + 63) / 64)
+#define CPUCP_NIC_POLARITY_ARR_LEN	((CPUCP_MAX_NIC_LANES + 63) / 64)
 
 struct cpucp_sensor {
 	__le32 type;
@@ -408,6 +480,29 @@ enum cpucp_card_types {
 	cpucp_card_type_pmc
 };
 
+#define CPUCP_SEC_CONF_ENABLED_SHIFT	0
+#define CPUCP_SEC_CONF_ENABLED_MASK	0x00000001
+
+#define CPUCP_SEC_CONF_FLASH_WP_SHIFT	1
+#define CPUCP_SEC_CONF_FLASH_WP_MASK	0x00000002
+
+#define CPUCP_SEC_CONF_EEPROM_WP_SHIFT	2
+#define CPUCP_SEC_CONF_EEPROM_WP_MASK	0x00000004
+
+/**
+ * struct cpucp_security_info - Security information.
+ * @config: configuration bit field
+ * @keys_num: number of stored keys
+ * @revoked_keys: revoked keys bit field
+ * @min_svn: minimal security version
+ */
+struct cpucp_security_info {
+	__u8 config;
+	__u8 keys_num;
+	__u8 revoked_keys;
+	__u8 min_svn;
+};
+
 /**
  * struct cpucp_info - Info from CpuCP that is necessary to the host's driver
  * @sensors: available sensors description.
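
The config byte of struct cpucp_security_info is a bit field described by the CPUCP_SEC_CONF_* shift/mask pairs above. A small illustrative sketch (not part of the patch) of interpreting it:

/* Illustrative sketch: interpret the security configuration byte using the
 * CPUCP_SEC_CONF_* masks defined above.
 */
static void report_sec_conf(const struct cpucp_security_info *sec)
{
	bool secured = !!(sec->config & CPUCP_SEC_CONF_ENABLED_MASK);
	bool flash_wp = !!(sec->config & CPUCP_SEC_CONF_FLASH_WP_MASK);
	bool eeprom_wp = !!(sec->config & CPUCP_SEC_CONF_EEPROM_WP_MASK);

	pr_info("security: enabled=%d flash_wp=%d eeprom_wp=%d keys=%u min_svn=%u\n",
		secured, flash_wp, eeprom_wp, sec->keys_num, sec->min_svn);
}
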
@@ -423,6 +518,7 @@ enum cpucp_card_types {
  * @cpucp_version: CpuCP S/W version.
  * @dram_size: available DRAM size.
  * @card_name: card name that will be displayed in HWMON subsystem on the host
+ * @sec_info: security information
  */
 struct cpucp_info {
 	struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
@@ -438,6 +534,26 @@ struct cpucp_info {
 	__le32 reserved2;
 	__le64 dram_size;
 	char card_name[CARD_NAME_MAX_LEN];
+	__le64 reserved3;
+	__le64 reserved4;
+	__u8 reserved5;
+	__u8 pad[7];
+	struct cpucp_security_info sec_info;
+	__le32 reserved6;
+};
+
+struct cpucp_mac_addr {
+	__u8 mac_addr[ETH_ALEN];
+};
+
+struct cpucp_nic_info {
+	struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS];
+	__le64 link_mask[CPUCP_NIC_MASK_ARR_LEN];
+	__le64 pol_tx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+	__le64 pol_rx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+	__le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN];
+	__u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
+	__le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
 };
 
 #endif /* CPUCP_IF_H */
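
struct cpucp_nic_info above stores the per-port bitmaps as arrays of __le64 whose length is CPUCP_NIC_MASK_ARR_LEN, i.e. (CPUCP_MAX_NICS + 63) / 64 words. A sketch of testing one bit of link_mask, assuming a hypothetical helper name:

/* Illustrative only: test one bit in the link_mask bitmap of
 * struct cpucp_nic_info. Each __le64 word holds 64 port bits.
 */
static bool nic_port_linked(const struct cpucp_nic_info *info, u32 port)
{
	u64 word;

	if (port >= CPUCP_MAX_NICS)
		return false;

	word = le64_to_cpu(info->link_mask[port / 64]);
	return !!(word & BIT_ULL(port % 64));
}
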
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index bb67cafc6e0060fbd4099f8155d713cd82eebc98..e5801ecf0cb23b33d8fd923dcb77f7ccc8a248e4 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -53,6 +53,23 @@
  *					trust), boot authentication (chain of
  *					trust), data packets authentication.
  *
+ * CPU_BOOT_ERR0_EFUSE_FAIL		Reading from eFuse failed.
+ *					The PCI device ID might be wrong.
+ *
+ * CPU_BOOT_ERR0_PRI_IMG_VER_FAIL	Verification of primary image failed.
+ *					It means that the ppboot checksum
+ *					verification for the preboot primary
+ *					image has failed to match the expected
+ *					checksum. Trying to program the image
+ *					again might solve this.
+ *
+ * CPU_BOOT_ERR0_SEC_IMG_VER_FAIL	Verification of secondary image failed.
+ *					It means that the ppboot checksum
+ *					verification for the preboot secondary
+ *					image has failed to match the expected
+ *					checksum. Trying to program the image
+ *					again might solve this.
+ *
  * CPU_BOOT_ERR0_ENABLED		Error registers enabled.
  *					This is a main indication that the
  *					running FW populates the error
@@ -68,8 +85,94 @@
 #define CPU_BOOT_ERR0_NIC_FW_FAIL		(1 << 6)
 #define CPU_BOOT_ERR0_SECURITY_NOT_RDY		(1 << 7)
 #define CPU_BOOT_ERR0_SECURITY_FAIL		(1 << 8)
+#define CPU_BOOT_ERR0_EFUSE_FAIL		(1 << 9)
+#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL		(1 << 10)
+#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL		(1 << 11)
 #define CPU_BOOT_ERR0_ENABLED			(1 << 31)
 
+/*
+ * BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers
+ *
+ * CPU_BOOT_DEV_STS0_SECURITY_EN	Security is Enabled.
+ *					This is an indication for security
+ *					enabled in FW, which means that
+ *					all conditions for security are met:
+ *					device is indicated as security enabled,
+ *					registers are protected, and device
+ *					uses keys for image verification.
+ *					Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DEBUG_EN		Debug is enabled.
+ *					Enabled when JTAG or DEBUG is enabled
+ *					in FW.
+ *					Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_WATCHDOG_EN	Watchdog is enabled.
+ *					Watchdog is enabled in FW.
+ *					Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_INIT_EN	DRAM initialization is enabled.
+ *					DRAM initialization has been done in FW.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_BMC_WAIT_EN	Waiting for BMC data enabled.
+ *					If set, it means that during boot,
+ *					FW waited for BMC data.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_E2E_CRED_EN	E2E credits initialized.
+ *					FW initialized E2E credits.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_CRED_EN	HBM credits initialized.
+ *					FW initialized HBM credits.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_RL_EN		Rate limiter initialized.
+ *					FW initialized rate limiter.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_SRAM_SCR_EN	SRAM scrambler enabled.
+ *					FW initialized SRAM scrambler.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_SCR_EN	DRAM scrambler enabled.
+ *					FW initialized DRAM scrambler.
+ *					Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_FW_HARD_RST_EN	FW hard reset procedure is enabled.
+ *					FW has the hard reset procedure
+ *					implemented. This means that FW will
+ *					perform the hard reset procedure on
+ *					receiving the halt-machine event.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_PLL_INFO_EN	FW retrieval of PLL info is enabled.
+ *					Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_ENABLED		Device status register enabled.
+ *					This is a main indication that the
+ *					running FW populates the device status
+ *					register, meaning the device status
+ *					bits are not garbage but actual
+ *					statuses.
+ *					Initialized in: preboot
+ *
+ */
+#define CPU_BOOT_DEV_STS0_SECURITY_EN			(1 << 0)
+#define CPU_BOOT_DEV_STS0_DEBUG_EN			(1 << 1)
+#define CPU_BOOT_DEV_STS0_WATCHDOG_EN			(1 << 2)
+#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN			(1 << 3)
+#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN			(1 << 4)
+#define CPU_BOOT_DEV_STS0_E2E_CRED_EN			(1 << 5)
+#define CPU_BOOT_DEV_STS0_HBM_CRED_EN			(1 << 6)
+#define CPU_BOOT_DEV_STS0_RL_EN				(1 << 7)
+#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN			(1 << 8)
+#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN			(1 << 9)
+#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN		(1 << 10)
+#define CPU_BOOT_DEV_STS0_PLL_INFO_EN			(1 << 11)
+#define CPU_BOOT_DEV_STS0_ENABLED			(1 << 31)
+
 enum cpu_boot_status {
 	CPU_BOOT_STATUS_NA = 0,		/* Default value after reset of chip */
 	CPU_BOOT_STATUS_IN_WFE = 1,
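
Both the CPU_BOOT_ERR0_* and CPU_BOOT_DEV_STS0_* words carry an ENABLED bit (bit 31) that indicates whether the running FW populates the register at all, so any per-bit test should be gated on it. A minimal sketch, assuming sts0/err0 are raw register values already read by the caller:

/* Sketch: the status/error bits are meaningful only when the respective
 * *_ENABLED bit is set, i.e. when the FW actually writes the register.
 */
static bool fw_reports_pll_info(u32 sts0)
{
	if (!(sts0 & CPU_BOOT_DEV_STS0_ENABLED))
		return false;	/* status register content is not valid */

	return !!(sts0 & CPU_BOOT_DEV_STS0_PLL_INFO_EN);
}

static bool fw_primary_image_failed(u32 err0)
{
	return (err0 & CPU_BOOT_ERR0_ENABLED) &&
	       (err0 & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL);
}
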
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index f92dc53af07422dd882d97ece1684972756fa0ea..5bb54b34a8aebea9c4f7161b8dbef7f5ca872653 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -81,6 +81,7 @@
 #include "sif_rtr_ctrl_6_regs.h"
 #include "sif_rtr_ctrl_7_regs.h"
 #include "psoc_etr_regs.h"
+#include "psoc_cpu_pll_regs.h"
 
 #include "dma0_qm_masks.h"
 #include "mme0_qm_masks.h"
@@ -89,9 +90,18 @@
 #include "tpc0_cfg_masks.h"
 #include "psoc_global_conf_masks.h"
 
-#include "psoc_pci_pll_regs.h"
-#include "psoc_hbm_pll_regs.h"
-#include "psoc_cpu_pll_regs.h"
+#include "nic0_qm0_regs.h"
+#include "nic1_qm0_regs.h"
+#include "nic2_qm0_regs.h"
+#include "nic3_qm0_regs.h"
+#include "nic4_qm0_regs.h"
+#include "nic0_qm1_regs.h"
+#include "nic1_qm1_regs.h"
+#include "nic2_qm1_regs.h"
+#include "nic3_qm1_regs.h"
+#include "nic4_qm1_regs.h"
+
+#include "nic0_qm0_masks.h"
 
 #define GAUDI_ECC_MEM_SEL_OFFSET		0xF18
 #define GAUDI_ECC_ADDRESS_OFFSET		0xF1C
@@ -295,4 +305,14 @@
 #define mmPCIE_AUX_FLR_CTRL                                          0xC07394
 #define mmPCIE_AUX_DBI                                               0xC07490
 
+#define mmPSOC_PCI_PLL_NR                                            0xC72100
+#define mmSRAM_W_PLL_NR                                              0x4C8100
+#define mmPSOC_HBM_PLL_NR                                            0xC74100
+#define mmNIC0_PLL_NR                                                0xCF9100
+#define mmDMA_W_PLL_NR                                               0x487100
+#define mmMESH_W_PLL_NR                                              0x4C7100
+#define mmPSOC_MME_PLL_NR                                            0xC71100
+#define mmPSOC_TPC_PLL_NR                                            0xC73100
+#define mmIF_W_PLL_NR                                                0x488100
+
 #endif /* ASIC_REG_GAUDI_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h
new file mode 100644
index 0000000000000000000000000000000000000000..bd37b64521330015c4d11f7b0e7aaed8b731e0ae
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h
@@ -0,0 +1,800 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC0_QM0_MASKS_H_
+#define ASIC_REG_NIC0_QM0_MASKS_H_
+
+/*
+ *****************************************
+ *   NIC0_QM0 (Prototype: QMAN)
+ *****************************************
+ */
+
+/* NIC0_QM0_GLBL_CFG0 */
+#define NIC0_QM0_GLBL_CFG0_PQF_EN_SHIFT                              0
+#define NIC0_QM0_GLBL_CFG0_PQF_EN_MASK                               0xF
+#define NIC0_QM0_GLBL_CFG0_CQF_EN_SHIFT                              4
+#define NIC0_QM0_GLBL_CFG0_CQF_EN_MASK                               0x1F0
+#define NIC0_QM0_GLBL_CFG0_CP_EN_SHIFT                               9
+#define NIC0_QM0_GLBL_CFG0_CP_EN_MASK                                0x3E00
+
+/* NIC0_QM0_GLBL_CFG1 */
+#define NIC0_QM0_GLBL_CFG1_PQF_STOP_SHIFT                            0
+#define NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK                             0xF
+#define NIC0_QM0_GLBL_CFG1_CQF_STOP_SHIFT                            4
+#define NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK                             0x1F0
+#define NIC0_QM0_GLBL_CFG1_CP_STOP_SHIFT                             9
+#define NIC0_QM0_GLBL_CFG1_CP_STOP_MASK                              0x3E00
+#define NIC0_QM0_GLBL_CFG1_PQF_FLUSH_SHIFT                           16
+#define NIC0_QM0_GLBL_CFG1_PQF_FLUSH_MASK                            0xF0000
+#define NIC0_QM0_GLBL_CFG1_CQF_FLUSH_SHIFT                           20
+#define NIC0_QM0_GLBL_CFG1_CQF_FLUSH_MASK                            0x1F00000
+#define NIC0_QM0_GLBL_CFG1_CP_FLUSH_SHIFT                            25
+#define NIC0_QM0_GLBL_CFG1_CP_FLUSH_MASK                             0x3E000000
+
+/* NIC0_QM0_GLBL_PROT */
+#define NIC0_QM0_GLBL_PROT_PQF_SHIFT                                 0
+#define NIC0_QM0_GLBL_PROT_PQF_MASK                                  0xF
+#define NIC0_QM0_GLBL_PROT_CQF_SHIFT                                 4
+#define NIC0_QM0_GLBL_PROT_CQF_MASK                                  0x1F0
+#define NIC0_QM0_GLBL_PROT_CP_SHIFT                                  9
+#define NIC0_QM0_GLBL_PROT_CP_MASK                                   0x3E00
+#define NIC0_QM0_GLBL_PROT_ERR_SHIFT                                 14
+#define NIC0_QM0_GLBL_PROT_ERR_MASK                                  0x4000
+#define NIC0_QM0_GLBL_PROT_ARB_SHIFT                                 15
+#define NIC0_QM0_GLBL_PROT_ARB_MASK                                  0x8000
+
+/* NIC0_QM0_GLBL_ERR_CFG */
+#define NIC0_QM0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT                   0
+#define NIC0_QM0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK                    0xF
+#define NIC0_QM0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT                   4
+#define NIC0_QM0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK                    0x1F0
+#define NIC0_QM0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT                    9
+#define NIC0_QM0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK                     0x3E00
+#define NIC0_QM0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT                  16
+#define NIC0_QM0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK                   0xF0000
+#define NIC0_QM0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT                  20
+#define NIC0_QM0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK                   0x1F00000
+#define NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT                   25
+#define NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK                    0x3E000000
+#define NIC0_QM0_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT                  31
+#define NIC0_QM0_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK                   0x80000000
+
+/* NIC0_QM0_GLBL_SECURE_PROPS */
+#define NIC0_QM0_GLBL_SECURE_PROPS_0_ASID_SHIFT                      0
+#define NIC0_QM0_GLBL_SECURE_PROPS_0_ASID_MASK                       0x3FF
+#define NIC0_QM0_GLBL_SECURE_PROPS_1_ASID_SHIFT                      0
+#define NIC0_QM0_GLBL_SECURE_PROPS_1_ASID_MASK                       0x3FF
+#define NIC0_QM0_GLBL_SECURE_PROPS_2_ASID_SHIFT                      0
+#define NIC0_QM0_GLBL_SECURE_PROPS_2_ASID_MASK                       0x3FF
+#define NIC0_QM0_GLBL_SECURE_PROPS_3_ASID_SHIFT                      0
+#define NIC0_QM0_GLBL_SECURE_PROPS_3_ASID_MASK                       0x3FF
+#define NIC0_QM0_GLBL_SECURE_PROPS_4_ASID_SHIFT                      0
+#define NIC0_QM0_GLBL_SECURE_PROPS_4_ASID_MASK                       0x3FF
+#define NIC0_QM0_GLBL_SECURE_PROPS_0_MMBP_SHIFT                      10
+#define NIC0_QM0_GLBL_SECURE_PROPS_0_MMBP_MASK                       0x400
+#define NIC0_QM0_GLBL_SECURE_PROPS_1_MMBP_SHIFT                      10
+#define NIC0_QM0_GLBL_SECURE_PROPS_1_MMBP_MASK                       0x400
+#define NIC0_QM0_GLBL_SECURE_PROPS_2_MMBP_SHIFT                      10
+#define NIC0_QM0_GLBL_SECURE_PROPS_2_MMBP_MASK                       0x400
+#define NIC0_QM0_GLBL_SECURE_PROPS_3_MMBP_SHIFT                      10
+#define NIC0_QM0_GLBL_SECURE_PROPS_3_MMBP_MASK                       0x400
+#define NIC0_QM0_GLBL_SECURE_PROPS_4_MMBP_SHIFT                      10
+#define NIC0_QM0_GLBL_SECURE_PROPS_4_MMBP_MASK                       0x400
+
+/* NIC0_QM0_GLBL_NON_SECURE_PROPS */
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT                  0
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_ASID_MASK                   0x3FF
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT                  0
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_ASID_MASK                   0x3FF
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT                  0
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_ASID_MASK                   0x3FF
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT                  0
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_ASID_MASK                   0x3FF
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT                  0
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_ASID_MASK                   0x3FF
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT                  10
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_MMBP_MASK                   0x400
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT                  10
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_MMBP_MASK                   0x400
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT                  10
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_MMBP_MASK                   0x400
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT                  10
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_MMBP_MASK                   0x400
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT                  10
+#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_MMBP_MASK                   0x400
+
+/* NIC0_QM0_GLBL_STS0 */
+#define NIC0_QM0_GLBL_STS0_PQF_IDLE_SHIFT                            0
+#define NIC0_QM0_GLBL_STS0_PQF_IDLE_MASK                             0xF
+#define NIC0_QM0_GLBL_STS0_CQF_IDLE_SHIFT                            4
+#define NIC0_QM0_GLBL_STS0_CQF_IDLE_MASK                             0x1F0
+#define NIC0_QM0_GLBL_STS0_CP_IDLE_SHIFT                             9
+#define NIC0_QM0_GLBL_STS0_CP_IDLE_MASK                              0x3E00
+#define NIC0_QM0_GLBL_STS0_PQF_IS_STOP_SHIFT                         16
+#define NIC0_QM0_GLBL_STS0_PQF_IS_STOP_MASK                          0xF0000
+#define NIC0_QM0_GLBL_STS0_CQF_IS_STOP_SHIFT                         20
+#define NIC0_QM0_GLBL_STS0_CQF_IS_STOP_MASK                          0x1F00000
+#define NIC0_QM0_GLBL_STS0_CP_IS_STOP_SHIFT                          25
+#define NIC0_QM0_GLBL_STS0_CP_IS_STOP_MASK                           0x3E000000
+#define NIC0_QM0_GLBL_STS0_ARB_IS_STOP_SHIFT                         31
+#define NIC0_QM0_GLBL_STS0_ARB_IS_STOP_MASK                          0x80000000
+
+/* NIC0_QM0_GLBL_STS1 */
+#define NIC0_QM0_GLBL_STS1_PQF_RD_ERR_SHIFT                          0
+#define NIC0_QM0_GLBL_STS1_PQF_RD_ERR_MASK                           0x1
+#define NIC0_QM0_GLBL_STS1_CQF_RD_ERR_SHIFT                          1
+#define NIC0_QM0_GLBL_STS1_CQF_RD_ERR_MASK                           0x2
+#define NIC0_QM0_GLBL_STS1_CP_RD_ERR_SHIFT                           2
+#define NIC0_QM0_GLBL_STS1_CP_RD_ERR_MASK                            0x4
+#define NIC0_QM0_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT                    3
+#define NIC0_QM0_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK                     0x8
+#define NIC0_QM0_GLBL_STS1_CP_STOP_OP_SHIFT                          4
+#define NIC0_QM0_GLBL_STS1_CP_STOP_OP_MASK                           0x10
+#define NIC0_QM0_GLBL_STS1_CP_MSG_WR_ERR_SHIFT                       5
+#define NIC0_QM0_GLBL_STS1_CP_MSG_WR_ERR_MASK                        0x20
+#define NIC0_QM0_GLBL_STS1_CP_WREG_ERR_SHIFT                         6
+#define NIC0_QM0_GLBL_STS1_CP_WREG_ERR_MASK                          0x40
+#define NIC0_QM0_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT                   8
+#define NIC0_QM0_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK                    0x100
+#define NIC0_QM0_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT                   9
+#define NIC0_QM0_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK                    0x200
+#define NIC0_QM0_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT                   10
+#define NIC0_QM0_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK                    0x400
+#define NIC0_QM0_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT                   11
+#define NIC0_QM0_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK                    0x800
+#define NIC0_QM0_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT                   12
+#define NIC0_QM0_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK                    0x1000
+#define NIC0_QM0_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT                   13
+#define NIC0_QM0_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK                    0x2000
+#define NIC0_QM0_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT                   14
+#define NIC0_QM0_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK                    0x4000
+#define NIC0_QM0_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT                   15
+#define NIC0_QM0_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK                    0x8000
+
+/* NIC0_QM0_GLBL_STS1_4 */
+#define NIC0_QM0_GLBL_STS1_4_CQF_RD_ERR_SHIFT                        1
+#define NIC0_QM0_GLBL_STS1_4_CQF_RD_ERR_MASK                         0x2
+#define NIC0_QM0_GLBL_STS1_4_CP_RD_ERR_SHIFT                         2
+#define NIC0_QM0_GLBL_STS1_4_CP_RD_ERR_MASK                          0x4
+#define NIC0_QM0_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT                  3
+#define NIC0_QM0_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK                   0x8
+#define NIC0_QM0_GLBL_STS1_4_CP_STOP_OP_SHIFT                        4
+#define NIC0_QM0_GLBL_STS1_4_CP_STOP_OP_MASK                         0x10
+#define NIC0_QM0_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT                     5
+#define NIC0_QM0_GLBL_STS1_4_CP_MSG_WR_ERR_MASK                      0x20
+#define NIC0_QM0_GLBL_STS1_4_CP_WREG_ERR_SHIFT                       6
+#define NIC0_QM0_GLBL_STS1_4_CP_WREG_ERR_MASK                        0x40
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT                 8
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK                  0x100
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT                 9
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK                  0x200
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT                 10
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK                  0x400
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT                 11
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK                  0x800
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT                 12
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK                  0x1000
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT                 13
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK                  0x2000
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT                 14
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK                  0x4000
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT                 15
+#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK                  0x8000
+
+/* NIC0_QM0_GLBL_MSG_EN */
+#define NIC0_QM0_GLBL_MSG_EN_PQF_RD_ERR_SHIFT                        0
+#define NIC0_QM0_GLBL_MSG_EN_PQF_RD_ERR_MASK                         0x1
+#define NIC0_QM0_GLBL_MSG_EN_CQF_RD_ERR_SHIFT                        1
+#define NIC0_QM0_GLBL_MSG_EN_CQF_RD_ERR_MASK                         0x2
+#define NIC0_QM0_GLBL_MSG_EN_CP_RD_ERR_SHIFT                         2
+#define NIC0_QM0_GLBL_MSG_EN_CP_RD_ERR_MASK                          0x4
+#define NIC0_QM0_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT                  3
+#define NIC0_QM0_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK                   0x8
+#define NIC0_QM0_GLBL_MSG_EN_CP_STOP_OP_SHIFT                        4
+#define NIC0_QM0_GLBL_MSG_EN_CP_STOP_OP_MASK                         0x10
+#define NIC0_QM0_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT                     5
+#define NIC0_QM0_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK                      0x20
+#define NIC0_QM0_GLBL_MSG_EN_CP_WREG_ERR_SHIFT                       6
+#define NIC0_QM0_GLBL_MSG_EN_CP_WREG_ERR_MASK                        0x40
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT                 8
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK                  0x100
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT                 9
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK                  0x200
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT                 10
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK                  0x400
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT                 11
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK                  0x800
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT                 12
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK                  0x1000
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT                 13
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK                  0x2000
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT                 14
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK                  0x4000
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT                 15
+#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK                  0x8000
+
+/* NIC0_QM0_GLBL_MSG_EN_4 */
+#define NIC0_QM0_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT                      1
+#define NIC0_QM0_GLBL_MSG_EN_4_CQF_RD_ERR_MASK                       0x2
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT                       2
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_RD_ERR_MASK                        0x4
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT                3
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK                 0x8
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT                      4
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_STOP_OP_MASK                       0x10
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT                   5
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK                    0x20
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT                     6
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_WREG_ERR_MASK                      0x40
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT               8
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK                0x100
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT               9
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK                0x200
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT               10
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK                0x400
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT               11
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK                0x800
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT               12
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK                0x1000
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT               13
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK                0x2000
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT               14
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK                0x4000
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT               15
+#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK                0x8000
+
+/* NIC0_QM0_PQ_BASE_LO */
+#define NIC0_QM0_PQ_BASE_LO_VAL_SHIFT                                0
+#define NIC0_QM0_PQ_BASE_LO_VAL_MASK                                 0xFFFFFFFF
+
+/* NIC0_QM0_PQ_BASE_HI */
+#define NIC0_QM0_PQ_BASE_HI_VAL_SHIFT                                0
+#define NIC0_QM0_PQ_BASE_HI_VAL_MASK                                 0xFFFFFFFF
+
+/* NIC0_QM0_PQ_SIZE */
+#define NIC0_QM0_PQ_SIZE_VAL_SHIFT                                   0
+#define NIC0_QM0_PQ_SIZE_VAL_MASK                                    0xFFFFFFFF
+
+/* NIC0_QM0_PQ_PI */
+#define NIC0_QM0_PQ_PI_VAL_SHIFT                                     0
+#define NIC0_QM0_PQ_PI_VAL_MASK                                      0xFFFFFFFF
+
+/* NIC0_QM0_PQ_CI */
+#define NIC0_QM0_PQ_CI_VAL_SHIFT                                     0
+#define NIC0_QM0_PQ_CI_VAL_MASK                                      0xFFFFFFFF
+
+/* NIC0_QM0_PQ_CFG0 */
+#define NIC0_QM0_PQ_CFG0_RESERVED_SHIFT                              0
+#define NIC0_QM0_PQ_CFG0_RESERVED_MASK                               0x1
+
+/* NIC0_QM0_PQ_CFG1 */
+#define NIC0_QM0_PQ_CFG1_CREDIT_LIM_SHIFT                            0
+#define NIC0_QM0_PQ_CFG1_CREDIT_LIM_MASK                             0xFFFF
+#define NIC0_QM0_PQ_CFG1_MAX_INFLIGHT_SHIFT                          16
+#define NIC0_QM0_PQ_CFG1_MAX_INFLIGHT_MASK                           0xFFFF0000
+
+/* NIC0_QM0_PQ_ARUSER_31_11 */
+#define NIC0_QM0_PQ_ARUSER_31_11_VAL_SHIFT                           0
+#define NIC0_QM0_PQ_ARUSER_31_11_VAL_MASK                            0x1FFFFF
+
+/* NIC0_QM0_PQ_STS0 */
+#define NIC0_QM0_PQ_STS0_PQ_CREDIT_CNT_SHIFT                         0
+#define NIC0_QM0_PQ_STS0_PQ_CREDIT_CNT_MASK                          0xFFFF
+#define NIC0_QM0_PQ_STS0_PQ_FREE_CNT_SHIFT                           16
+#define NIC0_QM0_PQ_STS0_PQ_FREE_CNT_MASK                            0xFFFF0000
+
+/* NIC0_QM0_PQ_STS1 */
+#define NIC0_QM0_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT                       0
+#define NIC0_QM0_PQ_STS1_PQ_INFLIGHT_CNT_MASK                        0xFFFF
+#define NIC0_QM0_PQ_STS1_PQ_BUF_EMPTY_SHIFT                          30
+#define NIC0_QM0_PQ_STS1_PQ_BUF_EMPTY_MASK                           0x40000000
+#define NIC0_QM0_PQ_STS1_PQ_BUSY_SHIFT                               31
+#define NIC0_QM0_PQ_STS1_PQ_BUSY_MASK                                0x80000000
+
+/* NIC0_QM0_CQ_CFG0 */
+#define NIC0_QM0_CQ_CFG0_RESERVED_SHIFT                              0
+#define NIC0_QM0_CQ_CFG0_RESERVED_MASK                               0x1
+
+/* NIC0_QM0_CQ_CFG1 */
+#define NIC0_QM0_CQ_CFG1_CREDIT_LIM_SHIFT                            0
+#define NIC0_QM0_CQ_CFG1_CREDIT_LIM_MASK                             0xFFFF
+#define NIC0_QM0_CQ_CFG1_MAX_INFLIGHT_SHIFT                          16
+#define NIC0_QM0_CQ_CFG1_MAX_INFLIGHT_MASK                           0xFFFF0000
+
+/* NIC0_QM0_CQ_ARUSER_31_11 */
+#define NIC0_QM0_CQ_ARUSER_31_11_VAL_SHIFT                           0
+#define NIC0_QM0_CQ_ARUSER_31_11_VAL_MASK                            0x1FFFFF
+
+/* NIC0_QM0_CQ_STS0 */
+#define NIC0_QM0_CQ_STS0_CQ_CREDIT_CNT_SHIFT                         0
+#define NIC0_QM0_CQ_STS0_CQ_CREDIT_CNT_MASK                          0xFFFF
+#define NIC0_QM0_CQ_STS0_CQ_FREE_CNT_SHIFT                           16
+#define NIC0_QM0_CQ_STS0_CQ_FREE_CNT_MASK                            0xFFFF0000
+
+/* NIC0_QM0_CQ_STS1 */
+#define NIC0_QM0_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT                       0
+#define NIC0_QM0_CQ_STS1_CQ_INFLIGHT_CNT_MASK                        0xFFFF
+#define NIC0_QM0_CQ_STS1_CQ_BUF_EMPTY_SHIFT                          30
+#define NIC0_QM0_CQ_STS1_CQ_BUF_EMPTY_MASK                           0x40000000
+#define NIC0_QM0_CQ_STS1_CQ_BUSY_SHIFT                               31
+#define NIC0_QM0_CQ_STS1_CQ_BUSY_MASK                                0x80000000
+
+/* NIC0_QM0_CQ_PTR_LO_0 */
+#define NIC0_QM0_CQ_PTR_LO_0_VAL_SHIFT                               0
+#define NIC0_QM0_CQ_PTR_LO_0_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_CQ_PTR_HI_0 */
+#define NIC0_QM0_CQ_PTR_HI_0_VAL_SHIFT                               0
+#define NIC0_QM0_CQ_PTR_HI_0_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_CQ_TSIZE_0 */
+#define NIC0_QM0_CQ_TSIZE_0_VAL_SHIFT                                0
+#define NIC0_QM0_CQ_TSIZE_0_VAL_MASK                                 0xFFFFFFFF
+
+/* NIC0_QM0_CQ_CTL_0 */
+#define NIC0_QM0_CQ_CTL_0_RPT_SHIFT                                  0
+#define NIC0_QM0_CQ_CTL_0_RPT_MASK                                   0xFFFF
+#define NIC0_QM0_CQ_CTL_0_CTL_SHIFT                                  16
+#define NIC0_QM0_CQ_CTL_0_CTL_MASK                                   0xFFFF0000
+
+/* NIC0_QM0_CQ_PTR_LO_1 */
+#define NIC0_QM0_CQ_PTR_LO_1_VAL_SHIFT                               0
+#define NIC0_QM0_CQ_PTR_LO_1_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_CQ_PTR_HI_1 */
+#define NIC0_QM0_CQ_PTR_HI_1_VAL_SHIFT                               0
+#define NIC0_QM0_CQ_PTR_HI_1_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_CQ_TSIZE_1 */
+#define NIC0_QM0_CQ_TSIZE_1_VAL_SHIFT                                0
+#define NIC0_QM0_CQ_TSIZE_1_VAL_MASK                                 0xFFFFFFFF
+
+/* NIC0_QM0_CQ_CTL_1 */
+#define NIC0_QM0_CQ_CTL_1_RPT_SHIFT                                  0
+#define NIC0_QM0_CQ_CTL_1_RPT_MASK                                   0xFFFF
+#define NIC0_QM0_CQ_CTL_1_CTL_SHIFT                                  16
+#define NIC0_QM0_CQ_CTL_1_CTL_MASK                                   0xFFFF0000
+
+/* NIC0_QM0_CQ_PTR_LO_2 */
+#define NIC0_QM0_CQ_PTR_LO_2_VAL_SHIFT                               0
+#define NIC0_QM0_CQ_PTR_LO_2_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_CQ_PTR_HI_2 */
+#define NIC0_QM0_CQ_PTR_HI_2_VAL_SHIFT                               0
+#define NIC0_QM0_CQ_PTR_HI_2_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_CQ_TSIZE_2 */
+#define NIC0_QM0_CQ_TSIZE_2_VAL_SHIFT                                0
+#define NIC0_QM0_CQ_TSIZE_2_VAL_MASK                                 0xFFFFFFFF
+
+/* NIC0_QM0_CQ_CTL_2 */
+#define NIC0_QM0_CQ_CTL_2_RPT_SHIFT                                  0
+#define NIC0_QM0_CQ_CTL_2_RPT_MASK                                   0xFFFF
+#define NIC0_QM0_CQ_CTL_2_CTL_SHIFT                                  16
+#define NIC0_QM0_CQ_CTL_2_CTL_MASK                                   0xFFFF0000
+
+/* NIC0_QM0_CQ_PTR_LO_3 */
+#define NIC0_QM0_CQ_PTR_LO_3_VAL_SHIFT                               0
+#define NIC0_QM0_CQ_PTR_LO_3_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_CQ_PTR_HI_3 */
+#define NIC0_QM0_CQ_PTR_HI_3_VAL_SHIFT                               0
+#define NIC0_QM0_CQ_PTR_HI_3_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_CQ_TSIZE_3 */
+#define NIC0_QM0_CQ_TSIZE_3_VAL_SHIFT                                0
+#define NIC0_QM0_CQ_TSIZE_3_VAL_MASK                                 0xFFFFFFFF
+
+/* NIC0_QM0_CQ_CTL_3 */
+#define NIC0_QM0_CQ_CTL_3_RPT_SHIFT                                  0
+#define NIC0_QM0_CQ_CTL_3_RPT_MASK                                   0xFFFF
+#define NIC0_QM0_CQ_CTL_3_CTL_SHIFT                                  16
+#define NIC0_QM0_CQ_CTL_3_CTL_MASK                                   0xFFFF0000
+
+/* NIC0_QM0_CQ_PTR_LO_4 */
+#define NIC0_QM0_CQ_PTR_LO_4_VAL_SHIFT                               0
+#define NIC0_QM0_CQ_PTR_LO_4_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_CQ_PTR_HI_4 */
+#define NIC0_QM0_CQ_PTR_HI_4_VAL_SHIFT                               0
+#define NIC0_QM0_CQ_PTR_HI_4_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_CQ_TSIZE_4 */
+#define NIC0_QM0_CQ_TSIZE_4_VAL_SHIFT                                0
+#define NIC0_QM0_CQ_TSIZE_4_VAL_MASK                                 0xFFFFFFFF
+
+/* NIC0_QM0_CQ_CTL_4 */
+#define NIC0_QM0_CQ_CTL_4_RPT_SHIFT                                  0
+#define NIC0_QM0_CQ_CTL_4_RPT_MASK                                   0xFFFF
+#define NIC0_QM0_CQ_CTL_4_CTL_SHIFT                                  16
+#define NIC0_QM0_CQ_CTL_4_CTL_MASK                                   0xFFFF0000
+
+/* NIC0_QM0_CQ_PTR_LO_STS */
+#define NIC0_QM0_CQ_PTR_LO_STS_VAL_SHIFT                             0
+#define NIC0_QM0_CQ_PTR_LO_STS_VAL_MASK                              0xFFFFFFFF
+
+/* NIC0_QM0_CQ_PTR_HI_STS */
+#define NIC0_QM0_CQ_PTR_HI_STS_VAL_SHIFT                             0
+#define NIC0_QM0_CQ_PTR_HI_STS_VAL_MASK                              0xFFFFFFFF
+
+/* NIC0_QM0_CQ_TSIZE_STS */
+#define NIC0_QM0_CQ_TSIZE_STS_VAL_SHIFT                              0
+#define NIC0_QM0_CQ_TSIZE_STS_VAL_MASK                               0xFFFFFFFF
+
+/* NIC0_QM0_CQ_CTL_STS */
+#define NIC0_QM0_CQ_CTL_STS_RPT_SHIFT                                0
+#define NIC0_QM0_CQ_CTL_STS_RPT_MASK                                 0xFFFF
+#define NIC0_QM0_CQ_CTL_STS_CTL_SHIFT                                16
+#define NIC0_QM0_CQ_CTL_STS_CTL_MASK                                 0xFFFF0000
+
+/* NIC0_QM0_CQ_IFIFO_CNT */
+#define NIC0_QM0_CQ_IFIFO_CNT_VAL_SHIFT                              0
+#define NIC0_QM0_CQ_IFIFO_CNT_VAL_MASK                               0x3
+
+/* NIC0_QM0_CP_MSG_BASE0_ADDR_LO */
+#define NIC0_QM0_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT                      0
+#define NIC0_QM0_CP_MSG_BASE0_ADDR_LO_VAL_MASK                       0xFFFFFFFF
+
+/* NIC0_QM0_CP_MSG_BASE0_ADDR_HI */
+#define NIC0_QM0_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT                      0
+#define NIC0_QM0_CP_MSG_BASE0_ADDR_HI_VAL_MASK                       0xFFFFFFFF
+
+/* NIC0_QM0_CP_MSG_BASE1_ADDR_LO */
+#define NIC0_QM0_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT                      0
+#define NIC0_QM0_CP_MSG_BASE1_ADDR_LO_VAL_MASK                       0xFFFFFFFF
+
+/* NIC0_QM0_CP_MSG_BASE1_ADDR_HI */
+#define NIC0_QM0_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT                      0
+#define NIC0_QM0_CP_MSG_BASE1_ADDR_HI_VAL_MASK                       0xFFFFFFFF
+
+/* NIC0_QM0_CP_MSG_BASE2_ADDR_LO */
+#define NIC0_QM0_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT                      0
+#define NIC0_QM0_CP_MSG_BASE2_ADDR_LO_VAL_MASK                       0xFFFFFFFF
+
+/* NIC0_QM0_CP_MSG_BASE2_ADDR_HI */
+#define NIC0_QM0_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT                      0
+#define NIC0_QM0_CP_MSG_BASE2_ADDR_HI_VAL_MASK                       0xFFFFFFFF
+
+/* NIC0_QM0_CP_MSG_BASE3_ADDR_LO */
+#define NIC0_QM0_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT                      0
+#define NIC0_QM0_CP_MSG_BASE3_ADDR_LO_VAL_MASK                       0xFFFFFFFF
+
+/* NIC0_QM0_CP_MSG_BASE3_ADDR_HI */
+#define NIC0_QM0_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT                      0
+#define NIC0_QM0_CP_MSG_BASE3_ADDR_HI_VAL_MASK                       0xFFFFFFFF
+
+/* NIC0_QM0_CP_LDMA_TSIZE_OFFSET */
+#define NIC0_QM0_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT                      0
+#define NIC0_QM0_CP_LDMA_TSIZE_OFFSET_VAL_MASK                       0xFFFFFFFF
+
+/* NIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define NIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT                0
+#define NIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK                 0xFFFFFFFF
+
+/* NIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET */
+#define NIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT                0
+#define NIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK                 0xFFFFFFFF
+
+/* NIC0_QM0_CP_FENCE0_RDATA */
+#define NIC0_QM0_CP_FENCE0_RDATA_INC_VAL_SHIFT                       0
+#define NIC0_QM0_CP_FENCE0_RDATA_INC_VAL_MASK                        0xF
+
+/* NIC0_QM0_CP_FENCE1_RDATA */
+#define NIC0_QM0_CP_FENCE1_RDATA_INC_VAL_SHIFT                       0
+#define NIC0_QM0_CP_FENCE1_RDATA_INC_VAL_MASK                        0xF
+
+/* NIC0_QM0_CP_FENCE2_RDATA */
+#define NIC0_QM0_CP_FENCE2_RDATA_INC_VAL_SHIFT                       0
+#define NIC0_QM0_CP_FENCE2_RDATA_INC_VAL_MASK                        0xF
+
+/* NIC0_QM0_CP_FENCE3_RDATA */
+#define NIC0_QM0_CP_FENCE3_RDATA_INC_VAL_SHIFT                       0
+#define NIC0_QM0_CP_FENCE3_RDATA_INC_VAL_MASK                        0xF
+
+/* NIC0_QM0_CP_FENCE0_CNT */
+#define NIC0_QM0_CP_FENCE0_CNT_VAL_SHIFT                             0
+#define NIC0_QM0_CP_FENCE0_CNT_VAL_MASK                              0x3FFF
+
+/* NIC0_QM0_CP_FENCE1_CNT */
+#define NIC0_QM0_CP_FENCE1_CNT_VAL_SHIFT                             0
+#define NIC0_QM0_CP_FENCE1_CNT_VAL_MASK                              0x3FFF
+
+/* NIC0_QM0_CP_FENCE2_CNT */
+#define NIC0_QM0_CP_FENCE2_CNT_VAL_SHIFT                             0
+#define NIC0_QM0_CP_FENCE2_CNT_VAL_MASK                              0x3FFF
+
+/* NIC0_QM0_CP_FENCE3_CNT */
+#define NIC0_QM0_CP_FENCE3_CNT_VAL_SHIFT                             0
+#define NIC0_QM0_CP_FENCE3_CNT_VAL_MASK                              0x3FFF
+
+/* NIC0_QM0_CP_STS */
+#define NIC0_QM0_CP_STS_MSG_INFLIGHT_CNT_SHIFT                       0
+#define NIC0_QM0_CP_STS_MSG_INFLIGHT_CNT_MASK                        0xFFFF
+#define NIC0_QM0_CP_STS_ERDY_SHIFT                                   16
+#define NIC0_QM0_CP_STS_ERDY_MASK                                    0x10000
+#define NIC0_QM0_CP_STS_RRDY_SHIFT                                   17
+#define NIC0_QM0_CP_STS_RRDY_MASK                                    0x20000
+#define NIC0_QM0_CP_STS_MRDY_SHIFT                                   18
+#define NIC0_QM0_CP_STS_MRDY_MASK                                    0x40000
+#define NIC0_QM0_CP_STS_SW_STOP_SHIFT                                19
+#define NIC0_QM0_CP_STS_SW_STOP_MASK                                 0x80000
+#define NIC0_QM0_CP_STS_FENCE_ID_SHIFT                               20
+#define NIC0_QM0_CP_STS_FENCE_ID_MASK                                0x300000
+#define NIC0_QM0_CP_STS_FENCE_IN_PROGRESS_SHIFT                      22
+#define NIC0_QM0_CP_STS_FENCE_IN_PROGRESS_MASK                       0x400000
+
+/* NIC0_QM0_CP_CURRENT_INST_LO */
+#define NIC0_QM0_CP_CURRENT_INST_LO_VAL_SHIFT                        0
+#define NIC0_QM0_CP_CURRENT_INST_LO_VAL_MASK                         0xFFFFFFFF
+
+/* NIC0_QM0_CP_CURRENT_INST_HI */
+#define NIC0_QM0_CP_CURRENT_INST_HI_VAL_SHIFT                        0
+#define NIC0_QM0_CP_CURRENT_INST_HI_VAL_MASK                         0xFFFFFFFF
+
+/* NIC0_QM0_CP_BARRIER_CFG */
+#define NIC0_QM0_CP_BARRIER_CFG_EBGUARD_SHIFT                        0
+#define NIC0_QM0_CP_BARRIER_CFG_EBGUARD_MASK                         0xFFF
+#define NIC0_QM0_CP_BARRIER_CFG_RBGUARD_SHIFT                        16
+#define NIC0_QM0_CP_BARRIER_CFG_RBGUARD_MASK                         0xF0000
+
+/* NIC0_QM0_CP_DBG_0 */
+#define NIC0_QM0_CP_DBG_0_CS_SHIFT                                   0
+#define NIC0_QM0_CP_DBG_0_CS_MASK                                    0xF
+#define NIC0_QM0_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT                      4
+#define NIC0_QM0_CP_DBG_0_EB_CNT_NOT_ZERO_MASK                       0x10
+#define NIC0_QM0_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT                    5
+#define NIC0_QM0_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK                     0x20
+#define NIC0_QM0_CP_DBG_0_MREB_STALL_SHIFT                           6
+#define NIC0_QM0_CP_DBG_0_MREB_STALL_MASK                            0x40
+#define NIC0_QM0_CP_DBG_0_STALL_SHIFT                                7
+#define NIC0_QM0_CP_DBG_0_STALL_MASK                                 0x80
+
+/* NIC0_QM0_CP_ARUSER_31_11 */
+#define NIC0_QM0_CP_ARUSER_31_11_VAL_SHIFT                           0
+#define NIC0_QM0_CP_ARUSER_31_11_VAL_MASK                            0x1FFFFF
+
+/* NIC0_QM0_CP_AWUSER_31_11 */
+#define NIC0_QM0_CP_AWUSER_31_11_VAL_SHIFT                           0
+#define NIC0_QM0_CP_AWUSER_31_11_VAL_MASK                            0x1FFFFF
+
+/* NIC0_QM0_ARB_CFG_0 */
+#define NIC0_QM0_ARB_CFG_0_TYPE_SHIFT                                0
+#define NIC0_QM0_ARB_CFG_0_TYPE_MASK                                 0x1
+#define NIC0_QM0_ARB_CFG_0_IS_MASTER_SHIFT                           4
+#define NIC0_QM0_ARB_CFG_0_IS_MASTER_MASK                            0x10
+#define NIC0_QM0_ARB_CFG_0_EN_SHIFT                                  8
+#define NIC0_QM0_ARB_CFG_0_EN_MASK                                   0x100
+#define NIC0_QM0_ARB_CFG_0_MASK_SHIFT                                12
+#define NIC0_QM0_ARB_CFG_0_MASK_MASK                                 0xF000
+#define NIC0_QM0_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT                     16
+#define NIC0_QM0_ARB_CFG_0_MST_MSG_NOSTALL_MASK                      0x10000
+
+/* NIC0_QM0_ARB_CHOISE_Q_PUSH */
+#define NIC0_QM0_ARB_CHOISE_Q_PUSH_VAL_SHIFT                         0
+#define NIC0_QM0_ARB_CHOISE_Q_PUSH_VAL_MASK                          0x3
+
+/* NIC0_QM0_ARB_WRR_WEIGHT */
+#define NIC0_QM0_ARB_WRR_WEIGHT_VAL_SHIFT                            0
+#define NIC0_QM0_ARB_WRR_WEIGHT_VAL_MASK                             0xFFFFFFFF
+
+/* NIC0_QM0_ARB_CFG_1 */
+#define NIC0_QM0_ARB_CFG_1_CLR_SHIFT                                 0
+#define NIC0_QM0_ARB_CFG_1_CLR_MASK                                  0x1
+
+/* NIC0_QM0_ARB_MST_AVAIL_CRED */
+#define NIC0_QM0_ARB_MST_AVAIL_CRED_VAL_SHIFT                        0
+#define NIC0_QM0_ARB_MST_AVAIL_CRED_VAL_MASK                         0x7F
+
+/* NIC0_QM0_ARB_MST_CRED_INC */
+#define NIC0_QM0_ARB_MST_CRED_INC_VAL_SHIFT                          0
+#define NIC0_QM0_ARB_MST_CRED_INC_VAL_MASK                           0xFFFFFFFF
+
+/* NIC0_QM0_ARB_MST_CHOISE_PUSH_OFST */
+#define NIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT                  0
+#define NIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK                   0xFFFFFFFF
+
+/* NIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST */
+#define NIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT              0
+#define NIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK               0xFFFFFFFF
+
+/* NIC0_QM0_ARB_MST_SLAVE_EN */
+#define NIC0_QM0_ARB_MST_SLAVE_EN_VAL_SHIFT                          0
+#define NIC0_QM0_ARB_MST_SLAVE_EN_VAL_MASK                           0xFFFFFFFF
+
+/* NIC0_QM0_ARB_MST_QUIET_PER */
+#define NIC0_QM0_ARB_MST_QUIET_PER_VAL_SHIFT                         0
+#define NIC0_QM0_ARB_MST_QUIET_PER_VAL_MASK                          0xFFFFFFFF
+
+/* NIC0_QM0_ARB_SLV_CHOISE_WDT */
+#define NIC0_QM0_ARB_SLV_CHOISE_WDT_VAL_SHIFT                        0
+#define NIC0_QM0_ARB_SLV_CHOISE_WDT_VAL_MASK                         0xFFFFFFFF
+
+/* NIC0_QM0_ARB_SLV_ID */
+#define NIC0_QM0_ARB_SLV_ID_VAL_SHIFT                                0
+#define NIC0_QM0_ARB_SLV_ID_VAL_MASK                                 0x1F
+
+/* NIC0_QM0_ARB_MSG_MAX_INFLIGHT */
+#define NIC0_QM0_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT                      0
+#define NIC0_QM0_ARB_MSG_MAX_INFLIGHT_VAL_MASK                       0x3F
+
+/* NIC0_QM0_ARB_MSG_AWUSER_31_11 */
+#define NIC0_QM0_ARB_MSG_AWUSER_31_11_VAL_SHIFT                      0
+#define NIC0_QM0_ARB_MSG_AWUSER_31_11_VAL_MASK                       0x1FFFFF
+
+/* NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP */
+#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT                  0
+#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK                   0x3FF
+#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT                  10
+#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK                   0x400
+
+/* NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP */
+#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT              0
+#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK               0x3FF
+#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT              10
+#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK               0x400
+
+/* NIC0_QM0_ARB_BASE_LO */
+#define NIC0_QM0_ARB_BASE_LO_VAL_SHIFT                               0
+#define NIC0_QM0_ARB_BASE_LO_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_ARB_BASE_HI */
+#define NIC0_QM0_ARB_BASE_HI_VAL_SHIFT                               0
+#define NIC0_QM0_ARB_BASE_HI_VAL_MASK                                0xFFFFFFFF
+
+/* NIC0_QM0_ARB_STATE_STS */
+#define NIC0_QM0_ARB_STATE_STS_VAL_SHIFT                             0
+#define NIC0_QM0_ARB_STATE_STS_VAL_MASK                              0xFFFFFFFF
+
+/* NIC0_QM0_ARB_CHOISE_FULLNESS_STS */
+#define NIC0_QM0_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT                   0
+#define NIC0_QM0_ARB_CHOISE_FULLNESS_STS_VAL_MASK                    0x7F
+
+/* NIC0_QM0_ARB_MSG_STS */
+#define NIC0_QM0_ARB_MSG_STS_FULL_SHIFT                              0
+#define NIC0_QM0_ARB_MSG_STS_FULL_MASK                               0x1
+#define NIC0_QM0_ARB_MSG_STS_NO_INFLIGHT_SHIFT                       1
+#define NIC0_QM0_ARB_MSG_STS_NO_INFLIGHT_MASK                        0x2
+
+/* NIC0_QM0_ARB_SLV_CHOISE_Q_HEAD */
+#define NIC0_QM0_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT                     0
+#define NIC0_QM0_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK                      0x3
+
+/* NIC0_QM0_ARB_ERR_CAUSE */
+#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT                      0
+#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_OVF_MASK                       0x1
+#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT                      1
+#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_WDT_MASK                       0x2
+#define NIC0_QM0_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT                     2
+#define NIC0_QM0_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK                      0x4
+
+/* NIC0_QM0_ARB_ERR_MSG_EN */
+#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT                     0
+#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_OVF_MASK                      0x1
+#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT                     1
+#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_WDT_MASK                      0x2
+#define NIC0_QM0_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT                    2
+#define NIC0_QM0_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK                     0x4
+
+/* NIC0_QM0_ARB_ERR_STS_DRP */
+#define NIC0_QM0_ARB_ERR_STS_DRP_VAL_SHIFT                           0
+#define NIC0_QM0_ARB_ERR_STS_DRP_VAL_MASK                            0x3
+
+/* NIC0_QM0_ARB_MST_CRED_STS */
+#define NIC0_QM0_ARB_MST_CRED_STS_VAL_SHIFT                          0
+#define NIC0_QM0_ARB_MST_CRED_STS_VAL_MASK                           0x7F
+
+/* NIC0_QM0_CGM_CFG */
+#define NIC0_QM0_CGM_CFG_IDLE_TH_SHIFT                               0
+#define NIC0_QM0_CGM_CFG_IDLE_TH_MASK                                0xFFF
+#define NIC0_QM0_CGM_CFG_G2F_TH_SHIFT                                16
+#define NIC0_QM0_CGM_CFG_G2F_TH_MASK                                 0xFF0000
+#define NIC0_QM0_CGM_CFG_CP_IDLE_MASK_SHIFT                          24
+#define NIC0_QM0_CGM_CFG_CP_IDLE_MASK_MASK                           0x1F000000
+#define NIC0_QM0_CGM_CFG_EN_SHIFT                                    31
+#define NIC0_QM0_CGM_CFG_EN_MASK                                     0x80000000
+
+/* NIC0_QM0_CGM_STS */
+#define NIC0_QM0_CGM_STS_ST_SHIFT                                    0
+#define NIC0_QM0_CGM_STS_ST_MASK                                     0x3
+#define NIC0_QM0_CGM_STS_CG_SHIFT                                    4
+#define NIC0_QM0_CGM_STS_CG_MASK                                     0x10
+#define NIC0_QM0_CGM_STS_AGENT_IDLE_SHIFT                            8
+#define NIC0_QM0_CGM_STS_AGENT_IDLE_MASK                             0x100
+#define NIC0_QM0_CGM_STS_AXI_IDLE_SHIFT                              9
+#define NIC0_QM0_CGM_STS_AXI_IDLE_MASK                               0x200
+#define NIC0_QM0_CGM_STS_CP_IDLE_SHIFT                               10
+#define NIC0_QM0_CGM_STS_CP_IDLE_MASK                                0x400
+
+/* NIC0_QM0_CGM_CFG1 */
+#define NIC0_QM0_CGM_CFG1_MASK_TH_SHIFT                              0
+#define NIC0_QM0_CGM_CFG1_MASK_TH_MASK                               0xFF
+
+/* NIC0_QM0_LOCAL_RANGE_BASE */
+#define NIC0_QM0_LOCAL_RANGE_BASE_VAL_SHIFT                          0
+#define NIC0_QM0_LOCAL_RANGE_BASE_VAL_MASK                           0xFFFF
+
+/* NIC0_QM0_LOCAL_RANGE_SIZE */
+#define NIC0_QM0_LOCAL_RANGE_SIZE_VAL_SHIFT                          0
+#define NIC0_QM0_LOCAL_RANGE_SIZE_VAL_MASK                           0xFFFF
+
+/* NIC0_QM0_CSMR_STRICT_PRIO_CFG */
+#define NIC0_QM0_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT                     0
+#define NIC0_QM0_CSMR_STRICT_PRIO_CFG_TYPE_MASK                      0x1
+
+/* NIC0_QM0_HBW_RD_RATE_LIM_CFG_1 */
+#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT                    0
+#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK                     0xFF
+#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT                      31
+#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_EN_MASK                       0x80000000
+
+/* NIC0_QM0_LBW_WR_RATE_LIM_CFG_0 */
+#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT               0
+#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK                0xFF
+#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT                     16
+#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_SAT_MASK                      0xFF0000
+
+/* NIC0_QM0_LBW_WR_RATE_LIM_CFG_1 */
+#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT                    0
+#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK                     0xFF
+#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT                      31
+#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_EN_MASK                       0x80000000
+
+/* NIC0_QM0_HBW_RD_RATE_LIM_CFG_0 */
+#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT               0
+#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK                0xFF
+#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT                     16
+#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_SAT_MASK                      0xFF0000
+
+/* NIC0_QM0_GLBL_AXCACHE */
+#define NIC0_QM0_GLBL_AXCACHE_AR_SHIFT                               0
+#define NIC0_QM0_GLBL_AXCACHE_AR_MASK                                0xF
+#define NIC0_QM0_GLBL_AXCACHE_AW_SHIFT                               16
+#define NIC0_QM0_GLBL_AXCACHE_AW_MASK                                0xF0000
+
+/* NIC0_QM0_IND_GW_APB_CFG */
+#define NIC0_QM0_IND_GW_APB_CFG_ADDR_SHIFT                           0
+#define NIC0_QM0_IND_GW_APB_CFG_ADDR_MASK                            0x7FFFFFFF
+#define NIC0_QM0_IND_GW_APB_CFG_CMD_SHIFT                            31
+#define NIC0_QM0_IND_GW_APB_CFG_CMD_MASK                             0x80000000
+
+/* NIC0_QM0_IND_GW_APB_WDATA */
+#define NIC0_QM0_IND_GW_APB_WDATA_VAL_SHIFT                          0
+#define NIC0_QM0_IND_GW_APB_WDATA_VAL_MASK                           0xFFFFFFFF
+
+/* NIC0_QM0_IND_GW_APB_RDATA */
+#define NIC0_QM0_IND_GW_APB_RDATA_VAL_SHIFT                          0
+#define NIC0_QM0_IND_GW_APB_RDATA_VAL_MASK                           0xFFFFFFFF
+
+/* NIC0_QM0_IND_GW_APB_STATUS */
+#define NIC0_QM0_IND_GW_APB_STATUS_RDY_SHIFT                         0
+#define NIC0_QM0_IND_GW_APB_STATUS_RDY_MASK                          0x1
+#define NIC0_QM0_IND_GW_APB_STATUS_ERR_SHIFT                         1
+#define NIC0_QM0_IND_GW_APB_STATUS_ERR_MASK                          0x2
+
+/* NIC0_QM0_GLBL_ERR_ADDR_LO */
+#define NIC0_QM0_GLBL_ERR_ADDR_LO_VAL_SHIFT                          0
+#define NIC0_QM0_GLBL_ERR_ADDR_LO_VAL_MASK                           0xFFFFFFFF
+
+/* NIC0_QM0_GLBL_ERR_ADDR_HI */
+#define NIC0_QM0_GLBL_ERR_ADDR_HI_VAL_SHIFT                          0
+#define NIC0_QM0_GLBL_ERR_ADDR_HI_VAL_MASK                           0xFFFFFFFF
+
+/* NIC0_QM0_GLBL_ERR_WDATA */
+#define NIC0_QM0_GLBL_ERR_WDATA_VAL_SHIFT                            0
+#define NIC0_QM0_GLBL_ERR_WDATA_VAL_MASK                             0xFFFFFFFF
+
+/* NIC0_QM0_GLBL_MEM_INIT_BUSY */
+#define NIC0_QM0_GLBL_MEM_INIT_BUSY_RBUF_SHIFT                       0
+#define NIC0_QM0_GLBL_MEM_INIT_BUSY_RBUF_MASK                        0xF
+
+#endif /* ASIC_REG_NIC0_QM0_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..7c97f4041b8e6369638de2e5630e661e0e3fe538
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC0_QM0_REGS_H_
+#define ASIC_REG_NIC0_QM0_REGS_H_
+
+/*
+ *****************************************
+ *   NIC0_QM0 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC0_QM0_GLBL_CFG0                                         0xCE0000
+
+#define mmNIC0_QM0_GLBL_CFG1                                         0xCE0004
+
+#define mmNIC0_QM0_GLBL_PROT                                         0xCE0008
+
+#define mmNIC0_QM0_GLBL_ERR_CFG                                      0xCE000C
+
+#define mmNIC0_QM0_GLBL_SECURE_PROPS_0                               0xCE0010
+
+#define mmNIC0_QM0_GLBL_SECURE_PROPS_1                               0xCE0014
+
+#define mmNIC0_QM0_GLBL_SECURE_PROPS_2                               0xCE0018
+
+#define mmNIC0_QM0_GLBL_SECURE_PROPS_3                               0xCE001C
+
+#define mmNIC0_QM0_GLBL_SECURE_PROPS_4                               0xCE0020
+
+#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0                           0xCE0024
+
+#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1                           0xCE0028
+
+#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2                           0xCE002C
+
+#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3                           0xCE0030
+
+#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4                           0xCE0034
+
+#define mmNIC0_QM0_GLBL_STS0                                         0xCE0038
+
+#define mmNIC0_QM0_GLBL_STS1_0                                       0xCE0040
+
+#define mmNIC0_QM0_GLBL_STS1_1                                       0xCE0044
+
+#define mmNIC0_QM0_GLBL_STS1_2                                       0xCE0048
+
+#define mmNIC0_QM0_GLBL_STS1_3                                       0xCE004C
+
+#define mmNIC0_QM0_GLBL_STS1_4                                       0xCE0050
+
+#define mmNIC0_QM0_GLBL_MSG_EN_0                                     0xCE0054
+
+#define mmNIC0_QM0_GLBL_MSG_EN_1                                     0xCE0058
+
+#define mmNIC0_QM0_GLBL_MSG_EN_2                                     0xCE005C
+
+#define mmNIC0_QM0_GLBL_MSG_EN_3                                     0xCE0060
+
+#define mmNIC0_QM0_GLBL_MSG_EN_4                                     0xCE0068
+
+#define mmNIC0_QM0_PQ_BASE_LO_0                                      0xCE0070
+
+#define mmNIC0_QM0_PQ_BASE_LO_1                                      0xCE0074
+
+#define mmNIC0_QM0_PQ_BASE_LO_2                                      0xCE0078
+
+#define mmNIC0_QM0_PQ_BASE_LO_3                                      0xCE007C
+
+#define mmNIC0_QM0_PQ_BASE_HI_0                                      0xCE0080
+
+#define mmNIC0_QM0_PQ_BASE_HI_1                                      0xCE0084
+
+#define mmNIC0_QM0_PQ_BASE_HI_2                                      0xCE0088
+
+#define mmNIC0_QM0_PQ_BASE_HI_3                                      0xCE008C
+
+#define mmNIC0_QM0_PQ_SIZE_0                                         0xCE0090
+
+#define mmNIC0_QM0_PQ_SIZE_1                                         0xCE0094
+
+#define mmNIC0_QM0_PQ_SIZE_2                                         0xCE0098
+
+#define mmNIC0_QM0_PQ_SIZE_3                                         0xCE009C
+
+#define mmNIC0_QM0_PQ_PI_0                                           0xCE00A0
+
+#define mmNIC0_QM0_PQ_PI_1                                           0xCE00A4
+
+#define mmNIC0_QM0_PQ_PI_2                                           0xCE00A8
+
+#define mmNIC0_QM0_PQ_PI_3                                           0xCE00AC
+
+#define mmNIC0_QM0_PQ_CI_0                                           0xCE00B0
+
+#define mmNIC0_QM0_PQ_CI_1                                           0xCE00B4
+
+#define mmNIC0_QM0_PQ_CI_2                                           0xCE00B8
+
+#define mmNIC0_QM0_PQ_CI_3                                           0xCE00BC
+
+#define mmNIC0_QM0_PQ_CFG0_0                                         0xCE00C0
+
+#define mmNIC0_QM0_PQ_CFG0_1                                         0xCE00C4
+
+#define mmNIC0_QM0_PQ_CFG0_2                                         0xCE00C8
+
+#define mmNIC0_QM0_PQ_CFG0_3                                         0xCE00CC
+
+#define mmNIC0_QM0_PQ_CFG1_0                                         0xCE00D0
+
+#define mmNIC0_QM0_PQ_CFG1_1                                         0xCE00D4
+
+#define mmNIC0_QM0_PQ_CFG1_2                                         0xCE00D8
+
+#define mmNIC0_QM0_PQ_CFG1_3                                         0xCE00DC
+
+#define mmNIC0_QM0_PQ_ARUSER_31_11_0                                 0xCE00E0
+
+#define mmNIC0_QM0_PQ_ARUSER_31_11_1                                 0xCE00E4
+
+#define mmNIC0_QM0_PQ_ARUSER_31_11_2                                 0xCE00E8
+
+#define mmNIC0_QM0_PQ_ARUSER_31_11_3                                 0xCE00EC
+
+#define mmNIC0_QM0_PQ_STS0_0                                         0xCE00F0
+
+#define mmNIC0_QM0_PQ_STS0_1                                         0xCE00F4
+
+#define mmNIC0_QM0_PQ_STS0_2                                         0xCE00F8
+
+#define mmNIC0_QM0_PQ_STS0_3                                         0xCE00FC
+
+#define mmNIC0_QM0_PQ_STS1_0                                         0xCE0100
+
+#define mmNIC0_QM0_PQ_STS1_1                                         0xCE0104
+
+#define mmNIC0_QM0_PQ_STS1_2                                         0xCE0108
+
+#define mmNIC0_QM0_PQ_STS1_3                                         0xCE010C
+
+#define mmNIC0_QM0_CQ_CFG0_0                                         0xCE0110
+
+#define mmNIC0_QM0_CQ_CFG0_1                                         0xCE0114
+
+#define mmNIC0_QM0_CQ_CFG0_2                                         0xCE0118
+
+#define mmNIC0_QM0_CQ_CFG0_3                                         0xCE011C
+
+#define mmNIC0_QM0_CQ_CFG0_4                                         0xCE0120
+
+#define mmNIC0_QM0_CQ_CFG1_0                                         0xCE0124
+
+#define mmNIC0_QM0_CQ_CFG1_1                                         0xCE0128
+
+#define mmNIC0_QM0_CQ_CFG1_2                                         0xCE012C
+
+#define mmNIC0_QM0_CQ_CFG1_3                                         0xCE0130
+
+#define mmNIC0_QM0_CQ_CFG1_4                                         0xCE0134
+
+#define mmNIC0_QM0_CQ_ARUSER_31_11_0                                 0xCE0138
+
+#define mmNIC0_QM0_CQ_ARUSER_31_11_1                                 0xCE013C
+
+#define mmNIC0_QM0_CQ_ARUSER_31_11_2                                 0xCE0140
+
+#define mmNIC0_QM0_CQ_ARUSER_31_11_3                                 0xCE0144
+
+#define mmNIC0_QM0_CQ_ARUSER_31_11_4                                 0xCE0148
+
+#define mmNIC0_QM0_CQ_STS0_0                                         0xCE014C
+
+#define mmNIC0_QM0_CQ_STS0_1                                         0xCE0150
+
+#define mmNIC0_QM0_CQ_STS0_2                                         0xCE0154
+
+#define mmNIC0_QM0_CQ_STS0_3                                         0xCE0158
+
+#define mmNIC0_QM0_CQ_STS0_4                                         0xCE015C
+
+#define mmNIC0_QM0_CQ_STS1_0                                         0xCE0160
+
+#define mmNIC0_QM0_CQ_STS1_1                                         0xCE0164
+
+#define mmNIC0_QM0_CQ_STS1_2                                         0xCE0168
+
+#define mmNIC0_QM0_CQ_STS1_3                                         0xCE016C
+
+#define mmNIC0_QM0_CQ_STS1_4                                         0xCE0170
+
+#define mmNIC0_QM0_CQ_PTR_LO_0                                       0xCE0174
+
+#define mmNIC0_QM0_CQ_PTR_HI_0                                       0xCE0178
+
+#define mmNIC0_QM0_CQ_TSIZE_0                                        0xCE017C
+
+#define mmNIC0_QM0_CQ_CTL_0                                          0xCE0180
+
+#define mmNIC0_QM0_CQ_PTR_LO_1                                       0xCE0184
+
+#define mmNIC0_QM0_CQ_PTR_HI_1                                       0xCE0188
+
+#define mmNIC0_QM0_CQ_TSIZE_1                                        0xCE018C
+
+#define mmNIC0_QM0_CQ_CTL_1                                          0xCE0190
+
+#define mmNIC0_QM0_CQ_PTR_LO_2                                       0xCE0194
+
+#define mmNIC0_QM0_CQ_PTR_HI_2                                       0xCE0198
+
+#define mmNIC0_QM0_CQ_TSIZE_2                                        0xCE019C
+
+#define mmNIC0_QM0_CQ_CTL_2                                          0xCE01A0
+
+#define mmNIC0_QM0_CQ_PTR_LO_3                                       0xCE01A4
+
+#define mmNIC0_QM0_CQ_PTR_HI_3                                       0xCE01A8
+
+#define mmNIC0_QM0_CQ_TSIZE_3                                        0xCE01AC
+
+#define mmNIC0_QM0_CQ_CTL_3                                          0xCE01B0
+
+#define mmNIC0_QM0_CQ_PTR_LO_4                                       0xCE01B4
+
+#define mmNIC0_QM0_CQ_PTR_HI_4                                       0xCE01B8
+
+#define mmNIC0_QM0_CQ_TSIZE_4                                        0xCE01BC
+
+#define mmNIC0_QM0_CQ_CTL_4                                          0xCE01C0
+
+#define mmNIC0_QM0_CQ_PTR_LO_STS_0                                   0xCE01C4
+
+#define mmNIC0_QM0_CQ_PTR_LO_STS_1                                   0xCE01C8
+
+#define mmNIC0_QM0_CQ_PTR_LO_STS_2                                   0xCE01CC
+
+#define mmNIC0_QM0_CQ_PTR_LO_STS_3                                   0xCE01D0
+
+#define mmNIC0_QM0_CQ_PTR_LO_STS_4                                   0xCE01D4
+
+#define mmNIC0_QM0_CQ_PTR_HI_STS_0                                   0xCE01D8
+
+#define mmNIC0_QM0_CQ_PTR_HI_STS_1                                   0xCE01DC
+
+#define mmNIC0_QM0_CQ_PTR_HI_STS_2                                   0xCE01E0
+
+#define mmNIC0_QM0_CQ_PTR_HI_STS_3                                   0xCE01E4
+
+#define mmNIC0_QM0_CQ_PTR_HI_STS_4                                   0xCE01E8
+
+#define mmNIC0_QM0_CQ_TSIZE_STS_0                                    0xCE01EC
+
+#define mmNIC0_QM0_CQ_TSIZE_STS_1                                    0xCE01F0
+
+#define mmNIC0_QM0_CQ_TSIZE_STS_2                                    0xCE01F4
+
+#define mmNIC0_QM0_CQ_TSIZE_STS_3                                    0xCE01F8
+
+#define mmNIC0_QM0_CQ_TSIZE_STS_4                                    0xCE01FC
+
+#define mmNIC0_QM0_CQ_CTL_STS_0                                      0xCE0200
+
+#define mmNIC0_QM0_CQ_CTL_STS_1                                      0xCE0204
+
+#define mmNIC0_QM0_CQ_CTL_STS_2                                      0xCE0208
+
+#define mmNIC0_QM0_CQ_CTL_STS_3                                      0xCE020C
+
+#define mmNIC0_QM0_CQ_CTL_STS_4                                      0xCE0210
+
+#define mmNIC0_QM0_CQ_IFIFO_CNT_0                                    0xCE0214
+
+#define mmNIC0_QM0_CQ_IFIFO_CNT_1                                    0xCE0218
+
+#define mmNIC0_QM0_CQ_IFIFO_CNT_2                                    0xCE021C
+
+#define mmNIC0_QM0_CQ_IFIFO_CNT_3                                    0xCE0220
+
+#define mmNIC0_QM0_CQ_IFIFO_CNT_4                                    0xCE0224
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0                            0xCE0228
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_1                            0xCE022C
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_2                            0xCE0230
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_3                            0xCE0234
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_4                            0xCE0238
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0                            0xCE023C
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_1                            0xCE0240
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_2                            0xCE0244
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_3                            0xCE0248
+
+#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_4                            0xCE024C
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0                            0xCE0250
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_1                            0xCE0254
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_2                            0xCE0258
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_3                            0xCE025C
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_4                            0xCE0260
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0                            0xCE0264
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_1                            0xCE0268
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_2                            0xCE026C
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_3                            0xCE0270
+
+#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_4                            0xCE0274
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0                            0xCE0278
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_1                            0xCE027C
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2                            0xCE0280
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_3                            0xCE0284
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_4                            0xCE0288
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0                            0xCE028C
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_1                            0xCE0290
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_2                            0xCE0294
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_3                            0xCE0298
+
+#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_4                            0xCE029C
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0                            0xCE02A0
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_1                            0xCE02A4
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_2                            0xCE02A8
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_3                            0xCE02AC
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_4                            0xCE02B0
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0                            0xCE02B4
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_1                            0xCE02B8
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_2                            0xCE02BC
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_3                            0xCE02C0
+
+#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_4                            0xCE02C4
+
+#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0                            0xCE02C8
+
+#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_1                            0xCE02CC
+
+#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_2                            0xCE02D0
+
+#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_3                            0xCE02D4
+
+#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_4                            0xCE02D8
+
+#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0                      0xCE02E0
+
+#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1                      0xCE02E4
+
+#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2                      0xCE02E8
+
+#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3                      0xCE02EC
+
+#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4                      0xCE02F0
+
+#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0                      0xCE02F4
+
+#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1                      0xCE02F8
+
+#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2                      0xCE02FC
+
+#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3                      0xCE0300
+
+#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4                      0xCE0304
+
+#define mmNIC0_QM0_CP_FENCE0_RDATA_0                                 0xCE0308
+
+#define mmNIC0_QM0_CP_FENCE0_RDATA_1                                 0xCE030C
+
+#define mmNIC0_QM0_CP_FENCE0_RDATA_2                                 0xCE0310
+
+#define mmNIC0_QM0_CP_FENCE0_RDATA_3                                 0xCE0314
+
+#define mmNIC0_QM0_CP_FENCE0_RDATA_4                                 0xCE0318
+
+#define mmNIC0_QM0_CP_FENCE1_RDATA_0                                 0xCE031C
+
+#define mmNIC0_QM0_CP_FENCE1_RDATA_1                                 0xCE0320
+
+#define mmNIC0_QM0_CP_FENCE1_RDATA_2                                 0xCE0324
+
+#define mmNIC0_QM0_CP_FENCE1_RDATA_3                                 0xCE0328
+
+#define mmNIC0_QM0_CP_FENCE1_RDATA_4                                 0xCE032C
+
+#define mmNIC0_QM0_CP_FENCE2_RDATA_0                                 0xCE0330
+
+#define mmNIC0_QM0_CP_FENCE2_RDATA_1                                 0xCE0334
+
+#define mmNIC0_QM0_CP_FENCE2_RDATA_2                                 0xCE0338
+
+#define mmNIC0_QM0_CP_FENCE2_RDATA_3                                 0xCE033C
+
+#define mmNIC0_QM0_CP_FENCE2_RDATA_4                                 0xCE0340
+
+#define mmNIC0_QM0_CP_FENCE3_RDATA_0                                 0xCE0344
+
+#define mmNIC0_QM0_CP_FENCE3_RDATA_1                                 0xCE0348
+
+#define mmNIC0_QM0_CP_FENCE3_RDATA_2                                 0xCE034C
+
+#define mmNIC0_QM0_CP_FENCE3_RDATA_3                                 0xCE0350
+
+#define mmNIC0_QM0_CP_FENCE3_RDATA_4                                 0xCE0354
+
+#define mmNIC0_QM0_CP_FENCE0_CNT_0                                   0xCE0358
+
+#define mmNIC0_QM0_CP_FENCE0_CNT_1                                   0xCE035C
+
+#define mmNIC0_QM0_CP_FENCE0_CNT_2                                   0xCE0360
+
+#define mmNIC0_QM0_CP_FENCE0_CNT_3                                   0xCE0364
+
+#define mmNIC0_QM0_CP_FENCE0_CNT_4                                   0xCE0368
+
+#define mmNIC0_QM0_CP_FENCE1_CNT_0                                   0xCE036C
+
+#define mmNIC0_QM0_CP_FENCE1_CNT_1                                   0xCE0370
+
+#define mmNIC0_QM0_CP_FENCE1_CNT_2                                   0xCE0374
+
+#define mmNIC0_QM0_CP_FENCE1_CNT_3                                   0xCE0378
+
+#define mmNIC0_QM0_CP_FENCE1_CNT_4                                   0xCE037C
+
+#define mmNIC0_QM0_CP_FENCE2_CNT_0                                   0xCE0380
+
+#define mmNIC0_QM0_CP_FENCE2_CNT_1                                   0xCE0384
+
+#define mmNIC0_QM0_CP_FENCE2_CNT_2                                   0xCE0388
+
+#define mmNIC0_QM0_CP_FENCE2_CNT_3                                   0xCE038C
+
+#define mmNIC0_QM0_CP_FENCE2_CNT_4                                   0xCE0390
+
+#define mmNIC0_QM0_CP_FENCE3_CNT_0                                   0xCE0394
+
+#define mmNIC0_QM0_CP_FENCE3_CNT_1                                   0xCE0398
+
+#define mmNIC0_QM0_CP_FENCE3_CNT_2                                   0xCE039C
+
+#define mmNIC0_QM0_CP_FENCE3_CNT_3                                   0xCE03A0
+
+#define mmNIC0_QM0_CP_FENCE3_CNT_4                                   0xCE03A4
+
+#define mmNIC0_QM0_CP_STS_0                                          0xCE03A8
+
+#define mmNIC0_QM0_CP_STS_1                                          0xCE03AC
+
+#define mmNIC0_QM0_CP_STS_2                                          0xCE03B0
+
+#define mmNIC0_QM0_CP_STS_3                                          0xCE03B4
+
+#define mmNIC0_QM0_CP_STS_4                                          0xCE03B8
+
+#define mmNIC0_QM0_CP_CURRENT_INST_LO_0                              0xCE03BC
+
+#define mmNIC0_QM0_CP_CURRENT_INST_LO_1                              0xCE03C0
+
+#define mmNIC0_QM0_CP_CURRENT_INST_LO_2                              0xCE03C4
+
+#define mmNIC0_QM0_CP_CURRENT_INST_LO_3                              0xCE03C8
+
+#define mmNIC0_QM0_CP_CURRENT_INST_LO_4                              0xCE03CC
+
+#define mmNIC0_QM0_CP_CURRENT_INST_HI_0                              0xCE03D0
+
+#define mmNIC0_QM0_CP_CURRENT_INST_HI_1                              0xCE03D4
+
+#define mmNIC0_QM0_CP_CURRENT_INST_HI_2                              0xCE03D8
+
+#define mmNIC0_QM0_CP_CURRENT_INST_HI_3                              0xCE03DC
+
+#define mmNIC0_QM0_CP_CURRENT_INST_HI_4                              0xCE03E0
+
+#define mmNIC0_QM0_CP_BARRIER_CFG_0                                  0xCE03F4
+
+#define mmNIC0_QM0_CP_BARRIER_CFG_1                                  0xCE03F8
+
+#define mmNIC0_QM0_CP_BARRIER_CFG_2                                  0xCE03FC
+
+#define mmNIC0_QM0_CP_BARRIER_CFG_3                                  0xCE0400
+
+#define mmNIC0_QM0_CP_BARRIER_CFG_4                                  0xCE0404
+
+#define mmNIC0_QM0_CP_DBG_0_0                                        0xCE0408
+
+#define mmNIC0_QM0_CP_DBG_0_1                                        0xCE040C
+
+#define mmNIC0_QM0_CP_DBG_0_2                                        0xCE0410
+
+#define mmNIC0_QM0_CP_DBG_0_3                                        0xCE0414
+
+#define mmNIC0_QM0_CP_DBG_0_4                                        0xCE0418
+
+#define mmNIC0_QM0_CP_ARUSER_31_11_0                                 0xCE041C
+
+#define mmNIC0_QM0_CP_ARUSER_31_11_1                                 0xCE0420
+
+#define mmNIC0_QM0_CP_ARUSER_31_11_2                                 0xCE0424
+
+#define mmNIC0_QM0_CP_ARUSER_31_11_3                                 0xCE0428
+
+#define mmNIC0_QM0_CP_ARUSER_31_11_4                                 0xCE042C
+
+#define mmNIC0_QM0_CP_AWUSER_31_11_0                                 0xCE0430
+
+#define mmNIC0_QM0_CP_AWUSER_31_11_1                                 0xCE0434
+
+#define mmNIC0_QM0_CP_AWUSER_31_11_2                                 0xCE0438
+
+#define mmNIC0_QM0_CP_AWUSER_31_11_3                                 0xCE043C
+
+#define mmNIC0_QM0_CP_AWUSER_31_11_4                                 0xCE0440
+
+#define mmNIC0_QM0_ARB_CFG_0                                         0xCE0A00
+
+#define mmNIC0_QM0_ARB_CHOISE_Q_PUSH                                 0xCE0A04
+
+#define mmNIC0_QM0_ARB_WRR_WEIGHT_0                                  0xCE0A08
+
+#define mmNIC0_QM0_ARB_WRR_WEIGHT_1                                  0xCE0A0C
+
+#define mmNIC0_QM0_ARB_WRR_WEIGHT_2                                  0xCE0A10
+
+#define mmNIC0_QM0_ARB_WRR_WEIGHT_3                                  0xCE0A14
+
+#define mmNIC0_QM0_ARB_CFG_1                                         0xCE0A18
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_0                              0xCE0A20
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_1                              0xCE0A24
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_2                              0xCE0A28
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_3                              0xCE0A2C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_4                              0xCE0A30
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_5                              0xCE0A34
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_6                              0xCE0A38
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_7                              0xCE0A3C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_8                              0xCE0A40
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_9                              0xCE0A44
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_10                             0xCE0A48
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_11                             0xCE0A4C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_12                             0xCE0A50
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_13                             0xCE0A54
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_14                             0xCE0A58
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_15                             0xCE0A5C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_16                             0xCE0A60
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_17                             0xCE0A64
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_18                             0xCE0A68
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_19                             0xCE0A6C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_20                             0xCE0A70
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_21                             0xCE0A74
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_22                             0xCE0A78
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_23                             0xCE0A7C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_24                             0xCE0A80
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_25                             0xCE0A84
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_26                             0xCE0A88
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_27                             0xCE0A8C
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_28                             0xCE0A90
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_29                             0xCE0A94
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_30                             0xCE0A98
+
+#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_31                             0xCE0A9C
+
+#define mmNIC0_QM0_ARB_MST_CRED_INC                                  0xCE0AA0
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_0                        0xCE0AA4
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_1                        0xCE0AA8
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_2                        0xCE0AAC
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_3                        0xCE0AB0
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_4                        0xCE0AB4
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_5                        0xCE0AB8
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_6                        0xCE0ABC
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_7                        0xCE0AC0
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_8                        0xCE0AC4
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_9                        0xCE0AC8
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_10                       0xCE0ACC
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_11                       0xCE0AD0
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_12                       0xCE0AD4
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_13                       0xCE0AD8
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_14                       0xCE0ADC
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_15                       0xCE0AE0
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_16                       0xCE0AE4
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_17                       0xCE0AE8
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_18                       0xCE0AEC
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_19                       0xCE0AF0
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_20                       0xCE0AF4
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_21                       0xCE0AF8
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_22                       0xCE0AFC
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23                       0xCE0B00
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_24                       0xCE0B04
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_25                       0xCE0B08
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_26                       0xCE0B0C
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_27                       0xCE0B10
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_28                       0xCE0B14
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_29                       0xCE0B18
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_30                       0xCE0B1C
+
+#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_31                       0xCE0B20
+
+#define mmNIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST                      0xCE0B28
+
+#define mmNIC0_QM0_ARB_MST_SLAVE_EN                                  0xCE0B2C
+
+#define mmNIC0_QM0_ARB_MST_QUIET_PER                                 0xCE0B34
+
+#define mmNIC0_QM0_ARB_SLV_CHOISE_WDT                                0xCE0B38
+
+#define mmNIC0_QM0_ARB_SLV_ID                                        0xCE0B3C
+
+#define mmNIC0_QM0_ARB_MSG_MAX_INFLIGHT                              0xCE0B44
+
+#define mmNIC0_QM0_ARB_MSG_AWUSER_31_11                              0xCE0B48
+
+#define mmNIC0_QM0_ARB_MSG_AWUSER_SEC_PROP                           0xCE0B4C
+
+#define mmNIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP                       0xCE0B50
+
+#define mmNIC0_QM0_ARB_BASE_LO                                       0xCE0B54
+
+#define mmNIC0_QM0_ARB_BASE_HI                                       0xCE0B58
+
+#define mmNIC0_QM0_ARB_STATE_STS                                     0xCE0B80
+
+#define mmNIC0_QM0_ARB_CHOISE_FULLNESS_STS                           0xCE0B84
+
+#define mmNIC0_QM0_ARB_MSG_STS                                       0xCE0B88
+
+#define mmNIC0_QM0_ARB_SLV_CHOISE_Q_HEAD                             0xCE0B8C
+
+#define mmNIC0_QM0_ARB_ERR_CAUSE                                     0xCE0B9C
+
+#define mmNIC0_QM0_ARB_ERR_MSG_EN                                    0xCE0BA0
+
+#define mmNIC0_QM0_ARB_ERR_STS_DRP                                   0xCE0BA8
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_0                                0xCE0BB0
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_1                                0xCE0BB4
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_2                                0xCE0BB8
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_3                                0xCE0BBC
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_4                                0xCE0BC0
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_5                                0xCE0BC4
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_6                                0xCE0BC8
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_7                                0xCE0BCC
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_8                                0xCE0BD0
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_9                                0xCE0BD4
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_10                               0xCE0BD8
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_11                               0xCE0BDC
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_12                               0xCE0BE0
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_13                               0xCE0BE4
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_14                               0xCE0BE8
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_15                               0xCE0BEC
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_16                               0xCE0BF0
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_17                               0xCE0BF4
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_18                               0xCE0BF8
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_19                               0xCE0BFC
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_20                               0xCE0C00
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_21                               0xCE0C04
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_22                               0xCE0C08
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_23                               0xCE0C0C
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_24                               0xCE0C10
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_25                               0xCE0C14
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_26                               0xCE0C18
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_27                               0xCE0C1C
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_28                               0xCE0C20
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_29                               0xCE0C24
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_30                               0xCE0C28
+
+#define mmNIC0_QM0_ARB_MST_CRED_STS_31                               0xCE0C2C
+
+#define mmNIC0_QM0_CGM_CFG                                           0xCE0C70
+
+#define mmNIC0_QM0_CGM_STS                                           0xCE0C74
+
+#define mmNIC0_QM0_CGM_CFG1                                          0xCE0C78
+
+#define mmNIC0_QM0_LOCAL_RANGE_BASE                                  0xCE0C80
+
+#define mmNIC0_QM0_LOCAL_RANGE_SIZE                                  0xCE0C84
+
+#define mmNIC0_QM0_CSMR_STRICT_PRIO_CFG                              0xCE0C90
+
+#define mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_1                             0xCE0C94
+
+#define mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_0                             0xCE0C98
+
+#define mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_1                             0xCE0C9C
+
+#define mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_0                             0xCE0CA0
+
+#define mmNIC0_QM0_GLBL_AXCACHE                                      0xCE0CA4
+
+#define mmNIC0_QM0_IND_GW_APB_CFG                                    0xCE0CB0
+
+#define mmNIC0_QM0_IND_GW_APB_WDATA                                  0xCE0CB4
+
+#define mmNIC0_QM0_IND_GW_APB_RDATA                                  0xCE0CB8
+
+#define mmNIC0_QM0_IND_GW_APB_STATUS                                 0xCE0CBC
+
+#define mmNIC0_QM0_GLBL_ERR_ADDR_LO                                  0xCE0CD0
+
+#define mmNIC0_QM0_GLBL_ERR_ADDR_HI                                  0xCE0CD4
+
+#define mmNIC0_QM0_GLBL_ERR_WDATA                                    0xCE0CD8
+
+#define mmNIC0_QM0_GLBL_MEM_INIT_BUSY                                0xCE0D00
+
+#endif /* ASIC_REG_NIC0_QM0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..fe96c575b5c61a1507f7d167152c7490375406a9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC0_QM1_REGS_H_
+#define ASIC_REG_NIC0_QM1_REGS_H_
+
+/*
+ *****************************************
+ *   NIC0_QM1 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC0_QM1_GLBL_CFG0                                         0xCE2000
+
+#define mmNIC0_QM1_GLBL_CFG1                                         0xCE2004
+
+#define mmNIC0_QM1_GLBL_PROT                                         0xCE2008
+
+#define mmNIC0_QM1_GLBL_ERR_CFG                                      0xCE200C
+
+#define mmNIC0_QM1_GLBL_SECURE_PROPS_0                               0xCE2010
+
+#define mmNIC0_QM1_GLBL_SECURE_PROPS_1                               0xCE2014
+
+#define mmNIC0_QM1_GLBL_SECURE_PROPS_2                               0xCE2018
+
+#define mmNIC0_QM1_GLBL_SECURE_PROPS_3                               0xCE201C
+
+#define mmNIC0_QM1_GLBL_SECURE_PROPS_4                               0xCE2020
+
+#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0                           0xCE2024
+
+#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1                           0xCE2028
+
+#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2                           0xCE202C
+
+#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3                           0xCE2030
+
+#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4                           0xCE2034
+
+#define mmNIC0_QM1_GLBL_STS0                                         0xCE2038
+
+#define mmNIC0_QM1_GLBL_STS1_0                                       0xCE2040
+
+#define mmNIC0_QM1_GLBL_STS1_1                                       0xCE2044
+
+#define mmNIC0_QM1_GLBL_STS1_2                                       0xCE2048
+
+#define mmNIC0_QM1_GLBL_STS1_3                                       0xCE204C
+
+#define mmNIC0_QM1_GLBL_STS1_4                                       0xCE2050
+
+#define mmNIC0_QM1_GLBL_MSG_EN_0                                     0xCE2054
+
+#define mmNIC0_QM1_GLBL_MSG_EN_1                                     0xCE2058
+
+#define mmNIC0_QM1_GLBL_MSG_EN_2                                     0xCE205C
+
+#define mmNIC0_QM1_GLBL_MSG_EN_3                                     0xCE2060
+
+#define mmNIC0_QM1_GLBL_MSG_EN_4                                     0xCE2068
+
+#define mmNIC0_QM1_PQ_BASE_LO_0                                      0xCE2070
+
+#define mmNIC0_QM1_PQ_BASE_LO_1                                      0xCE2074
+
+#define mmNIC0_QM1_PQ_BASE_LO_2                                      0xCE2078
+
+#define mmNIC0_QM1_PQ_BASE_LO_3                                      0xCE207C
+
+#define mmNIC0_QM1_PQ_BASE_HI_0                                      0xCE2080
+
+#define mmNIC0_QM1_PQ_BASE_HI_1                                      0xCE2084
+
+#define mmNIC0_QM1_PQ_BASE_HI_2                                      0xCE2088
+
+#define mmNIC0_QM1_PQ_BASE_HI_3                                      0xCE208C
+
+#define mmNIC0_QM1_PQ_SIZE_0                                         0xCE2090
+
+#define mmNIC0_QM1_PQ_SIZE_1                                         0xCE2094
+
+#define mmNIC0_QM1_PQ_SIZE_2                                         0xCE2098
+
+#define mmNIC0_QM1_PQ_SIZE_3                                         0xCE209C
+
+#define mmNIC0_QM1_PQ_PI_0                                           0xCE20A0
+
+#define mmNIC0_QM1_PQ_PI_1                                           0xCE20A4
+
+#define mmNIC0_QM1_PQ_PI_2                                           0xCE20A8
+
+#define mmNIC0_QM1_PQ_PI_3                                           0xCE20AC
+
+#define mmNIC0_QM1_PQ_CI_0                                           0xCE20B0
+
+#define mmNIC0_QM1_PQ_CI_1                                           0xCE20B4
+
+#define mmNIC0_QM1_PQ_CI_2                                           0xCE20B8
+
+#define mmNIC0_QM1_PQ_CI_3                                           0xCE20BC
+
+#define mmNIC0_QM1_PQ_CFG0_0                                         0xCE20C0
+
+#define mmNIC0_QM1_PQ_CFG0_1                                         0xCE20C4
+
+#define mmNIC0_QM1_PQ_CFG0_2                                         0xCE20C8
+
+#define mmNIC0_QM1_PQ_CFG0_3                                         0xCE20CC
+
+#define mmNIC0_QM1_PQ_CFG1_0                                         0xCE20D0
+
+#define mmNIC0_QM1_PQ_CFG1_1                                         0xCE20D4
+
+#define mmNIC0_QM1_PQ_CFG1_2                                         0xCE20D8
+
+#define mmNIC0_QM1_PQ_CFG1_3                                         0xCE20DC
+
+#define mmNIC0_QM1_PQ_ARUSER_31_11_0                                 0xCE20E0
+
+#define mmNIC0_QM1_PQ_ARUSER_31_11_1                                 0xCE20E4
+
+#define mmNIC0_QM1_PQ_ARUSER_31_11_2                                 0xCE20E8
+
+#define mmNIC0_QM1_PQ_ARUSER_31_11_3                                 0xCE20EC
+
+#define mmNIC0_QM1_PQ_STS0_0                                         0xCE20F0
+
+#define mmNIC0_QM1_PQ_STS0_1                                         0xCE20F4
+
+#define mmNIC0_QM1_PQ_STS0_2                                         0xCE20F8
+
+#define mmNIC0_QM1_PQ_STS0_3                                         0xCE20FC
+
+#define mmNIC0_QM1_PQ_STS1_0                                         0xCE2100
+
+#define mmNIC0_QM1_PQ_STS1_1                                         0xCE2104
+
+#define mmNIC0_QM1_PQ_STS1_2                                         0xCE2108
+
+#define mmNIC0_QM1_PQ_STS1_3                                         0xCE210C
+
+#define mmNIC0_QM1_CQ_CFG0_0                                         0xCE2110
+
+#define mmNIC0_QM1_CQ_CFG0_1                                         0xCE2114
+
+#define mmNIC0_QM1_CQ_CFG0_2                                         0xCE2118
+
+#define mmNIC0_QM1_CQ_CFG0_3                                         0xCE211C
+
+#define mmNIC0_QM1_CQ_CFG0_4                                         0xCE2120
+
+#define mmNIC0_QM1_CQ_CFG1_0                                         0xCE2124
+
+#define mmNIC0_QM1_CQ_CFG1_1                                         0xCE2128
+
+#define mmNIC0_QM1_CQ_CFG1_2                                         0xCE212C
+
+#define mmNIC0_QM1_CQ_CFG1_3                                         0xCE2130
+
+#define mmNIC0_QM1_CQ_CFG1_4                                         0xCE2134
+
+#define mmNIC0_QM1_CQ_ARUSER_31_11_0                                 0xCE2138
+
+#define mmNIC0_QM1_CQ_ARUSER_31_11_1                                 0xCE213C
+
+#define mmNIC0_QM1_CQ_ARUSER_31_11_2                                 0xCE2140
+
+#define mmNIC0_QM1_CQ_ARUSER_31_11_3                                 0xCE2144
+
+#define mmNIC0_QM1_CQ_ARUSER_31_11_4                                 0xCE2148
+
+#define mmNIC0_QM1_CQ_STS0_0                                         0xCE214C
+
+#define mmNIC0_QM1_CQ_STS0_1                                         0xCE2150
+
+#define mmNIC0_QM1_CQ_STS0_2                                         0xCE2154
+
+#define mmNIC0_QM1_CQ_STS0_3                                         0xCE2158
+
+#define mmNIC0_QM1_CQ_STS0_4                                         0xCE215C
+
+#define mmNIC0_QM1_CQ_STS1_0                                         0xCE2160
+
+#define mmNIC0_QM1_CQ_STS1_1                                         0xCE2164
+
+#define mmNIC0_QM1_CQ_STS1_2                                         0xCE2168
+
+#define mmNIC0_QM1_CQ_STS1_3                                         0xCE216C
+
+#define mmNIC0_QM1_CQ_STS1_4                                         0xCE2170
+
+#define mmNIC0_QM1_CQ_PTR_LO_0                                       0xCE2174
+
+#define mmNIC0_QM1_CQ_PTR_HI_0                                       0xCE2178
+
+#define mmNIC0_QM1_CQ_TSIZE_0                                        0xCE217C
+
+#define mmNIC0_QM1_CQ_CTL_0                                          0xCE2180
+
+#define mmNIC0_QM1_CQ_PTR_LO_1                                       0xCE2184
+
+#define mmNIC0_QM1_CQ_PTR_HI_1                                       0xCE2188
+
+#define mmNIC0_QM1_CQ_TSIZE_1                                        0xCE218C
+
+#define mmNIC0_QM1_CQ_CTL_1                                          0xCE2190
+
+#define mmNIC0_QM1_CQ_PTR_LO_2                                       0xCE2194
+
+#define mmNIC0_QM1_CQ_PTR_HI_2                                       0xCE2198
+
+#define mmNIC0_QM1_CQ_TSIZE_2                                        0xCE219C
+
+#define mmNIC0_QM1_CQ_CTL_2                                          0xCE21A0
+
+#define mmNIC0_QM1_CQ_PTR_LO_3                                       0xCE21A4
+
+#define mmNIC0_QM1_CQ_PTR_HI_3                                       0xCE21A8
+
+#define mmNIC0_QM1_CQ_TSIZE_3                                        0xCE21AC
+
+#define mmNIC0_QM1_CQ_CTL_3                                          0xCE21B0
+
+#define mmNIC0_QM1_CQ_PTR_LO_4                                       0xCE21B4
+
+#define mmNIC0_QM1_CQ_PTR_HI_4                                       0xCE21B8
+
+#define mmNIC0_QM1_CQ_TSIZE_4                                        0xCE21BC
+
+#define mmNIC0_QM1_CQ_CTL_4                                          0xCE21C0
+
+#define mmNIC0_QM1_CQ_PTR_LO_STS_0                                   0xCE21C4
+
+#define mmNIC0_QM1_CQ_PTR_LO_STS_1                                   0xCE21C8
+
+#define mmNIC0_QM1_CQ_PTR_LO_STS_2                                   0xCE21CC
+
+#define mmNIC0_QM1_CQ_PTR_LO_STS_3                                   0xCE21D0
+
+#define mmNIC0_QM1_CQ_PTR_LO_STS_4                                   0xCE21D4
+
+#define mmNIC0_QM1_CQ_PTR_HI_STS_0                                   0xCE21D8
+
+#define mmNIC0_QM1_CQ_PTR_HI_STS_1                                   0xCE21DC
+
+#define mmNIC0_QM1_CQ_PTR_HI_STS_2                                   0xCE21E0
+
+#define mmNIC0_QM1_CQ_PTR_HI_STS_3                                   0xCE21E4
+
+#define mmNIC0_QM1_CQ_PTR_HI_STS_4                                   0xCE21E8
+
+#define mmNIC0_QM1_CQ_TSIZE_STS_0                                    0xCE21EC
+
+#define mmNIC0_QM1_CQ_TSIZE_STS_1                                    0xCE21F0
+
+#define mmNIC0_QM1_CQ_TSIZE_STS_2                                    0xCE21F4
+
+#define mmNIC0_QM1_CQ_TSIZE_STS_3                                    0xCE21F8
+
+#define mmNIC0_QM1_CQ_TSIZE_STS_4                                    0xCE21FC
+
+#define mmNIC0_QM1_CQ_CTL_STS_0                                      0xCE2200
+
+#define mmNIC0_QM1_CQ_CTL_STS_1                                      0xCE2204
+
+#define mmNIC0_QM1_CQ_CTL_STS_2                                      0xCE2208
+
+#define mmNIC0_QM1_CQ_CTL_STS_3                                      0xCE220C
+
+#define mmNIC0_QM1_CQ_CTL_STS_4                                      0xCE2210
+
+#define mmNIC0_QM1_CQ_IFIFO_CNT_0                                    0xCE2214
+
+#define mmNIC0_QM1_CQ_IFIFO_CNT_1                                    0xCE2218
+
+#define mmNIC0_QM1_CQ_IFIFO_CNT_2                                    0xCE221C
+
+#define mmNIC0_QM1_CQ_IFIFO_CNT_3                                    0xCE2220
+
+#define mmNIC0_QM1_CQ_IFIFO_CNT_4                                    0xCE2224
+
+#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_0                            0xCE2228
+
+#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_1                            0xCE222C
+
+#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_2                            0xCE2230
+
+#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_3                            0xCE2234
+
+#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_4                            0xCE2238
+
+#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_0                            0xCE223C
+
+#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_1                            0xCE2240
+
+#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_2                            0xCE2244
+
+#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_3                            0xCE2248
+
+#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_4                            0xCE224C
+
+#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_0                            0xCE2250
+
+#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_1                            0xCE2254
+
+#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_2                            0xCE2258
+
+#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_3                            0xCE225C
+
+#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_4                            0xCE2260
+
+#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_0                            0xCE2264
+
+#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_1                            0xCE2268
+
+#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_2                            0xCE226C
+
+#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_3                            0xCE2270
+
+#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_4                            0xCE2274
+
+#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_0                            0xCE2278
+
+#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_1                            0xCE227C
+
+#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2                            0xCE2280
+
+#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_3                            0xCE2284
+
+#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_4                            0xCE2288
+
+#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_0                            0xCE228C
+
+#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_1                            0xCE2290
+
+#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_2                            0xCE2294
+
+#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_3                            0xCE2298
+
+#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_4                            0xCE229C
+
+#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_0                            0xCE22A0
+
+#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_1                            0xCE22A4
+
+#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_2                            0xCE22A8
+
+#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_3                            0xCE22AC
+
+#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_4                            0xCE22B0
+
+#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_0                            0xCE22B4
+
+#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_1                            0xCE22B8
+
+#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_2                            0xCE22BC
+
+#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_3                            0xCE22C0
+
+#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_4                            0xCE22C4
+
+#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_0                            0xCE22C8
+
+#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_1                            0xCE22CC
+
+#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_2                            0xCE22D0
+
+#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_3                            0xCE22D4
+
+#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_4                            0xCE22D8
+
+#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0                      0xCE22E0
+
+#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1                      0xCE22E4
+
+#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2                      0xCE22E8
+
+#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3                      0xCE22EC
+
+#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4                      0xCE22F0
+
+#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0                      0xCE22F4
+
+#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1                      0xCE22F8
+
+#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2                      0xCE22FC
+
+#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3                      0xCE2300
+
+#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4                      0xCE2304
+
+#define mmNIC0_QM1_CP_FENCE0_RDATA_0                                 0xCE2308
+
+#define mmNIC0_QM1_CP_FENCE0_RDATA_1                                 0xCE230C
+
+#define mmNIC0_QM1_CP_FENCE0_RDATA_2                                 0xCE2310
+
+#define mmNIC0_QM1_CP_FENCE0_RDATA_3                                 0xCE2314
+
+#define mmNIC0_QM1_CP_FENCE0_RDATA_4                                 0xCE2318
+
+#define mmNIC0_QM1_CP_FENCE1_RDATA_0                                 0xCE231C
+
+#define mmNIC0_QM1_CP_FENCE1_RDATA_1                                 0xCE2320
+
+#define mmNIC0_QM1_CP_FENCE1_RDATA_2                                 0xCE2324
+
+#define mmNIC0_QM1_CP_FENCE1_RDATA_3                                 0xCE2328
+
+#define mmNIC0_QM1_CP_FENCE1_RDATA_4                                 0xCE232C
+
+#define mmNIC0_QM1_CP_FENCE2_RDATA_0                                 0xCE2330
+
+#define mmNIC0_QM1_CP_FENCE2_RDATA_1                                 0xCE2334
+
+#define mmNIC0_QM1_CP_FENCE2_RDATA_2                                 0xCE2338
+
+#define mmNIC0_QM1_CP_FENCE2_RDATA_3                                 0xCE233C
+
+#define mmNIC0_QM1_CP_FENCE2_RDATA_4                                 0xCE2340
+
+#define mmNIC0_QM1_CP_FENCE3_RDATA_0                                 0xCE2344
+
+#define mmNIC0_QM1_CP_FENCE3_RDATA_1                                 0xCE2348
+
+#define mmNIC0_QM1_CP_FENCE3_RDATA_2                                 0xCE234C
+
+#define mmNIC0_QM1_CP_FENCE3_RDATA_3                                 0xCE2350
+
+#define mmNIC0_QM1_CP_FENCE3_RDATA_4                                 0xCE2354
+
+#define mmNIC0_QM1_CP_FENCE0_CNT_0                                   0xCE2358
+
+#define mmNIC0_QM1_CP_FENCE0_CNT_1                                   0xCE235C
+
+#define mmNIC0_QM1_CP_FENCE0_CNT_2                                   0xCE2360
+
+#define mmNIC0_QM1_CP_FENCE0_CNT_3                                   0xCE2364
+
+#define mmNIC0_QM1_CP_FENCE0_CNT_4                                   0xCE2368
+
+#define mmNIC0_QM1_CP_FENCE1_CNT_0                                   0xCE236C
+
+#define mmNIC0_QM1_CP_FENCE1_CNT_1                                   0xCE2370
+
+#define mmNIC0_QM1_CP_FENCE1_CNT_2                                   0xCE2374
+
+#define mmNIC0_QM1_CP_FENCE1_CNT_3                                   0xCE2378
+
+#define mmNIC0_QM1_CP_FENCE1_CNT_4                                   0xCE237C
+
+#define mmNIC0_QM1_CP_FENCE2_CNT_0                                   0xCE2380
+
+#define mmNIC0_QM1_CP_FENCE2_CNT_1                                   0xCE2384
+
+#define mmNIC0_QM1_CP_FENCE2_CNT_2                                   0xCE2388
+
+#define mmNIC0_QM1_CP_FENCE2_CNT_3                                   0xCE238C
+
+#define mmNIC0_QM1_CP_FENCE2_CNT_4                                   0xCE2390
+
+#define mmNIC0_QM1_CP_FENCE3_CNT_0                                   0xCE2394
+
+#define mmNIC0_QM1_CP_FENCE3_CNT_1                                   0xCE2398
+
+#define mmNIC0_QM1_CP_FENCE3_CNT_2                                   0xCE239C
+
+#define mmNIC0_QM1_CP_FENCE3_CNT_3                                   0xCE23A0
+
+#define mmNIC0_QM1_CP_FENCE3_CNT_4                                   0xCE23A4
+
+#define mmNIC0_QM1_CP_STS_0                                          0xCE23A8
+
+#define mmNIC0_QM1_CP_STS_1                                          0xCE23AC
+
+#define mmNIC0_QM1_CP_STS_2                                          0xCE23B0
+
+#define mmNIC0_QM1_CP_STS_3                                          0xCE23B4
+
+#define mmNIC0_QM1_CP_STS_4                                          0xCE23B8
+
+#define mmNIC0_QM1_CP_CURRENT_INST_LO_0                              0xCE23BC
+
+#define mmNIC0_QM1_CP_CURRENT_INST_LO_1                              0xCE23C0
+
+#define mmNIC0_QM1_CP_CURRENT_INST_LO_2                              0xCE23C4
+
+#define mmNIC0_QM1_CP_CURRENT_INST_LO_3                              0xCE23C8
+
+#define mmNIC0_QM1_CP_CURRENT_INST_LO_4                              0xCE23CC
+
+#define mmNIC0_QM1_CP_CURRENT_INST_HI_0                              0xCE23D0
+
+#define mmNIC0_QM1_CP_CURRENT_INST_HI_1                              0xCE23D4
+
+#define mmNIC0_QM1_CP_CURRENT_INST_HI_2                              0xCE23D8
+
+#define mmNIC0_QM1_CP_CURRENT_INST_HI_3                              0xCE23DC
+
+#define mmNIC0_QM1_CP_CURRENT_INST_HI_4                              0xCE23E0
+
+#define mmNIC0_QM1_CP_BARRIER_CFG_0                                  0xCE23F4
+
+#define mmNIC0_QM1_CP_BARRIER_CFG_1                                  0xCE23F8
+
+#define mmNIC0_QM1_CP_BARRIER_CFG_2                                  0xCE23FC
+
+#define mmNIC0_QM1_CP_BARRIER_CFG_3                                  0xCE2400
+
+#define mmNIC0_QM1_CP_BARRIER_CFG_4                                  0xCE2404
+
+#define mmNIC0_QM1_CP_DBG_0_0                                        0xCE2408
+
+#define mmNIC0_QM1_CP_DBG_0_1                                        0xCE240C
+
+#define mmNIC0_QM1_CP_DBG_0_2                                        0xCE2410
+
+#define mmNIC0_QM1_CP_DBG_0_3                                        0xCE2414
+
+#define mmNIC0_QM1_CP_DBG_0_4                                        0xCE2418
+
+#define mmNIC0_QM1_CP_ARUSER_31_11_0                                 0xCE241C
+
+#define mmNIC0_QM1_CP_ARUSER_31_11_1                                 0xCE2420
+
+#define mmNIC0_QM1_CP_ARUSER_31_11_2                                 0xCE2424
+
+#define mmNIC0_QM1_CP_ARUSER_31_11_3                                 0xCE2428
+
+#define mmNIC0_QM1_CP_ARUSER_31_11_4                                 0xCE242C
+
+#define mmNIC0_QM1_CP_AWUSER_31_11_0                                 0xCE2430
+
+#define mmNIC0_QM1_CP_AWUSER_31_11_1                                 0xCE2434
+
+#define mmNIC0_QM1_CP_AWUSER_31_11_2                                 0xCE2438
+
+#define mmNIC0_QM1_CP_AWUSER_31_11_3                                 0xCE243C
+
+#define mmNIC0_QM1_CP_AWUSER_31_11_4                                 0xCE2440
+
+#define mmNIC0_QM1_ARB_CFG_0                                         0xCE2A00
+
+#define mmNIC0_QM1_ARB_CHOISE_Q_PUSH                                 0xCE2A04
+
+#define mmNIC0_QM1_ARB_WRR_WEIGHT_0                                  0xCE2A08
+
+#define mmNIC0_QM1_ARB_WRR_WEIGHT_1                                  0xCE2A0C
+
+#define mmNIC0_QM1_ARB_WRR_WEIGHT_2                                  0xCE2A10
+
+#define mmNIC0_QM1_ARB_WRR_WEIGHT_3                                  0xCE2A14
+
+#define mmNIC0_QM1_ARB_CFG_1                                         0xCE2A18
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_0                              0xCE2A20
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_1                              0xCE2A24
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_2                              0xCE2A28
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_3                              0xCE2A2C
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_4                              0xCE2A30
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_5                              0xCE2A34
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_6                              0xCE2A38
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_7                              0xCE2A3C
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_8                              0xCE2A40
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_9                              0xCE2A44
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_10                             0xCE2A48
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_11                             0xCE2A4C
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_12                             0xCE2A50
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_13                             0xCE2A54
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_14                             0xCE2A58
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_15                             0xCE2A5C
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_16                             0xCE2A60
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_17                             0xCE2A64
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_18                             0xCE2A68
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_19                             0xCE2A6C
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_20                             0xCE2A70
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_21                             0xCE2A74
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_22                             0xCE2A78
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_23                             0xCE2A7C
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_24                             0xCE2A80
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_25                             0xCE2A84
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_26                             0xCE2A88
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_27                             0xCE2A8C
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_28                             0xCE2A90
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_29                             0xCE2A94
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_30                             0xCE2A98
+
+#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_31                             0xCE2A9C
+
+#define mmNIC0_QM1_ARB_MST_CRED_INC                                  0xCE2AA0
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_0                        0xCE2AA4
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_1                        0xCE2AA8
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_2                        0xCE2AAC
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_3                        0xCE2AB0
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_4                        0xCE2AB4
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_5                        0xCE2AB8
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_6                        0xCE2ABC
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_7                        0xCE2AC0
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_8                        0xCE2AC4
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_9                        0xCE2AC8
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_10                       0xCE2ACC
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_11                       0xCE2AD0
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_12                       0xCE2AD4
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_13                       0xCE2AD8
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_14                       0xCE2ADC
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_15                       0xCE2AE0
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_16                       0xCE2AE4
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_17                       0xCE2AE8
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_18                       0xCE2AEC
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_19                       0xCE2AF0
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_20                       0xCE2AF4
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_21                       0xCE2AF8
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_22                       0xCE2AFC
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23                       0xCE2B00
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_24                       0xCE2B04
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_25                       0xCE2B08
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_26                       0xCE2B0C
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_27                       0xCE2B10
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_28                       0xCE2B14
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_29                       0xCE2B18
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_30                       0xCE2B1C
+
+#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_31                       0xCE2B20
+
+#define mmNIC0_QM1_ARB_SLV_MASTER_INC_CRED_OFST                      0xCE2B28
+
+#define mmNIC0_QM1_ARB_MST_SLAVE_EN                                  0xCE2B2C
+
+#define mmNIC0_QM1_ARB_MST_QUIET_PER                                 0xCE2B34
+
+#define mmNIC0_QM1_ARB_SLV_CHOISE_WDT                                0xCE2B38
+
+#define mmNIC0_QM1_ARB_SLV_ID                                        0xCE2B3C
+
+#define mmNIC0_QM1_ARB_MSG_MAX_INFLIGHT                              0xCE2B44
+
+#define mmNIC0_QM1_ARB_MSG_AWUSER_31_11                              0xCE2B48
+
+#define mmNIC0_QM1_ARB_MSG_AWUSER_SEC_PROP                           0xCE2B4C
+
+#define mmNIC0_QM1_ARB_MSG_AWUSER_NON_SEC_PROP                       0xCE2B50
+
+#define mmNIC0_QM1_ARB_BASE_LO                                       0xCE2B54
+
+#define mmNIC0_QM1_ARB_BASE_HI                                       0xCE2B58
+
+#define mmNIC0_QM1_ARB_STATE_STS                                     0xCE2B80
+
+#define mmNIC0_QM1_ARB_CHOISE_FULLNESS_STS                           0xCE2B84
+
+#define mmNIC0_QM1_ARB_MSG_STS                                       0xCE2B88
+
+#define mmNIC0_QM1_ARB_SLV_CHOISE_Q_HEAD                             0xCE2B8C
+
+#define mmNIC0_QM1_ARB_ERR_CAUSE                                     0xCE2B9C
+
+#define mmNIC0_QM1_ARB_ERR_MSG_EN                                    0xCE2BA0
+
+#define mmNIC0_QM1_ARB_ERR_STS_DRP                                   0xCE2BA8
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_0                                0xCE2BB0
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_1                                0xCE2BB4
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_2                                0xCE2BB8
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_3                                0xCE2BBC
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_4                                0xCE2BC0
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_5                                0xCE2BC4
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_6                                0xCE2BC8
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_7                                0xCE2BCC
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_8                                0xCE2BD0
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_9                                0xCE2BD4
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_10                               0xCE2BD8
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_11                               0xCE2BDC
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_12                               0xCE2BE0
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_13                               0xCE2BE4
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_14                               0xCE2BE8
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_15                               0xCE2BEC
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_16                               0xCE2BF0
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_17                               0xCE2BF4
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_18                               0xCE2BF8
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_19                               0xCE2BFC
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_20                               0xCE2C00
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_21                               0xCE2C04
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_22                               0xCE2C08
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_23                               0xCE2C0C
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_24                               0xCE2C10
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_25                               0xCE2C14
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_26                               0xCE2C18
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_27                               0xCE2C1C
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_28                               0xCE2C20
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_29                               0xCE2C24
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_30                               0xCE2C28
+
+#define mmNIC0_QM1_ARB_MST_CRED_STS_31                               0xCE2C2C
+
+#define mmNIC0_QM1_CGM_CFG                                           0xCE2C70
+
+#define mmNIC0_QM1_CGM_STS                                           0xCE2C74
+
+#define mmNIC0_QM1_CGM_CFG1                                          0xCE2C78
+
+#define mmNIC0_QM1_LOCAL_RANGE_BASE                                  0xCE2C80
+
+#define mmNIC0_QM1_LOCAL_RANGE_SIZE                                  0xCE2C84
+
+#define mmNIC0_QM1_CSMR_STRICT_PRIO_CFG                              0xCE2C90
+
+#define mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_1                             0xCE2C94
+
+#define mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_0                             0xCE2C98
+
+#define mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_1                             0xCE2C9C
+
+#define mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_0                             0xCE2CA0
+
+#define mmNIC0_QM1_GLBL_AXCACHE                                      0xCE2CA4
+
+#define mmNIC0_QM1_IND_GW_APB_CFG                                    0xCE2CB0
+
+#define mmNIC0_QM1_IND_GW_APB_WDATA                                  0xCE2CB4
+
+#define mmNIC0_QM1_IND_GW_APB_RDATA                                  0xCE2CB8
+
+#define mmNIC0_QM1_IND_GW_APB_STATUS                                 0xCE2CBC
+
+#define mmNIC0_QM1_GLBL_ERR_ADDR_LO                                  0xCE2CD0
+
+#define mmNIC0_QM1_GLBL_ERR_ADDR_HI                                  0xCE2CD4
+
+#define mmNIC0_QM1_GLBL_ERR_WDATA                                    0xCE2CD8
+
+#define mmNIC0_QM1_GLBL_MEM_INIT_BUSY                                0xCE2D00
+
+#endif /* ASIC_REG_NIC0_QM1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..0d1caf057ad03f4f0b229b004fa6045632f95f4f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC1_QM0_REGS_H_
+#define ASIC_REG_NIC1_QM0_REGS_H_
+
+/*
+ *****************************************
+ *   NIC1_QM0 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC1_QM0_GLBL_CFG0                                         0xD20000
+
+#define mmNIC1_QM0_GLBL_CFG1                                         0xD20004
+
+#define mmNIC1_QM0_GLBL_PROT                                         0xD20008
+
+#define mmNIC1_QM0_GLBL_ERR_CFG                                      0xD2000C
+
+#define mmNIC1_QM0_GLBL_SECURE_PROPS_0                               0xD20010
+
+#define mmNIC1_QM0_GLBL_SECURE_PROPS_1                               0xD20014
+
+#define mmNIC1_QM0_GLBL_SECURE_PROPS_2                               0xD20018
+
+#define mmNIC1_QM0_GLBL_SECURE_PROPS_3                               0xD2001C
+
+#define mmNIC1_QM0_GLBL_SECURE_PROPS_4                               0xD20020
+
+#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0                           0xD20024
+
+#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1                           0xD20028
+
+#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2                           0xD2002C
+
+#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3                           0xD20030
+
+#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4                           0xD20034
+
+#define mmNIC1_QM0_GLBL_STS0                                         0xD20038
+
+#define mmNIC1_QM0_GLBL_STS1_0                                       0xD20040
+
+#define mmNIC1_QM0_GLBL_STS1_1                                       0xD20044
+
+#define mmNIC1_QM0_GLBL_STS1_2                                       0xD20048
+
+#define mmNIC1_QM0_GLBL_STS1_3                                       0xD2004C
+
+#define mmNIC1_QM0_GLBL_STS1_4                                       0xD20050
+
+#define mmNIC1_QM0_GLBL_MSG_EN_0                                     0xD20054
+
+#define mmNIC1_QM0_GLBL_MSG_EN_1                                     0xD20058
+
+#define mmNIC1_QM0_GLBL_MSG_EN_2                                     0xD2005C
+
+#define mmNIC1_QM0_GLBL_MSG_EN_3                                     0xD20060
+
+#define mmNIC1_QM0_GLBL_MSG_EN_4                                     0xD20068
+
+#define mmNIC1_QM0_PQ_BASE_LO_0                                      0xD20070
+
+#define mmNIC1_QM0_PQ_BASE_LO_1                                      0xD20074
+
+#define mmNIC1_QM0_PQ_BASE_LO_2                                      0xD20078
+
+#define mmNIC1_QM0_PQ_BASE_LO_3                                      0xD2007C
+
+#define mmNIC1_QM0_PQ_BASE_HI_0                                      0xD20080
+
+#define mmNIC1_QM0_PQ_BASE_HI_1                                      0xD20084
+
+#define mmNIC1_QM0_PQ_BASE_HI_2                                      0xD20088
+
+#define mmNIC1_QM0_PQ_BASE_HI_3                                      0xD2008C
+
+#define mmNIC1_QM0_PQ_SIZE_0                                         0xD20090
+
+#define mmNIC1_QM0_PQ_SIZE_1                                         0xD20094
+
+#define mmNIC1_QM0_PQ_SIZE_2                                         0xD20098
+
+#define mmNIC1_QM0_PQ_SIZE_3                                         0xD2009C
+
+#define mmNIC1_QM0_PQ_PI_0                                           0xD200A0
+
+#define mmNIC1_QM0_PQ_PI_1                                           0xD200A4
+
+#define mmNIC1_QM0_PQ_PI_2                                           0xD200A8
+
+#define mmNIC1_QM0_PQ_PI_3                                           0xD200AC
+
+#define mmNIC1_QM0_PQ_CI_0                                           0xD200B0
+
+#define mmNIC1_QM0_PQ_CI_1                                           0xD200B4
+
+#define mmNIC1_QM0_PQ_CI_2                                           0xD200B8
+
+#define mmNIC1_QM0_PQ_CI_3                                           0xD200BC
+
+#define mmNIC1_QM0_PQ_CFG0_0                                         0xD200C0
+
+#define mmNIC1_QM0_PQ_CFG0_1                                         0xD200C4
+
+#define mmNIC1_QM0_PQ_CFG0_2                                         0xD200C8
+
+#define mmNIC1_QM0_PQ_CFG0_3                                         0xD200CC
+
+#define mmNIC1_QM0_PQ_CFG1_0                                         0xD200D0
+
+#define mmNIC1_QM0_PQ_CFG1_1                                         0xD200D4
+
+#define mmNIC1_QM0_PQ_CFG1_2                                         0xD200D8
+
+#define mmNIC1_QM0_PQ_CFG1_3                                         0xD200DC
+
+#define mmNIC1_QM0_PQ_ARUSER_31_11_0                                 0xD200E0
+
+#define mmNIC1_QM0_PQ_ARUSER_31_11_1                                 0xD200E4
+
+#define mmNIC1_QM0_PQ_ARUSER_31_11_2                                 0xD200E8
+
+#define mmNIC1_QM0_PQ_ARUSER_31_11_3                                 0xD200EC
+
+#define mmNIC1_QM0_PQ_STS0_0                                         0xD200F0
+
+#define mmNIC1_QM0_PQ_STS0_1                                         0xD200F4
+
+#define mmNIC1_QM0_PQ_STS0_2                                         0xD200F8
+
+#define mmNIC1_QM0_PQ_STS0_3                                         0xD200FC
+
+#define mmNIC1_QM0_PQ_STS1_0                                         0xD20100
+
+#define mmNIC1_QM0_PQ_STS1_1                                         0xD20104
+
+#define mmNIC1_QM0_PQ_STS1_2                                         0xD20108
+
+#define mmNIC1_QM0_PQ_STS1_3                                         0xD2010C
+
+#define mmNIC1_QM0_CQ_CFG0_0                                         0xD20110
+
+#define mmNIC1_QM0_CQ_CFG0_1                                         0xD20114
+
+#define mmNIC1_QM0_CQ_CFG0_2                                         0xD20118
+
+#define mmNIC1_QM0_CQ_CFG0_3                                         0xD2011C
+
+#define mmNIC1_QM0_CQ_CFG0_4                                         0xD20120
+
+#define mmNIC1_QM0_CQ_CFG1_0                                         0xD20124
+
+#define mmNIC1_QM0_CQ_CFG1_1                                         0xD20128
+
+#define mmNIC1_QM0_CQ_CFG1_2                                         0xD2012C
+
+#define mmNIC1_QM0_CQ_CFG1_3                                         0xD20130
+
+#define mmNIC1_QM0_CQ_CFG1_4                                         0xD20134
+
+#define mmNIC1_QM0_CQ_ARUSER_31_11_0                                 0xD20138
+
+#define mmNIC1_QM0_CQ_ARUSER_31_11_1                                 0xD2013C
+
+#define mmNIC1_QM0_CQ_ARUSER_31_11_2                                 0xD20140
+
+#define mmNIC1_QM0_CQ_ARUSER_31_11_3                                 0xD20144
+
+#define mmNIC1_QM0_CQ_ARUSER_31_11_4                                 0xD20148
+
+#define mmNIC1_QM0_CQ_STS0_0                                         0xD2014C
+
+#define mmNIC1_QM0_CQ_STS0_1                                         0xD20150
+
+#define mmNIC1_QM0_CQ_STS0_2                                         0xD20154
+
+#define mmNIC1_QM0_CQ_STS0_3                                         0xD20158
+
+#define mmNIC1_QM0_CQ_STS0_4                                         0xD2015C
+
+#define mmNIC1_QM0_CQ_STS1_0                                         0xD20160
+
+#define mmNIC1_QM0_CQ_STS1_1                                         0xD20164
+
+#define mmNIC1_QM0_CQ_STS1_2                                         0xD20168
+
+#define mmNIC1_QM0_CQ_STS1_3                                         0xD2016C
+
+#define mmNIC1_QM0_CQ_STS1_4                                         0xD20170
+
+#define mmNIC1_QM0_CQ_PTR_LO_0                                       0xD20174
+
+#define mmNIC1_QM0_CQ_PTR_HI_0                                       0xD20178
+
+#define mmNIC1_QM0_CQ_TSIZE_0                                        0xD2017C
+
+#define mmNIC1_QM0_CQ_CTL_0                                          0xD20180
+
+#define mmNIC1_QM0_CQ_PTR_LO_1                                       0xD20184
+
+#define mmNIC1_QM0_CQ_PTR_HI_1                                       0xD20188
+
+#define mmNIC1_QM0_CQ_TSIZE_1                                        0xD2018C
+
+#define mmNIC1_QM0_CQ_CTL_1                                          0xD20190
+
+#define mmNIC1_QM0_CQ_PTR_LO_2                                       0xD20194
+
+#define mmNIC1_QM0_CQ_PTR_HI_2                                       0xD20198
+
+#define mmNIC1_QM0_CQ_TSIZE_2                                        0xD2019C
+
+#define mmNIC1_QM0_CQ_CTL_2                                          0xD201A0
+
+#define mmNIC1_QM0_CQ_PTR_LO_3                                       0xD201A4
+
+#define mmNIC1_QM0_CQ_PTR_HI_3                                       0xD201A8
+
+#define mmNIC1_QM0_CQ_TSIZE_3                                        0xD201AC
+
+#define mmNIC1_QM0_CQ_CTL_3                                          0xD201B0
+
+#define mmNIC1_QM0_CQ_PTR_LO_4                                       0xD201B4
+
+#define mmNIC1_QM0_CQ_PTR_HI_4                                       0xD201B8
+
+#define mmNIC1_QM0_CQ_TSIZE_4                                        0xD201BC
+
+#define mmNIC1_QM0_CQ_CTL_4                                          0xD201C0
+
+#define mmNIC1_QM0_CQ_PTR_LO_STS_0                                   0xD201C4
+
+#define mmNIC1_QM0_CQ_PTR_LO_STS_1                                   0xD201C8
+
+#define mmNIC1_QM0_CQ_PTR_LO_STS_2                                   0xD201CC
+
+#define mmNIC1_QM0_CQ_PTR_LO_STS_3                                   0xD201D0
+
+#define mmNIC1_QM0_CQ_PTR_LO_STS_4                                   0xD201D4
+
+#define mmNIC1_QM0_CQ_PTR_HI_STS_0                                   0xD201D8
+
+#define mmNIC1_QM0_CQ_PTR_HI_STS_1                                   0xD201DC
+
+#define mmNIC1_QM0_CQ_PTR_HI_STS_2                                   0xD201E0
+
+#define mmNIC1_QM0_CQ_PTR_HI_STS_3                                   0xD201E4
+
+#define mmNIC1_QM0_CQ_PTR_HI_STS_4                                   0xD201E8
+
+#define mmNIC1_QM0_CQ_TSIZE_STS_0                                    0xD201EC
+
+#define mmNIC1_QM0_CQ_TSIZE_STS_1                                    0xD201F0
+
+#define mmNIC1_QM0_CQ_TSIZE_STS_2                                    0xD201F4
+
+#define mmNIC1_QM0_CQ_TSIZE_STS_3                                    0xD201F8
+
+#define mmNIC1_QM0_CQ_TSIZE_STS_4                                    0xD201FC
+
+#define mmNIC1_QM0_CQ_CTL_STS_0                                      0xD20200
+
+#define mmNIC1_QM0_CQ_CTL_STS_1                                      0xD20204
+
+#define mmNIC1_QM0_CQ_CTL_STS_2                                      0xD20208
+
+#define mmNIC1_QM0_CQ_CTL_STS_3                                      0xD2020C
+
+#define mmNIC1_QM0_CQ_CTL_STS_4                                      0xD20210
+
+#define mmNIC1_QM0_CQ_IFIFO_CNT_0                                    0xD20214
+
+#define mmNIC1_QM0_CQ_IFIFO_CNT_1                                    0xD20218
+
+#define mmNIC1_QM0_CQ_IFIFO_CNT_2                                    0xD2021C
+
+#define mmNIC1_QM0_CQ_IFIFO_CNT_3                                    0xD20220
+
+#define mmNIC1_QM0_CQ_IFIFO_CNT_4                                    0xD20224
+
+#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_0                            0xD20228
+
+#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_1                            0xD2022C
+
+#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_2                            0xD20230
+
+#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_3                            0xD20234
+
+#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_4                            0xD20238
+
+#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_0                            0xD2023C
+
+#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_1                            0xD20240
+
+#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_2                            0xD20244
+
+#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_3                            0xD20248
+
+#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_4                            0xD2024C
+
+#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_0                            0xD20250
+
+#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_1                            0xD20254
+
+#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_2                            0xD20258
+
+#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_3                            0xD2025C
+
+#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_4                            0xD20260
+
+#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_0                            0xD20264
+
+#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_1                            0xD20268
+
+#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_2                            0xD2026C
+
+#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_3                            0xD20270
+
+#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_4                            0xD20274
+
+#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_0                            0xD20278
+
+#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_1                            0xD2027C
+
+#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2                            0xD20280
+
+#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_3                            0xD20284
+
+#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_4                            0xD20288
+
+#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_0                            0xD2028C
+
+#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_1                            0xD20290
+
+#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_2                            0xD20294
+
+#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_3                            0xD20298
+
+#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_4                            0xD2029C
+
+#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_0                            0xD202A0
+
+#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_1                            0xD202A4
+
+#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_2                            0xD202A8
+
+#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_3                            0xD202AC
+
+#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_4                            0xD202B0
+
+#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_0                            0xD202B4
+
+#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_1                            0xD202B8
+
+#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_2                            0xD202BC
+
+#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_3                            0xD202C0
+
+#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_4                            0xD202C4
+
+#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_0                            0xD202C8
+
+#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_1                            0xD202CC
+
+#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_2                            0xD202D0
+
+#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_3                            0xD202D4
+
+#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_4                            0xD202D8
+
+#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0                      0xD202E0
+
+#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1                      0xD202E4
+
+#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2                      0xD202E8
+
+#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3                      0xD202EC
+
+#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4                      0xD202F0
+
+#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0                      0xD202F4
+
+#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1                      0xD202F8
+
+#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2                      0xD202FC
+
+#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3                      0xD20300
+
+#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4                      0xD20304
+
+#define mmNIC1_QM0_CP_FENCE0_RDATA_0                                 0xD20308
+
+#define mmNIC1_QM0_CP_FENCE0_RDATA_1                                 0xD2030C
+
+#define mmNIC1_QM0_CP_FENCE0_RDATA_2                                 0xD20310
+
+#define mmNIC1_QM0_CP_FENCE0_RDATA_3                                 0xD20314
+
+#define mmNIC1_QM0_CP_FENCE0_RDATA_4                                 0xD20318
+
+#define mmNIC1_QM0_CP_FENCE1_RDATA_0                                 0xD2031C
+
+#define mmNIC1_QM0_CP_FENCE1_RDATA_1                                 0xD20320
+
+#define mmNIC1_QM0_CP_FENCE1_RDATA_2                                 0xD20324
+
+#define mmNIC1_QM0_CP_FENCE1_RDATA_3                                 0xD20328
+
+#define mmNIC1_QM0_CP_FENCE1_RDATA_4                                 0xD2032C
+
+#define mmNIC1_QM0_CP_FENCE2_RDATA_0                                 0xD20330
+
+#define mmNIC1_QM0_CP_FENCE2_RDATA_1                                 0xD20334
+
+#define mmNIC1_QM0_CP_FENCE2_RDATA_2                                 0xD20338
+
+#define mmNIC1_QM0_CP_FENCE2_RDATA_3                                 0xD2033C
+
+#define mmNIC1_QM0_CP_FENCE2_RDATA_4                                 0xD20340
+
+#define mmNIC1_QM0_CP_FENCE3_RDATA_0                                 0xD20344
+
+#define mmNIC1_QM0_CP_FENCE3_RDATA_1                                 0xD20348
+
+#define mmNIC1_QM0_CP_FENCE3_RDATA_2                                 0xD2034C
+
+#define mmNIC1_QM0_CP_FENCE3_RDATA_3                                 0xD20350
+
+#define mmNIC1_QM0_CP_FENCE3_RDATA_4                                 0xD20354
+
+#define mmNIC1_QM0_CP_FENCE0_CNT_0                                   0xD20358
+
+#define mmNIC1_QM0_CP_FENCE0_CNT_1                                   0xD2035C
+
+#define mmNIC1_QM0_CP_FENCE0_CNT_2                                   0xD20360
+
+#define mmNIC1_QM0_CP_FENCE0_CNT_3                                   0xD20364
+
+#define mmNIC1_QM0_CP_FENCE0_CNT_4                                   0xD20368
+
+#define mmNIC1_QM0_CP_FENCE1_CNT_0                                   0xD2036C
+
+#define mmNIC1_QM0_CP_FENCE1_CNT_1                                   0xD20370
+
+#define mmNIC1_QM0_CP_FENCE1_CNT_2                                   0xD20374
+
+#define mmNIC1_QM0_CP_FENCE1_CNT_3                                   0xD20378
+
+#define mmNIC1_QM0_CP_FENCE1_CNT_4                                   0xD2037C
+
+#define mmNIC1_QM0_CP_FENCE2_CNT_0                                   0xD20380
+
+#define mmNIC1_QM0_CP_FENCE2_CNT_1                                   0xD20384
+
+#define mmNIC1_QM0_CP_FENCE2_CNT_2                                   0xD20388
+
+#define mmNIC1_QM0_CP_FENCE2_CNT_3                                   0xD2038C
+
+#define mmNIC1_QM0_CP_FENCE2_CNT_4                                   0xD20390
+
+#define mmNIC1_QM0_CP_FENCE3_CNT_0                                   0xD20394
+
+#define mmNIC1_QM0_CP_FENCE3_CNT_1                                   0xD20398
+
+#define mmNIC1_QM0_CP_FENCE3_CNT_2                                   0xD2039C
+
+#define mmNIC1_QM0_CP_FENCE3_CNT_3                                   0xD203A0
+
+#define mmNIC1_QM0_CP_FENCE3_CNT_4                                   0xD203A4
+
+#define mmNIC1_QM0_CP_STS_0                                          0xD203A8
+
+#define mmNIC1_QM0_CP_STS_1                                          0xD203AC
+
+#define mmNIC1_QM0_CP_STS_2                                          0xD203B0
+
+#define mmNIC1_QM0_CP_STS_3                                          0xD203B4
+
+#define mmNIC1_QM0_CP_STS_4                                          0xD203B8
+
+#define mmNIC1_QM0_CP_CURRENT_INST_LO_0                              0xD203BC
+
+#define mmNIC1_QM0_CP_CURRENT_INST_LO_1                              0xD203C0
+
+#define mmNIC1_QM0_CP_CURRENT_INST_LO_2                              0xD203C4
+
+#define mmNIC1_QM0_CP_CURRENT_INST_LO_3                              0xD203C8
+
+#define mmNIC1_QM0_CP_CURRENT_INST_LO_4                              0xD203CC
+
+#define mmNIC1_QM0_CP_CURRENT_INST_HI_0                              0xD203D0
+
+#define mmNIC1_QM0_CP_CURRENT_INST_HI_1                              0xD203D4
+
+#define mmNIC1_QM0_CP_CURRENT_INST_HI_2                              0xD203D8
+
+#define mmNIC1_QM0_CP_CURRENT_INST_HI_3                              0xD203DC
+
+#define mmNIC1_QM0_CP_CURRENT_INST_HI_4                              0xD203E0
+
+#define mmNIC1_QM0_CP_BARRIER_CFG_0                                  0xD203F4
+
+#define mmNIC1_QM0_CP_BARRIER_CFG_1                                  0xD203F8
+
+#define mmNIC1_QM0_CP_BARRIER_CFG_2                                  0xD203FC
+
+#define mmNIC1_QM0_CP_BARRIER_CFG_3                                  0xD20400
+
+#define mmNIC1_QM0_CP_BARRIER_CFG_4                                  0xD20404
+
+#define mmNIC1_QM0_CP_DBG_0_0                                        0xD20408
+
+#define mmNIC1_QM0_CP_DBG_0_1                                        0xD2040C
+
+#define mmNIC1_QM0_CP_DBG_0_2                                        0xD20410
+
+#define mmNIC1_QM0_CP_DBG_0_3                                        0xD20414
+
+#define mmNIC1_QM0_CP_DBG_0_4                                        0xD20418
+
+#define mmNIC1_QM0_CP_ARUSER_31_11_0                                 0xD2041C
+
+#define mmNIC1_QM0_CP_ARUSER_31_11_1                                 0xD20420
+
+#define mmNIC1_QM0_CP_ARUSER_31_11_2                                 0xD20424
+
+#define mmNIC1_QM0_CP_ARUSER_31_11_3                                 0xD20428
+
+#define mmNIC1_QM0_CP_ARUSER_31_11_4                                 0xD2042C
+
+#define mmNIC1_QM0_CP_AWUSER_31_11_0                                 0xD20430
+
+#define mmNIC1_QM0_CP_AWUSER_31_11_1                                 0xD20434
+
+#define mmNIC1_QM0_CP_AWUSER_31_11_2                                 0xD20438
+
+#define mmNIC1_QM0_CP_AWUSER_31_11_3                                 0xD2043C
+
+#define mmNIC1_QM0_CP_AWUSER_31_11_4                                 0xD20440
+
+#define mmNIC1_QM0_ARB_CFG_0                                         0xD20A00
+
+#define mmNIC1_QM0_ARB_CHOISE_Q_PUSH                                 0xD20A04
+
+#define mmNIC1_QM0_ARB_WRR_WEIGHT_0                                  0xD20A08
+
+#define mmNIC1_QM0_ARB_WRR_WEIGHT_1                                  0xD20A0C
+
+#define mmNIC1_QM0_ARB_WRR_WEIGHT_2                                  0xD20A10
+
+#define mmNIC1_QM0_ARB_WRR_WEIGHT_3                                  0xD20A14
+
+#define mmNIC1_QM0_ARB_CFG_1                                         0xD20A18
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_0                              0xD20A20
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_1                              0xD20A24
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_2                              0xD20A28
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_3                              0xD20A2C
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_4                              0xD20A30
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_5                              0xD20A34
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_6                              0xD20A38
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_7                              0xD20A3C
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_8                              0xD20A40
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_9                              0xD20A44
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_10                             0xD20A48
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_11                             0xD20A4C
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_12                             0xD20A50
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_13                             0xD20A54
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_14                             0xD20A58
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_15                             0xD20A5C
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_16                             0xD20A60
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_17                             0xD20A64
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_18                             0xD20A68
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_19                             0xD20A6C
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_20                             0xD20A70
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_21                             0xD20A74
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_22                             0xD20A78
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_23                             0xD20A7C
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_24                             0xD20A80
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_25                             0xD20A84
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_26                             0xD20A88
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_27                             0xD20A8C
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_28                             0xD20A90
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_29                             0xD20A94
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_30                             0xD20A98
+
+#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_31                             0xD20A9C
+
+#define mmNIC1_QM0_ARB_MST_CRED_INC                                  0xD20AA0
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_0                        0xD20AA4
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_1                        0xD20AA8
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_2                        0xD20AAC
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_3                        0xD20AB0
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_4                        0xD20AB4
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_5                        0xD20AB8
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_6                        0xD20ABC
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_7                        0xD20AC0
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_8                        0xD20AC4
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_9                        0xD20AC8
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_10                       0xD20ACC
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_11                       0xD20AD0
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_12                       0xD20AD4
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_13                       0xD20AD8
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_14                       0xD20ADC
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_15                       0xD20AE0
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_16                       0xD20AE4
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_17                       0xD20AE8
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_18                       0xD20AEC
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_19                       0xD20AF0
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_20                       0xD20AF4
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_21                       0xD20AF8
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_22                       0xD20AFC
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23                       0xD20B00
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_24                       0xD20B04
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_25                       0xD20B08
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_26                       0xD20B0C
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_27                       0xD20B10
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_28                       0xD20B14
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_29                       0xD20B18
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_30                       0xD20B1C
+
+#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_31                       0xD20B20
+
+#define mmNIC1_QM0_ARB_SLV_MASTER_INC_CRED_OFST                      0xD20B28
+
+#define mmNIC1_QM0_ARB_MST_SLAVE_EN                                  0xD20B2C
+
+#define mmNIC1_QM0_ARB_MST_QUIET_PER                                 0xD20B34
+
+#define mmNIC1_QM0_ARB_SLV_CHOISE_WDT                                0xD20B38
+
+#define mmNIC1_QM0_ARB_SLV_ID                                        0xD20B3C
+
+#define mmNIC1_QM0_ARB_MSG_MAX_INFLIGHT                              0xD20B44
+
+#define mmNIC1_QM0_ARB_MSG_AWUSER_31_11                              0xD20B48
+
+#define mmNIC1_QM0_ARB_MSG_AWUSER_SEC_PROP                           0xD20B4C
+
+#define mmNIC1_QM0_ARB_MSG_AWUSER_NON_SEC_PROP                       0xD20B50
+
+#define mmNIC1_QM0_ARB_BASE_LO                                       0xD20B54
+
+#define mmNIC1_QM0_ARB_BASE_HI                                       0xD20B58
+
+#define mmNIC1_QM0_ARB_STATE_STS                                     0xD20B80
+
+#define mmNIC1_QM0_ARB_CHOISE_FULLNESS_STS                           0xD20B84
+
+#define mmNIC1_QM0_ARB_MSG_STS                                       0xD20B88
+
+#define mmNIC1_QM0_ARB_SLV_CHOISE_Q_HEAD                             0xD20B8C
+
+#define mmNIC1_QM0_ARB_ERR_CAUSE                                     0xD20B9C
+
+#define mmNIC1_QM0_ARB_ERR_MSG_EN                                    0xD20BA0
+
+#define mmNIC1_QM0_ARB_ERR_STS_DRP                                   0xD20BA8
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_0                                0xD20BB0
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_1                                0xD20BB4
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_2                                0xD20BB8
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_3                                0xD20BBC
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_4                                0xD20BC0
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_5                                0xD20BC4
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_6                                0xD20BC8
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_7                                0xD20BCC
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_8                                0xD20BD0
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_9                                0xD20BD4
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_10                               0xD20BD8
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_11                               0xD20BDC
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_12                               0xD20BE0
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_13                               0xD20BE4
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_14                               0xD20BE8
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_15                               0xD20BEC
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_16                               0xD20BF0
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_17                               0xD20BF4
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_18                               0xD20BF8
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_19                               0xD20BFC
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_20                               0xD20C00
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_21                               0xD20C04
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_22                               0xD20C08
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_23                               0xD20C0C
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_24                               0xD20C10
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_25                               0xD20C14
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_26                               0xD20C18
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_27                               0xD20C1C
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_28                               0xD20C20
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_29                               0xD20C24
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_30                               0xD20C28
+
+#define mmNIC1_QM0_ARB_MST_CRED_STS_31                               0xD20C2C
+
+#define mmNIC1_QM0_CGM_CFG                                           0xD20C70
+
+#define mmNIC1_QM0_CGM_STS                                           0xD20C74
+
+#define mmNIC1_QM0_CGM_CFG1                                          0xD20C78
+
+#define mmNIC1_QM0_LOCAL_RANGE_BASE                                  0xD20C80
+
+#define mmNIC1_QM0_LOCAL_RANGE_SIZE                                  0xD20C84
+
+#define mmNIC1_QM0_CSMR_STRICT_PRIO_CFG                              0xD20C90
+
+#define mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_1                             0xD20C94
+
+#define mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_0                             0xD20C98
+
+#define mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_1                             0xD20C9C
+
+#define mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_0                             0xD20CA0
+
+#define mmNIC1_QM0_GLBL_AXCACHE                                      0xD20CA4
+
+#define mmNIC1_QM0_IND_GW_APB_CFG                                    0xD20CB0
+
+#define mmNIC1_QM0_IND_GW_APB_WDATA                                  0xD20CB4
+
+#define mmNIC1_QM0_IND_GW_APB_RDATA                                  0xD20CB8
+
+#define mmNIC1_QM0_IND_GW_APB_STATUS                                 0xD20CBC
+
+#define mmNIC1_QM0_GLBL_ERR_ADDR_LO                                  0xD20CD0
+
+#define mmNIC1_QM0_GLBL_ERR_ADDR_HI                                  0xD20CD4
+
+#define mmNIC1_QM0_GLBL_ERR_WDATA                                    0xD20CD8
+
+#define mmNIC1_QM0_GLBL_MEM_INIT_BUSY                                0xD20D00
+
+#endif /* ASIC_REG_NIC1_QM0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b115ee6d6f0883abd64e4ce22197320804ef0ba
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC1_QM1_REGS_H_
+#define ASIC_REG_NIC1_QM1_REGS_H_
+
+/*
+ *****************************************
+ *   NIC1_QM1 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC1_QM1_GLBL_CFG0                                         0xD22000
+
+#define mmNIC1_QM1_GLBL_CFG1                                         0xD22004
+
+#define mmNIC1_QM1_GLBL_PROT                                         0xD22008
+
+#define mmNIC1_QM1_GLBL_ERR_CFG                                      0xD2200C
+
+#define mmNIC1_QM1_GLBL_SECURE_PROPS_0                               0xD22010
+
+#define mmNIC1_QM1_GLBL_SECURE_PROPS_1                               0xD22014
+
+#define mmNIC1_QM1_GLBL_SECURE_PROPS_2                               0xD22018
+
+#define mmNIC1_QM1_GLBL_SECURE_PROPS_3                               0xD2201C
+
+#define mmNIC1_QM1_GLBL_SECURE_PROPS_4                               0xD22020
+
+#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0                           0xD22024
+
+#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1                           0xD22028
+
+#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2                           0xD2202C
+
+#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3                           0xD22030
+
+#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4                           0xD22034
+
+#define mmNIC1_QM1_GLBL_STS0                                         0xD22038
+
+#define mmNIC1_QM1_GLBL_STS1_0                                       0xD22040
+
+#define mmNIC1_QM1_GLBL_STS1_1                                       0xD22044
+
+#define mmNIC1_QM1_GLBL_STS1_2                                       0xD22048
+
+#define mmNIC1_QM1_GLBL_STS1_3                                       0xD2204C
+
+#define mmNIC1_QM1_GLBL_STS1_4                                       0xD22050
+
+#define mmNIC1_QM1_GLBL_MSG_EN_0                                     0xD22054
+
+#define mmNIC1_QM1_GLBL_MSG_EN_1                                     0xD22058
+
+#define mmNIC1_QM1_GLBL_MSG_EN_2                                     0xD2205C
+
+#define mmNIC1_QM1_GLBL_MSG_EN_3                                     0xD22060
+
+#define mmNIC1_QM1_GLBL_MSG_EN_4                                     0xD22068
+
+#define mmNIC1_QM1_PQ_BASE_LO_0                                      0xD22070
+
+#define mmNIC1_QM1_PQ_BASE_LO_1                                      0xD22074
+
+#define mmNIC1_QM1_PQ_BASE_LO_2                                      0xD22078
+
+#define mmNIC1_QM1_PQ_BASE_LO_3                                      0xD2207C
+
+#define mmNIC1_QM1_PQ_BASE_HI_0                                      0xD22080
+
+#define mmNIC1_QM1_PQ_BASE_HI_1                                      0xD22084
+
+#define mmNIC1_QM1_PQ_BASE_HI_2                                      0xD22088
+
+#define mmNIC1_QM1_PQ_BASE_HI_3                                      0xD2208C
+
+#define mmNIC1_QM1_PQ_SIZE_0                                         0xD22090
+
+#define mmNIC1_QM1_PQ_SIZE_1                                         0xD22094
+
+#define mmNIC1_QM1_PQ_SIZE_2                                         0xD22098
+
+#define mmNIC1_QM1_PQ_SIZE_3                                         0xD2209C
+
+#define mmNIC1_QM1_PQ_PI_0                                           0xD220A0
+
+#define mmNIC1_QM1_PQ_PI_1                                           0xD220A4
+
+#define mmNIC1_QM1_PQ_PI_2                                           0xD220A8
+
+#define mmNIC1_QM1_PQ_PI_3                                           0xD220AC
+
+#define mmNIC1_QM1_PQ_CI_0                                           0xD220B0
+
+#define mmNIC1_QM1_PQ_CI_1                                           0xD220B4
+
+#define mmNIC1_QM1_PQ_CI_2                                           0xD220B8
+
+#define mmNIC1_QM1_PQ_CI_3                                           0xD220BC
+
+#define mmNIC1_QM1_PQ_CFG0_0                                         0xD220C0
+
+#define mmNIC1_QM1_PQ_CFG0_1                                         0xD220C4
+
+#define mmNIC1_QM1_PQ_CFG0_2                                         0xD220C8
+
+#define mmNIC1_QM1_PQ_CFG0_3                                         0xD220CC
+
+#define mmNIC1_QM1_PQ_CFG1_0                                         0xD220D0
+
+#define mmNIC1_QM1_PQ_CFG1_1                                         0xD220D4
+
+#define mmNIC1_QM1_PQ_CFG1_2                                         0xD220D8
+
+#define mmNIC1_QM1_PQ_CFG1_3                                         0xD220DC
+
+#define mmNIC1_QM1_PQ_ARUSER_31_11_0                                 0xD220E0
+
+#define mmNIC1_QM1_PQ_ARUSER_31_11_1                                 0xD220E4
+
+#define mmNIC1_QM1_PQ_ARUSER_31_11_2                                 0xD220E8
+
+#define mmNIC1_QM1_PQ_ARUSER_31_11_3                                 0xD220EC
+
+#define mmNIC1_QM1_PQ_STS0_0                                         0xD220F0
+
+#define mmNIC1_QM1_PQ_STS0_1                                         0xD220F4
+
+#define mmNIC1_QM1_PQ_STS0_2                                         0xD220F8
+
+#define mmNIC1_QM1_PQ_STS0_3                                         0xD220FC
+
+#define mmNIC1_QM1_PQ_STS1_0                                         0xD22100
+
+#define mmNIC1_QM1_PQ_STS1_1                                         0xD22104
+
+#define mmNIC1_QM1_PQ_STS1_2                                         0xD22108
+
+#define mmNIC1_QM1_PQ_STS1_3                                         0xD2210C
+
+#define mmNIC1_QM1_CQ_CFG0_0                                         0xD22110
+
+#define mmNIC1_QM1_CQ_CFG0_1                                         0xD22114
+
+#define mmNIC1_QM1_CQ_CFG0_2                                         0xD22118
+
+#define mmNIC1_QM1_CQ_CFG0_3                                         0xD2211C
+
+#define mmNIC1_QM1_CQ_CFG0_4                                         0xD22120
+
+#define mmNIC1_QM1_CQ_CFG1_0                                         0xD22124
+
+#define mmNIC1_QM1_CQ_CFG1_1                                         0xD22128
+
+#define mmNIC1_QM1_CQ_CFG1_2                                         0xD2212C
+
+#define mmNIC1_QM1_CQ_CFG1_3                                         0xD22130
+
+#define mmNIC1_QM1_CQ_CFG1_4                                         0xD22134
+
+#define mmNIC1_QM1_CQ_ARUSER_31_11_0                                 0xD22138
+
+#define mmNIC1_QM1_CQ_ARUSER_31_11_1                                 0xD2213C
+
+#define mmNIC1_QM1_CQ_ARUSER_31_11_2                                 0xD22140
+
+#define mmNIC1_QM1_CQ_ARUSER_31_11_3                                 0xD22144
+
+#define mmNIC1_QM1_CQ_ARUSER_31_11_4                                 0xD22148
+
+#define mmNIC1_QM1_CQ_STS0_0                                         0xD2214C
+
+#define mmNIC1_QM1_CQ_STS0_1                                         0xD22150
+
+#define mmNIC1_QM1_CQ_STS0_2                                         0xD22154
+
+#define mmNIC1_QM1_CQ_STS0_3                                         0xD22158
+
+#define mmNIC1_QM1_CQ_STS0_4                                         0xD2215C
+
+#define mmNIC1_QM1_CQ_STS1_0                                         0xD22160
+
+#define mmNIC1_QM1_CQ_STS1_1                                         0xD22164
+
+#define mmNIC1_QM1_CQ_STS1_2                                         0xD22168
+
+#define mmNIC1_QM1_CQ_STS1_3                                         0xD2216C
+
+#define mmNIC1_QM1_CQ_STS1_4                                         0xD22170
+
+#define mmNIC1_QM1_CQ_PTR_LO_0                                       0xD22174
+
+#define mmNIC1_QM1_CQ_PTR_HI_0                                       0xD22178
+
+#define mmNIC1_QM1_CQ_TSIZE_0                                        0xD2217C
+
+#define mmNIC1_QM1_CQ_CTL_0                                          0xD22180
+
+#define mmNIC1_QM1_CQ_PTR_LO_1                                       0xD22184
+
+#define mmNIC1_QM1_CQ_PTR_HI_1                                       0xD22188
+
+#define mmNIC1_QM1_CQ_TSIZE_1                                        0xD2218C
+
+#define mmNIC1_QM1_CQ_CTL_1                                          0xD22190
+
+#define mmNIC1_QM1_CQ_PTR_LO_2                                       0xD22194
+
+#define mmNIC1_QM1_CQ_PTR_HI_2                                       0xD22198
+
+#define mmNIC1_QM1_CQ_TSIZE_2                                        0xD2219C
+
+#define mmNIC1_QM1_CQ_CTL_2                                          0xD221A0
+
+#define mmNIC1_QM1_CQ_PTR_LO_3                                       0xD221A4
+
+#define mmNIC1_QM1_CQ_PTR_HI_3                                       0xD221A8
+
+#define mmNIC1_QM1_CQ_TSIZE_3                                        0xD221AC
+
+#define mmNIC1_QM1_CQ_CTL_3                                          0xD221B0
+
+#define mmNIC1_QM1_CQ_PTR_LO_4                                       0xD221B4
+
+#define mmNIC1_QM1_CQ_PTR_HI_4                                       0xD221B8
+
+#define mmNIC1_QM1_CQ_TSIZE_4                                        0xD221BC
+
+#define mmNIC1_QM1_CQ_CTL_4                                          0xD221C0
+
+#define mmNIC1_QM1_CQ_PTR_LO_STS_0                                   0xD221C4
+
+#define mmNIC1_QM1_CQ_PTR_LO_STS_1                                   0xD221C8
+
+#define mmNIC1_QM1_CQ_PTR_LO_STS_2                                   0xD221CC
+
+#define mmNIC1_QM1_CQ_PTR_LO_STS_3                                   0xD221D0
+
+#define mmNIC1_QM1_CQ_PTR_LO_STS_4                                   0xD221D4
+
+#define mmNIC1_QM1_CQ_PTR_HI_STS_0                                   0xD221D8
+
+#define mmNIC1_QM1_CQ_PTR_HI_STS_1                                   0xD221DC
+
+#define mmNIC1_QM1_CQ_PTR_HI_STS_2                                   0xD221E0
+
+#define mmNIC1_QM1_CQ_PTR_HI_STS_3                                   0xD221E4
+
+#define mmNIC1_QM1_CQ_PTR_HI_STS_4                                   0xD221E8
+
+#define mmNIC1_QM1_CQ_TSIZE_STS_0                                    0xD221EC
+
+#define mmNIC1_QM1_CQ_TSIZE_STS_1                                    0xD221F0
+
+#define mmNIC1_QM1_CQ_TSIZE_STS_2                                    0xD221F4
+
+#define mmNIC1_QM1_CQ_TSIZE_STS_3                                    0xD221F8
+
+#define mmNIC1_QM1_CQ_TSIZE_STS_4                                    0xD221FC
+
+#define mmNIC1_QM1_CQ_CTL_STS_0                                      0xD22200
+
+#define mmNIC1_QM1_CQ_CTL_STS_1                                      0xD22204
+
+#define mmNIC1_QM1_CQ_CTL_STS_2                                      0xD22208
+
+#define mmNIC1_QM1_CQ_CTL_STS_3                                      0xD2220C
+
+#define mmNIC1_QM1_CQ_CTL_STS_4                                      0xD22210
+
+#define mmNIC1_QM1_CQ_IFIFO_CNT_0                                    0xD22214
+
+#define mmNIC1_QM1_CQ_IFIFO_CNT_1                                    0xD22218
+
+#define mmNIC1_QM1_CQ_IFIFO_CNT_2                                    0xD2221C
+
+#define mmNIC1_QM1_CQ_IFIFO_CNT_3                                    0xD22220
+
+#define mmNIC1_QM1_CQ_IFIFO_CNT_4                                    0xD22224
+
+#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_0                            0xD22228
+
+#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_1                            0xD2222C
+
+#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_2                            0xD22230
+
+#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_3                            0xD22234
+
+#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_4                            0xD22238
+
+#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_0                            0xD2223C
+
+#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_1                            0xD22240
+
+#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_2                            0xD22244
+
+#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_3                            0xD22248
+
+#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_4                            0xD2224C
+
+#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_0                            0xD22250
+
+#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_1                            0xD22254
+
+#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_2                            0xD22258
+
+#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_3                            0xD2225C
+
+#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_4                            0xD22260
+
+#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_0                            0xD22264
+
+#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_1                            0xD22268
+
+#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_2                            0xD2226C
+
+#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_3                            0xD22270
+
+#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_4                            0xD22274
+
+#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_0                            0xD22278
+
+#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_1                            0xD2227C
+
+#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2                            0xD22280
+
+#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_3                            0xD22284
+
+#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_4                            0xD22288
+
+#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_0                            0xD2228C
+
+#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_1                            0xD22290
+
+#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_2                            0xD22294
+
+#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_3                            0xD22298
+
+#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_4                            0xD2229C
+
+#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_0                            0xD222A0
+
+#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_1                            0xD222A4
+
+#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_2                            0xD222A8
+
+#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_3                            0xD222AC
+
+#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_4                            0xD222B0
+
+#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_0                            0xD222B4
+
+#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_1                            0xD222B8
+
+#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_2                            0xD222BC
+
+#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_3                            0xD222C0
+
+#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_4                            0xD222C4
+
+#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_0                            0xD222C8
+
+#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_1                            0xD222CC
+
+#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_2                            0xD222D0
+
+#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_3                            0xD222D4
+
+#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_4                            0xD222D8
+
+#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0                      0xD222E0
+
+#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1                      0xD222E4
+
+#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2                      0xD222E8
+
+#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3                      0xD222EC
+
+#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4                      0xD222F0
+
+#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0                      0xD222F4
+
+#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1                      0xD222F8
+
+#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2                      0xD222FC
+
+#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3                      0xD22300
+
+#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4                      0xD22304
+
+#define mmNIC1_QM1_CP_FENCE0_RDATA_0                                 0xD22308
+
+#define mmNIC1_QM1_CP_FENCE0_RDATA_1                                 0xD2230C
+
+#define mmNIC1_QM1_CP_FENCE0_RDATA_2                                 0xD22310
+
+#define mmNIC1_QM1_CP_FENCE0_RDATA_3                                 0xD22314
+
+#define mmNIC1_QM1_CP_FENCE0_RDATA_4                                 0xD22318
+
+#define mmNIC1_QM1_CP_FENCE1_RDATA_0                                 0xD2231C
+
+#define mmNIC1_QM1_CP_FENCE1_RDATA_1                                 0xD22320
+
+#define mmNIC1_QM1_CP_FENCE1_RDATA_2                                 0xD22324
+
+#define mmNIC1_QM1_CP_FENCE1_RDATA_3                                 0xD22328
+
+#define mmNIC1_QM1_CP_FENCE1_RDATA_4                                 0xD2232C
+
+#define mmNIC1_QM1_CP_FENCE2_RDATA_0                                 0xD22330
+
+#define mmNIC1_QM1_CP_FENCE2_RDATA_1                                 0xD22334
+
+#define mmNIC1_QM1_CP_FENCE2_RDATA_2                                 0xD22338
+
+#define mmNIC1_QM1_CP_FENCE2_RDATA_3                                 0xD2233C
+
+#define mmNIC1_QM1_CP_FENCE2_RDATA_4                                 0xD22340
+
+#define mmNIC1_QM1_CP_FENCE3_RDATA_0                                 0xD22344
+
+#define mmNIC1_QM1_CP_FENCE3_RDATA_1                                 0xD22348
+
+#define mmNIC1_QM1_CP_FENCE3_RDATA_2                                 0xD2234C
+
+#define mmNIC1_QM1_CP_FENCE3_RDATA_3                                 0xD22350
+
+#define mmNIC1_QM1_CP_FENCE3_RDATA_4                                 0xD22354
+
+#define mmNIC1_QM1_CP_FENCE0_CNT_0                                   0xD22358
+
+#define mmNIC1_QM1_CP_FENCE0_CNT_1                                   0xD2235C
+
+#define mmNIC1_QM1_CP_FENCE0_CNT_2                                   0xD22360
+
+#define mmNIC1_QM1_CP_FENCE0_CNT_3                                   0xD22364
+
+#define mmNIC1_QM1_CP_FENCE0_CNT_4                                   0xD22368
+
+#define mmNIC1_QM1_CP_FENCE1_CNT_0                                   0xD2236C
+
+#define mmNIC1_QM1_CP_FENCE1_CNT_1                                   0xD22370
+
+#define mmNIC1_QM1_CP_FENCE1_CNT_2                                   0xD22374
+
+#define mmNIC1_QM1_CP_FENCE1_CNT_3                                   0xD22378
+
+#define mmNIC1_QM1_CP_FENCE1_CNT_4                                   0xD2237C
+
+#define mmNIC1_QM1_CP_FENCE2_CNT_0                                   0xD22380
+
+#define mmNIC1_QM1_CP_FENCE2_CNT_1                                   0xD22384
+
+#define mmNIC1_QM1_CP_FENCE2_CNT_2                                   0xD22388
+
+#define mmNIC1_QM1_CP_FENCE2_CNT_3                                   0xD2238C
+
+#define mmNIC1_QM1_CP_FENCE2_CNT_4                                   0xD22390
+
+#define mmNIC1_QM1_CP_FENCE3_CNT_0                                   0xD22394
+
+#define mmNIC1_QM1_CP_FENCE3_CNT_1                                   0xD22398
+
+#define mmNIC1_QM1_CP_FENCE3_CNT_2                                   0xD2239C
+
+#define mmNIC1_QM1_CP_FENCE3_CNT_3                                   0xD223A0
+
+#define mmNIC1_QM1_CP_FENCE3_CNT_4                                   0xD223A4
+
+#define mmNIC1_QM1_CP_STS_0                                          0xD223A8
+
+#define mmNIC1_QM1_CP_STS_1                                          0xD223AC
+
+#define mmNIC1_QM1_CP_STS_2                                          0xD223B0
+
+#define mmNIC1_QM1_CP_STS_3                                          0xD223B4
+
+#define mmNIC1_QM1_CP_STS_4                                          0xD223B8
+
+#define mmNIC1_QM1_CP_CURRENT_INST_LO_0                              0xD223BC
+
+#define mmNIC1_QM1_CP_CURRENT_INST_LO_1                              0xD223C0
+
+#define mmNIC1_QM1_CP_CURRENT_INST_LO_2                              0xD223C4
+
+#define mmNIC1_QM1_CP_CURRENT_INST_LO_3                              0xD223C8
+
+#define mmNIC1_QM1_CP_CURRENT_INST_LO_4                              0xD223CC
+
+#define mmNIC1_QM1_CP_CURRENT_INST_HI_0                              0xD223D0
+
+#define mmNIC1_QM1_CP_CURRENT_INST_HI_1                              0xD223D4
+
+#define mmNIC1_QM1_CP_CURRENT_INST_HI_2                              0xD223D8
+
+#define mmNIC1_QM1_CP_CURRENT_INST_HI_3                              0xD223DC
+
+#define mmNIC1_QM1_CP_CURRENT_INST_HI_4                              0xD223E0
+
+#define mmNIC1_QM1_CP_BARRIER_CFG_0                                  0xD223F4
+
+#define mmNIC1_QM1_CP_BARRIER_CFG_1                                  0xD223F8
+
+#define mmNIC1_QM1_CP_BARRIER_CFG_2                                  0xD223FC
+
+#define mmNIC1_QM1_CP_BARRIER_CFG_3                                  0xD22400
+
+#define mmNIC1_QM1_CP_BARRIER_CFG_4                                  0xD22404
+
+#define mmNIC1_QM1_CP_DBG_0_0                                        0xD22408
+
+#define mmNIC1_QM1_CP_DBG_0_1                                        0xD2240C
+
+#define mmNIC1_QM1_CP_DBG_0_2                                        0xD22410
+
+#define mmNIC1_QM1_CP_DBG_0_3                                        0xD22414
+
+#define mmNIC1_QM1_CP_DBG_0_4                                        0xD22418
+
+#define mmNIC1_QM1_CP_ARUSER_31_11_0                                 0xD2241C
+
+#define mmNIC1_QM1_CP_ARUSER_31_11_1                                 0xD22420
+
+#define mmNIC1_QM1_CP_ARUSER_31_11_2                                 0xD22424
+
+#define mmNIC1_QM1_CP_ARUSER_31_11_3                                 0xD22428
+
+#define mmNIC1_QM1_CP_ARUSER_31_11_4                                 0xD2242C
+
+#define mmNIC1_QM1_CP_AWUSER_31_11_0                                 0xD22430
+
+#define mmNIC1_QM1_CP_AWUSER_31_11_1                                 0xD22434
+
+#define mmNIC1_QM1_CP_AWUSER_31_11_2                                 0xD22438
+
+#define mmNIC1_QM1_CP_AWUSER_31_11_3                                 0xD2243C
+
+#define mmNIC1_QM1_CP_AWUSER_31_11_4                                 0xD22440
+
+#define mmNIC1_QM1_ARB_CFG_0                                         0xD22A00
+
+#define mmNIC1_QM1_ARB_CHOISE_Q_PUSH                                 0xD22A04
+
+#define mmNIC1_QM1_ARB_WRR_WEIGHT_0                                  0xD22A08
+
+#define mmNIC1_QM1_ARB_WRR_WEIGHT_1                                  0xD22A0C
+
+#define mmNIC1_QM1_ARB_WRR_WEIGHT_2                                  0xD22A10
+
+#define mmNIC1_QM1_ARB_WRR_WEIGHT_3                                  0xD22A14
+
+#define mmNIC1_QM1_ARB_CFG_1                                         0xD22A18
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_0                              0xD22A20
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_1                              0xD22A24
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_2                              0xD22A28
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_3                              0xD22A2C
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_4                              0xD22A30
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_5                              0xD22A34
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_6                              0xD22A38
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_7                              0xD22A3C
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_8                              0xD22A40
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_9                              0xD22A44
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_10                             0xD22A48
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_11                             0xD22A4C
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_12                             0xD22A50
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_13                             0xD22A54
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_14                             0xD22A58
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_15                             0xD22A5C
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_16                             0xD22A60
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_17                             0xD22A64
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_18                             0xD22A68
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_19                             0xD22A6C
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_20                             0xD22A70
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_21                             0xD22A74
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_22                             0xD22A78
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_23                             0xD22A7C
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_24                             0xD22A80
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_25                             0xD22A84
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_26                             0xD22A88
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_27                             0xD22A8C
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_28                             0xD22A90
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_29                             0xD22A94
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_30                             0xD22A98
+
+#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_31                             0xD22A9C
+
+#define mmNIC1_QM1_ARB_MST_CRED_INC                                  0xD22AA0
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_0                        0xD22AA4
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_1                        0xD22AA8
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_2                        0xD22AAC
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_3                        0xD22AB0
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_4                        0xD22AB4
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_5                        0xD22AB8
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_6                        0xD22ABC
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_7                        0xD22AC0
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_8                        0xD22AC4
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_9                        0xD22AC8
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_10                       0xD22ACC
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_11                       0xD22AD0
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_12                       0xD22AD4
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_13                       0xD22AD8
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_14                       0xD22ADC
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_15                       0xD22AE0
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_16                       0xD22AE4
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_17                       0xD22AE8
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_18                       0xD22AEC
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_19                       0xD22AF0
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_20                       0xD22AF4
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_21                       0xD22AF8
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_22                       0xD22AFC
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23                       0xD22B00
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_24                       0xD22B04
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_25                       0xD22B08
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_26                       0xD22B0C
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_27                       0xD22B10
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_28                       0xD22B14
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_29                       0xD22B18
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_30                       0xD22B1C
+
+#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_31                       0xD22B20
+
+#define mmNIC1_QM1_ARB_SLV_MASTER_INC_CRED_OFST                      0xD22B28
+
+#define mmNIC1_QM1_ARB_MST_SLAVE_EN                                  0xD22B2C
+
+#define mmNIC1_QM1_ARB_MST_QUIET_PER                                 0xD22B34
+
+#define mmNIC1_QM1_ARB_SLV_CHOISE_WDT                                0xD22B38
+
+#define mmNIC1_QM1_ARB_SLV_ID                                        0xD22B3C
+
+#define mmNIC1_QM1_ARB_MSG_MAX_INFLIGHT                              0xD22B44
+
+#define mmNIC1_QM1_ARB_MSG_AWUSER_31_11                              0xD22B48
+
+#define mmNIC1_QM1_ARB_MSG_AWUSER_SEC_PROP                           0xD22B4C
+
+#define mmNIC1_QM1_ARB_MSG_AWUSER_NON_SEC_PROP                       0xD22B50
+
+#define mmNIC1_QM1_ARB_BASE_LO                                       0xD22B54
+
+#define mmNIC1_QM1_ARB_BASE_HI                                       0xD22B58
+
+#define mmNIC1_QM1_ARB_STATE_STS                                     0xD22B80
+
+#define mmNIC1_QM1_ARB_CHOISE_FULLNESS_STS                           0xD22B84
+
+#define mmNIC1_QM1_ARB_MSG_STS                                       0xD22B88
+
+#define mmNIC1_QM1_ARB_SLV_CHOISE_Q_HEAD                             0xD22B8C
+
+#define mmNIC1_QM1_ARB_ERR_CAUSE                                     0xD22B9C
+
+#define mmNIC1_QM1_ARB_ERR_MSG_EN                                    0xD22BA0
+
+#define mmNIC1_QM1_ARB_ERR_STS_DRP                                   0xD22BA8
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_0                                0xD22BB0
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_1                                0xD22BB4
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_2                                0xD22BB8
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_3                                0xD22BBC
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_4                                0xD22BC0
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_5                                0xD22BC4
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_6                                0xD22BC8
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_7                                0xD22BCC
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_8                                0xD22BD0
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_9                                0xD22BD4
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_10                               0xD22BD8
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_11                               0xD22BDC
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_12                               0xD22BE0
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_13                               0xD22BE4
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_14                               0xD22BE8
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_15                               0xD22BEC
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_16                               0xD22BF0
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_17                               0xD22BF4
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_18                               0xD22BF8
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_19                               0xD22BFC
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_20                               0xD22C00
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_21                               0xD22C04
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_22                               0xD22C08
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_23                               0xD22C0C
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_24                               0xD22C10
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_25                               0xD22C14
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_26                               0xD22C18
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_27                               0xD22C1C
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_28                               0xD22C20
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_29                               0xD22C24
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_30                               0xD22C28
+
+#define mmNIC1_QM1_ARB_MST_CRED_STS_31                               0xD22C2C
+
+#define mmNIC1_QM1_CGM_CFG                                           0xD22C70
+
+#define mmNIC1_QM1_CGM_STS                                           0xD22C74
+
+#define mmNIC1_QM1_CGM_CFG1                                          0xD22C78
+
+#define mmNIC1_QM1_LOCAL_RANGE_BASE                                  0xD22C80
+
+#define mmNIC1_QM1_LOCAL_RANGE_SIZE                                  0xD22C84
+
+#define mmNIC1_QM1_CSMR_STRICT_PRIO_CFG                              0xD22C90
+
+#define mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_1                             0xD22C94
+
+#define mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_0                             0xD22C98
+
+#define mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_1                             0xD22C9C
+
+#define mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_0                             0xD22CA0
+
+#define mmNIC1_QM1_GLBL_AXCACHE                                      0xD22CA4
+
+#define mmNIC1_QM1_IND_GW_APB_CFG                                    0xD22CB0
+
+#define mmNIC1_QM1_IND_GW_APB_WDATA                                  0xD22CB4
+
+#define mmNIC1_QM1_IND_GW_APB_RDATA                                  0xD22CB8
+
+#define mmNIC1_QM1_IND_GW_APB_STATUS                                 0xD22CBC
+
+#define mmNIC1_QM1_GLBL_ERR_ADDR_LO                                  0xD22CD0
+
+#define mmNIC1_QM1_GLBL_ERR_ADDR_HI                                  0xD22CD4
+
+#define mmNIC1_QM1_GLBL_ERR_WDATA                                    0xD22CD8
+
+#define mmNIC1_QM1_GLBL_MEM_INIT_BUSY                                0xD22D00
+
+#endif /* ASIC_REG_NIC1_QM1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..a89116a4586f6f5877d209b6faf00ecdf79666be
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC2_QM0_REGS_H_
+#define ASIC_REG_NIC2_QM0_REGS_H_
+
+/*
+ *****************************************
+ *   NIC2_QM0 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC2_QM0_GLBL_CFG0                                         0xD60000
+
+#define mmNIC2_QM0_GLBL_CFG1                                         0xD60004
+
+#define mmNIC2_QM0_GLBL_PROT                                         0xD60008
+
+#define mmNIC2_QM0_GLBL_ERR_CFG                                      0xD6000C
+
+#define mmNIC2_QM0_GLBL_SECURE_PROPS_0                               0xD60010
+
+#define mmNIC2_QM0_GLBL_SECURE_PROPS_1                               0xD60014
+
+#define mmNIC2_QM0_GLBL_SECURE_PROPS_2                               0xD60018
+
+#define mmNIC2_QM0_GLBL_SECURE_PROPS_3                               0xD6001C
+
+#define mmNIC2_QM0_GLBL_SECURE_PROPS_4                               0xD60020
+
+#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0                           0xD60024
+
+#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1                           0xD60028
+
+#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2                           0xD6002C
+
+#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3                           0xD60030
+
+#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4                           0xD60034
+
+#define mmNIC2_QM0_GLBL_STS0                                         0xD60038
+
+#define mmNIC2_QM0_GLBL_STS1_0                                       0xD60040
+
+#define mmNIC2_QM0_GLBL_STS1_1                                       0xD60044
+
+#define mmNIC2_QM0_GLBL_STS1_2                                       0xD60048
+
+#define mmNIC2_QM0_GLBL_STS1_3                                       0xD6004C
+
+#define mmNIC2_QM0_GLBL_STS1_4                                       0xD60050
+
+#define mmNIC2_QM0_GLBL_MSG_EN_0                                     0xD60054
+
+#define mmNIC2_QM0_GLBL_MSG_EN_1                                     0xD60058
+
+#define mmNIC2_QM0_GLBL_MSG_EN_2                                     0xD6005C
+
+#define mmNIC2_QM0_GLBL_MSG_EN_3                                     0xD60060
+
+#define mmNIC2_QM0_GLBL_MSG_EN_4                                     0xD60068
+
+#define mmNIC2_QM0_PQ_BASE_LO_0                                      0xD60070
+
+#define mmNIC2_QM0_PQ_BASE_LO_1                                      0xD60074
+
+#define mmNIC2_QM0_PQ_BASE_LO_2                                      0xD60078
+
+#define mmNIC2_QM0_PQ_BASE_LO_3                                      0xD6007C
+
+#define mmNIC2_QM0_PQ_BASE_HI_0                                      0xD60080
+
+#define mmNIC2_QM0_PQ_BASE_HI_1                                      0xD60084
+
+#define mmNIC2_QM0_PQ_BASE_HI_2                                      0xD60088
+
+#define mmNIC2_QM0_PQ_BASE_HI_3                                      0xD6008C
+
+#define mmNIC2_QM0_PQ_SIZE_0                                         0xD60090
+
+#define mmNIC2_QM0_PQ_SIZE_1                                         0xD60094
+
+#define mmNIC2_QM0_PQ_SIZE_2                                         0xD60098
+
+#define mmNIC2_QM0_PQ_SIZE_3                                         0xD6009C
+
+#define mmNIC2_QM0_PQ_PI_0                                           0xD600A0
+
+#define mmNIC2_QM0_PQ_PI_1                                           0xD600A4
+
+#define mmNIC2_QM0_PQ_PI_2                                           0xD600A8
+
+#define mmNIC2_QM0_PQ_PI_3                                           0xD600AC
+
+#define mmNIC2_QM0_PQ_CI_0                                           0xD600B0
+
+#define mmNIC2_QM0_PQ_CI_1                                           0xD600B4
+
+#define mmNIC2_QM0_PQ_CI_2                                           0xD600B8
+
+#define mmNIC2_QM0_PQ_CI_3                                           0xD600BC
+
+#define mmNIC2_QM0_PQ_CFG0_0                                         0xD600C0
+
+#define mmNIC2_QM0_PQ_CFG0_1                                         0xD600C4
+
+#define mmNIC2_QM0_PQ_CFG0_2                                         0xD600C8
+
+#define mmNIC2_QM0_PQ_CFG0_3                                         0xD600CC
+
+#define mmNIC2_QM0_PQ_CFG1_0                                         0xD600D0
+
+#define mmNIC2_QM0_PQ_CFG1_1                                         0xD600D4
+
+#define mmNIC2_QM0_PQ_CFG1_2                                         0xD600D8
+
+#define mmNIC2_QM0_PQ_CFG1_3                                         0xD600DC
+
+#define mmNIC2_QM0_PQ_ARUSER_31_11_0                                 0xD600E0
+
+#define mmNIC2_QM0_PQ_ARUSER_31_11_1                                 0xD600E4
+
+#define mmNIC2_QM0_PQ_ARUSER_31_11_2                                 0xD600E8
+
+#define mmNIC2_QM0_PQ_ARUSER_31_11_3                                 0xD600EC
+
+#define mmNIC2_QM0_PQ_STS0_0                                         0xD600F0
+
+#define mmNIC2_QM0_PQ_STS0_1                                         0xD600F4
+
+#define mmNIC2_QM0_PQ_STS0_2                                         0xD600F8
+
+#define mmNIC2_QM0_PQ_STS0_3                                         0xD600FC
+
+#define mmNIC2_QM0_PQ_STS1_0                                         0xD60100
+
+#define mmNIC2_QM0_PQ_STS1_1                                         0xD60104
+
+#define mmNIC2_QM0_PQ_STS1_2                                         0xD60108
+
+#define mmNIC2_QM0_PQ_STS1_3                                         0xD6010C
+
+#define mmNIC2_QM0_CQ_CFG0_0                                         0xD60110
+
+#define mmNIC2_QM0_CQ_CFG0_1                                         0xD60114
+
+#define mmNIC2_QM0_CQ_CFG0_2                                         0xD60118
+
+#define mmNIC2_QM0_CQ_CFG0_3                                         0xD6011C
+
+#define mmNIC2_QM0_CQ_CFG0_4                                         0xD60120
+
+#define mmNIC2_QM0_CQ_CFG1_0                                         0xD60124
+
+#define mmNIC2_QM0_CQ_CFG1_1                                         0xD60128
+
+#define mmNIC2_QM0_CQ_CFG1_2                                         0xD6012C
+
+#define mmNIC2_QM0_CQ_CFG1_3                                         0xD60130
+
+#define mmNIC2_QM0_CQ_CFG1_4                                         0xD60134
+
+#define mmNIC2_QM0_CQ_ARUSER_31_11_0                                 0xD60138
+
+#define mmNIC2_QM0_CQ_ARUSER_31_11_1                                 0xD6013C
+
+#define mmNIC2_QM0_CQ_ARUSER_31_11_2                                 0xD60140
+
+#define mmNIC2_QM0_CQ_ARUSER_31_11_3                                 0xD60144
+
+#define mmNIC2_QM0_CQ_ARUSER_31_11_4                                 0xD60148
+
+#define mmNIC2_QM0_CQ_STS0_0                                         0xD6014C
+
+#define mmNIC2_QM0_CQ_STS0_1                                         0xD60150
+
+#define mmNIC2_QM0_CQ_STS0_2                                         0xD60154
+
+#define mmNIC2_QM0_CQ_STS0_3                                         0xD60158
+
+#define mmNIC2_QM0_CQ_STS0_4                                         0xD6015C
+
+#define mmNIC2_QM0_CQ_STS1_0                                         0xD60160
+
+#define mmNIC2_QM0_CQ_STS1_1                                         0xD60164
+
+#define mmNIC2_QM0_CQ_STS1_2                                         0xD60168
+
+#define mmNIC2_QM0_CQ_STS1_3                                         0xD6016C
+
+#define mmNIC2_QM0_CQ_STS1_4                                         0xD60170
+
+#define mmNIC2_QM0_CQ_PTR_LO_0                                       0xD60174
+
+#define mmNIC2_QM0_CQ_PTR_HI_0                                       0xD60178
+
+#define mmNIC2_QM0_CQ_TSIZE_0                                        0xD6017C
+
+#define mmNIC2_QM0_CQ_CTL_0                                          0xD60180
+
+#define mmNIC2_QM0_CQ_PTR_LO_1                                       0xD60184
+
+#define mmNIC2_QM0_CQ_PTR_HI_1                                       0xD60188
+
+#define mmNIC2_QM0_CQ_TSIZE_1                                        0xD6018C
+
+#define mmNIC2_QM0_CQ_CTL_1                                          0xD60190
+
+#define mmNIC2_QM0_CQ_PTR_LO_2                                       0xD60194
+
+#define mmNIC2_QM0_CQ_PTR_HI_2                                       0xD60198
+
+#define mmNIC2_QM0_CQ_TSIZE_2                                        0xD6019C
+
+#define mmNIC2_QM0_CQ_CTL_2                                          0xD601A0
+
+#define mmNIC2_QM0_CQ_PTR_LO_3                                       0xD601A4
+
+#define mmNIC2_QM0_CQ_PTR_HI_3                                       0xD601A8
+
+#define mmNIC2_QM0_CQ_TSIZE_3                                        0xD601AC
+
+#define mmNIC2_QM0_CQ_CTL_3                                          0xD601B0
+
+#define mmNIC2_QM0_CQ_PTR_LO_4                                       0xD601B4
+
+#define mmNIC2_QM0_CQ_PTR_HI_4                                       0xD601B8
+
+#define mmNIC2_QM0_CQ_TSIZE_4                                        0xD601BC
+
+#define mmNIC2_QM0_CQ_CTL_4                                          0xD601C0
+
+#define mmNIC2_QM0_CQ_PTR_LO_STS_0                                   0xD601C4
+
+#define mmNIC2_QM0_CQ_PTR_LO_STS_1                                   0xD601C8
+
+#define mmNIC2_QM0_CQ_PTR_LO_STS_2                                   0xD601CC
+
+#define mmNIC2_QM0_CQ_PTR_LO_STS_3                                   0xD601D0
+
+#define mmNIC2_QM0_CQ_PTR_LO_STS_4                                   0xD601D4
+
+#define mmNIC2_QM0_CQ_PTR_HI_STS_0                                   0xD601D8
+
+#define mmNIC2_QM0_CQ_PTR_HI_STS_1                                   0xD601DC
+
+#define mmNIC2_QM0_CQ_PTR_HI_STS_2                                   0xD601E0
+
+#define mmNIC2_QM0_CQ_PTR_HI_STS_3                                   0xD601E4
+
+#define mmNIC2_QM0_CQ_PTR_HI_STS_4                                   0xD601E8
+
+#define mmNIC2_QM0_CQ_TSIZE_STS_0                                    0xD601EC
+
+#define mmNIC2_QM0_CQ_TSIZE_STS_1                                    0xD601F0
+
+#define mmNIC2_QM0_CQ_TSIZE_STS_2                                    0xD601F4
+
+#define mmNIC2_QM0_CQ_TSIZE_STS_3                                    0xD601F8
+
+#define mmNIC2_QM0_CQ_TSIZE_STS_4                                    0xD601FC
+
+#define mmNIC2_QM0_CQ_CTL_STS_0                                      0xD60200
+
+#define mmNIC2_QM0_CQ_CTL_STS_1                                      0xD60204
+
+#define mmNIC2_QM0_CQ_CTL_STS_2                                      0xD60208
+
+#define mmNIC2_QM0_CQ_CTL_STS_3                                      0xD6020C
+
+#define mmNIC2_QM0_CQ_CTL_STS_4                                      0xD60210
+
+#define mmNIC2_QM0_CQ_IFIFO_CNT_0                                    0xD60214
+
+#define mmNIC2_QM0_CQ_IFIFO_CNT_1                                    0xD60218
+
+#define mmNIC2_QM0_CQ_IFIFO_CNT_2                                    0xD6021C
+
+#define mmNIC2_QM0_CQ_IFIFO_CNT_3                                    0xD60220
+
+#define mmNIC2_QM0_CQ_IFIFO_CNT_4                                    0xD60224
+
+#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_0                            0xD60228
+
+#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_1                            0xD6022C
+
+#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_2                            0xD60230
+
+#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_3                            0xD60234
+
+#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_4                            0xD60238
+
+#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_0                            0xD6023C
+
+#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_1                            0xD60240
+
+#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_2                            0xD60244
+
+#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_3                            0xD60248
+
+#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_4                            0xD6024C
+
+#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_0                            0xD60250
+
+#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_1                            0xD60254
+
+#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_2                            0xD60258
+
+#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_3                            0xD6025C
+
+#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_4                            0xD60260
+
+#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_0                            0xD60264
+
+#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_1                            0xD60268
+
+#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_2                            0xD6026C
+
+#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_3                            0xD60270
+
+#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_4                            0xD60274
+
+#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_0                            0xD60278
+
+#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_1                            0xD6027C
+
+#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2                            0xD60280
+
+#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_3                            0xD60284
+
+#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_4                            0xD60288
+
+#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_0                            0xD6028C
+
+#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_1                            0xD60290
+
+#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_2                            0xD60294
+
+#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_3                            0xD60298
+
+#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_4                            0xD6029C
+
+#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_0                            0xD602A0
+
+#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_1                            0xD602A4
+
+#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_2                            0xD602A8
+
+#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_3                            0xD602AC
+
+#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_4                            0xD602B0
+
+#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_0                            0xD602B4
+
+#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_1                            0xD602B8
+
+#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_2                            0xD602BC
+
+#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_3                            0xD602C0
+
+#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_4                            0xD602C4
+
+#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_0                            0xD602C8
+
+#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_1                            0xD602CC
+
+#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_2                            0xD602D0
+
+#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_3                            0xD602D4
+
+#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_4                            0xD602D8
+
+#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0                      0xD602E0
+
+#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1                      0xD602E4
+
+#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2                      0xD602E8
+
+#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3                      0xD602EC
+
+#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4                      0xD602F0
+
+#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0                      0xD602F4
+
+#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1                      0xD602F8
+
+#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2                      0xD602FC
+
+#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3                      0xD60300
+
+#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4                      0xD60304
+
+#define mmNIC2_QM0_CP_FENCE0_RDATA_0                                 0xD60308
+
+#define mmNIC2_QM0_CP_FENCE0_RDATA_1                                 0xD6030C
+
+#define mmNIC2_QM0_CP_FENCE0_RDATA_2                                 0xD60310
+
+#define mmNIC2_QM0_CP_FENCE0_RDATA_3                                 0xD60314
+
+#define mmNIC2_QM0_CP_FENCE0_RDATA_4                                 0xD60318
+
+#define mmNIC2_QM0_CP_FENCE1_RDATA_0                                 0xD6031C
+
+#define mmNIC2_QM0_CP_FENCE1_RDATA_1                                 0xD60320
+
+#define mmNIC2_QM0_CP_FENCE1_RDATA_2                                 0xD60324
+
+#define mmNIC2_QM0_CP_FENCE1_RDATA_3                                 0xD60328
+
+#define mmNIC2_QM0_CP_FENCE1_RDATA_4                                 0xD6032C
+
+#define mmNIC2_QM0_CP_FENCE2_RDATA_0                                 0xD60330
+
+#define mmNIC2_QM0_CP_FENCE2_RDATA_1                                 0xD60334
+
+#define mmNIC2_QM0_CP_FENCE2_RDATA_2                                 0xD60338
+
+#define mmNIC2_QM0_CP_FENCE2_RDATA_3                                 0xD6033C
+
+#define mmNIC2_QM0_CP_FENCE2_RDATA_4                                 0xD60340
+
+#define mmNIC2_QM0_CP_FENCE3_RDATA_0                                 0xD60344
+
+#define mmNIC2_QM0_CP_FENCE3_RDATA_1                                 0xD60348
+
+#define mmNIC2_QM0_CP_FENCE3_RDATA_2                                 0xD6034C
+
+#define mmNIC2_QM0_CP_FENCE3_RDATA_3                                 0xD60350
+
+#define mmNIC2_QM0_CP_FENCE3_RDATA_4                                 0xD60354
+
+#define mmNIC2_QM0_CP_FENCE0_CNT_0                                   0xD60358
+
+#define mmNIC2_QM0_CP_FENCE0_CNT_1                                   0xD6035C
+
+#define mmNIC2_QM0_CP_FENCE0_CNT_2                                   0xD60360
+
+#define mmNIC2_QM0_CP_FENCE0_CNT_3                                   0xD60364
+
+#define mmNIC2_QM0_CP_FENCE0_CNT_4                                   0xD60368
+
+#define mmNIC2_QM0_CP_FENCE1_CNT_0                                   0xD6036C
+
+#define mmNIC2_QM0_CP_FENCE1_CNT_1                                   0xD60370
+
+#define mmNIC2_QM0_CP_FENCE1_CNT_2                                   0xD60374
+
+#define mmNIC2_QM0_CP_FENCE1_CNT_3                                   0xD60378
+
+#define mmNIC2_QM0_CP_FENCE1_CNT_4                                   0xD6037C
+
+#define mmNIC2_QM0_CP_FENCE2_CNT_0                                   0xD60380
+
+#define mmNIC2_QM0_CP_FENCE2_CNT_1                                   0xD60384
+
+#define mmNIC2_QM0_CP_FENCE2_CNT_2                                   0xD60388
+
+#define mmNIC2_QM0_CP_FENCE2_CNT_3                                   0xD6038C
+
+#define mmNIC2_QM0_CP_FENCE2_CNT_4                                   0xD60390
+
+#define mmNIC2_QM0_CP_FENCE3_CNT_0                                   0xD60394
+
+#define mmNIC2_QM0_CP_FENCE3_CNT_1                                   0xD60398
+
+#define mmNIC2_QM0_CP_FENCE3_CNT_2                                   0xD6039C
+
+#define mmNIC2_QM0_CP_FENCE3_CNT_3                                   0xD603A0
+
+#define mmNIC2_QM0_CP_FENCE3_CNT_4                                   0xD603A4
+
+#define mmNIC2_QM0_CP_STS_0                                          0xD603A8
+
+#define mmNIC2_QM0_CP_STS_1                                          0xD603AC
+
+#define mmNIC2_QM0_CP_STS_2                                          0xD603B0
+
+#define mmNIC2_QM0_CP_STS_3                                          0xD603B4
+
+#define mmNIC2_QM0_CP_STS_4                                          0xD603B8
+
+#define mmNIC2_QM0_CP_CURRENT_INST_LO_0                              0xD603BC
+
+#define mmNIC2_QM0_CP_CURRENT_INST_LO_1                              0xD603C0
+
+#define mmNIC2_QM0_CP_CURRENT_INST_LO_2                              0xD603C4
+
+#define mmNIC2_QM0_CP_CURRENT_INST_LO_3                              0xD603C8
+
+#define mmNIC2_QM0_CP_CURRENT_INST_LO_4                              0xD603CC
+
+#define mmNIC2_QM0_CP_CURRENT_INST_HI_0                              0xD603D0
+
+#define mmNIC2_QM0_CP_CURRENT_INST_HI_1                              0xD603D4
+
+#define mmNIC2_QM0_CP_CURRENT_INST_HI_2                              0xD603D8
+
+#define mmNIC2_QM0_CP_CURRENT_INST_HI_3                              0xD603DC
+
+#define mmNIC2_QM0_CP_CURRENT_INST_HI_4                              0xD603E0
+
+#define mmNIC2_QM0_CP_BARRIER_CFG_0                                  0xD603F4
+
+#define mmNIC2_QM0_CP_BARRIER_CFG_1                                  0xD603F8
+
+#define mmNIC2_QM0_CP_BARRIER_CFG_2                                  0xD603FC
+
+#define mmNIC2_QM0_CP_BARRIER_CFG_3                                  0xD60400
+
+#define mmNIC2_QM0_CP_BARRIER_CFG_4                                  0xD60404
+
+#define mmNIC2_QM0_CP_DBG_0_0                                        0xD60408
+
+#define mmNIC2_QM0_CP_DBG_0_1                                        0xD6040C
+
+#define mmNIC2_QM0_CP_DBG_0_2                                        0xD60410
+
+#define mmNIC2_QM0_CP_DBG_0_3                                        0xD60414
+
+#define mmNIC2_QM0_CP_DBG_0_4                                        0xD60418
+
+#define mmNIC2_QM0_CP_ARUSER_31_11_0                                 0xD6041C
+
+#define mmNIC2_QM0_CP_ARUSER_31_11_1                                 0xD60420
+
+#define mmNIC2_QM0_CP_ARUSER_31_11_2                                 0xD60424
+
+#define mmNIC2_QM0_CP_ARUSER_31_11_3                                 0xD60428
+
+#define mmNIC2_QM0_CP_ARUSER_31_11_4                                 0xD6042C
+
+#define mmNIC2_QM0_CP_AWUSER_31_11_0                                 0xD60430
+
+#define mmNIC2_QM0_CP_AWUSER_31_11_1                                 0xD60434
+
+#define mmNIC2_QM0_CP_AWUSER_31_11_2                                 0xD60438
+
+#define mmNIC2_QM0_CP_AWUSER_31_11_3                                 0xD6043C
+
+#define mmNIC2_QM0_CP_AWUSER_31_11_4                                 0xD60440
+
+#define mmNIC2_QM0_ARB_CFG_0                                         0xD60A00
+
+#define mmNIC2_QM0_ARB_CHOISE_Q_PUSH                                 0xD60A04
+
+#define mmNIC2_QM0_ARB_WRR_WEIGHT_0                                  0xD60A08
+
+#define mmNIC2_QM0_ARB_WRR_WEIGHT_1                                  0xD60A0C
+
+#define mmNIC2_QM0_ARB_WRR_WEIGHT_2                                  0xD60A10
+
+#define mmNIC2_QM0_ARB_WRR_WEIGHT_3                                  0xD60A14
+
+#define mmNIC2_QM0_ARB_CFG_1                                         0xD60A18
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_0                              0xD60A20
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_1                              0xD60A24
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_2                              0xD60A28
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_3                              0xD60A2C
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_4                              0xD60A30
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_5                              0xD60A34
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_6                              0xD60A38
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_7                              0xD60A3C
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_8                              0xD60A40
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_9                              0xD60A44
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_10                             0xD60A48
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_11                             0xD60A4C
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_12                             0xD60A50
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_13                             0xD60A54
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_14                             0xD60A58
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_15                             0xD60A5C
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_16                             0xD60A60
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_17                             0xD60A64
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_18                             0xD60A68
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_19                             0xD60A6C
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_20                             0xD60A70
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_21                             0xD60A74
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_22                             0xD60A78
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_23                             0xD60A7C
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_24                             0xD60A80
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_25                             0xD60A84
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_26                             0xD60A88
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_27                             0xD60A8C
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_28                             0xD60A90
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_29                             0xD60A94
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_30                             0xD60A98
+
+#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_31                             0xD60A9C
+
+#define mmNIC2_QM0_ARB_MST_CRED_INC                                  0xD60AA0
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_0                        0xD60AA4
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_1                        0xD60AA8
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_2                        0xD60AAC
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_3                        0xD60AB0
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_4                        0xD60AB4
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_5                        0xD60AB8
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_6                        0xD60ABC
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_7                        0xD60AC0
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_8                        0xD60AC4
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_9                        0xD60AC8
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_10                       0xD60ACC
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_11                       0xD60AD0
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_12                       0xD60AD4
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_13                       0xD60AD8
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_14                       0xD60ADC
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_15                       0xD60AE0
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_16                       0xD60AE4
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_17                       0xD60AE8
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_18                       0xD60AEC
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_19                       0xD60AF0
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_20                       0xD60AF4
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_21                       0xD60AF8
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_22                       0xD60AFC
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23                       0xD60B00
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_24                       0xD60B04
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_25                       0xD60B08
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_26                       0xD60B0C
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_27                       0xD60B10
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_28                       0xD60B14
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_29                       0xD60B18
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_30                       0xD60B1C
+
+#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_31                       0xD60B20
+
+#define mmNIC2_QM0_ARB_SLV_MASTER_INC_CRED_OFST                      0xD60B28
+
+#define mmNIC2_QM0_ARB_MST_SLAVE_EN                                  0xD60B2C
+
+#define mmNIC2_QM0_ARB_MST_QUIET_PER                                 0xD60B34
+
+#define mmNIC2_QM0_ARB_SLV_CHOISE_WDT                                0xD60B38
+
+#define mmNIC2_QM0_ARB_SLV_ID                                        0xD60B3C
+
+#define mmNIC2_QM0_ARB_MSG_MAX_INFLIGHT                              0xD60B44
+
+#define mmNIC2_QM0_ARB_MSG_AWUSER_31_11                              0xD60B48
+
+#define mmNIC2_QM0_ARB_MSG_AWUSER_SEC_PROP                           0xD60B4C
+
+#define mmNIC2_QM0_ARB_MSG_AWUSER_NON_SEC_PROP                       0xD60B50
+
+#define mmNIC2_QM0_ARB_BASE_LO                                       0xD60B54
+
+#define mmNIC2_QM0_ARB_BASE_HI                                       0xD60B58
+
+#define mmNIC2_QM0_ARB_STATE_STS                                     0xD60B80
+
+#define mmNIC2_QM0_ARB_CHOISE_FULLNESS_STS                           0xD60B84
+
+#define mmNIC2_QM0_ARB_MSG_STS                                       0xD60B88
+
+#define mmNIC2_QM0_ARB_SLV_CHOISE_Q_HEAD                             0xD60B8C
+
+#define mmNIC2_QM0_ARB_ERR_CAUSE                                     0xD60B9C
+
+#define mmNIC2_QM0_ARB_ERR_MSG_EN                                    0xD60BA0
+
+#define mmNIC2_QM0_ARB_ERR_STS_DRP                                   0xD60BA8
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_0                                0xD60BB0
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_1                                0xD60BB4
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_2                                0xD60BB8
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_3                                0xD60BBC
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_4                                0xD60BC0
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_5                                0xD60BC4
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_6                                0xD60BC8
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_7                                0xD60BCC
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_8                                0xD60BD0
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_9                                0xD60BD4
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_10                               0xD60BD8
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_11                               0xD60BDC
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_12                               0xD60BE0
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_13                               0xD60BE4
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_14                               0xD60BE8
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_15                               0xD60BEC
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_16                               0xD60BF0
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_17                               0xD60BF4
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_18                               0xD60BF8
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_19                               0xD60BFC
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_20                               0xD60C00
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_21                               0xD60C04
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_22                               0xD60C08
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_23                               0xD60C0C
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_24                               0xD60C10
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_25                               0xD60C14
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_26                               0xD60C18
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_27                               0xD60C1C
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_28                               0xD60C20
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_29                               0xD60C24
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_30                               0xD60C28
+
+#define mmNIC2_QM0_ARB_MST_CRED_STS_31                               0xD60C2C
+
+#define mmNIC2_QM0_CGM_CFG                                           0xD60C70
+
+#define mmNIC2_QM0_CGM_STS                                           0xD60C74
+
+#define mmNIC2_QM0_CGM_CFG1                                          0xD60C78
+
+#define mmNIC2_QM0_LOCAL_RANGE_BASE                                  0xD60C80
+
+#define mmNIC2_QM0_LOCAL_RANGE_SIZE                                  0xD60C84
+
+#define mmNIC2_QM0_CSMR_STRICT_PRIO_CFG                              0xD60C90
+
+#define mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_1                             0xD60C94
+
+#define mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_0                             0xD60C98
+
+#define mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_1                             0xD60C9C
+
+#define mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_0                             0xD60CA0
+
+#define mmNIC2_QM0_GLBL_AXCACHE                                      0xD60CA4
+
+#define mmNIC2_QM0_IND_GW_APB_CFG                                    0xD60CB0
+
+#define mmNIC2_QM0_IND_GW_APB_WDATA                                  0xD60CB4
+
+#define mmNIC2_QM0_IND_GW_APB_RDATA                                  0xD60CB8
+
+#define mmNIC2_QM0_IND_GW_APB_STATUS                                 0xD60CBC
+
+#define mmNIC2_QM0_GLBL_ERR_ADDR_LO                                  0xD60CD0
+
+#define mmNIC2_QM0_GLBL_ERR_ADDR_HI                                  0xD60CD4
+
+#define mmNIC2_QM0_GLBL_ERR_WDATA                                    0xD60CD8
+
+#define mmNIC2_QM0_GLBL_MEM_INIT_BUSY                                0xD60D00
+
+#endif /* ASIC_REG_NIC2_QM0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..b7f091ddc89ce07bbbcf09df0301731d68deabd1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC2_QM1_REGS_H_
+#define ASIC_REG_NIC2_QM1_REGS_H_
+
+/*
+ *****************************************
+ *   NIC2_QM1 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC2_QM1_GLBL_CFG0                                         0xD62000
+
+#define mmNIC2_QM1_GLBL_CFG1                                         0xD62004
+
+#define mmNIC2_QM1_GLBL_PROT                                         0xD62008
+
+#define mmNIC2_QM1_GLBL_ERR_CFG                                      0xD6200C
+
+#define mmNIC2_QM1_GLBL_SECURE_PROPS_0                               0xD62010
+
+#define mmNIC2_QM1_GLBL_SECURE_PROPS_1                               0xD62014
+
+#define mmNIC2_QM1_GLBL_SECURE_PROPS_2                               0xD62018
+
+#define mmNIC2_QM1_GLBL_SECURE_PROPS_3                               0xD6201C
+
+#define mmNIC2_QM1_GLBL_SECURE_PROPS_4                               0xD62020
+
+#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0                           0xD62024
+
+#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1                           0xD62028
+
+#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2                           0xD6202C
+
+#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3                           0xD62030
+
+#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4                           0xD62034
+
+#define mmNIC2_QM1_GLBL_STS0                                         0xD62038
+
+#define mmNIC2_QM1_GLBL_STS1_0                                       0xD62040
+
+#define mmNIC2_QM1_GLBL_STS1_1                                       0xD62044
+
+#define mmNIC2_QM1_GLBL_STS1_2                                       0xD62048
+
+#define mmNIC2_QM1_GLBL_STS1_3                                       0xD6204C
+
+#define mmNIC2_QM1_GLBL_STS1_4                                       0xD62050
+
+#define mmNIC2_QM1_GLBL_MSG_EN_0                                     0xD62054
+
+#define mmNIC2_QM1_GLBL_MSG_EN_1                                     0xD62058
+
+#define mmNIC2_QM1_GLBL_MSG_EN_2                                     0xD6205C
+
+#define mmNIC2_QM1_GLBL_MSG_EN_3                                     0xD62060
+
+#define mmNIC2_QM1_GLBL_MSG_EN_4                                     0xD62068
+
+#define mmNIC2_QM1_PQ_BASE_LO_0                                      0xD62070
+
+#define mmNIC2_QM1_PQ_BASE_LO_1                                      0xD62074
+
+#define mmNIC2_QM1_PQ_BASE_LO_2                                      0xD62078
+
+#define mmNIC2_QM1_PQ_BASE_LO_3                                      0xD6207C
+
+#define mmNIC2_QM1_PQ_BASE_HI_0                                      0xD62080
+
+#define mmNIC2_QM1_PQ_BASE_HI_1                                      0xD62084
+
+#define mmNIC2_QM1_PQ_BASE_HI_2                                      0xD62088
+
+#define mmNIC2_QM1_PQ_BASE_HI_3                                      0xD6208C
+
+#define mmNIC2_QM1_PQ_SIZE_0                                         0xD62090
+
+#define mmNIC2_QM1_PQ_SIZE_1                                         0xD62094
+
+#define mmNIC2_QM1_PQ_SIZE_2                                         0xD62098
+
+#define mmNIC2_QM1_PQ_SIZE_3                                         0xD6209C
+
+#define mmNIC2_QM1_PQ_PI_0                                           0xD620A0
+
+#define mmNIC2_QM1_PQ_PI_1                                           0xD620A4
+
+#define mmNIC2_QM1_PQ_PI_2                                           0xD620A8
+
+#define mmNIC2_QM1_PQ_PI_3                                           0xD620AC
+
+#define mmNIC2_QM1_PQ_CI_0                                           0xD620B0
+
+#define mmNIC2_QM1_PQ_CI_1                                           0xD620B4
+
+#define mmNIC2_QM1_PQ_CI_2                                           0xD620B8
+
+#define mmNIC2_QM1_PQ_CI_3                                           0xD620BC
+
+#define mmNIC2_QM1_PQ_CFG0_0                                         0xD620C0
+
+#define mmNIC2_QM1_PQ_CFG0_1                                         0xD620C4
+
+#define mmNIC2_QM1_PQ_CFG0_2                                         0xD620C8
+
+#define mmNIC2_QM1_PQ_CFG0_3                                         0xD620CC
+
+#define mmNIC2_QM1_PQ_CFG1_0                                         0xD620D0
+
+#define mmNIC2_QM1_PQ_CFG1_1                                         0xD620D4
+
+#define mmNIC2_QM1_PQ_CFG1_2                                         0xD620D8
+
+#define mmNIC2_QM1_PQ_CFG1_3                                         0xD620DC
+
+#define mmNIC2_QM1_PQ_ARUSER_31_11_0                                 0xD620E0
+
+#define mmNIC2_QM1_PQ_ARUSER_31_11_1                                 0xD620E4
+
+#define mmNIC2_QM1_PQ_ARUSER_31_11_2                                 0xD620E8
+
+#define mmNIC2_QM1_PQ_ARUSER_31_11_3                                 0xD620EC
+
+#define mmNIC2_QM1_PQ_STS0_0                                         0xD620F0
+
+#define mmNIC2_QM1_PQ_STS0_1                                         0xD620F4
+
+#define mmNIC2_QM1_PQ_STS0_2                                         0xD620F8
+
+#define mmNIC2_QM1_PQ_STS0_3                                         0xD620FC
+
+#define mmNIC2_QM1_PQ_STS1_0                                         0xD62100
+
+#define mmNIC2_QM1_PQ_STS1_1                                         0xD62104
+
+#define mmNIC2_QM1_PQ_STS1_2                                         0xD62108
+
+#define mmNIC2_QM1_PQ_STS1_3                                         0xD6210C
+
+#define mmNIC2_QM1_CQ_CFG0_0                                         0xD62110
+
+#define mmNIC2_QM1_CQ_CFG0_1                                         0xD62114
+
+#define mmNIC2_QM1_CQ_CFG0_2                                         0xD62118
+
+#define mmNIC2_QM1_CQ_CFG0_3                                         0xD6211C
+
+#define mmNIC2_QM1_CQ_CFG0_4                                         0xD62120
+
+#define mmNIC2_QM1_CQ_CFG1_0                                         0xD62124
+
+#define mmNIC2_QM1_CQ_CFG1_1                                         0xD62128
+
+#define mmNIC2_QM1_CQ_CFG1_2                                         0xD6212C
+
+#define mmNIC2_QM1_CQ_CFG1_3                                         0xD62130
+
+#define mmNIC2_QM1_CQ_CFG1_4                                         0xD62134
+
+#define mmNIC2_QM1_CQ_ARUSER_31_11_0                                 0xD62138
+
+#define mmNIC2_QM1_CQ_ARUSER_31_11_1                                 0xD6213C
+
+#define mmNIC2_QM1_CQ_ARUSER_31_11_2                                 0xD62140
+
+#define mmNIC2_QM1_CQ_ARUSER_31_11_3                                 0xD62144
+
+#define mmNIC2_QM1_CQ_ARUSER_31_11_4                                 0xD62148
+
+#define mmNIC2_QM1_CQ_STS0_0                                         0xD6214C
+
+#define mmNIC2_QM1_CQ_STS0_1                                         0xD62150
+
+#define mmNIC2_QM1_CQ_STS0_2                                         0xD62154
+
+#define mmNIC2_QM1_CQ_STS0_3                                         0xD62158
+
+#define mmNIC2_QM1_CQ_STS0_4                                         0xD6215C
+
+#define mmNIC2_QM1_CQ_STS1_0                                         0xD62160
+
+#define mmNIC2_QM1_CQ_STS1_1                                         0xD62164
+
+#define mmNIC2_QM1_CQ_STS1_2                                         0xD62168
+
+#define mmNIC2_QM1_CQ_STS1_3                                         0xD6216C
+
+#define mmNIC2_QM1_CQ_STS1_4                                         0xD62170
+
+#define mmNIC2_QM1_CQ_PTR_LO_0                                       0xD62174
+
+#define mmNIC2_QM1_CQ_PTR_HI_0                                       0xD62178
+
+#define mmNIC2_QM1_CQ_TSIZE_0                                        0xD6217C
+
+#define mmNIC2_QM1_CQ_CTL_0                                          0xD62180
+
+#define mmNIC2_QM1_CQ_PTR_LO_1                                       0xD62184
+
+#define mmNIC2_QM1_CQ_PTR_HI_1                                       0xD62188
+
+#define mmNIC2_QM1_CQ_TSIZE_1                                        0xD6218C
+
+#define mmNIC2_QM1_CQ_CTL_1                                          0xD62190
+
+#define mmNIC2_QM1_CQ_PTR_LO_2                                       0xD62194
+
+#define mmNIC2_QM1_CQ_PTR_HI_2                                       0xD62198
+
+#define mmNIC2_QM1_CQ_TSIZE_2                                        0xD6219C
+
+#define mmNIC2_QM1_CQ_CTL_2                                          0xD621A0
+
+#define mmNIC2_QM1_CQ_PTR_LO_3                                       0xD621A4
+
+#define mmNIC2_QM1_CQ_PTR_HI_3                                       0xD621A8
+
+#define mmNIC2_QM1_CQ_TSIZE_3                                        0xD621AC
+
+#define mmNIC2_QM1_CQ_CTL_3                                          0xD621B0
+
+#define mmNIC2_QM1_CQ_PTR_LO_4                                       0xD621B4
+
+#define mmNIC2_QM1_CQ_PTR_HI_4                                       0xD621B8
+
+#define mmNIC2_QM1_CQ_TSIZE_4                                        0xD621BC
+
+#define mmNIC2_QM1_CQ_CTL_4                                          0xD621C0
+
+#define mmNIC2_QM1_CQ_PTR_LO_STS_0                                   0xD621C4
+
+#define mmNIC2_QM1_CQ_PTR_LO_STS_1                                   0xD621C8
+
+#define mmNIC2_QM1_CQ_PTR_LO_STS_2                                   0xD621CC
+
+#define mmNIC2_QM1_CQ_PTR_LO_STS_3                                   0xD621D0
+
+#define mmNIC2_QM1_CQ_PTR_LO_STS_4                                   0xD621D4
+
+#define mmNIC2_QM1_CQ_PTR_HI_STS_0                                   0xD621D8
+
+#define mmNIC2_QM1_CQ_PTR_HI_STS_1                                   0xD621DC
+
+#define mmNIC2_QM1_CQ_PTR_HI_STS_2                                   0xD621E0
+
+#define mmNIC2_QM1_CQ_PTR_HI_STS_3                                   0xD621E4
+
+#define mmNIC2_QM1_CQ_PTR_HI_STS_4                                   0xD621E8
+
+#define mmNIC2_QM1_CQ_TSIZE_STS_0                                    0xD621EC
+
+#define mmNIC2_QM1_CQ_TSIZE_STS_1                                    0xD621F0
+
+#define mmNIC2_QM1_CQ_TSIZE_STS_2                                    0xD621F4
+
+#define mmNIC2_QM1_CQ_TSIZE_STS_3                                    0xD621F8
+
+#define mmNIC2_QM1_CQ_TSIZE_STS_4                                    0xD621FC
+
+#define mmNIC2_QM1_CQ_CTL_STS_0                                      0xD62200
+
+#define mmNIC2_QM1_CQ_CTL_STS_1                                      0xD62204
+
+#define mmNIC2_QM1_CQ_CTL_STS_2                                      0xD62208
+
+#define mmNIC2_QM1_CQ_CTL_STS_3                                      0xD6220C
+
+#define mmNIC2_QM1_CQ_CTL_STS_4                                      0xD62210
+
+#define mmNIC2_QM1_CQ_IFIFO_CNT_0                                    0xD62214
+
+#define mmNIC2_QM1_CQ_IFIFO_CNT_1                                    0xD62218
+
+#define mmNIC2_QM1_CQ_IFIFO_CNT_2                                    0xD6221C
+
+#define mmNIC2_QM1_CQ_IFIFO_CNT_3                                    0xD62220
+
+#define mmNIC2_QM1_CQ_IFIFO_CNT_4                                    0xD62224
+
+#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_0                            0xD62228
+
+#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_1                            0xD6222C
+
+#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_2                            0xD62230
+
+#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_3                            0xD62234
+
+#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_4                            0xD62238
+
+#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_0                            0xD6223C
+
+#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_1                            0xD62240
+
+#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_2                            0xD62244
+
+#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_3                            0xD62248
+
+#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_4                            0xD6224C
+
+#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_0                            0xD62250
+
+#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_1                            0xD62254
+
+#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_2                            0xD62258
+
+#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_3                            0xD6225C
+
+#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_4                            0xD62260
+
+#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_0                            0xD62264
+
+#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_1                            0xD62268
+
+#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_2                            0xD6226C
+
+#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_3                            0xD62270
+
+#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_4                            0xD62274
+
+#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_0                            0xD62278
+
+#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_1                            0xD6227C
+
+#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2                            0xD62280
+
+#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_3                            0xD62284
+
+#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_4                            0xD62288
+
+#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_0                            0xD6228C
+
+#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_1                            0xD62290
+
+#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_2                            0xD62294
+
+#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_3                            0xD62298
+
+#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_4                            0xD6229C
+
+#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_0                            0xD622A0
+
+#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_1                            0xD622A4
+
+#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_2                            0xD622A8
+
+#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_3                            0xD622AC
+
+#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_4                            0xD622B0
+
+#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_0                            0xD622B4
+
+#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_1                            0xD622B8
+
+#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_2                            0xD622BC
+
+#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_3                            0xD622C0
+
+#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_4                            0xD622C4
+
+#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_0                            0xD622C8
+
+#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_1                            0xD622CC
+
+#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_2                            0xD622D0
+
+#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_3                            0xD622D4
+
+#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_4                            0xD622D8
+
+#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0                      0xD622E0
+
+#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1                      0xD622E4
+
+#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2                      0xD622E8
+
+#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3                      0xD622EC
+
+#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4                      0xD622F0
+
+#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0                      0xD622F4
+
+#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1                      0xD622F8
+
+#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2                      0xD622FC
+
+#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3                      0xD62300
+
+#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4                      0xD62304
+
+#define mmNIC2_QM1_CP_FENCE0_RDATA_0                                 0xD62308
+
+#define mmNIC2_QM1_CP_FENCE0_RDATA_1                                 0xD6230C
+
+#define mmNIC2_QM1_CP_FENCE0_RDATA_2                                 0xD62310
+
+#define mmNIC2_QM1_CP_FENCE0_RDATA_3                                 0xD62314
+
+#define mmNIC2_QM1_CP_FENCE0_RDATA_4                                 0xD62318
+
+#define mmNIC2_QM1_CP_FENCE1_RDATA_0                                 0xD6231C
+
+#define mmNIC2_QM1_CP_FENCE1_RDATA_1                                 0xD62320
+
+#define mmNIC2_QM1_CP_FENCE1_RDATA_2                                 0xD62324
+
+#define mmNIC2_QM1_CP_FENCE1_RDATA_3                                 0xD62328
+
+#define mmNIC2_QM1_CP_FENCE1_RDATA_4                                 0xD6232C
+
+#define mmNIC2_QM1_CP_FENCE2_RDATA_0                                 0xD62330
+
+#define mmNIC2_QM1_CP_FENCE2_RDATA_1                                 0xD62334
+
+#define mmNIC2_QM1_CP_FENCE2_RDATA_2                                 0xD62338
+
+#define mmNIC2_QM1_CP_FENCE2_RDATA_3                                 0xD6233C
+
+#define mmNIC2_QM1_CP_FENCE2_RDATA_4                                 0xD62340
+
+#define mmNIC2_QM1_CP_FENCE3_RDATA_0                                 0xD62344
+
+#define mmNIC2_QM1_CP_FENCE3_RDATA_1                                 0xD62348
+
+#define mmNIC2_QM1_CP_FENCE3_RDATA_2                                 0xD6234C
+
+#define mmNIC2_QM1_CP_FENCE3_RDATA_3                                 0xD62350
+
+#define mmNIC2_QM1_CP_FENCE3_RDATA_4                                 0xD62354
+
+#define mmNIC2_QM1_CP_FENCE0_CNT_0                                   0xD62358
+
+#define mmNIC2_QM1_CP_FENCE0_CNT_1                                   0xD6235C
+
+#define mmNIC2_QM1_CP_FENCE0_CNT_2                                   0xD62360
+
+#define mmNIC2_QM1_CP_FENCE0_CNT_3                                   0xD62364
+
+#define mmNIC2_QM1_CP_FENCE0_CNT_4                                   0xD62368
+
+#define mmNIC2_QM1_CP_FENCE1_CNT_0                                   0xD6236C
+
+#define mmNIC2_QM1_CP_FENCE1_CNT_1                                   0xD62370
+
+#define mmNIC2_QM1_CP_FENCE1_CNT_2                                   0xD62374
+
+#define mmNIC2_QM1_CP_FENCE1_CNT_3                                   0xD62378
+
+#define mmNIC2_QM1_CP_FENCE1_CNT_4                                   0xD6237C
+
+#define mmNIC2_QM1_CP_FENCE2_CNT_0                                   0xD62380
+
+#define mmNIC2_QM1_CP_FENCE2_CNT_1                                   0xD62384
+
+#define mmNIC2_QM1_CP_FENCE2_CNT_2                                   0xD62388
+
+#define mmNIC2_QM1_CP_FENCE2_CNT_3                                   0xD6238C
+
+#define mmNIC2_QM1_CP_FENCE2_CNT_4                                   0xD62390
+
+#define mmNIC2_QM1_CP_FENCE3_CNT_0                                   0xD62394
+
+#define mmNIC2_QM1_CP_FENCE3_CNT_1                                   0xD62398
+
+#define mmNIC2_QM1_CP_FENCE3_CNT_2                                   0xD6239C
+
+#define mmNIC2_QM1_CP_FENCE3_CNT_3                                   0xD623A0
+
+#define mmNIC2_QM1_CP_FENCE3_CNT_4                                   0xD623A4
+
+#define mmNIC2_QM1_CP_STS_0                                          0xD623A8
+
+#define mmNIC2_QM1_CP_STS_1                                          0xD623AC
+
+#define mmNIC2_QM1_CP_STS_2                                          0xD623B0
+
+#define mmNIC2_QM1_CP_STS_3                                          0xD623B4
+
+#define mmNIC2_QM1_CP_STS_4                                          0xD623B8
+
+#define mmNIC2_QM1_CP_CURRENT_INST_LO_0                              0xD623BC
+
+#define mmNIC2_QM1_CP_CURRENT_INST_LO_1                              0xD623C0
+
+#define mmNIC2_QM1_CP_CURRENT_INST_LO_2                              0xD623C4
+
+#define mmNIC2_QM1_CP_CURRENT_INST_LO_3                              0xD623C8
+
+#define mmNIC2_QM1_CP_CURRENT_INST_LO_4                              0xD623CC
+
+#define mmNIC2_QM1_CP_CURRENT_INST_HI_0                              0xD623D0
+
+#define mmNIC2_QM1_CP_CURRENT_INST_HI_1                              0xD623D4
+
+#define mmNIC2_QM1_CP_CURRENT_INST_HI_2                              0xD623D8
+
+#define mmNIC2_QM1_CP_CURRENT_INST_HI_3                              0xD623DC
+
+#define mmNIC2_QM1_CP_CURRENT_INST_HI_4                              0xD623E0
+
+#define mmNIC2_QM1_CP_BARRIER_CFG_0                                  0xD623F4
+
+#define mmNIC2_QM1_CP_BARRIER_CFG_1                                  0xD623F8
+
+#define mmNIC2_QM1_CP_BARRIER_CFG_2                                  0xD623FC
+
+#define mmNIC2_QM1_CP_BARRIER_CFG_3                                  0xD62400
+
+#define mmNIC2_QM1_CP_BARRIER_CFG_4                                  0xD62404
+
+#define mmNIC2_QM1_CP_DBG_0_0                                        0xD62408
+
+#define mmNIC2_QM1_CP_DBG_0_1                                        0xD6240C
+
+#define mmNIC2_QM1_CP_DBG_0_2                                        0xD62410
+
+#define mmNIC2_QM1_CP_DBG_0_3                                        0xD62414
+
+#define mmNIC2_QM1_CP_DBG_0_4                                        0xD62418
+
+#define mmNIC2_QM1_CP_ARUSER_31_11_0                                 0xD6241C
+
+#define mmNIC2_QM1_CP_ARUSER_31_11_1                                 0xD62420
+
+#define mmNIC2_QM1_CP_ARUSER_31_11_2                                 0xD62424
+
+#define mmNIC2_QM1_CP_ARUSER_31_11_3                                 0xD62428
+
+#define mmNIC2_QM1_CP_ARUSER_31_11_4                                 0xD6242C
+
+#define mmNIC2_QM1_CP_AWUSER_31_11_0                                 0xD62430
+
+#define mmNIC2_QM1_CP_AWUSER_31_11_1                                 0xD62434
+
+#define mmNIC2_QM1_CP_AWUSER_31_11_2                                 0xD62438
+
+#define mmNIC2_QM1_CP_AWUSER_31_11_3                                 0xD6243C
+
+#define mmNIC2_QM1_CP_AWUSER_31_11_4                                 0xD62440
+
+#define mmNIC2_QM1_ARB_CFG_0                                         0xD62A00
+
+#define mmNIC2_QM1_ARB_CHOISE_Q_PUSH                                 0xD62A04
+
+#define mmNIC2_QM1_ARB_WRR_WEIGHT_0                                  0xD62A08
+
+#define mmNIC2_QM1_ARB_WRR_WEIGHT_1                                  0xD62A0C
+
+#define mmNIC2_QM1_ARB_WRR_WEIGHT_2                                  0xD62A10
+
+#define mmNIC2_QM1_ARB_WRR_WEIGHT_3                                  0xD62A14
+
+#define mmNIC2_QM1_ARB_CFG_1                                         0xD62A18
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_0                              0xD62A20
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_1                              0xD62A24
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_2                              0xD62A28
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_3                              0xD62A2C
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_4                              0xD62A30
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_5                              0xD62A34
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_6                              0xD62A38
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_7                              0xD62A3C
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_8                              0xD62A40
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_9                              0xD62A44
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_10                             0xD62A48
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_11                             0xD62A4C
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_12                             0xD62A50
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_13                             0xD62A54
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_14                             0xD62A58
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_15                             0xD62A5C
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_16                             0xD62A60
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_17                             0xD62A64
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_18                             0xD62A68
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_19                             0xD62A6C
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_20                             0xD62A70
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_21                             0xD62A74
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_22                             0xD62A78
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_23                             0xD62A7C
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_24                             0xD62A80
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_25                             0xD62A84
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_26                             0xD62A88
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_27                             0xD62A8C
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_28                             0xD62A90
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_29                             0xD62A94
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_30                             0xD62A98
+
+#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_31                             0xD62A9C
+
+#define mmNIC2_QM1_ARB_MST_CRED_INC                                  0xD62AA0
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_0                        0xD62AA4
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_1                        0xD62AA8
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_2                        0xD62AAC
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_3                        0xD62AB0
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_4                        0xD62AB4
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_5                        0xD62AB8
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_6                        0xD62ABC
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_7                        0xD62AC0
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_8                        0xD62AC4
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_9                        0xD62AC8
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_10                       0xD62ACC
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_11                       0xD62AD0
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_12                       0xD62AD4
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_13                       0xD62AD8
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_14                       0xD62ADC
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_15                       0xD62AE0
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_16                       0xD62AE4
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_17                       0xD62AE8
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_18                       0xD62AEC
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_19                       0xD62AF0
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_20                       0xD62AF4
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_21                       0xD62AF8
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_22                       0xD62AFC
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23                       0xD62B00
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_24                       0xD62B04
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_25                       0xD62B08
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_26                       0xD62B0C
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_27                       0xD62B10
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_28                       0xD62B14
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_29                       0xD62B18
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_30                       0xD62B1C
+
+#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_31                       0xD62B20
+
+#define mmNIC2_QM1_ARB_SLV_MASTER_INC_CRED_OFST                      0xD62B28
+
+#define mmNIC2_QM1_ARB_MST_SLAVE_EN                                  0xD62B2C
+
+#define mmNIC2_QM1_ARB_MST_QUIET_PER                                 0xD62B34
+
+#define mmNIC2_QM1_ARB_SLV_CHOISE_WDT                                0xD62B38
+
+#define mmNIC2_QM1_ARB_SLV_ID                                        0xD62B3C
+
+#define mmNIC2_QM1_ARB_MSG_MAX_INFLIGHT                              0xD62B44
+
+#define mmNIC2_QM1_ARB_MSG_AWUSER_31_11                              0xD62B48
+
+#define mmNIC2_QM1_ARB_MSG_AWUSER_SEC_PROP                           0xD62B4C
+
+#define mmNIC2_QM1_ARB_MSG_AWUSER_NON_SEC_PROP                       0xD62B50
+
+#define mmNIC2_QM1_ARB_BASE_LO                                       0xD62B54
+
+#define mmNIC2_QM1_ARB_BASE_HI                                       0xD62B58
+
+#define mmNIC2_QM1_ARB_STATE_STS                                     0xD62B80
+
+#define mmNIC2_QM1_ARB_CHOISE_FULLNESS_STS                           0xD62B84
+
+#define mmNIC2_QM1_ARB_MSG_STS                                       0xD62B88
+
+#define mmNIC2_QM1_ARB_SLV_CHOISE_Q_HEAD                             0xD62B8C
+
+#define mmNIC2_QM1_ARB_ERR_CAUSE                                     0xD62B9C
+
+#define mmNIC2_QM1_ARB_ERR_MSG_EN                                    0xD62BA0
+
+#define mmNIC2_QM1_ARB_ERR_STS_DRP                                   0xD62BA8
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_0                                0xD62BB0
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_1                                0xD62BB4
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_2                                0xD62BB8
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_3                                0xD62BBC
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_4                                0xD62BC0
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_5                                0xD62BC4
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_6                                0xD62BC8
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_7                                0xD62BCC
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_8                                0xD62BD0
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_9                                0xD62BD4
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_10                               0xD62BD8
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_11                               0xD62BDC
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_12                               0xD62BE0
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_13                               0xD62BE4
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_14                               0xD62BE8
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_15                               0xD62BEC
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_16                               0xD62BF0
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_17                               0xD62BF4
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_18                               0xD62BF8
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_19                               0xD62BFC
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_20                               0xD62C00
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_21                               0xD62C04
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_22                               0xD62C08
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_23                               0xD62C0C
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_24                               0xD62C10
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_25                               0xD62C14
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_26                               0xD62C18
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_27                               0xD62C1C
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_28                               0xD62C20
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_29                               0xD62C24
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_30                               0xD62C28
+
+#define mmNIC2_QM1_ARB_MST_CRED_STS_31                               0xD62C2C
+
+#define mmNIC2_QM1_CGM_CFG                                           0xD62C70
+
+#define mmNIC2_QM1_CGM_STS                                           0xD62C74
+
+#define mmNIC2_QM1_CGM_CFG1                                          0xD62C78
+
+#define mmNIC2_QM1_LOCAL_RANGE_BASE                                  0xD62C80
+
+#define mmNIC2_QM1_LOCAL_RANGE_SIZE                                  0xD62C84
+
+#define mmNIC2_QM1_CSMR_STRICT_PRIO_CFG                              0xD62C90
+
+#define mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_1                             0xD62C94
+
+#define mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_0                             0xD62C98
+
+#define mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_1                             0xD62C9C
+
+#define mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_0                             0xD62CA0
+
+#define mmNIC2_QM1_GLBL_AXCACHE                                      0xD62CA4
+
+#define mmNIC2_QM1_IND_GW_APB_CFG                                    0xD62CB0
+
+#define mmNIC2_QM1_IND_GW_APB_WDATA                                  0xD62CB4
+
+#define mmNIC2_QM1_IND_GW_APB_RDATA                                  0xD62CB8
+
+#define mmNIC2_QM1_IND_GW_APB_STATUS                                 0xD62CBC
+
+#define mmNIC2_QM1_GLBL_ERR_ADDR_LO                                  0xD62CD0
+
+#define mmNIC2_QM1_GLBL_ERR_ADDR_HI                                  0xD62CD4
+
+#define mmNIC2_QM1_GLBL_ERR_WDATA                                    0xD62CD8
+
+#define mmNIC2_QM1_GLBL_MEM_INIT_BUSY                                0xD62D00
+
+#endif /* ASIC_REG_NIC2_QM1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..4712cc62b0091ae41f10f278e68a14c2c708c0e3
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC3_QM0_REGS_H_
+#define ASIC_REG_NIC3_QM0_REGS_H_
+
+/*
+ *****************************************
+ *   NIC3_QM0 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC3_QM0_GLBL_CFG0                                         0xDA0000
+
+#define mmNIC3_QM0_GLBL_CFG1                                         0xDA0004
+
+#define mmNIC3_QM0_GLBL_PROT                                         0xDA0008
+
+#define mmNIC3_QM0_GLBL_ERR_CFG                                      0xDA000C
+
+#define mmNIC3_QM0_GLBL_SECURE_PROPS_0                               0xDA0010
+
+#define mmNIC3_QM0_GLBL_SECURE_PROPS_1                               0xDA0014
+
+#define mmNIC3_QM0_GLBL_SECURE_PROPS_2                               0xDA0018
+
+#define mmNIC3_QM0_GLBL_SECURE_PROPS_3                               0xDA001C
+
+#define mmNIC3_QM0_GLBL_SECURE_PROPS_4                               0xDA0020
+
+#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0                           0xDA0024
+
+#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1                           0xDA0028
+
+#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2                           0xDA002C
+
+#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3                           0xDA0030
+
+#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4                           0xDA0034
+
+#define mmNIC3_QM0_GLBL_STS0                                         0xDA0038
+
+#define mmNIC3_QM0_GLBL_STS1_0                                       0xDA0040
+
+#define mmNIC3_QM0_GLBL_STS1_1                                       0xDA0044
+
+#define mmNIC3_QM0_GLBL_STS1_2                                       0xDA0048
+
+#define mmNIC3_QM0_GLBL_STS1_3                                       0xDA004C
+
+#define mmNIC3_QM0_GLBL_STS1_4                                       0xDA0050
+
+#define mmNIC3_QM0_GLBL_MSG_EN_0                                     0xDA0054
+
+#define mmNIC3_QM0_GLBL_MSG_EN_1                                     0xDA0058
+
+#define mmNIC3_QM0_GLBL_MSG_EN_2                                     0xDA005C
+
+#define mmNIC3_QM0_GLBL_MSG_EN_3                                     0xDA0060
+
+#define mmNIC3_QM0_GLBL_MSG_EN_4                                     0xDA0068
+
+#define mmNIC3_QM0_PQ_BASE_LO_0                                      0xDA0070
+
+#define mmNIC3_QM0_PQ_BASE_LO_1                                      0xDA0074
+
+#define mmNIC3_QM0_PQ_BASE_LO_2                                      0xDA0078
+
+#define mmNIC3_QM0_PQ_BASE_LO_3                                      0xDA007C
+
+#define mmNIC3_QM0_PQ_BASE_HI_0                                      0xDA0080
+
+#define mmNIC3_QM0_PQ_BASE_HI_1                                      0xDA0084
+
+#define mmNIC3_QM0_PQ_BASE_HI_2                                      0xDA0088
+
+#define mmNIC3_QM0_PQ_BASE_HI_3                                      0xDA008C
+
+#define mmNIC3_QM0_PQ_SIZE_0                                         0xDA0090
+
+#define mmNIC3_QM0_PQ_SIZE_1                                         0xDA0094
+
+#define mmNIC3_QM0_PQ_SIZE_2                                         0xDA0098
+
+#define mmNIC3_QM0_PQ_SIZE_3                                         0xDA009C
+
+#define mmNIC3_QM0_PQ_PI_0                                           0xDA00A0
+
+#define mmNIC3_QM0_PQ_PI_1                                           0xDA00A4
+
+#define mmNIC3_QM0_PQ_PI_2                                           0xDA00A8
+
+#define mmNIC3_QM0_PQ_PI_3                                           0xDA00AC
+
+#define mmNIC3_QM0_PQ_CI_0                                           0xDA00B0
+
+#define mmNIC3_QM0_PQ_CI_1                                           0xDA00B4
+
+#define mmNIC3_QM0_PQ_CI_2                                           0xDA00B8
+
+#define mmNIC3_QM0_PQ_CI_3                                           0xDA00BC
+
+#define mmNIC3_QM0_PQ_CFG0_0                                         0xDA00C0
+
+#define mmNIC3_QM0_PQ_CFG0_1                                         0xDA00C4
+
+#define mmNIC3_QM0_PQ_CFG0_2                                         0xDA00C8
+
+#define mmNIC3_QM0_PQ_CFG0_3                                         0xDA00CC
+
+#define mmNIC3_QM0_PQ_CFG1_0                                         0xDA00D0
+
+#define mmNIC3_QM0_PQ_CFG1_1                                         0xDA00D4
+
+#define mmNIC3_QM0_PQ_CFG1_2                                         0xDA00D8
+
+#define mmNIC3_QM0_PQ_CFG1_3                                         0xDA00DC
+
+#define mmNIC3_QM0_PQ_ARUSER_31_11_0                                 0xDA00E0
+
+#define mmNIC3_QM0_PQ_ARUSER_31_11_1                                 0xDA00E4
+
+#define mmNIC3_QM0_PQ_ARUSER_31_11_2                                 0xDA00E8
+
+#define mmNIC3_QM0_PQ_ARUSER_31_11_3                                 0xDA00EC
+
+#define mmNIC3_QM0_PQ_STS0_0                                         0xDA00F0
+
+#define mmNIC3_QM0_PQ_STS0_1                                         0xDA00F4
+
+#define mmNIC3_QM0_PQ_STS0_2                                         0xDA00F8
+
+#define mmNIC3_QM0_PQ_STS0_3                                         0xDA00FC
+
+#define mmNIC3_QM0_PQ_STS1_0                                         0xDA0100
+
+#define mmNIC3_QM0_PQ_STS1_1                                         0xDA0104
+
+#define mmNIC3_QM0_PQ_STS1_2                                         0xDA0108
+
+#define mmNIC3_QM0_PQ_STS1_3                                         0xDA010C
+
+#define mmNIC3_QM0_CQ_CFG0_0                                         0xDA0110
+
+#define mmNIC3_QM0_CQ_CFG0_1                                         0xDA0114
+
+#define mmNIC3_QM0_CQ_CFG0_2                                         0xDA0118
+
+#define mmNIC3_QM0_CQ_CFG0_3                                         0xDA011C
+
+#define mmNIC3_QM0_CQ_CFG0_4                                         0xDA0120
+
+#define mmNIC3_QM0_CQ_CFG1_0                                         0xDA0124
+
+#define mmNIC3_QM0_CQ_CFG1_1                                         0xDA0128
+
+#define mmNIC3_QM0_CQ_CFG1_2                                         0xDA012C
+
+#define mmNIC3_QM0_CQ_CFG1_3                                         0xDA0130
+
+#define mmNIC3_QM0_CQ_CFG1_4                                         0xDA0134
+
+#define mmNIC3_QM0_CQ_ARUSER_31_11_0                                 0xDA0138
+
+#define mmNIC3_QM0_CQ_ARUSER_31_11_1                                 0xDA013C
+
+#define mmNIC3_QM0_CQ_ARUSER_31_11_2                                 0xDA0140
+
+#define mmNIC3_QM0_CQ_ARUSER_31_11_3                                 0xDA0144
+
+#define mmNIC3_QM0_CQ_ARUSER_31_11_4                                 0xDA0148
+
+#define mmNIC3_QM0_CQ_STS0_0                                         0xDA014C
+
+#define mmNIC3_QM0_CQ_STS0_1                                         0xDA0150
+
+#define mmNIC3_QM0_CQ_STS0_2                                         0xDA0154
+
+#define mmNIC3_QM0_CQ_STS0_3                                         0xDA0158
+
+#define mmNIC3_QM0_CQ_STS0_4                                         0xDA015C
+
+#define mmNIC3_QM0_CQ_STS1_0                                         0xDA0160
+
+#define mmNIC3_QM0_CQ_STS1_1                                         0xDA0164
+
+#define mmNIC3_QM0_CQ_STS1_2                                         0xDA0168
+
+#define mmNIC3_QM0_CQ_STS1_3                                         0xDA016C
+
+#define mmNIC3_QM0_CQ_STS1_4                                         0xDA0170
+
+#define mmNIC3_QM0_CQ_PTR_LO_0                                       0xDA0174
+
+#define mmNIC3_QM0_CQ_PTR_HI_0                                       0xDA0178
+
+#define mmNIC3_QM0_CQ_TSIZE_0                                        0xDA017C
+
+#define mmNIC3_QM0_CQ_CTL_0                                          0xDA0180
+
+#define mmNIC3_QM0_CQ_PTR_LO_1                                       0xDA0184
+
+#define mmNIC3_QM0_CQ_PTR_HI_1                                       0xDA0188
+
+#define mmNIC3_QM0_CQ_TSIZE_1                                        0xDA018C
+
+#define mmNIC3_QM0_CQ_CTL_1                                          0xDA0190
+
+#define mmNIC3_QM0_CQ_PTR_LO_2                                       0xDA0194
+
+#define mmNIC3_QM0_CQ_PTR_HI_2                                       0xDA0198
+
+#define mmNIC3_QM0_CQ_TSIZE_2                                        0xDA019C
+
+#define mmNIC3_QM0_CQ_CTL_2                                          0xDA01A0
+
+#define mmNIC3_QM0_CQ_PTR_LO_3                                       0xDA01A4
+
+#define mmNIC3_QM0_CQ_PTR_HI_3                                       0xDA01A8
+
+#define mmNIC3_QM0_CQ_TSIZE_3                                        0xDA01AC
+
+#define mmNIC3_QM0_CQ_CTL_3                                          0xDA01B0
+
+#define mmNIC3_QM0_CQ_PTR_LO_4                                       0xDA01B4
+
+#define mmNIC3_QM0_CQ_PTR_HI_4                                       0xDA01B8
+
+#define mmNIC3_QM0_CQ_TSIZE_4                                        0xDA01BC
+
+#define mmNIC3_QM0_CQ_CTL_4                                          0xDA01C0
+
+#define mmNIC3_QM0_CQ_PTR_LO_STS_0                                   0xDA01C4
+
+#define mmNIC3_QM0_CQ_PTR_LO_STS_1                                   0xDA01C8
+
+#define mmNIC3_QM0_CQ_PTR_LO_STS_2                                   0xDA01CC
+
+#define mmNIC3_QM0_CQ_PTR_LO_STS_3                                   0xDA01D0
+
+#define mmNIC3_QM0_CQ_PTR_LO_STS_4                                   0xDA01D4
+
+#define mmNIC3_QM0_CQ_PTR_HI_STS_0                                   0xDA01D8
+
+#define mmNIC3_QM0_CQ_PTR_HI_STS_1                                   0xDA01DC
+
+#define mmNIC3_QM0_CQ_PTR_HI_STS_2                                   0xDA01E0
+
+#define mmNIC3_QM0_CQ_PTR_HI_STS_3                                   0xDA01E4
+
+#define mmNIC3_QM0_CQ_PTR_HI_STS_4                                   0xDA01E8
+
+#define mmNIC3_QM0_CQ_TSIZE_STS_0                                    0xDA01EC
+
+#define mmNIC3_QM0_CQ_TSIZE_STS_1                                    0xDA01F0
+
+#define mmNIC3_QM0_CQ_TSIZE_STS_2                                    0xDA01F4
+
+#define mmNIC3_QM0_CQ_TSIZE_STS_3                                    0xDA01F8
+
+#define mmNIC3_QM0_CQ_TSIZE_STS_4                                    0xDA01FC
+
+#define mmNIC3_QM0_CQ_CTL_STS_0                                      0xDA0200
+
+#define mmNIC3_QM0_CQ_CTL_STS_1                                      0xDA0204
+
+#define mmNIC3_QM0_CQ_CTL_STS_2                                      0xDA0208
+
+#define mmNIC3_QM0_CQ_CTL_STS_3                                      0xDA020C
+
+#define mmNIC3_QM0_CQ_CTL_STS_4                                      0xDA0210
+
+#define mmNIC3_QM0_CQ_IFIFO_CNT_0                                    0xDA0214
+
+#define mmNIC3_QM0_CQ_IFIFO_CNT_1                                    0xDA0218
+
+#define mmNIC3_QM0_CQ_IFIFO_CNT_2                                    0xDA021C
+
+#define mmNIC3_QM0_CQ_IFIFO_CNT_3                                    0xDA0220
+
+#define mmNIC3_QM0_CQ_IFIFO_CNT_4                                    0xDA0224
+
+#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_0                            0xDA0228
+
+#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_1                            0xDA022C
+
+#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_2                            0xDA0230
+
+#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_3                            0xDA0234
+
+#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_4                            0xDA0238
+
+#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_0                            0xDA023C
+
+#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_1                            0xDA0240
+
+#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_2                            0xDA0244
+
+#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_3                            0xDA0248
+
+#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_4                            0xDA024C
+
+#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_0                            0xDA0250
+
+#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_1                            0xDA0254
+
+#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_2                            0xDA0258
+
+#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_3                            0xDA025C
+
+#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_4                            0xDA0260
+
+#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_0                            0xDA0264
+
+#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_1                            0xDA0268
+
+#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_2                            0xDA026C
+
+#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_3                            0xDA0270
+
+#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_4                            0xDA0274
+
+#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_0                            0xDA0278
+
+#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_1                            0xDA027C
+
+#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2                            0xDA0280
+
+#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_3                            0xDA0284
+
+#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_4                            0xDA0288
+
+#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_0                            0xDA028C
+
+#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_1                            0xDA0290
+
+#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_2                            0xDA0294
+
+#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_3                            0xDA0298
+
+#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_4                            0xDA029C
+
+#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_0                            0xDA02A0
+
+#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_1                            0xDA02A4
+
+#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_2                            0xDA02A8
+
+#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_3                            0xDA02AC
+
+#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_4                            0xDA02B0
+
+#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_0                            0xDA02B4
+
+#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_1                            0xDA02B8
+
+#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_2                            0xDA02BC
+
+#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_3                            0xDA02C0
+
+#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_4                            0xDA02C4
+
+#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_0                            0xDA02C8
+
+#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_1                            0xDA02CC
+
+#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_2                            0xDA02D0
+
+#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_3                            0xDA02D4
+
+#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_4                            0xDA02D8
+
+#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0                      0xDA02E0
+
+#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1                      0xDA02E4
+
+#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2                      0xDA02E8
+
+#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3                      0xDA02EC
+
+#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4                      0xDA02F0
+
+#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0                      0xDA02F4
+
+#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1                      0xDA02F8
+
+#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2                      0xDA02FC
+
+#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3                      0xDA0300
+
+#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4                      0xDA0304
+
+#define mmNIC3_QM0_CP_FENCE0_RDATA_0                                 0xDA0308
+
+#define mmNIC3_QM0_CP_FENCE0_RDATA_1                                 0xDA030C
+
+#define mmNIC3_QM0_CP_FENCE0_RDATA_2                                 0xDA0310
+
+#define mmNIC3_QM0_CP_FENCE0_RDATA_3                                 0xDA0314
+
+#define mmNIC3_QM0_CP_FENCE0_RDATA_4                                 0xDA0318
+
+#define mmNIC3_QM0_CP_FENCE1_RDATA_0                                 0xDA031C
+
+#define mmNIC3_QM0_CP_FENCE1_RDATA_1                                 0xDA0320
+
+#define mmNIC3_QM0_CP_FENCE1_RDATA_2                                 0xDA0324
+
+#define mmNIC3_QM0_CP_FENCE1_RDATA_3                                 0xDA0328
+
+#define mmNIC3_QM0_CP_FENCE1_RDATA_4                                 0xDA032C
+
+#define mmNIC3_QM0_CP_FENCE2_RDATA_0                                 0xDA0330
+
+#define mmNIC3_QM0_CP_FENCE2_RDATA_1                                 0xDA0334
+
+#define mmNIC3_QM0_CP_FENCE2_RDATA_2                                 0xDA0338
+
+#define mmNIC3_QM0_CP_FENCE2_RDATA_3                                 0xDA033C
+
+#define mmNIC3_QM0_CP_FENCE2_RDATA_4                                 0xDA0340
+
+#define mmNIC3_QM0_CP_FENCE3_RDATA_0                                 0xDA0344
+
+#define mmNIC3_QM0_CP_FENCE3_RDATA_1                                 0xDA0348
+
+#define mmNIC3_QM0_CP_FENCE3_RDATA_2                                 0xDA034C
+
+#define mmNIC3_QM0_CP_FENCE3_RDATA_3                                 0xDA0350
+
+#define mmNIC3_QM0_CP_FENCE3_RDATA_4                                 0xDA0354
+
+#define mmNIC3_QM0_CP_FENCE0_CNT_0                                   0xDA0358
+
+#define mmNIC3_QM0_CP_FENCE0_CNT_1                                   0xDA035C
+
+#define mmNIC3_QM0_CP_FENCE0_CNT_2                                   0xDA0360
+
+#define mmNIC3_QM0_CP_FENCE0_CNT_3                                   0xDA0364
+
+#define mmNIC3_QM0_CP_FENCE0_CNT_4                                   0xDA0368
+
+#define mmNIC3_QM0_CP_FENCE1_CNT_0                                   0xDA036C
+
+#define mmNIC3_QM0_CP_FENCE1_CNT_1                                   0xDA0370
+
+#define mmNIC3_QM0_CP_FENCE1_CNT_2                                   0xDA0374
+
+#define mmNIC3_QM0_CP_FENCE1_CNT_3                                   0xDA0378
+
+#define mmNIC3_QM0_CP_FENCE1_CNT_4                                   0xDA037C
+
+#define mmNIC3_QM0_CP_FENCE2_CNT_0                                   0xDA0380
+
+#define mmNIC3_QM0_CP_FENCE2_CNT_1                                   0xDA0384
+
+#define mmNIC3_QM0_CP_FENCE2_CNT_2                                   0xDA0388
+
+#define mmNIC3_QM0_CP_FENCE2_CNT_3                                   0xDA038C
+
+#define mmNIC3_QM0_CP_FENCE2_CNT_4                                   0xDA0390
+
+#define mmNIC3_QM0_CP_FENCE3_CNT_0                                   0xDA0394
+
+#define mmNIC3_QM0_CP_FENCE3_CNT_1                                   0xDA0398
+
+#define mmNIC3_QM0_CP_FENCE3_CNT_2                                   0xDA039C
+
+#define mmNIC3_QM0_CP_FENCE3_CNT_3                                   0xDA03A0
+
+#define mmNIC3_QM0_CP_FENCE3_CNT_4                                   0xDA03A4
+
+#define mmNIC3_QM0_CP_STS_0                                          0xDA03A8
+
+#define mmNIC3_QM0_CP_STS_1                                          0xDA03AC
+
+#define mmNIC3_QM0_CP_STS_2                                          0xDA03B0
+
+#define mmNIC3_QM0_CP_STS_3                                          0xDA03B4
+
+#define mmNIC3_QM0_CP_STS_4                                          0xDA03B8
+
+#define mmNIC3_QM0_CP_CURRENT_INST_LO_0                              0xDA03BC
+
+#define mmNIC3_QM0_CP_CURRENT_INST_LO_1                              0xDA03C0
+
+#define mmNIC3_QM0_CP_CURRENT_INST_LO_2                              0xDA03C4
+
+#define mmNIC3_QM0_CP_CURRENT_INST_LO_3                              0xDA03C8
+
+#define mmNIC3_QM0_CP_CURRENT_INST_LO_4                              0xDA03CC
+
+#define mmNIC3_QM0_CP_CURRENT_INST_HI_0                              0xDA03D0
+
+#define mmNIC3_QM0_CP_CURRENT_INST_HI_1                              0xDA03D4
+
+#define mmNIC3_QM0_CP_CURRENT_INST_HI_2                              0xDA03D8
+
+#define mmNIC3_QM0_CP_CURRENT_INST_HI_3                              0xDA03DC
+
+#define mmNIC3_QM0_CP_CURRENT_INST_HI_4                              0xDA03E0
+
+#define mmNIC3_QM0_CP_BARRIER_CFG_0                                  0xDA03F4
+
+#define mmNIC3_QM0_CP_BARRIER_CFG_1                                  0xDA03F8
+
+#define mmNIC3_QM0_CP_BARRIER_CFG_2                                  0xDA03FC
+
+#define mmNIC3_QM0_CP_BARRIER_CFG_3                                  0xDA0400
+
+#define mmNIC3_QM0_CP_BARRIER_CFG_4                                  0xDA0404
+
+#define mmNIC3_QM0_CP_DBG_0_0                                        0xDA0408
+
+#define mmNIC3_QM0_CP_DBG_0_1                                        0xDA040C
+
+#define mmNIC3_QM0_CP_DBG_0_2                                        0xDA0410
+
+#define mmNIC3_QM0_CP_DBG_0_3                                        0xDA0414
+
+#define mmNIC3_QM0_CP_DBG_0_4                                        0xDA0418
+
+#define mmNIC3_QM0_CP_ARUSER_31_11_0                                 0xDA041C
+
+#define mmNIC3_QM0_CP_ARUSER_31_11_1                                 0xDA0420
+
+#define mmNIC3_QM0_CP_ARUSER_31_11_2                                 0xDA0424
+
+#define mmNIC3_QM0_CP_ARUSER_31_11_3                                 0xDA0428
+
+#define mmNIC3_QM0_CP_ARUSER_31_11_4                                 0xDA042C
+
+#define mmNIC3_QM0_CP_AWUSER_31_11_0                                 0xDA0430
+
+#define mmNIC3_QM0_CP_AWUSER_31_11_1                                 0xDA0434
+
+#define mmNIC3_QM0_CP_AWUSER_31_11_2                                 0xDA0438
+
+#define mmNIC3_QM0_CP_AWUSER_31_11_3                                 0xDA043C
+
+#define mmNIC3_QM0_CP_AWUSER_31_11_4                                 0xDA0440
+
+#define mmNIC3_QM0_ARB_CFG_0                                         0xDA0A00
+
+#define mmNIC3_QM0_ARB_CHOISE_Q_PUSH                                 0xDA0A04
+
+#define mmNIC3_QM0_ARB_WRR_WEIGHT_0                                  0xDA0A08
+
+#define mmNIC3_QM0_ARB_WRR_WEIGHT_1                                  0xDA0A0C
+
+#define mmNIC3_QM0_ARB_WRR_WEIGHT_2                                  0xDA0A10
+
+#define mmNIC3_QM0_ARB_WRR_WEIGHT_3                                  0xDA0A14
+
+#define mmNIC3_QM0_ARB_CFG_1                                         0xDA0A18
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_0                              0xDA0A20
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_1                              0xDA0A24
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_2                              0xDA0A28
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_3                              0xDA0A2C
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_4                              0xDA0A30
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_5                              0xDA0A34
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_6                              0xDA0A38
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_7                              0xDA0A3C
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_8                              0xDA0A40
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_9                              0xDA0A44
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_10                             0xDA0A48
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_11                             0xDA0A4C
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_12                             0xDA0A50
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_13                             0xDA0A54
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_14                             0xDA0A58
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_15                             0xDA0A5C
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_16                             0xDA0A60
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_17                             0xDA0A64
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_18                             0xDA0A68
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_19                             0xDA0A6C
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_20                             0xDA0A70
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_21                             0xDA0A74
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_22                             0xDA0A78
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_23                             0xDA0A7C
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_24                             0xDA0A80
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_25                             0xDA0A84
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_26                             0xDA0A88
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_27                             0xDA0A8C
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_28                             0xDA0A90
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_29                             0xDA0A94
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_30                             0xDA0A98
+
+#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_31                             0xDA0A9C
+
+#define mmNIC3_QM0_ARB_MST_CRED_INC                                  0xDA0AA0
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_0                        0xDA0AA4
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_1                        0xDA0AA8
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_2                        0xDA0AAC
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_3                        0xDA0AB0
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_4                        0xDA0AB4
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_5                        0xDA0AB8
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_6                        0xDA0ABC
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_7                        0xDA0AC0
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_8                        0xDA0AC4
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_9                        0xDA0AC8
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_10                       0xDA0ACC
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_11                       0xDA0AD0
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_12                       0xDA0AD4
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_13                       0xDA0AD8
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_14                       0xDA0ADC
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_15                       0xDA0AE0
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_16                       0xDA0AE4
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_17                       0xDA0AE8
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_18                       0xDA0AEC
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_19                       0xDA0AF0
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_20                       0xDA0AF4
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_21                       0xDA0AF8
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_22                       0xDA0AFC
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23                       0xDA0B00
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_24                       0xDA0B04
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_25                       0xDA0B08
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_26                       0xDA0B0C
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_27                       0xDA0B10
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_28                       0xDA0B14
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_29                       0xDA0B18
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_30                       0xDA0B1C
+
+#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_31                       0xDA0B20
+
+#define mmNIC3_QM0_ARB_SLV_MASTER_INC_CRED_OFST                      0xDA0B28
+
+#define mmNIC3_QM0_ARB_MST_SLAVE_EN                                  0xDA0B2C
+
+#define mmNIC3_QM0_ARB_MST_QUIET_PER                                 0xDA0B34
+
+#define mmNIC3_QM0_ARB_SLV_CHOISE_WDT                                0xDA0B38
+
+#define mmNIC3_QM0_ARB_SLV_ID                                        0xDA0B3C
+
+#define mmNIC3_QM0_ARB_MSG_MAX_INFLIGHT                              0xDA0B44
+
+#define mmNIC3_QM0_ARB_MSG_AWUSER_31_11                              0xDA0B48
+
+#define mmNIC3_QM0_ARB_MSG_AWUSER_SEC_PROP                           0xDA0B4C
+
+#define mmNIC3_QM0_ARB_MSG_AWUSER_NON_SEC_PROP                       0xDA0B50
+
+#define mmNIC3_QM0_ARB_BASE_LO                                       0xDA0B54
+
+#define mmNIC3_QM0_ARB_BASE_HI                                       0xDA0B58
+
+#define mmNIC3_QM0_ARB_STATE_STS                                     0xDA0B80
+
+#define mmNIC3_QM0_ARB_CHOISE_FULLNESS_STS                           0xDA0B84
+
+#define mmNIC3_QM0_ARB_MSG_STS                                       0xDA0B88
+
+#define mmNIC3_QM0_ARB_SLV_CHOISE_Q_HEAD                             0xDA0B8C
+
+#define mmNIC3_QM0_ARB_ERR_CAUSE                                     0xDA0B9C
+
+#define mmNIC3_QM0_ARB_ERR_MSG_EN                                    0xDA0BA0
+
+#define mmNIC3_QM0_ARB_ERR_STS_DRP                                   0xDA0BA8
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_0                                0xDA0BB0
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_1                                0xDA0BB4
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_2                                0xDA0BB8
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_3                                0xDA0BBC
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_4                                0xDA0BC0
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_5                                0xDA0BC4
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_6                                0xDA0BC8
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_7                                0xDA0BCC
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_8                                0xDA0BD0
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_9                                0xDA0BD4
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_10                               0xDA0BD8
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_11                               0xDA0BDC
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_12                               0xDA0BE0
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_13                               0xDA0BE4
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_14                               0xDA0BE8
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_15                               0xDA0BEC
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_16                               0xDA0BF0
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_17                               0xDA0BF4
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_18                               0xDA0BF8
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_19                               0xDA0BFC
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_20                               0xDA0C00
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_21                               0xDA0C04
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_22                               0xDA0C08
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_23                               0xDA0C0C
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_24                               0xDA0C10
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_25                               0xDA0C14
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_26                               0xDA0C18
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_27                               0xDA0C1C
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_28                               0xDA0C20
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_29                               0xDA0C24
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_30                               0xDA0C28
+
+#define mmNIC3_QM0_ARB_MST_CRED_STS_31                               0xDA0C2C
+
+#define mmNIC3_QM0_CGM_CFG                                           0xDA0C70
+
+#define mmNIC3_QM0_CGM_STS                                           0xDA0C74
+
+#define mmNIC3_QM0_CGM_CFG1                                          0xDA0C78
+
+#define mmNIC3_QM0_LOCAL_RANGE_BASE                                  0xDA0C80
+
+#define mmNIC3_QM0_LOCAL_RANGE_SIZE                                  0xDA0C84
+
+#define mmNIC3_QM0_CSMR_STRICT_PRIO_CFG                              0xDA0C90
+
+#define mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_1                             0xDA0C94
+
+#define mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_0                             0xDA0C98
+
+#define mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_1                             0xDA0C9C
+
+#define mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_0                             0xDA0CA0
+
+#define mmNIC3_QM0_GLBL_AXCACHE                                      0xDA0CA4
+
+#define mmNIC3_QM0_IND_GW_APB_CFG                                    0xDA0CB0
+
+#define mmNIC3_QM0_IND_GW_APB_WDATA                                  0xDA0CB4
+
+#define mmNIC3_QM0_IND_GW_APB_RDATA                                  0xDA0CB8
+
+#define mmNIC3_QM0_IND_GW_APB_STATUS                                 0xDA0CBC
+
+#define mmNIC3_QM0_GLBL_ERR_ADDR_LO                                  0xDA0CD0
+
+#define mmNIC3_QM0_GLBL_ERR_ADDR_HI                                  0xDA0CD4
+
+#define mmNIC3_QM0_GLBL_ERR_WDATA                                    0xDA0CD8
+
+#define mmNIC3_QM0_GLBL_MEM_INIT_BUSY                                0xDA0D00
+
+#endif /* ASIC_REG_NIC3_QM0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..7fa040f65004bf8fd39b89c587ff217ea61a9a68
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC3_QM1_REGS_H_
+#define ASIC_REG_NIC3_QM1_REGS_H_
+
+/*
+ *****************************************
+ *   NIC3_QM1 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC3_QM1_GLBL_CFG0                                         0xDA2000
+
+#define mmNIC3_QM1_GLBL_CFG1                                         0xDA2004
+
+#define mmNIC3_QM1_GLBL_PROT                                         0xDA2008
+
+#define mmNIC3_QM1_GLBL_ERR_CFG                                      0xDA200C
+
+#define mmNIC3_QM1_GLBL_SECURE_PROPS_0                               0xDA2010
+
+#define mmNIC3_QM1_GLBL_SECURE_PROPS_1                               0xDA2014
+
+#define mmNIC3_QM1_GLBL_SECURE_PROPS_2                               0xDA2018
+
+#define mmNIC3_QM1_GLBL_SECURE_PROPS_3                               0xDA201C
+
+#define mmNIC3_QM1_GLBL_SECURE_PROPS_4                               0xDA2020
+
+#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0                           0xDA2024
+
+#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1                           0xDA2028
+
+#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2                           0xDA202C
+
+#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3                           0xDA2030
+
+#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4                           0xDA2034
+
+#define mmNIC3_QM1_GLBL_STS0                                         0xDA2038
+
+#define mmNIC3_QM1_GLBL_STS1_0                                       0xDA2040
+
+#define mmNIC3_QM1_GLBL_STS1_1                                       0xDA2044
+
+#define mmNIC3_QM1_GLBL_STS1_2                                       0xDA2048
+
+#define mmNIC3_QM1_GLBL_STS1_3                                       0xDA204C
+
+#define mmNIC3_QM1_GLBL_STS1_4                                       0xDA2050
+
+#define mmNIC3_QM1_GLBL_MSG_EN_0                                     0xDA2054
+
+#define mmNIC3_QM1_GLBL_MSG_EN_1                                     0xDA2058
+
+#define mmNIC3_QM1_GLBL_MSG_EN_2                                     0xDA205C
+
+#define mmNIC3_QM1_GLBL_MSG_EN_3                                     0xDA2060
+
+#define mmNIC3_QM1_GLBL_MSG_EN_4                                     0xDA2068
+
+#define mmNIC3_QM1_PQ_BASE_LO_0                                      0xDA2070
+
+#define mmNIC3_QM1_PQ_BASE_LO_1                                      0xDA2074
+
+#define mmNIC3_QM1_PQ_BASE_LO_2                                      0xDA2078
+
+#define mmNIC3_QM1_PQ_BASE_LO_3                                      0xDA207C
+
+#define mmNIC3_QM1_PQ_BASE_HI_0                                      0xDA2080
+
+#define mmNIC3_QM1_PQ_BASE_HI_1                                      0xDA2084
+
+#define mmNIC3_QM1_PQ_BASE_HI_2                                      0xDA2088
+
+#define mmNIC3_QM1_PQ_BASE_HI_3                                      0xDA208C
+
+#define mmNIC3_QM1_PQ_SIZE_0                                         0xDA2090
+
+#define mmNIC3_QM1_PQ_SIZE_1                                         0xDA2094
+
+#define mmNIC3_QM1_PQ_SIZE_2                                         0xDA2098
+
+#define mmNIC3_QM1_PQ_SIZE_3                                         0xDA209C
+
+#define mmNIC3_QM1_PQ_PI_0                                           0xDA20A0
+
+#define mmNIC3_QM1_PQ_PI_1                                           0xDA20A4
+
+#define mmNIC3_QM1_PQ_PI_2                                           0xDA20A8
+
+#define mmNIC3_QM1_PQ_PI_3                                           0xDA20AC
+
+#define mmNIC3_QM1_PQ_CI_0                                           0xDA20B0
+
+#define mmNIC3_QM1_PQ_CI_1                                           0xDA20B4
+
+#define mmNIC3_QM1_PQ_CI_2                                           0xDA20B8
+
+#define mmNIC3_QM1_PQ_CI_3                                           0xDA20BC
+
+#define mmNIC3_QM1_PQ_CFG0_0                                         0xDA20C0
+
+#define mmNIC3_QM1_PQ_CFG0_1                                         0xDA20C4
+
+#define mmNIC3_QM1_PQ_CFG0_2                                         0xDA20C8
+
+#define mmNIC3_QM1_PQ_CFG0_3                                         0xDA20CC
+
+#define mmNIC3_QM1_PQ_CFG1_0                                         0xDA20D0
+
+#define mmNIC3_QM1_PQ_CFG1_1                                         0xDA20D4
+
+#define mmNIC3_QM1_PQ_CFG1_2                                         0xDA20D8
+
+#define mmNIC3_QM1_PQ_CFG1_3                                         0xDA20DC
+
+#define mmNIC3_QM1_PQ_ARUSER_31_11_0                                 0xDA20E0
+
+#define mmNIC3_QM1_PQ_ARUSER_31_11_1                                 0xDA20E4
+
+#define mmNIC3_QM1_PQ_ARUSER_31_11_2                                 0xDA20E8
+
+#define mmNIC3_QM1_PQ_ARUSER_31_11_3                                 0xDA20EC
+
+#define mmNIC3_QM1_PQ_STS0_0                                         0xDA20F0
+
+#define mmNIC3_QM1_PQ_STS0_1                                         0xDA20F4
+
+#define mmNIC3_QM1_PQ_STS0_2                                         0xDA20F8
+
+#define mmNIC3_QM1_PQ_STS0_3                                         0xDA20FC
+
+#define mmNIC3_QM1_PQ_STS1_0                                         0xDA2100
+
+#define mmNIC3_QM1_PQ_STS1_1                                         0xDA2104
+
+#define mmNIC3_QM1_PQ_STS1_2                                         0xDA2108
+
+#define mmNIC3_QM1_PQ_STS1_3                                         0xDA210C
+
+#define mmNIC3_QM1_CQ_CFG0_0                                         0xDA2110
+
+#define mmNIC3_QM1_CQ_CFG0_1                                         0xDA2114
+
+#define mmNIC3_QM1_CQ_CFG0_2                                         0xDA2118
+
+#define mmNIC3_QM1_CQ_CFG0_3                                         0xDA211C
+
+#define mmNIC3_QM1_CQ_CFG0_4                                         0xDA2120
+
+#define mmNIC3_QM1_CQ_CFG1_0                                         0xDA2124
+
+#define mmNIC3_QM1_CQ_CFG1_1                                         0xDA2128
+
+#define mmNIC3_QM1_CQ_CFG1_2                                         0xDA212C
+
+#define mmNIC3_QM1_CQ_CFG1_3                                         0xDA2130
+
+#define mmNIC3_QM1_CQ_CFG1_4                                         0xDA2134
+
+#define mmNIC3_QM1_CQ_ARUSER_31_11_0                                 0xDA2138
+
+#define mmNIC3_QM1_CQ_ARUSER_31_11_1                                 0xDA213C
+
+#define mmNIC3_QM1_CQ_ARUSER_31_11_2                                 0xDA2140
+
+#define mmNIC3_QM1_CQ_ARUSER_31_11_3                                 0xDA2144
+
+#define mmNIC3_QM1_CQ_ARUSER_31_11_4                                 0xDA2148
+
+#define mmNIC3_QM1_CQ_STS0_0                                         0xDA214C
+
+#define mmNIC3_QM1_CQ_STS0_1                                         0xDA2150
+
+#define mmNIC3_QM1_CQ_STS0_2                                         0xDA2154
+
+#define mmNIC3_QM1_CQ_STS0_3                                         0xDA2158
+
+#define mmNIC3_QM1_CQ_STS0_4                                         0xDA215C
+
+#define mmNIC3_QM1_CQ_STS1_0                                         0xDA2160
+
+#define mmNIC3_QM1_CQ_STS1_1                                         0xDA2164
+
+#define mmNIC3_QM1_CQ_STS1_2                                         0xDA2168
+
+#define mmNIC3_QM1_CQ_STS1_3                                         0xDA216C
+
+#define mmNIC3_QM1_CQ_STS1_4                                         0xDA2170
+
+#define mmNIC3_QM1_CQ_PTR_LO_0                                       0xDA2174
+
+#define mmNIC3_QM1_CQ_PTR_HI_0                                       0xDA2178
+
+#define mmNIC3_QM1_CQ_TSIZE_0                                        0xDA217C
+
+#define mmNIC3_QM1_CQ_CTL_0                                          0xDA2180
+
+#define mmNIC3_QM1_CQ_PTR_LO_1                                       0xDA2184
+
+#define mmNIC3_QM1_CQ_PTR_HI_1                                       0xDA2188
+
+#define mmNIC3_QM1_CQ_TSIZE_1                                        0xDA218C
+
+#define mmNIC3_QM1_CQ_CTL_1                                          0xDA2190
+
+#define mmNIC3_QM1_CQ_PTR_LO_2                                       0xDA2194
+
+#define mmNIC3_QM1_CQ_PTR_HI_2                                       0xDA2198
+
+#define mmNIC3_QM1_CQ_TSIZE_2                                        0xDA219C
+
+#define mmNIC3_QM1_CQ_CTL_2                                          0xDA21A0
+
+#define mmNIC3_QM1_CQ_PTR_LO_3                                       0xDA21A4
+
+#define mmNIC3_QM1_CQ_PTR_HI_3                                       0xDA21A8
+
+#define mmNIC3_QM1_CQ_TSIZE_3                                        0xDA21AC
+
+#define mmNIC3_QM1_CQ_CTL_3                                          0xDA21B0
+
+#define mmNIC3_QM1_CQ_PTR_LO_4                                       0xDA21B4
+
+#define mmNIC3_QM1_CQ_PTR_HI_4                                       0xDA21B8
+
+#define mmNIC3_QM1_CQ_TSIZE_4                                        0xDA21BC
+
+#define mmNIC3_QM1_CQ_CTL_4                                          0xDA21C0
+
+#define mmNIC3_QM1_CQ_PTR_LO_STS_0                                   0xDA21C4
+
+#define mmNIC3_QM1_CQ_PTR_LO_STS_1                                   0xDA21C8
+
+#define mmNIC3_QM1_CQ_PTR_LO_STS_2                                   0xDA21CC
+
+#define mmNIC3_QM1_CQ_PTR_LO_STS_3                                   0xDA21D0
+
+#define mmNIC3_QM1_CQ_PTR_LO_STS_4                                   0xDA21D4
+
+#define mmNIC3_QM1_CQ_PTR_HI_STS_0                                   0xDA21D8
+
+#define mmNIC3_QM1_CQ_PTR_HI_STS_1                                   0xDA21DC
+
+#define mmNIC3_QM1_CQ_PTR_HI_STS_2                                   0xDA21E0
+
+#define mmNIC3_QM1_CQ_PTR_HI_STS_3                                   0xDA21E4
+
+#define mmNIC3_QM1_CQ_PTR_HI_STS_4                                   0xDA21E8
+
+#define mmNIC3_QM1_CQ_TSIZE_STS_0                                    0xDA21EC
+
+#define mmNIC3_QM1_CQ_TSIZE_STS_1                                    0xDA21F0
+
+#define mmNIC3_QM1_CQ_TSIZE_STS_2                                    0xDA21F4
+
+#define mmNIC3_QM1_CQ_TSIZE_STS_3                                    0xDA21F8
+
+#define mmNIC3_QM1_CQ_TSIZE_STS_4                                    0xDA21FC
+
+#define mmNIC3_QM1_CQ_CTL_STS_0                                      0xDA2200
+
+#define mmNIC3_QM1_CQ_CTL_STS_1                                      0xDA2204
+
+#define mmNIC3_QM1_CQ_CTL_STS_2                                      0xDA2208
+
+#define mmNIC3_QM1_CQ_CTL_STS_3                                      0xDA220C
+
+#define mmNIC3_QM1_CQ_CTL_STS_4                                      0xDA2210
+
+#define mmNIC3_QM1_CQ_IFIFO_CNT_0                                    0xDA2214
+
+#define mmNIC3_QM1_CQ_IFIFO_CNT_1                                    0xDA2218
+
+#define mmNIC3_QM1_CQ_IFIFO_CNT_2                                    0xDA221C
+
+#define mmNIC3_QM1_CQ_IFIFO_CNT_3                                    0xDA2220
+
+#define mmNIC3_QM1_CQ_IFIFO_CNT_4                                    0xDA2224
+
+#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_0                            0xDA2228
+
+#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_1                            0xDA222C
+
+#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_2                            0xDA2230
+
+#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_3                            0xDA2234
+
+#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_4                            0xDA2238
+
+#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_0                            0xDA223C
+
+#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_1                            0xDA2240
+
+#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_2                            0xDA2244
+
+#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_3                            0xDA2248
+
+#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_4                            0xDA224C
+
+#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_0                            0xDA2250
+
+#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_1                            0xDA2254
+
+#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_2                            0xDA2258
+
+#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_3                            0xDA225C
+
+#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_4                            0xDA2260
+
+#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_0                            0xDA2264
+
+#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_1                            0xDA2268
+
+#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_2                            0xDA226C
+
+#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_3                            0xDA2270
+
+#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_4                            0xDA2274
+
+#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_0                            0xDA2278
+
+#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_1                            0xDA227C
+
+#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2                            0xDA2280
+
+#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_3                            0xDA2284
+
+#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_4                            0xDA2288
+
+#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_0                            0xDA228C
+
+#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_1                            0xDA2290
+
+#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_2                            0xDA2294
+
+#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_3                            0xDA2298
+
+#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_4                            0xDA229C
+
+#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_0                            0xDA22A0
+
+#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_1                            0xDA22A4
+
+#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_2                            0xDA22A8
+
+#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_3                            0xDA22AC
+
+#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_4                            0xDA22B0
+
+#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_0                            0xDA22B4
+
+#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_1                            0xDA22B8
+
+#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_2                            0xDA22BC
+
+#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_3                            0xDA22C0
+
+#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_4                            0xDA22C4
+
+#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_0                            0xDA22C8
+
+#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_1                            0xDA22CC
+
+#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_2                            0xDA22D0
+
+#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_3                            0xDA22D4
+
+#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_4                            0xDA22D8
+
+#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0                      0xDA22E0
+
+#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1                      0xDA22E4
+
+#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2                      0xDA22E8
+
+#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3                      0xDA22EC
+
+#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4                      0xDA22F0
+
+#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0                      0xDA22F4
+
+#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1                      0xDA22F8
+
+#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2                      0xDA22FC
+
+#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3                      0xDA2300
+
+#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4                      0xDA2304
+
+#define mmNIC3_QM1_CP_FENCE0_RDATA_0                                 0xDA2308
+
+#define mmNIC3_QM1_CP_FENCE0_RDATA_1                                 0xDA230C
+
+#define mmNIC3_QM1_CP_FENCE0_RDATA_2                                 0xDA2310
+
+#define mmNIC3_QM1_CP_FENCE0_RDATA_3                                 0xDA2314
+
+#define mmNIC3_QM1_CP_FENCE0_RDATA_4                                 0xDA2318
+
+#define mmNIC3_QM1_CP_FENCE1_RDATA_0                                 0xDA231C
+
+#define mmNIC3_QM1_CP_FENCE1_RDATA_1                                 0xDA2320
+
+#define mmNIC3_QM1_CP_FENCE1_RDATA_2                                 0xDA2324
+
+#define mmNIC3_QM1_CP_FENCE1_RDATA_3                                 0xDA2328
+
+#define mmNIC3_QM1_CP_FENCE1_RDATA_4                                 0xDA232C
+
+#define mmNIC3_QM1_CP_FENCE2_RDATA_0                                 0xDA2330
+
+#define mmNIC3_QM1_CP_FENCE2_RDATA_1                                 0xDA2334
+
+#define mmNIC3_QM1_CP_FENCE2_RDATA_2                                 0xDA2338
+
+#define mmNIC3_QM1_CP_FENCE2_RDATA_3                                 0xDA233C
+
+#define mmNIC3_QM1_CP_FENCE2_RDATA_4                                 0xDA2340
+
+#define mmNIC3_QM1_CP_FENCE3_RDATA_0                                 0xDA2344
+
+#define mmNIC3_QM1_CP_FENCE3_RDATA_1                                 0xDA2348
+
+#define mmNIC3_QM1_CP_FENCE3_RDATA_2                                 0xDA234C
+
+#define mmNIC3_QM1_CP_FENCE3_RDATA_3                                 0xDA2350
+
+#define mmNIC3_QM1_CP_FENCE3_RDATA_4                                 0xDA2354
+
+#define mmNIC3_QM1_CP_FENCE0_CNT_0                                   0xDA2358
+
+#define mmNIC3_QM1_CP_FENCE0_CNT_1                                   0xDA235C
+
+#define mmNIC3_QM1_CP_FENCE0_CNT_2                                   0xDA2360
+
+#define mmNIC3_QM1_CP_FENCE0_CNT_3                                   0xDA2364
+
+#define mmNIC3_QM1_CP_FENCE0_CNT_4                                   0xDA2368
+
+#define mmNIC3_QM1_CP_FENCE1_CNT_0                                   0xDA236C
+
+#define mmNIC3_QM1_CP_FENCE1_CNT_1                                   0xDA2370
+
+#define mmNIC3_QM1_CP_FENCE1_CNT_2                                   0xDA2374
+
+#define mmNIC3_QM1_CP_FENCE1_CNT_3                                   0xDA2378
+
+#define mmNIC3_QM1_CP_FENCE1_CNT_4                                   0xDA237C
+
+#define mmNIC3_QM1_CP_FENCE2_CNT_0                                   0xDA2380
+
+#define mmNIC3_QM1_CP_FENCE2_CNT_1                                   0xDA2384
+
+#define mmNIC3_QM1_CP_FENCE2_CNT_2                                   0xDA2388
+
+#define mmNIC3_QM1_CP_FENCE2_CNT_3                                   0xDA238C
+
+#define mmNIC3_QM1_CP_FENCE2_CNT_4                                   0xDA2390
+
+#define mmNIC3_QM1_CP_FENCE3_CNT_0                                   0xDA2394
+
+#define mmNIC3_QM1_CP_FENCE3_CNT_1                                   0xDA2398
+
+#define mmNIC3_QM1_CP_FENCE3_CNT_2                                   0xDA239C
+
+#define mmNIC3_QM1_CP_FENCE3_CNT_3                                   0xDA23A0
+
+#define mmNIC3_QM1_CP_FENCE3_CNT_4                                   0xDA23A4
+
+#define mmNIC3_QM1_CP_STS_0                                          0xDA23A8
+
+#define mmNIC3_QM1_CP_STS_1                                          0xDA23AC
+
+#define mmNIC3_QM1_CP_STS_2                                          0xDA23B0
+
+#define mmNIC3_QM1_CP_STS_3                                          0xDA23B4
+
+#define mmNIC3_QM1_CP_STS_4                                          0xDA23B8
+
+#define mmNIC3_QM1_CP_CURRENT_INST_LO_0                              0xDA23BC
+
+#define mmNIC3_QM1_CP_CURRENT_INST_LO_1                              0xDA23C0
+
+#define mmNIC3_QM1_CP_CURRENT_INST_LO_2                              0xDA23C4
+
+#define mmNIC3_QM1_CP_CURRENT_INST_LO_3                              0xDA23C8
+
+#define mmNIC3_QM1_CP_CURRENT_INST_LO_4                              0xDA23CC
+
+#define mmNIC3_QM1_CP_CURRENT_INST_HI_0                              0xDA23D0
+
+#define mmNIC3_QM1_CP_CURRENT_INST_HI_1                              0xDA23D4
+
+#define mmNIC3_QM1_CP_CURRENT_INST_HI_2                              0xDA23D8
+
+#define mmNIC3_QM1_CP_CURRENT_INST_HI_3                              0xDA23DC
+
+#define mmNIC3_QM1_CP_CURRENT_INST_HI_4                              0xDA23E0
+
+#define mmNIC3_QM1_CP_BARRIER_CFG_0                                  0xDA23F4
+
+#define mmNIC3_QM1_CP_BARRIER_CFG_1                                  0xDA23F8
+
+#define mmNIC3_QM1_CP_BARRIER_CFG_2                                  0xDA23FC
+
+#define mmNIC3_QM1_CP_BARRIER_CFG_3                                  0xDA2400
+
+#define mmNIC3_QM1_CP_BARRIER_CFG_4                                  0xDA2404
+
+#define mmNIC3_QM1_CP_DBG_0_0                                        0xDA2408
+
+#define mmNIC3_QM1_CP_DBG_0_1                                        0xDA240C
+
+#define mmNIC3_QM1_CP_DBG_0_2                                        0xDA2410
+
+#define mmNIC3_QM1_CP_DBG_0_3                                        0xDA2414
+
+#define mmNIC3_QM1_CP_DBG_0_4                                        0xDA2418
+
+#define mmNIC3_QM1_CP_ARUSER_31_11_0                                 0xDA241C
+
+#define mmNIC3_QM1_CP_ARUSER_31_11_1                                 0xDA2420
+
+#define mmNIC3_QM1_CP_ARUSER_31_11_2                                 0xDA2424
+
+#define mmNIC3_QM1_CP_ARUSER_31_11_3                                 0xDA2428
+
+#define mmNIC3_QM1_CP_ARUSER_31_11_4                                 0xDA242C
+
+#define mmNIC3_QM1_CP_AWUSER_31_11_0                                 0xDA2430
+
+#define mmNIC3_QM1_CP_AWUSER_31_11_1                                 0xDA2434
+
+#define mmNIC3_QM1_CP_AWUSER_31_11_2                                 0xDA2438
+
+#define mmNIC3_QM1_CP_AWUSER_31_11_3                                 0xDA243C
+
+#define mmNIC3_QM1_CP_AWUSER_31_11_4                                 0xDA2440
+
+#define mmNIC3_QM1_ARB_CFG_0                                         0xDA2A00
+
+#define mmNIC3_QM1_ARB_CHOISE_Q_PUSH                                 0xDA2A04
+
+#define mmNIC3_QM1_ARB_WRR_WEIGHT_0                                  0xDA2A08
+
+#define mmNIC3_QM1_ARB_WRR_WEIGHT_1                                  0xDA2A0C
+
+#define mmNIC3_QM1_ARB_WRR_WEIGHT_2                                  0xDA2A10
+
+#define mmNIC3_QM1_ARB_WRR_WEIGHT_3                                  0xDA2A14
+
+#define mmNIC3_QM1_ARB_CFG_1                                         0xDA2A18
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_0                              0xDA2A20
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_1                              0xDA2A24
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_2                              0xDA2A28
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_3                              0xDA2A2C
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_4                              0xDA2A30
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_5                              0xDA2A34
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_6                              0xDA2A38
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_7                              0xDA2A3C
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_8                              0xDA2A40
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_9                              0xDA2A44
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_10                             0xDA2A48
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_11                             0xDA2A4C
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_12                             0xDA2A50
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_13                             0xDA2A54
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_14                             0xDA2A58
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_15                             0xDA2A5C
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_16                             0xDA2A60
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_17                             0xDA2A64
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_18                             0xDA2A68
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_19                             0xDA2A6C
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_20                             0xDA2A70
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_21                             0xDA2A74
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_22                             0xDA2A78
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_23                             0xDA2A7C
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_24                             0xDA2A80
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_25                             0xDA2A84
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_26                             0xDA2A88
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_27                             0xDA2A8C
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_28                             0xDA2A90
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_29                             0xDA2A94
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_30                             0xDA2A98
+
+#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_31                             0xDA2A9C
+
+#define mmNIC3_QM1_ARB_MST_CRED_INC                                  0xDA2AA0
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_0                        0xDA2AA4
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_1                        0xDA2AA8
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_2                        0xDA2AAC
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_3                        0xDA2AB0
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_4                        0xDA2AB4
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_5                        0xDA2AB8
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_6                        0xDA2ABC
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_7                        0xDA2AC0
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_8                        0xDA2AC4
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_9                        0xDA2AC8
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_10                       0xDA2ACC
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_11                       0xDA2AD0
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_12                       0xDA2AD4
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_13                       0xDA2AD8
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_14                       0xDA2ADC
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_15                       0xDA2AE0
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_16                       0xDA2AE4
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_17                       0xDA2AE8
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_18                       0xDA2AEC
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_19                       0xDA2AF0
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_20                       0xDA2AF4
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_21                       0xDA2AF8
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_22                       0xDA2AFC
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23                       0xDA2B00
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_24                       0xDA2B04
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_25                       0xDA2B08
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_26                       0xDA2B0C
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_27                       0xDA2B10
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_28                       0xDA2B14
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_29                       0xDA2B18
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_30                       0xDA2B1C
+
+#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_31                       0xDA2B20
+
+#define mmNIC3_QM1_ARB_SLV_MASTER_INC_CRED_OFST                      0xDA2B28
+
+#define mmNIC3_QM1_ARB_MST_SLAVE_EN                                  0xDA2B2C
+
+#define mmNIC3_QM1_ARB_MST_QUIET_PER                                 0xDA2B34
+
+#define mmNIC3_QM1_ARB_SLV_CHOISE_WDT                                0xDA2B38
+
+#define mmNIC3_QM1_ARB_SLV_ID                                        0xDA2B3C
+
+#define mmNIC3_QM1_ARB_MSG_MAX_INFLIGHT                              0xDA2B44
+
+#define mmNIC3_QM1_ARB_MSG_AWUSER_31_11                              0xDA2B48
+
+#define mmNIC3_QM1_ARB_MSG_AWUSER_SEC_PROP                           0xDA2B4C
+
+#define mmNIC3_QM1_ARB_MSG_AWUSER_NON_SEC_PROP                       0xDA2B50
+
+#define mmNIC3_QM1_ARB_BASE_LO                                       0xDA2B54
+
+#define mmNIC3_QM1_ARB_BASE_HI                                       0xDA2B58
+
+#define mmNIC3_QM1_ARB_STATE_STS                                     0xDA2B80
+
+#define mmNIC3_QM1_ARB_CHOISE_FULLNESS_STS                           0xDA2B84
+
+#define mmNIC3_QM1_ARB_MSG_STS                                       0xDA2B88
+
+#define mmNIC3_QM1_ARB_SLV_CHOISE_Q_HEAD                             0xDA2B8C
+
+#define mmNIC3_QM1_ARB_ERR_CAUSE                                     0xDA2B9C
+
+#define mmNIC3_QM1_ARB_ERR_MSG_EN                                    0xDA2BA0
+
+#define mmNIC3_QM1_ARB_ERR_STS_DRP                                   0xDA2BA8
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_0                                0xDA2BB0
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_1                                0xDA2BB4
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_2                                0xDA2BB8
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_3                                0xDA2BBC
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_4                                0xDA2BC0
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_5                                0xDA2BC4
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_6                                0xDA2BC8
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_7                                0xDA2BCC
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_8                                0xDA2BD0
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_9                                0xDA2BD4
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_10                               0xDA2BD8
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_11                               0xDA2BDC
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_12                               0xDA2BE0
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_13                               0xDA2BE4
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_14                               0xDA2BE8
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_15                               0xDA2BEC
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_16                               0xDA2BF0
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_17                               0xDA2BF4
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_18                               0xDA2BF8
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_19                               0xDA2BFC
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_20                               0xDA2C00
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_21                               0xDA2C04
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_22                               0xDA2C08
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_23                               0xDA2C0C
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_24                               0xDA2C10
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_25                               0xDA2C14
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_26                               0xDA2C18
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_27                               0xDA2C1C
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_28                               0xDA2C20
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_29                               0xDA2C24
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_30                               0xDA2C28
+
+#define mmNIC3_QM1_ARB_MST_CRED_STS_31                               0xDA2C2C
+
+#define mmNIC3_QM1_CGM_CFG                                           0xDA2C70
+
+#define mmNIC3_QM1_CGM_STS                                           0xDA2C74
+
+#define mmNIC3_QM1_CGM_CFG1                                          0xDA2C78
+
+#define mmNIC3_QM1_LOCAL_RANGE_BASE                                  0xDA2C80
+
+#define mmNIC3_QM1_LOCAL_RANGE_SIZE                                  0xDA2C84
+
+#define mmNIC3_QM1_CSMR_STRICT_PRIO_CFG                              0xDA2C90
+
+#define mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_1                             0xDA2C94
+
+#define mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_0                             0xDA2C98
+
+#define mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_1                             0xDA2C9C
+
+#define mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_0                             0xDA2CA0
+
+#define mmNIC3_QM1_GLBL_AXCACHE                                      0xDA2CA4
+
+#define mmNIC3_QM1_IND_GW_APB_CFG                                    0xDA2CB0
+
+#define mmNIC3_QM1_IND_GW_APB_WDATA                                  0xDA2CB4
+
+#define mmNIC3_QM1_IND_GW_APB_RDATA                                  0xDA2CB8
+
+#define mmNIC3_QM1_IND_GW_APB_STATUS                                 0xDA2CBC
+
+#define mmNIC3_QM1_GLBL_ERR_ADDR_LO                                  0xDA2CD0
+
+#define mmNIC3_QM1_GLBL_ERR_ADDR_HI                                  0xDA2CD4
+
+#define mmNIC3_QM1_GLBL_ERR_WDATA                                    0xDA2CD8
+
+#define mmNIC3_QM1_GLBL_MEM_INIT_BUSY                                0xDA2D00
+
+#endif /* ASIC_REG_NIC3_QM1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..99d5319672ddb570ef3eabeab54ead2d154419b6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC4_QM0_REGS_H_
+#define ASIC_REG_NIC4_QM0_REGS_H_
+
+/*
+ *****************************************
+ *   NIC4_QM0 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC4_QM0_GLBL_CFG0                                         0xDE0000
+
+#define mmNIC4_QM0_GLBL_CFG1                                         0xDE0004
+
+#define mmNIC4_QM0_GLBL_PROT                                         0xDE0008
+
+#define mmNIC4_QM0_GLBL_ERR_CFG                                      0xDE000C
+
+#define mmNIC4_QM0_GLBL_SECURE_PROPS_0                               0xDE0010
+
+#define mmNIC4_QM0_GLBL_SECURE_PROPS_1                               0xDE0014
+
+#define mmNIC4_QM0_GLBL_SECURE_PROPS_2                               0xDE0018
+
+#define mmNIC4_QM0_GLBL_SECURE_PROPS_3                               0xDE001C
+
+#define mmNIC4_QM0_GLBL_SECURE_PROPS_4                               0xDE0020
+
+#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0                           0xDE0024
+
+#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1                           0xDE0028
+
+#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2                           0xDE002C
+
+#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3                           0xDE0030
+
+#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4                           0xDE0034
+
+#define mmNIC4_QM0_GLBL_STS0                                         0xDE0038
+
+#define mmNIC4_QM0_GLBL_STS1_0                                       0xDE0040
+
+#define mmNIC4_QM0_GLBL_STS1_1                                       0xDE0044
+
+#define mmNIC4_QM0_GLBL_STS1_2                                       0xDE0048
+
+#define mmNIC4_QM0_GLBL_STS1_3                                       0xDE004C
+
+#define mmNIC4_QM0_GLBL_STS1_4                                       0xDE0050
+
+#define mmNIC4_QM0_GLBL_MSG_EN_0                                     0xDE0054
+
+#define mmNIC4_QM0_GLBL_MSG_EN_1                                     0xDE0058
+
+#define mmNIC4_QM0_GLBL_MSG_EN_2                                     0xDE005C
+
+#define mmNIC4_QM0_GLBL_MSG_EN_3                                     0xDE0060
+
+#define mmNIC4_QM0_GLBL_MSG_EN_4                                     0xDE0068
+
+#define mmNIC4_QM0_PQ_BASE_LO_0                                      0xDE0070
+
+#define mmNIC4_QM0_PQ_BASE_LO_1                                      0xDE0074
+
+#define mmNIC4_QM0_PQ_BASE_LO_2                                      0xDE0078
+
+#define mmNIC4_QM0_PQ_BASE_LO_3                                      0xDE007C
+
+#define mmNIC4_QM0_PQ_BASE_HI_0                                      0xDE0080
+
+#define mmNIC4_QM0_PQ_BASE_HI_1                                      0xDE0084
+
+#define mmNIC4_QM0_PQ_BASE_HI_2                                      0xDE0088
+
+#define mmNIC4_QM0_PQ_BASE_HI_3                                      0xDE008C
+
+#define mmNIC4_QM0_PQ_SIZE_0                                         0xDE0090
+
+#define mmNIC4_QM0_PQ_SIZE_1                                         0xDE0094
+
+#define mmNIC4_QM0_PQ_SIZE_2                                         0xDE0098
+
+#define mmNIC4_QM0_PQ_SIZE_3                                         0xDE009C
+
+#define mmNIC4_QM0_PQ_PI_0                                           0xDE00A0
+
+#define mmNIC4_QM0_PQ_PI_1                                           0xDE00A4
+
+#define mmNIC4_QM0_PQ_PI_2                                           0xDE00A8
+
+#define mmNIC4_QM0_PQ_PI_3                                           0xDE00AC
+
+#define mmNIC4_QM0_PQ_CI_0                                           0xDE00B0
+
+#define mmNIC4_QM0_PQ_CI_1                                           0xDE00B4
+
+#define mmNIC4_QM0_PQ_CI_2                                           0xDE00B8
+
+#define mmNIC4_QM0_PQ_CI_3                                           0xDE00BC
+
+#define mmNIC4_QM0_PQ_CFG0_0                                         0xDE00C0
+
+#define mmNIC4_QM0_PQ_CFG0_1                                         0xDE00C4
+
+#define mmNIC4_QM0_PQ_CFG0_2                                         0xDE00C8
+
+#define mmNIC4_QM0_PQ_CFG0_3                                         0xDE00CC
+
+#define mmNIC4_QM0_PQ_CFG1_0                                         0xDE00D0
+
+#define mmNIC4_QM0_PQ_CFG1_1                                         0xDE00D4
+
+#define mmNIC4_QM0_PQ_CFG1_2                                         0xDE00D8
+
+#define mmNIC4_QM0_PQ_CFG1_3                                         0xDE00DC
+
+#define mmNIC4_QM0_PQ_ARUSER_31_11_0                                 0xDE00E0
+
+#define mmNIC4_QM0_PQ_ARUSER_31_11_1                                 0xDE00E4
+
+#define mmNIC4_QM0_PQ_ARUSER_31_11_2                                 0xDE00E8
+
+#define mmNIC4_QM0_PQ_ARUSER_31_11_3                                 0xDE00EC
+
+#define mmNIC4_QM0_PQ_STS0_0                                         0xDE00F0
+
+#define mmNIC4_QM0_PQ_STS0_1                                         0xDE00F4
+
+#define mmNIC4_QM0_PQ_STS0_2                                         0xDE00F8
+
+#define mmNIC4_QM0_PQ_STS0_3                                         0xDE00FC
+
+#define mmNIC4_QM0_PQ_STS1_0                                         0xDE0100
+
+#define mmNIC4_QM0_PQ_STS1_1                                         0xDE0104
+
+#define mmNIC4_QM0_PQ_STS1_2                                         0xDE0108
+
+#define mmNIC4_QM0_PQ_STS1_3                                         0xDE010C
+
+#define mmNIC4_QM0_CQ_CFG0_0                                         0xDE0110
+
+#define mmNIC4_QM0_CQ_CFG0_1                                         0xDE0114
+
+#define mmNIC4_QM0_CQ_CFG0_2                                         0xDE0118
+
+#define mmNIC4_QM0_CQ_CFG0_3                                         0xDE011C
+
+#define mmNIC4_QM0_CQ_CFG0_4                                         0xDE0120
+
+#define mmNIC4_QM0_CQ_CFG1_0                                         0xDE0124
+
+#define mmNIC4_QM0_CQ_CFG1_1                                         0xDE0128
+
+#define mmNIC4_QM0_CQ_CFG1_2                                         0xDE012C
+
+#define mmNIC4_QM0_CQ_CFG1_3                                         0xDE0130
+
+#define mmNIC4_QM0_CQ_CFG1_4                                         0xDE0134
+
+#define mmNIC4_QM0_CQ_ARUSER_31_11_0                                 0xDE0138
+
+#define mmNIC4_QM0_CQ_ARUSER_31_11_1                                 0xDE013C
+
+#define mmNIC4_QM0_CQ_ARUSER_31_11_2                                 0xDE0140
+
+#define mmNIC4_QM0_CQ_ARUSER_31_11_3                                 0xDE0144
+
+#define mmNIC4_QM0_CQ_ARUSER_31_11_4                                 0xDE0148
+
+#define mmNIC4_QM0_CQ_STS0_0                                         0xDE014C
+
+#define mmNIC4_QM0_CQ_STS0_1                                         0xDE0150
+
+#define mmNIC4_QM0_CQ_STS0_2                                         0xDE0154
+
+#define mmNIC4_QM0_CQ_STS0_3                                         0xDE0158
+
+#define mmNIC4_QM0_CQ_STS0_4                                         0xDE015C
+
+#define mmNIC4_QM0_CQ_STS1_0                                         0xDE0160
+
+#define mmNIC4_QM0_CQ_STS1_1                                         0xDE0164
+
+#define mmNIC4_QM0_CQ_STS1_2                                         0xDE0168
+
+#define mmNIC4_QM0_CQ_STS1_3                                         0xDE016C
+
+#define mmNIC4_QM0_CQ_STS1_4                                         0xDE0170
+
+#define mmNIC4_QM0_CQ_PTR_LO_0                                       0xDE0174
+
+#define mmNIC4_QM0_CQ_PTR_HI_0                                       0xDE0178
+
+#define mmNIC4_QM0_CQ_TSIZE_0                                        0xDE017C
+
+#define mmNIC4_QM0_CQ_CTL_0                                          0xDE0180
+
+#define mmNIC4_QM0_CQ_PTR_LO_1                                       0xDE0184
+
+#define mmNIC4_QM0_CQ_PTR_HI_1                                       0xDE0188
+
+#define mmNIC4_QM0_CQ_TSIZE_1                                        0xDE018C
+
+#define mmNIC4_QM0_CQ_CTL_1                                          0xDE0190
+
+#define mmNIC4_QM0_CQ_PTR_LO_2                                       0xDE0194
+
+#define mmNIC4_QM0_CQ_PTR_HI_2                                       0xDE0198
+
+#define mmNIC4_QM0_CQ_TSIZE_2                                        0xDE019C
+
+#define mmNIC4_QM0_CQ_CTL_2                                          0xDE01A0
+
+#define mmNIC4_QM0_CQ_PTR_LO_3                                       0xDE01A4
+
+#define mmNIC4_QM0_CQ_PTR_HI_3                                       0xDE01A8
+
+#define mmNIC4_QM0_CQ_TSIZE_3                                        0xDE01AC
+
+#define mmNIC4_QM0_CQ_CTL_3                                          0xDE01B0
+
+#define mmNIC4_QM0_CQ_PTR_LO_4                                       0xDE01B4
+
+#define mmNIC4_QM0_CQ_PTR_HI_4                                       0xDE01B8
+
+#define mmNIC4_QM0_CQ_TSIZE_4                                        0xDE01BC
+
+#define mmNIC4_QM0_CQ_CTL_4                                          0xDE01C0
+
+#define mmNIC4_QM0_CQ_PTR_LO_STS_0                                   0xDE01C4
+
+#define mmNIC4_QM0_CQ_PTR_LO_STS_1                                   0xDE01C8
+
+#define mmNIC4_QM0_CQ_PTR_LO_STS_2                                   0xDE01CC
+
+#define mmNIC4_QM0_CQ_PTR_LO_STS_3                                   0xDE01D0
+
+#define mmNIC4_QM0_CQ_PTR_LO_STS_4                                   0xDE01D4
+
+#define mmNIC4_QM0_CQ_PTR_HI_STS_0                                   0xDE01D8
+
+#define mmNIC4_QM0_CQ_PTR_HI_STS_1                                   0xDE01DC
+
+#define mmNIC4_QM0_CQ_PTR_HI_STS_2                                   0xDE01E0
+
+#define mmNIC4_QM0_CQ_PTR_HI_STS_3                                   0xDE01E4
+
+#define mmNIC4_QM0_CQ_PTR_HI_STS_4                                   0xDE01E8
+
+#define mmNIC4_QM0_CQ_TSIZE_STS_0                                    0xDE01EC
+
+#define mmNIC4_QM0_CQ_TSIZE_STS_1                                    0xDE01F0
+
+#define mmNIC4_QM0_CQ_TSIZE_STS_2                                    0xDE01F4
+
+#define mmNIC4_QM0_CQ_TSIZE_STS_3                                    0xDE01F8
+
+#define mmNIC4_QM0_CQ_TSIZE_STS_4                                    0xDE01FC
+
+#define mmNIC4_QM0_CQ_CTL_STS_0                                      0xDE0200
+
+#define mmNIC4_QM0_CQ_CTL_STS_1                                      0xDE0204
+
+#define mmNIC4_QM0_CQ_CTL_STS_2                                      0xDE0208
+
+#define mmNIC4_QM0_CQ_CTL_STS_3                                      0xDE020C
+
+#define mmNIC4_QM0_CQ_CTL_STS_4                                      0xDE0210
+
+#define mmNIC4_QM0_CQ_IFIFO_CNT_0                                    0xDE0214
+
+#define mmNIC4_QM0_CQ_IFIFO_CNT_1                                    0xDE0218
+
+#define mmNIC4_QM0_CQ_IFIFO_CNT_2                                    0xDE021C
+
+#define mmNIC4_QM0_CQ_IFIFO_CNT_3                                    0xDE0220
+
+#define mmNIC4_QM0_CQ_IFIFO_CNT_4                                    0xDE0224
+
+#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_0                            0xDE0228
+
+#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_1                            0xDE022C
+
+#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_2                            0xDE0230
+
+#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_3                            0xDE0234
+
+#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_4                            0xDE0238
+
+#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_0                            0xDE023C
+
+#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_1                            0xDE0240
+
+#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_2                            0xDE0244
+
+#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_3                            0xDE0248
+
+#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_4                            0xDE024C
+
+#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_0                            0xDE0250
+
+#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_1                            0xDE0254
+
+#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_2                            0xDE0258
+
+#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_3                            0xDE025C
+
+#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_4                            0xDE0260
+
+#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_0                            0xDE0264
+
+#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_1                            0xDE0268
+
+#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_2                            0xDE026C
+
+#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_3                            0xDE0270
+
+#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_4                            0xDE0274
+
+#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_0                            0xDE0278
+
+#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_1                            0xDE027C
+
+#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2                            0xDE0280
+
+#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_3                            0xDE0284
+
+#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_4                            0xDE0288
+
+#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_0                            0xDE028C
+
+#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_1                            0xDE0290
+
+#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_2                            0xDE0294
+
+#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_3                            0xDE0298
+
+#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_4                            0xDE029C
+
+#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_0                            0xDE02A0
+
+#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_1                            0xDE02A4
+
+#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_2                            0xDE02A8
+
+#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_3                            0xDE02AC
+
+#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_4                            0xDE02B0
+
+#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_0                            0xDE02B4
+
+#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_1                            0xDE02B8
+
+#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_2                            0xDE02BC
+
+#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_3                            0xDE02C0
+
+#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_4                            0xDE02C4
+
+#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_0                            0xDE02C8
+
+#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_1                            0xDE02CC
+
+#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_2                            0xDE02D0
+
+#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_3                            0xDE02D4
+
+#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_4                            0xDE02D8
+
+#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0                      0xDE02E0
+
+#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1                      0xDE02E4
+
+#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2                      0xDE02E8
+
+#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3                      0xDE02EC
+
+#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4                      0xDE02F0
+
+#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0                      0xDE02F4
+
+#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1                      0xDE02F8
+
+#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2                      0xDE02FC
+
+#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3                      0xDE0300
+
+#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4                      0xDE0304
+
+#define mmNIC4_QM0_CP_FENCE0_RDATA_0                                 0xDE0308
+
+#define mmNIC4_QM0_CP_FENCE0_RDATA_1                                 0xDE030C
+
+#define mmNIC4_QM0_CP_FENCE0_RDATA_2                                 0xDE0310
+
+#define mmNIC4_QM0_CP_FENCE0_RDATA_3                                 0xDE0314
+
+#define mmNIC4_QM0_CP_FENCE0_RDATA_4                                 0xDE0318
+
+#define mmNIC4_QM0_CP_FENCE1_RDATA_0                                 0xDE031C
+
+#define mmNIC4_QM0_CP_FENCE1_RDATA_1                                 0xDE0320
+
+#define mmNIC4_QM0_CP_FENCE1_RDATA_2                                 0xDE0324
+
+#define mmNIC4_QM0_CP_FENCE1_RDATA_3                                 0xDE0328
+
+#define mmNIC4_QM0_CP_FENCE1_RDATA_4                                 0xDE032C
+
+#define mmNIC4_QM0_CP_FENCE2_RDATA_0                                 0xDE0330
+
+#define mmNIC4_QM0_CP_FENCE2_RDATA_1                                 0xDE0334
+
+#define mmNIC4_QM0_CP_FENCE2_RDATA_2                                 0xDE0338
+
+#define mmNIC4_QM0_CP_FENCE2_RDATA_3                                 0xDE033C
+
+#define mmNIC4_QM0_CP_FENCE2_RDATA_4                                 0xDE0340
+
+#define mmNIC4_QM0_CP_FENCE3_RDATA_0                                 0xDE0344
+
+#define mmNIC4_QM0_CP_FENCE3_RDATA_1                                 0xDE0348
+
+#define mmNIC4_QM0_CP_FENCE3_RDATA_2                                 0xDE034C
+
+#define mmNIC4_QM0_CP_FENCE3_RDATA_3                                 0xDE0350
+
+#define mmNIC4_QM0_CP_FENCE3_RDATA_4                                 0xDE0354
+
+#define mmNIC4_QM0_CP_FENCE0_CNT_0                                   0xDE0358
+
+#define mmNIC4_QM0_CP_FENCE0_CNT_1                                   0xDE035C
+
+#define mmNIC4_QM0_CP_FENCE0_CNT_2                                   0xDE0360
+
+#define mmNIC4_QM0_CP_FENCE0_CNT_3                                   0xDE0364
+
+#define mmNIC4_QM0_CP_FENCE0_CNT_4                                   0xDE0368
+
+#define mmNIC4_QM0_CP_FENCE1_CNT_0                                   0xDE036C
+
+#define mmNIC4_QM0_CP_FENCE1_CNT_1                                   0xDE0370
+
+#define mmNIC4_QM0_CP_FENCE1_CNT_2                                   0xDE0374
+
+#define mmNIC4_QM0_CP_FENCE1_CNT_3                                   0xDE0378
+
+#define mmNIC4_QM0_CP_FENCE1_CNT_4                                   0xDE037C
+
+#define mmNIC4_QM0_CP_FENCE2_CNT_0                                   0xDE0380
+
+#define mmNIC4_QM0_CP_FENCE2_CNT_1                                   0xDE0384
+
+#define mmNIC4_QM0_CP_FENCE2_CNT_2                                   0xDE0388
+
+#define mmNIC4_QM0_CP_FENCE2_CNT_3                                   0xDE038C
+
+#define mmNIC4_QM0_CP_FENCE2_CNT_4                                   0xDE0390
+
+#define mmNIC4_QM0_CP_FENCE3_CNT_0                                   0xDE0394
+
+#define mmNIC4_QM0_CP_FENCE3_CNT_1                                   0xDE0398
+
+#define mmNIC4_QM0_CP_FENCE3_CNT_2                                   0xDE039C
+
+#define mmNIC4_QM0_CP_FENCE3_CNT_3                                   0xDE03A0
+
+#define mmNIC4_QM0_CP_FENCE3_CNT_4                                   0xDE03A4
+
+#define mmNIC4_QM0_CP_STS_0                                          0xDE03A8
+
+#define mmNIC4_QM0_CP_STS_1                                          0xDE03AC
+
+#define mmNIC4_QM0_CP_STS_2                                          0xDE03B0
+
+#define mmNIC4_QM0_CP_STS_3                                          0xDE03B4
+
+#define mmNIC4_QM0_CP_STS_4                                          0xDE03B8
+
+#define mmNIC4_QM0_CP_CURRENT_INST_LO_0                              0xDE03BC
+
+#define mmNIC4_QM0_CP_CURRENT_INST_LO_1                              0xDE03C0
+
+#define mmNIC4_QM0_CP_CURRENT_INST_LO_2                              0xDE03C4
+
+#define mmNIC4_QM0_CP_CURRENT_INST_LO_3                              0xDE03C8
+
+#define mmNIC4_QM0_CP_CURRENT_INST_LO_4                              0xDE03CC
+
+#define mmNIC4_QM0_CP_CURRENT_INST_HI_0                              0xDE03D0
+
+#define mmNIC4_QM0_CP_CURRENT_INST_HI_1                              0xDE03D4
+
+#define mmNIC4_QM0_CP_CURRENT_INST_HI_2                              0xDE03D8
+
+#define mmNIC4_QM0_CP_CURRENT_INST_HI_3                              0xDE03DC
+
+#define mmNIC4_QM0_CP_CURRENT_INST_HI_4                              0xDE03E0
+
+#define mmNIC4_QM0_CP_BARRIER_CFG_0                                  0xDE03F4
+
+#define mmNIC4_QM0_CP_BARRIER_CFG_1                                  0xDE03F8
+
+#define mmNIC4_QM0_CP_BARRIER_CFG_2                                  0xDE03FC
+
+#define mmNIC4_QM0_CP_BARRIER_CFG_3                                  0xDE0400
+
+#define mmNIC4_QM0_CP_BARRIER_CFG_4                                  0xDE0404
+
+#define mmNIC4_QM0_CP_DBG_0_0                                        0xDE0408
+
+#define mmNIC4_QM0_CP_DBG_0_1                                        0xDE040C
+
+#define mmNIC4_QM0_CP_DBG_0_2                                        0xDE0410
+
+#define mmNIC4_QM0_CP_DBG_0_3                                        0xDE0414
+
+#define mmNIC4_QM0_CP_DBG_0_4                                        0xDE0418
+
+#define mmNIC4_QM0_CP_ARUSER_31_11_0                                 0xDE041C
+
+#define mmNIC4_QM0_CP_ARUSER_31_11_1                                 0xDE0420
+
+#define mmNIC4_QM0_CP_ARUSER_31_11_2                                 0xDE0424
+
+#define mmNIC4_QM0_CP_ARUSER_31_11_3                                 0xDE0428
+
+#define mmNIC4_QM0_CP_ARUSER_31_11_4                                 0xDE042C
+
+#define mmNIC4_QM0_CP_AWUSER_31_11_0                                 0xDE0430
+
+#define mmNIC4_QM0_CP_AWUSER_31_11_1                                 0xDE0434
+
+#define mmNIC4_QM0_CP_AWUSER_31_11_2                                 0xDE0438
+
+#define mmNIC4_QM0_CP_AWUSER_31_11_3                                 0xDE043C
+
+#define mmNIC4_QM0_CP_AWUSER_31_11_4                                 0xDE0440
+
+#define mmNIC4_QM0_ARB_CFG_0                                         0xDE0A00
+
+#define mmNIC4_QM0_ARB_CHOISE_Q_PUSH                                 0xDE0A04
+
+#define mmNIC4_QM0_ARB_WRR_WEIGHT_0                                  0xDE0A08
+
+#define mmNIC4_QM0_ARB_WRR_WEIGHT_1                                  0xDE0A0C
+
+#define mmNIC4_QM0_ARB_WRR_WEIGHT_2                                  0xDE0A10
+
+#define mmNIC4_QM0_ARB_WRR_WEIGHT_3                                  0xDE0A14
+
+#define mmNIC4_QM0_ARB_CFG_1                                         0xDE0A18
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_0                              0xDE0A20
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_1                              0xDE0A24
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_2                              0xDE0A28
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_3                              0xDE0A2C
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_4                              0xDE0A30
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_5                              0xDE0A34
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_6                              0xDE0A38
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_7                              0xDE0A3C
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_8                              0xDE0A40
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_9                              0xDE0A44
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_10                             0xDE0A48
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_11                             0xDE0A4C
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_12                             0xDE0A50
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_13                             0xDE0A54
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_14                             0xDE0A58
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_15                             0xDE0A5C
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_16                             0xDE0A60
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_17                             0xDE0A64
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_18                             0xDE0A68
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_19                             0xDE0A6C
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_20                             0xDE0A70
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_21                             0xDE0A74
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_22                             0xDE0A78
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_23                             0xDE0A7C
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_24                             0xDE0A80
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_25                             0xDE0A84
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_26                             0xDE0A88
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_27                             0xDE0A8C
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_28                             0xDE0A90
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_29                             0xDE0A94
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_30                             0xDE0A98
+
+#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_31                             0xDE0A9C
+
+#define mmNIC4_QM0_ARB_MST_CRED_INC                                  0xDE0AA0
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_0                        0xDE0AA4
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_1                        0xDE0AA8
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_2                        0xDE0AAC
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_3                        0xDE0AB0
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_4                        0xDE0AB4
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_5                        0xDE0AB8
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_6                        0xDE0ABC
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_7                        0xDE0AC0
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_8                        0xDE0AC4
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_9                        0xDE0AC8
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_10                       0xDE0ACC
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_11                       0xDE0AD0
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_12                       0xDE0AD4
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_13                       0xDE0AD8
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_14                       0xDE0ADC
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_15                       0xDE0AE0
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_16                       0xDE0AE4
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_17                       0xDE0AE8
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_18                       0xDE0AEC
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_19                       0xDE0AF0
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_20                       0xDE0AF4
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_21                       0xDE0AF8
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_22                       0xDE0AFC
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23                       0xDE0B00
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_24                       0xDE0B04
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_25                       0xDE0B08
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_26                       0xDE0B0C
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_27                       0xDE0B10
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_28                       0xDE0B14
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_29                       0xDE0B18
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_30                       0xDE0B1C
+
+#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_31                       0xDE0B20
+
+#define mmNIC4_QM0_ARB_SLV_MASTER_INC_CRED_OFST                      0xDE0B28
+
+#define mmNIC4_QM0_ARB_MST_SLAVE_EN                                  0xDE0B2C
+
+#define mmNIC4_QM0_ARB_MST_QUIET_PER                                 0xDE0B34
+
+#define mmNIC4_QM0_ARB_SLV_CHOISE_WDT                                0xDE0B38
+
+#define mmNIC4_QM0_ARB_SLV_ID                                        0xDE0B3C
+
+#define mmNIC4_QM0_ARB_MSG_MAX_INFLIGHT                              0xDE0B44
+
+#define mmNIC4_QM0_ARB_MSG_AWUSER_31_11                              0xDE0B48
+
+#define mmNIC4_QM0_ARB_MSG_AWUSER_SEC_PROP                           0xDE0B4C
+
+#define mmNIC4_QM0_ARB_MSG_AWUSER_NON_SEC_PROP                       0xDE0B50
+
+#define mmNIC4_QM0_ARB_BASE_LO                                       0xDE0B54
+
+#define mmNIC4_QM0_ARB_BASE_HI                                       0xDE0B58
+
+#define mmNIC4_QM0_ARB_STATE_STS                                     0xDE0B80
+
+#define mmNIC4_QM0_ARB_CHOISE_FULLNESS_STS                           0xDE0B84
+
+#define mmNIC4_QM0_ARB_MSG_STS                                       0xDE0B88
+
+#define mmNIC4_QM0_ARB_SLV_CHOISE_Q_HEAD                             0xDE0B8C
+
+#define mmNIC4_QM0_ARB_ERR_CAUSE                                     0xDE0B9C
+
+#define mmNIC4_QM0_ARB_ERR_MSG_EN                                    0xDE0BA0
+
+#define mmNIC4_QM0_ARB_ERR_STS_DRP                                   0xDE0BA8
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_0                                0xDE0BB0
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_1                                0xDE0BB4
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_2                                0xDE0BB8
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_3                                0xDE0BBC
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_4                                0xDE0BC0
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_5                                0xDE0BC4
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_6                                0xDE0BC8
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_7                                0xDE0BCC
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_8                                0xDE0BD0
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_9                                0xDE0BD4
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_10                               0xDE0BD8
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_11                               0xDE0BDC
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_12                               0xDE0BE0
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_13                               0xDE0BE4
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_14                               0xDE0BE8
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_15                               0xDE0BEC
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_16                               0xDE0BF0
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_17                               0xDE0BF4
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_18                               0xDE0BF8
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_19                               0xDE0BFC
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_20                               0xDE0C00
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_21                               0xDE0C04
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_22                               0xDE0C08
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_23                               0xDE0C0C
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_24                               0xDE0C10
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_25                               0xDE0C14
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_26                               0xDE0C18
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_27                               0xDE0C1C
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_28                               0xDE0C20
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_29                               0xDE0C24
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_30                               0xDE0C28
+
+#define mmNIC4_QM0_ARB_MST_CRED_STS_31                               0xDE0C2C
+
+#define mmNIC4_QM0_CGM_CFG                                           0xDE0C70
+
+#define mmNIC4_QM0_CGM_STS                                           0xDE0C74
+
+#define mmNIC4_QM0_CGM_CFG1                                          0xDE0C78
+
+#define mmNIC4_QM0_LOCAL_RANGE_BASE                                  0xDE0C80
+
+#define mmNIC4_QM0_LOCAL_RANGE_SIZE                                  0xDE0C84
+
+#define mmNIC4_QM0_CSMR_STRICT_PRIO_CFG                              0xDE0C90
+
+#define mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_1                             0xDE0C94
+
+#define mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_0                             0xDE0C98
+
+#define mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_1                             0xDE0C9C
+
+#define mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_0                             0xDE0CA0
+
+#define mmNIC4_QM0_GLBL_AXCACHE                                      0xDE0CA4
+
+#define mmNIC4_QM0_IND_GW_APB_CFG                                    0xDE0CB0
+
+#define mmNIC4_QM0_IND_GW_APB_WDATA                                  0xDE0CB4
+
+#define mmNIC4_QM0_IND_GW_APB_RDATA                                  0xDE0CB8
+
+#define mmNIC4_QM0_IND_GW_APB_STATUS                                 0xDE0CBC
+
+#define mmNIC4_QM0_GLBL_ERR_ADDR_LO                                  0xDE0CD0
+
+#define mmNIC4_QM0_GLBL_ERR_ADDR_HI                                  0xDE0CD4
+
+#define mmNIC4_QM0_GLBL_ERR_WDATA                                    0xDE0CD8
+
+#define mmNIC4_QM0_GLBL_MEM_INIT_BUSY                                0xDE0D00
+
+#endif /* ASIC_REG_NIC4_QM0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..34b21b21da5293388b82f34fdc02bd179452858f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_NIC4_QM1_REGS_H_
+#define ASIC_REG_NIC4_QM1_REGS_H_
+
+/*
+ *****************************************
+ *   NIC4_QM1 (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmNIC4_QM1_GLBL_CFG0                                         0xDE2000
+
+#define mmNIC4_QM1_GLBL_CFG1                                         0xDE2004
+
+#define mmNIC4_QM1_GLBL_PROT                                         0xDE2008
+
+#define mmNIC4_QM1_GLBL_ERR_CFG                                      0xDE200C
+
+#define mmNIC4_QM1_GLBL_SECURE_PROPS_0                               0xDE2010
+
+#define mmNIC4_QM1_GLBL_SECURE_PROPS_1                               0xDE2014
+
+#define mmNIC4_QM1_GLBL_SECURE_PROPS_2                               0xDE2018
+
+#define mmNIC4_QM1_GLBL_SECURE_PROPS_3                               0xDE201C
+
+#define mmNIC4_QM1_GLBL_SECURE_PROPS_4                               0xDE2020
+
+#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0                           0xDE2024
+
+#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1                           0xDE2028
+
+#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2                           0xDE202C
+
+#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3                           0xDE2030
+
+#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4                           0xDE2034
+
+#define mmNIC4_QM1_GLBL_STS0                                         0xDE2038
+
+#define mmNIC4_QM1_GLBL_STS1_0                                       0xDE2040
+
+#define mmNIC4_QM1_GLBL_STS1_1                                       0xDE2044
+
+#define mmNIC4_QM1_GLBL_STS1_2                                       0xDE2048
+
+#define mmNIC4_QM1_GLBL_STS1_3                                       0xDE204C
+
+#define mmNIC4_QM1_GLBL_STS1_4                                       0xDE2050
+
+#define mmNIC4_QM1_GLBL_MSG_EN_0                                     0xDE2054
+
+#define mmNIC4_QM1_GLBL_MSG_EN_1                                     0xDE2058
+
+#define mmNIC4_QM1_GLBL_MSG_EN_2                                     0xDE205C
+
+#define mmNIC4_QM1_GLBL_MSG_EN_3                                     0xDE2060
+
+#define mmNIC4_QM1_GLBL_MSG_EN_4                                     0xDE2068
+
+#define mmNIC4_QM1_PQ_BASE_LO_0                                      0xDE2070
+
+#define mmNIC4_QM1_PQ_BASE_LO_1                                      0xDE2074
+
+#define mmNIC4_QM1_PQ_BASE_LO_2                                      0xDE2078
+
+#define mmNIC4_QM1_PQ_BASE_LO_3                                      0xDE207C
+
+#define mmNIC4_QM1_PQ_BASE_HI_0                                      0xDE2080
+
+#define mmNIC4_QM1_PQ_BASE_HI_1                                      0xDE2084
+
+#define mmNIC4_QM1_PQ_BASE_HI_2                                      0xDE2088
+
+#define mmNIC4_QM1_PQ_BASE_HI_3                                      0xDE208C
+
+#define mmNIC4_QM1_PQ_SIZE_0                                         0xDE2090
+
+#define mmNIC4_QM1_PQ_SIZE_1                                         0xDE2094
+
+#define mmNIC4_QM1_PQ_SIZE_2                                         0xDE2098
+
+#define mmNIC4_QM1_PQ_SIZE_3                                         0xDE209C
+
+#define mmNIC4_QM1_PQ_PI_0                                           0xDE20A0
+
+#define mmNIC4_QM1_PQ_PI_1                                           0xDE20A4
+
+#define mmNIC4_QM1_PQ_PI_2                                           0xDE20A8
+
+#define mmNIC4_QM1_PQ_PI_3                                           0xDE20AC
+
+#define mmNIC4_QM1_PQ_CI_0                                           0xDE20B0
+
+#define mmNIC4_QM1_PQ_CI_1                                           0xDE20B4
+
+#define mmNIC4_QM1_PQ_CI_2                                           0xDE20B8
+
+#define mmNIC4_QM1_PQ_CI_3                                           0xDE20BC
+
+#define mmNIC4_QM1_PQ_CFG0_0                                         0xDE20C0
+
+#define mmNIC4_QM1_PQ_CFG0_1                                         0xDE20C4
+
+#define mmNIC4_QM1_PQ_CFG0_2                                         0xDE20C8
+
+#define mmNIC4_QM1_PQ_CFG0_3                                         0xDE20CC
+
+#define mmNIC4_QM1_PQ_CFG1_0                                         0xDE20D0
+
+#define mmNIC4_QM1_PQ_CFG1_1                                         0xDE20D4
+
+#define mmNIC4_QM1_PQ_CFG1_2                                         0xDE20D8
+
+#define mmNIC4_QM1_PQ_CFG1_3                                         0xDE20DC
+
+#define mmNIC4_QM1_PQ_ARUSER_31_11_0                                 0xDE20E0
+
+#define mmNIC4_QM1_PQ_ARUSER_31_11_1                                 0xDE20E4
+
+#define mmNIC4_QM1_PQ_ARUSER_31_11_2                                 0xDE20E8
+
+#define mmNIC4_QM1_PQ_ARUSER_31_11_3                                 0xDE20EC
+
+#define mmNIC4_QM1_PQ_STS0_0                                         0xDE20F0
+
+#define mmNIC4_QM1_PQ_STS0_1                                         0xDE20F4
+
+#define mmNIC4_QM1_PQ_STS0_2                                         0xDE20F8
+
+#define mmNIC4_QM1_PQ_STS0_3                                         0xDE20FC
+
+#define mmNIC4_QM1_PQ_STS1_0                                         0xDE2100
+
+#define mmNIC4_QM1_PQ_STS1_1                                         0xDE2104
+
+#define mmNIC4_QM1_PQ_STS1_2                                         0xDE2108
+
+#define mmNIC4_QM1_PQ_STS1_3                                         0xDE210C
+
+#define mmNIC4_QM1_CQ_CFG0_0                                         0xDE2110
+
+#define mmNIC4_QM1_CQ_CFG0_1                                         0xDE2114
+
+#define mmNIC4_QM1_CQ_CFG0_2                                         0xDE2118
+
+#define mmNIC4_QM1_CQ_CFG0_3                                         0xDE211C
+
+#define mmNIC4_QM1_CQ_CFG0_4                                         0xDE2120
+
+#define mmNIC4_QM1_CQ_CFG1_0                                         0xDE2124
+
+#define mmNIC4_QM1_CQ_CFG1_1                                         0xDE2128
+
+#define mmNIC4_QM1_CQ_CFG1_2                                         0xDE212C
+
+#define mmNIC4_QM1_CQ_CFG1_3                                         0xDE2130
+
+#define mmNIC4_QM1_CQ_CFG1_4                                         0xDE2134
+
+#define mmNIC4_QM1_CQ_ARUSER_31_11_0                                 0xDE2138
+
+#define mmNIC4_QM1_CQ_ARUSER_31_11_1                                 0xDE213C
+
+#define mmNIC4_QM1_CQ_ARUSER_31_11_2                                 0xDE2140
+
+#define mmNIC4_QM1_CQ_ARUSER_31_11_3                                 0xDE2144
+
+#define mmNIC4_QM1_CQ_ARUSER_31_11_4                                 0xDE2148
+
+#define mmNIC4_QM1_CQ_STS0_0                                         0xDE214C
+
+#define mmNIC4_QM1_CQ_STS0_1                                         0xDE2150
+
+#define mmNIC4_QM1_CQ_STS0_2                                         0xDE2154
+
+#define mmNIC4_QM1_CQ_STS0_3                                         0xDE2158
+
+#define mmNIC4_QM1_CQ_STS0_4                                         0xDE215C
+
+#define mmNIC4_QM1_CQ_STS1_0                                         0xDE2160
+
+#define mmNIC4_QM1_CQ_STS1_1                                         0xDE2164
+
+#define mmNIC4_QM1_CQ_STS1_2                                         0xDE2168
+
+#define mmNIC4_QM1_CQ_STS1_3                                         0xDE216C
+
+#define mmNIC4_QM1_CQ_STS1_4                                         0xDE2170
+
+#define mmNIC4_QM1_CQ_PTR_LO_0                                       0xDE2174
+
+#define mmNIC4_QM1_CQ_PTR_HI_0                                       0xDE2178
+
+#define mmNIC4_QM1_CQ_TSIZE_0                                        0xDE217C
+
+#define mmNIC4_QM1_CQ_CTL_0                                          0xDE2180
+
+#define mmNIC4_QM1_CQ_PTR_LO_1                                       0xDE2184
+
+#define mmNIC4_QM1_CQ_PTR_HI_1                                       0xDE2188
+
+#define mmNIC4_QM1_CQ_TSIZE_1                                        0xDE218C
+
+#define mmNIC4_QM1_CQ_CTL_1                                          0xDE2190
+
+#define mmNIC4_QM1_CQ_PTR_LO_2                                       0xDE2194
+
+#define mmNIC4_QM1_CQ_PTR_HI_2                                       0xDE2198
+
+#define mmNIC4_QM1_CQ_TSIZE_2                                        0xDE219C
+
+#define mmNIC4_QM1_CQ_CTL_2                                          0xDE21A0
+
+#define mmNIC4_QM1_CQ_PTR_LO_3                                       0xDE21A4
+
+#define mmNIC4_QM1_CQ_PTR_HI_3                                       0xDE21A8
+
+#define mmNIC4_QM1_CQ_TSIZE_3                                        0xDE21AC
+
+#define mmNIC4_QM1_CQ_CTL_3                                          0xDE21B0
+
+#define mmNIC4_QM1_CQ_PTR_LO_4                                       0xDE21B4
+
+#define mmNIC4_QM1_CQ_PTR_HI_4                                       0xDE21B8
+
+#define mmNIC4_QM1_CQ_TSIZE_4                                        0xDE21BC
+
+#define mmNIC4_QM1_CQ_CTL_4                                          0xDE21C0
+
+#define mmNIC4_QM1_CQ_PTR_LO_STS_0                                   0xDE21C4
+
+#define mmNIC4_QM1_CQ_PTR_LO_STS_1                                   0xDE21C8
+
+#define mmNIC4_QM1_CQ_PTR_LO_STS_2                                   0xDE21CC
+
+#define mmNIC4_QM1_CQ_PTR_LO_STS_3                                   0xDE21D0
+
+#define mmNIC4_QM1_CQ_PTR_LO_STS_4                                   0xDE21D4
+
+#define mmNIC4_QM1_CQ_PTR_HI_STS_0                                   0xDE21D8
+
+#define mmNIC4_QM1_CQ_PTR_HI_STS_1                                   0xDE21DC
+
+#define mmNIC4_QM1_CQ_PTR_HI_STS_2                                   0xDE21E0
+
+#define mmNIC4_QM1_CQ_PTR_HI_STS_3                                   0xDE21E4
+
+#define mmNIC4_QM1_CQ_PTR_HI_STS_4                                   0xDE21E8
+
+#define mmNIC4_QM1_CQ_TSIZE_STS_0                                    0xDE21EC
+
+#define mmNIC4_QM1_CQ_TSIZE_STS_1                                    0xDE21F0
+
+#define mmNIC4_QM1_CQ_TSIZE_STS_2                                    0xDE21F4
+
+#define mmNIC4_QM1_CQ_TSIZE_STS_3                                    0xDE21F8
+
+#define mmNIC4_QM1_CQ_TSIZE_STS_4                                    0xDE21FC
+
+#define mmNIC4_QM1_CQ_CTL_STS_0                                      0xDE2200
+
+#define mmNIC4_QM1_CQ_CTL_STS_1                                      0xDE2204
+
+#define mmNIC4_QM1_CQ_CTL_STS_2                                      0xDE2208
+
+#define mmNIC4_QM1_CQ_CTL_STS_3                                      0xDE220C
+
+#define mmNIC4_QM1_CQ_CTL_STS_4                                      0xDE2210
+
+#define mmNIC4_QM1_CQ_IFIFO_CNT_0                                    0xDE2214
+
+#define mmNIC4_QM1_CQ_IFIFO_CNT_1                                    0xDE2218
+
+#define mmNIC4_QM1_CQ_IFIFO_CNT_2                                    0xDE221C
+
+#define mmNIC4_QM1_CQ_IFIFO_CNT_3                                    0xDE2220
+
+#define mmNIC4_QM1_CQ_IFIFO_CNT_4                                    0xDE2224
+
+#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_0                            0xDE2228
+
+#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_1                            0xDE222C
+
+#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_2                            0xDE2230
+
+#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_3                            0xDE2234
+
+#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_4                            0xDE2238
+
+#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_0                            0xDE223C
+
+#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_1                            0xDE2240
+
+#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_2                            0xDE2244
+
+#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_3                            0xDE2248
+
+#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_4                            0xDE224C
+
+#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_0                            0xDE2250
+
+#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_1                            0xDE2254
+
+#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_2                            0xDE2258
+
+#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_3                            0xDE225C
+
+#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_4                            0xDE2260
+
+#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_0                            0xDE2264
+
+#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_1                            0xDE2268
+
+#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_2                            0xDE226C
+
+#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_3                            0xDE2270
+
+#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_4                            0xDE2274
+
+#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_0                            0xDE2278
+
+#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_1                            0xDE227C
+
+#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2                            0xDE2280
+
+#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_3                            0xDE2284
+
+#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_4                            0xDE2288
+
+#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_0                            0xDE228C
+
+#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_1                            0xDE2290
+
+#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_2                            0xDE2294
+
+#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_3                            0xDE2298
+
+#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_4                            0xDE229C
+
+#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_0                            0xDE22A0
+
+#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_1                            0xDE22A4
+
+#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_2                            0xDE22A8
+
+#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_3                            0xDE22AC
+
+#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_4                            0xDE22B0
+
+#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_0                            0xDE22B4
+
+#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_1                            0xDE22B8
+
+#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_2                            0xDE22BC
+
+#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_3                            0xDE22C0
+
+#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_4                            0xDE22C4
+
+#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_0                            0xDE22C8
+
+#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_1                            0xDE22CC
+
+#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_2                            0xDE22D0
+
+#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_3                            0xDE22D4
+
+#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_4                            0xDE22D8
+
+#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0                      0xDE22E0
+
+#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1                      0xDE22E4
+
+#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2                      0xDE22E8
+
+#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3                      0xDE22EC
+
+#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4                      0xDE22F0
+
+#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0                      0xDE22F4
+
+#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1                      0xDE22F8
+
+#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2                      0xDE22FC
+
+#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3                      0xDE2300
+
+#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4                      0xDE2304
+
+#define mmNIC4_QM1_CP_FENCE0_RDATA_0                                 0xDE2308
+
+#define mmNIC4_QM1_CP_FENCE0_RDATA_1                                 0xDE230C
+
+#define mmNIC4_QM1_CP_FENCE0_RDATA_2                                 0xDE2310
+
+#define mmNIC4_QM1_CP_FENCE0_RDATA_3                                 0xDE2314
+
+#define mmNIC4_QM1_CP_FENCE0_RDATA_4                                 0xDE2318
+
+#define mmNIC4_QM1_CP_FENCE1_RDATA_0                                 0xDE231C
+
+#define mmNIC4_QM1_CP_FENCE1_RDATA_1                                 0xDE2320
+
+#define mmNIC4_QM1_CP_FENCE1_RDATA_2                                 0xDE2324
+
+#define mmNIC4_QM1_CP_FENCE1_RDATA_3                                 0xDE2328
+
+#define mmNIC4_QM1_CP_FENCE1_RDATA_4                                 0xDE232C
+
+#define mmNIC4_QM1_CP_FENCE2_RDATA_0                                 0xDE2330
+
+#define mmNIC4_QM1_CP_FENCE2_RDATA_1                                 0xDE2334
+
+#define mmNIC4_QM1_CP_FENCE2_RDATA_2                                 0xDE2338
+
+#define mmNIC4_QM1_CP_FENCE2_RDATA_3                                 0xDE233C
+
+#define mmNIC4_QM1_CP_FENCE2_RDATA_4                                 0xDE2340
+
+#define mmNIC4_QM1_CP_FENCE3_RDATA_0                                 0xDE2344
+
+#define mmNIC4_QM1_CP_FENCE3_RDATA_1                                 0xDE2348
+
+#define mmNIC4_QM1_CP_FENCE3_RDATA_2                                 0xDE234C
+
+#define mmNIC4_QM1_CP_FENCE3_RDATA_3                                 0xDE2350
+
+#define mmNIC4_QM1_CP_FENCE3_RDATA_4                                 0xDE2354
+
+#define mmNIC4_QM1_CP_FENCE0_CNT_0                                   0xDE2358
+
+#define mmNIC4_QM1_CP_FENCE0_CNT_1                                   0xDE235C
+
+#define mmNIC4_QM1_CP_FENCE0_CNT_2                                   0xDE2360
+
+#define mmNIC4_QM1_CP_FENCE0_CNT_3                                   0xDE2364
+
+#define mmNIC4_QM1_CP_FENCE0_CNT_4                                   0xDE2368
+
+#define mmNIC4_QM1_CP_FENCE1_CNT_0                                   0xDE236C
+
+#define mmNIC4_QM1_CP_FENCE1_CNT_1                                   0xDE2370
+
+#define mmNIC4_QM1_CP_FENCE1_CNT_2                                   0xDE2374
+
+#define mmNIC4_QM1_CP_FENCE1_CNT_3                                   0xDE2378
+
+#define mmNIC4_QM1_CP_FENCE1_CNT_4                                   0xDE237C
+
+#define mmNIC4_QM1_CP_FENCE2_CNT_0                                   0xDE2380
+
+#define mmNIC4_QM1_CP_FENCE2_CNT_1                                   0xDE2384
+
+#define mmNIC4_QM1_CP_FENCE2_CNT_2                                   0xDE2388
+
+#define mmNIC4_QM1_CP_FENCE2_CNT_3                                   0xDE238C
+
+#define mmNIC4_QM1_CP_FENCE2_CNT_4                                   0xDE2390
+
+#define mmNIC4_QM1_CP_FENCE3_CNT_0                                   0xDE2394
+
+#define mmNIC4_QM1_CP_FENCE3_CNT_1                                   0xDE2398
+
+#define mmNIC4_QM1_CP_FENCE3_CNT_2                                   0xDE239C
+
+#define mmNIC4_QM1_CP_FENCE3_CNT_3                                   0xDE23A0
+
+#define mmNIC4_QM1_CP_FENCE3_CNT_4                                   0xDE23A4
+
+#define mmNIC4_QM1_CP_STS_0                                          0xDE23A8
+
+#define mmNIC4_QM1_CP_STS_1                                          0xDE23AC
+
+#define mmNIC4_QM1_CP_STS_2                                          0xDE23B0
+
+#define mmNIC4_QM1_CP_STS_3                                          0xDE23B4
+
+#define mmNIC4_QM1_CP_STS_4                                          0xDE23B8
+
+#define mmNIC4_QM1_CP_CURRENT_INST_LO_0                              0xDE23BC
+
+#define mmNIC4_QM1_CP_CURRENT_INST_LO_1                              0xDE23C0
+
+#define mmNIC4_QM1_CP_CURRENT_INST_LO_2                              0xDE23C4
+
+#define mmNIC4_QM1_CP_CURRENT_INST_LO_3                              0xDE23C8
+
+#define mmNIC4_QM1_CP_CURRENT_INST_LO_4                              0xDE23CC
+
+#define mmNIC4_QM1_CP_CURRENT_INST_HI_0                              0xDE23D0
+
+#define mmNIC4_QM1_CP_CURRENT_INST_HI_1                              0xDE23D4
+
+#define mmNIC4_QM1_CP_CURRENT_INST_HI_2                              0xDE23D8
+
+#define mmNIC4_QM1_CP_CURRENT_INST_HI_3                              0xDE23DC
+
+#define mmNIC4_QM1_CP_CURRENT_INST_HI_4                              0xDE23E0
+
+#define mmNIC4_QM1_CP_BARRIER_CFG_0                                  0xDE23F4
+
+#define mmNIC4_QM1_CP_BARRIER_CFG_1                                  0xDE23F8
+
+#define mmNIC4_QM1_CP_BARRIER_CFG_2                                  0xDE23FC
+
+#define mmNIC4_QM1_CP_BARRIER_CFG_3                                  0xDE2400
+
+#define mmNIC4_QM1_CP_BARRIER_CFG_4                                  0xDE2404
+
+#define mmNIC4_QM1_CP_DBG_0_0                                        0xDE2408
+
+#define mmNIC4_QM1_CP_DBG_0_1                                        0xDE240C
+
+#define mmNIC4_QM1_CP_DBG_0_2                                        0xDE2410
+
+#define mmNIC4_QM1_CP_DBG_0_3                                        0xDE2414
+
+#define mmNIC4_QM1_CP_DBG_0_4                                        0xDE2418
+
+#define mmNIC4_QM1_CP_ARUSER_31_11_0                                 0xDE241C
+
+#define mmNIC4_QM1_CP_ARUSER_31_11_1                                 0xDE2420
+
+#define mmNIC4_QM1_CP_ARUSER_31_11_2                                 0xDE2424
+
+#define mmNIC4_QM1_CP_ARUSER_31_11_3                                 0xDE2428
+
+#define mmNIC4_QM1_CP_ARUSER_31_11_4                                 0xDE242C
+
+#define mmNIC4_QM1_CP_AWUSER_31_11_0                                 0xDE2430
+
+#define mmNIC4_QM1_CP_AWUSER_31_11_1                                 0xDE2434
+
+#define mmNIC4_QM1_CP_AWUSER_31_11_2                                 0xDE2438
+
+#define mmNIC4_QM1_CP_AWUSER_31_11_3                                 0xDE243C
+
+#define mmNIC4_QM1_CP_AWUSER_31_11_4                                 0xDE2440
+
+#define mmNIC4_QM1_ARB_CFG_0                                         0xDE2A00
+
+#define mmNIC4_QM1_ARB_CHOISE_Q_PUSH                                 0xDE2A04
+
+#define mmNIC4_QM1_ARB_WRR_WEIGHT_0                                  0xDE2A08
+
+#define mmNIC4_QM1_ARB_WRR_WEIGHT_1                                  0xDE2A0C
+
+#define mmNIC4_QM1_ARB_WRR_WEIGHT_2                                  0xDE2A10
+
+#define mmNIC4_QM1_ARB_WRR_WEIGHT_3                                  0xDE2A14
+
+#define mmNIC4_QM1_ARB_CFG_1                                         0xDE2A18
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_0                              0xDE2A20
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_1                              0xDE2A24
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_2                              0xDE2A28
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_3                              0xDE2A2C
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_4                              0xDE2A30
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_5                              0xDE2A34
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_6                              0xDE2A38
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_7                              0xDE2A3C
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_8                              0xDE2A40
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_9                              0xDE2A44
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_10                             0xDE2A48
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_11                             0xDE2A4C
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_12                             0xDE2A50
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_13                             0xDE2A54
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_14                             0xDE2A58
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_15                             0xDE2A5C
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_16                             0xDE2A60
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_17                             0xDE2A64
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_18                             0xDE2A68
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_19                             0xDE2A6C
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_20                             0xDE2A70
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_21                             0xDE2A74
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_22                             0xDE2A78
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_23                             0xDE2A7C
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_24                             0xDE2A80
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_25                             0xDE2A84
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_26                             0xDE2A88
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_27                             0xDE2A8C
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_28                             0xDE2A90
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_29                             0xDE2A94
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_30                             0xDE2A98
+
+#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_31                             0xDE2A9C
+
+#define mmNIC4_QM1_ARB_MST_CRED_INC                                  0xDE2AA0
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_0                        0xDE2AA4
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_1                        0xDE2AA8
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_2                        0xDE2AAC
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_3                        0xDE2AB0
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_4                        0xDE2AB4
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_5                        0xDE2AB8
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_6                        0xDE2ABC
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_7                        0xDE2AC0
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_8                        0xDE2AC4
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_9                        0xDE2AC8
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_10                       0xDE2ACC
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_11                       0xDE2AD0
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_12                       0xDE2AD4
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_13                       0xDE2AD8
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_14                       0xDE2ADC
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_15                       0xDE2AE0
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_16                       0xDE2AE4
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_17                       0xDE2AE8
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_18                       0xDE2AEC
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_19                       0xDE2AF0
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_20                       0xDE2AF4
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_21                       0xDE2AF8
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_22                       0xDE2AFC
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23                       0xDE2B00
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_24                       0xDE2B04
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_25                       0xDE2B08
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_26                       0xDE2B0C
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_27                       0xDE2B10
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_28                       0xDE2B14
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_29                       0xDE2B18
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_30                       0xDE2B1C
+
+#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_31                       0xDE2B20
+
+#define mmNIC4_QM1_ARB_SLV_MASTER_INC_CRED_OFST                      0xDE2B28
+
+#define mmNIC4_QM1_ARB_MST_SLAVE_EN                                  0xDE2B2C
+
+#define mmNIC4_QM1_ARB_MST_QUIET_PER                                 0xDE2B34
+
+#define mmNIC4_QM1_ARB_SLV_CHOISE_WDT                                0xDE2B38
+
+#define mmNIC4_QM1_ARB_SLV_ID                                        0xDE2B3C
+
+#define mmNIC4_QM1_ARB_MSG_MAX_INFLIGHT                              0xDE2B44
+
+#define mmNIC4_QM1_ARB_MSG_AWUSER_31_11                              0xDE2B48
+
+#define mmNIC4_QM1_ARB_MSG_AWUSER_SEC_PROP                           0xDE2B4C
+
+#define mmNIC4_QM1_ARB_MSG_AWUSER_NON_SEC_PROP                       0xDE2B50
+
+#define mmNIC4_QM1_ARB_BASE_LO                                       0xDE2B54
+
+#define mmNIC4_QM1_ARB_BASE_HI                                       0xDE2B58
+
+#define mmNIC4_QM1_ARB_STATE_STS                                     0xDE2B80
+
+#define mmNIC4_QM1_ARB_CHOISE_FULLNESS_STS                           0xDE2B84
+
+#define mmNIC4_QM1_ARB_MSG_STS                                       0xDE2B88
+
+#define mmNIC4_QM1_ARB_SLV_CHOISE_Q_HEAD                             0xDE2B8C
+
+#define mmNIC4_QM1_ARB_ERR_CAUSE                                     0xDE2B9C
+
+#define mmNIC4_QM1_ARB_ERR_MSG_EN                                    0xDE2BA0
+
+#define mmNIC4_QM1_ARB_ERR_STS_DRP                                   0xDE2BA8
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_0                                0xDE2BB0
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_1                                0xDE2BB4
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_2                                0xDE2BB8
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_3                                0xDE2BBC
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_4                                0xDE2BC0
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_5                                0xDE2BC4
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_6                                0xDE2BC8
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_7                                0xDE2BCC
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_8                                0xDE2BD0
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_9                                0xDE2BD4
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_10                               0xDE2BD8
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_11                               0xDE2BDC
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_12                               0xDE2BE0
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_13                               0xDE2BE4
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_14                               0xDE2BE8
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_15                               0xDE2BEC
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_16                               0xDE2BF0
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_17                               0xDE2BF4
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_18                               0xDE2BF8
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_19                               0xDE2BFC
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_20                               0xDE2C00
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_21                               0xDE2C04
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_22                               0xDE2C08
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_23                               0xDE2C0C
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_24                               0xDE2C10
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_25                               0xDE2C14
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_26                               0xDE2C18
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_27                               0xDE2C1C
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_28                               0xDE2C20
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_29                               0xDE2C24
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_30                               0xDE2C28
+
+#define mmNIC4_QM1_ARB_MST_CRED_STS_31                               0xDE2C2C
+
+#define mmNIC4_QM1_CGM_CFG                                           0xDE2C70
+
+#define mmNIC4_QM1_CGM_STS                                           0xDE2C74
+
+#define mmNIC4_QM1_CGM_CFG1                                          0xDE2C78
+
+#define mmNIC4_QM1_LOCAL_RANGE_BASE                                  0xDE2C80
+
+#define mmNIC4_QM1_LOCAL_RANGE_SIZE                                  0xDE2C84
+
+#define mmNIC4_QM1_CSMR_STRICT_PRIO_CFG                              0xDE2C90
+
+#define mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_1                             0xDE2C94
+
+#define mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_0                             0xDE2C98
+
+#define mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_1                             0xDE2C9C
+
+#define mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_0                             0xDE2CA0
+
+#define mmNIC4_QM1_GLBL_AXCACHE                                      0xDE2CA4
+
+#define mmNIC4_QM1_IND_GW_APB_CFG                                    0xDE2CB0
+
+#define mmNIC4_QM1_IND_GW_APB_WDATA                                  0xDE2CB4
+
+#define mmNIC4_QM1_IND_GW_APB_RDATA                                  0xDE2CB8
+
+#define mmNIC4_QM1_IND_GW_APB_STATUS                                 0xDE2CBC
+
+#define mmNIC4_QM1_GLBL_ERR_ADDR_LO                                  0xDE2CD0
+
+#define mmNIC4_QM1_GLBL_ERR_ADDR_HI                                  0xDE2CD4
+
+#define mmNIC4_QM1_GLBL_ERR_WDATA                                    0xDE2CD8
+
+#define mmNIC4_QM1_GLBL_MEM_INIT_BUSY                                0xDE2D00
+
+#endif /* ASIC_REG_NIC4_QM1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h
deleted file mode 100644
index 687e2255cb19f718014e3b2faebef5418e68b37b..0000000000000000000000000000000000000000
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2016-2018 HabanaLabs, Ltd.
- * All Rights Reserved.
- *
- */
-
-/************************************
- ** This is an auto-generated file **
- **       DO NOT EDIT BELOW        **
- ************************************/
-
-#ifndef ASIC_REG_PSOC_HBM_PLL_REGS_H_
-#define ASIC_REG_PSOC_HBM_PLL_REGS_H_
-
-/*
- *****************************************
- *   PSOC_HBM_PLL (Prototype: PLL)
- *****************************************
- */
-
-#define mmPSOC_HBM_PLL_NR                                            0xC74100
-
-#define mmPSOC_HBM_PLL_NF                                            0xC74104
-
-#define mmPSOC_HBM_PLL_OD                                            0xC74108
-
-#define mmPSOC_HBM_PLL_NB                                            0xC7410C
-
-#define mmPSOC_HBM_PLL_CFG                                           0xC74110
-
-#define mmPSOC_HBM_PLL_LOSE_MASK                                     0xC74120
-
-#define mmPSOC_HBM_PLL_LOCK_INTR                                     0xC74128
-
-#define mmPSOC_HBM_PLL_LOCK_BYPASS                                   0xC7412C
-
-#define mmPSOC_HBM_PLL_DATA_CHNG                                     0xC74130
-
-#define mmPSOC_HBM_PLL_RST                                           0xC74134
-
-#define mmPSOC_HBM_PLL_SLIP_WD_CNTR                                  0xC74150
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_0                                  0xC74200
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_1                                  0xC74204
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_2                                  0xC74208
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_3                                  0xC7420C
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_0                              0xC74220
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_1                              0xC74224
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_2                              0xC74228
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_3                              0xC7422C
-
-#define mmPSOC_HBM_PLL_DIV_SEL_0                                     0xC74280
-
-#define mmPSOC_HBM_PLL_DIV_SEL_1                                     0xC74284
-
-#define mmPSOC_HBM_PLL_DIV_SEL_2                                     0xC74288
-
-#define mmPSOC_HBM_PLL_DIV_SEL_3                                     0xC7428C
-
-#define mmPSOC_HBM_PLL_DIV_EN_0                                      0xC742A0
-
-#define mmPSOC_HBM_PLL_DIV_EN_1                                      0xC742A4
-
-#define mmPSOC_HBM_PLL_DIV_EN_2                                      0xC742A8
-
-#define mmPSOC_HBM_PLL_DIV_EN_3                                      0xC742AC
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_0                             0xC742C0
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_1                             0xC742C4
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_2                             0xC742C8
-
-#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_3                             0xC742CC
-
-#define mmPSOC_HBM_PLL_CLK_GATER                                     0xC74300
-
-#define mmPSOC_HBM_PLL_CLK_RLX_0                                     0xC74310
-
-#define mmPSOC_HBM_PLL_CLK_RLX_1                                     0xC74314
-
-#define mmPSOC_HBM_PLL_CLK_RLX_2                                     0xC74318
-
-#define mmPSOC_HBM_PLL_CLK_RLX_3                                     0xC7431C
-
-#define mmPSOC_HBM_PLL_REF_CNTR_PERIOD                               0xC74400
-
-#define mmPSOC_HBM_PLL_REF_LOW_THRESHOLD                             0xC74410
-
-#define mmPSOC_HBM_PLL_REF_HIGH_THRESHOLD                            0xC74420
-
-#define mmPSOC_HBM_PLL_PLL_NOT_STABLE                                0xC74430
-
-#define mmPSOC_HBM_PLL_FREQ_CALC_EN                                  0xC74440
-
-#define mmPSOC_HBM_PLL_RLX_BITMAP_CFG                                0xC74500
-
-#define mmPSOC_HBM_PLL_RLX_BITMAP_0                                  0xC74510
-
-#define mmPSOC_HBM_PLL_RLX_BITMAP_1                                  0xC74514
-
-#define mmPSOC_HBM_PLL_RLX_BITMAP_2                                  0xC74518
-
-#define mmPSOC_HBM_PLL_RLX_BITMAP_3                                  0xC7451C
-
-#endif /* ASIC_REG_PSOC_HBM_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h
deleted file mode 100644
index 3dc9bb4542ddc8de7e1204292b2a5bbdbc7c0db6..0000000000000000000000000000000000000000
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2016-2018 HabanaLabs, Ltd.
- * All Rights Reserved.
- *
- */
-
-/************************************
- ** This is an auto-generated file **
- **       DO NOT EDIT BELOW        **
- ************************************/
-
-#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_
-#define ASIC_REG_PSOC_PCI_PLL_REGS_H_
-
-/*
- *****************************************
- *   PSOC_PCI_PLL (Prototype: PLL)
- *****************************************
- */
-
-#define mmPSOC_PCI_PLL_NR                                            0xC72100
-
-#define mmPSOC_PCI_PLL_NF                                            0xC72104
-
-#define mmPSOC_PCI_PLL_OD                                            0xC72108
-
-#define mmPSOC_PCI_PLL_NB                                            0xC7210C
-
-#define mmPSOC_PCI_PLL_CFG                                           0xC72110
-
-#define mmPSOC_PCI_PLL_LOSE_MASK                                     0xC72120
-
-#define mmPSOC_PCI_PLL_LOCK_INTR                                     0xC72128
-
-#define mmPSOC_PCI_PLL_LOCK_BYPASS                                   0xC7212C
-
-#define mmPSOC_PCI_PLL_DATA_CHNG                                     0xC72130
-
-#define mmPSOC_PCI_PLL_RST                                           0xC72134
-
-#define mmPSOC_PCI_PLL_SLIP_WD_CNTR                                  0xC72150
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_0                                  0xC72200
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_1                                  0xC72204
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_2                                  0xC72208
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_3                                  0xC7220C
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0                              0xC72220
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1                              0xC72224
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2                              0xC72228
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3                              0xC7222C
-
-#define mmPSOC_PCI_PLL_DIV_SEL_0                                     0xC72280
-
-#define mmPSOC_PCI_PLL_DIV_SEL_1                                     0xC72284
-
-#define mmPSOC_PCI_PLL_DIV_SEL_2                                     0xC72288
-
-#define mmPSOC_PCI_PLL_DIV_SEL_3                                     0xC7228C
-
-#define mmPSOC_PCI_PLL_DIV_EN_0                                      0xC722A0
-
-#define mmPSOC_PCI_PLL_DIV_EN_1                                      0xC722A4
-
-#define mmPSOC_PCI_PLL_DIV_EN_2                                      0xC722A8
-
-#define mmPSOC_PCI_PLL_DIV_EN_3                                      0xC722AC
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0                             0xC722C0
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1                             0xC722C4
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2                             0xC722C8
-
-#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3                             0xC722CC
-
-#define mmPSOC_PCI_PLL_CLK_GATER                                     0xC72300
-
-#define mmPSOC_PCI_PLL_CLK_RLX_0                                     0xC72310
-
-#define mmPSOC_PCI_PLL_CLK_RLX_1                                     0xC72314
-
-#define mmPSOC_PCI_PLL_CLK_RLX_2                                     0xC72318
-
-#define mmPSOC_PCI_PLL_CLK_RLX_3                                     0xC7231C
-
-#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD                               0xC72400
-
-#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD                             0xC72410
-
-#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD                            0xC72420
-
-#define mmPSOC_PCI_PLL_PLL_NOT_STABLE                                0xC72430
-
-#define mmPSOC_PCI_PLL_FREQ_CALC_EN                                  0xC72440
-
-#define mmPSOC_PCI_PLL_RLX_BITMAP_CFG                                0xC72500
-
-#define mmPSOC_PCI_PLL_RLX_BITMAP_0                                  0xC72510
-
-#define mmPSOC_PCI_PLL_RLX_BITMAP_1                                  0xC72514
-
-#define mmPSOC_PCI_PLL_RLX_BITMAP_2                                  0xC72518
-
-#define mmPSOC_PCI_PLL_RLX_BITMAP_3                                  0xC7251C
-
-#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
index 8aadc6357da138a46251a07c22d7c2bf66814fa1..25acd9e87e209c4bb2826aab80890a1c1a1d7e0f 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
@@ -8,6 +8,8 @@
 #ifndef GAUDI_FW_IF_H
 #define GAUDI_FW_IF_H
 
+#include <linux/types.h>
+
 #define GAUDI_EVENT_QUEUE_MSI_IDX	8
 #define GAUDI_NIC_PORT1_MSI_IDX		10
 #define GAUDI_NIC_PORT3_MSI_IDX		12
@@ -28,7 +30,30 @@ enum gaudi_pll_index {
 	MESH_PLL,
 	MME_PLL,
 	TPC_PLL,
-	IF_PLL
+	IF_PLL,
+	PLL_MAX
+};
+
+enum gaudi_nic_axi_error {
+	RXB,
+	RXE,
+	TXS,
+	TXE,
+	QPC_RESP,
+	NON_AXI_ERR,
+};
+
+/*
+ * struct eq_nic_sei_event - describes an AXI error cause.
+ * @axi_error_cause: one of the events defined in enum gaudi_nic_axi_error.
+ * @id: can be either 0 or 1, to further describe the unit with the interrupt
+ *      cause (e.g. TXE0 or TXE1).
+ * @pad: padding to align the structure to 64 bits.
+ */
+struct eq_nic_sei_event {
+	__u8 axi_error_cause;
+	__u8 id;
+	__u8 pad[6];
 };
 
 #define GAUDI_PLL_FREQ_LOW		200000000 /* 200 MHz */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index 46aed13f16b199d0964b8d13b959ee589bf64fe1..b9b90d079e23d91704be87d303d77b3b6b363293 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -41,6 +41,11 @@
 	(FIELD_PREP(TPC0_QM_GLBL_CFG0_CQF_EN_MASK, 0x1F)) | \
 	(FIELD_PREP(TPC0_QM_GLBL_CFG0_CP_EN_MASK, 0x1F)))
 
+#define NIC_QMAN_ENABLE		(\
+	(FIELD_PREP(NIC0_QM0_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \
+	(FIELD_PREP(NIC0_QM0_GLBL_CFG0_CQF_EN_MASK, 0xF)) | \
+	(FIELD_PREP(NIC0_QM0_GLBL_CFG0_CP_EN_MASK, 0xF)))
+
 #define QMAN_UPPER_CP_CGM_PWR_GATE_EN	(\
 	(FIELD_PREP(DMA0_QM_CGM_CFG_IDLE_TH_MASK, 0x20)) | \
 	(FIELD_PREP(DMA0_QM_CGM_CFG_G2F_TH_MASK, 0xA)) | \
@@ -93,6 +98,16 @@
 	(FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \
 	(FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F)))
 
+#define NIC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK	(\
+	(FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \
+	(FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0xF)) | \
+	(FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0xF)))
+
+#define NIC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK	(\
+	(FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \
+	(FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0xF)) | \
+	(FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0xF)))
+
 #define QMAN_CGM1_PWR_GATE_EN	(FIELD_PREP(DMA0_QM_CGM_CFG1_MASK_TH_MASK, 0xA))
 
 /* RESET registers configuration */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
index 977fb341a6e777c931cd022d05052c3828839fa9..137afedf5f1596221703855abafa87c68f0be6fb 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
@@ -12,6 +12,8 @@
  * PSOC scratch-pad registers
  */
 #define mmHW_STATE			mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+#define mmCPU_BOOT_DEV_STS0		mmPSOC_GLOBAL_CONF_SCRATCHPAD_20
+#define mmCPU_BOOT_DEV_STS1		mmPSOC_GLOBAL_CONF_SCRATCHPAD_21
 #define mmFUSE_VER_OFFSET		mmPSOC_GLOBAL_CONF_SCRATCHPAD_22
 #define mmCPU_CMD_STATUS_TO_HOST	mmPSOC_GLOBAL_CONF_SCRATCHPAD_23
 #define mmCPU_BOOT_ERR0			mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
index 0fa80fe9f6cc34e0e180906122c9bb2e6d217dc6..daf8d8cd14be7e2d8642a8a2b69e301ffd389878 100644
--- a/drivers/misc/habanalabs/include/goya/goya_fw_if.h
+++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
@@ -22,7 +22,8 @@ enum goya_pll_index {
 	MME_PLL,
 	PCI_PLL,
 	EMMC_PLL,
-	TPC_PLL
+	TPC_PLL,
+	PLL_MAX
 };
 
 #define GOYA_PLL_FREQ_LOW		50000000 /* 50 MHz */
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
index e56124265a059331189b2def882551d7ffd14063..f3ab282cafa4d963bf55ea6672a5d0da8c99ccc0 100644
--- a/drivers/misc/habanalabs/include/goya/goya_reg_map.h
+++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
@@ -22,6 +22,8 @@
 #define mmCPU_CQ_BASE_ADDR_LOW		mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
 #define mmCPU_CQ_BASE_ADDR_HIGH		mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
 #define mmCPU_CQ_LENGTH			mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
+#define mmCPU_BOOT_DEV_STS0		mmPSOC_GLOBAL_CONF_SCRATCHPAD_20
+#define mmCPU_BOOT_DEV_STS1		mmPSOC_GLOBAL_CONF_SCRATCHPAD_21
 #define mmFUSE_VER_OFFSET		mmPSOC_GLOBAL_CONF_SCRATCHPAD_22
 #define mmCPU_CMD_STATUS_TO_HOST	mmPSOC_GLOBAL_CONF_SCRATCHPAD_23
 #define mmCPU_BOOT_ERR0			mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c
index cc93569e601c86c41cb9c0ee4467b8adfe719e64..989d7d129469cfa2d2613344fc9a1ce97824ff16 100644
--- a/drivers/misc/hisi_hikey_usb.c
+++ b/drivers/misc/hisi_hikey_usb.c
@@ -168,10 +168,7 @@ static int hisi_hikey_usb_parse_kirin970(struct platform_device *pdev,
 
 	hisi_hikey_usb->reset = devm_gpiod_get(&pdev->dev, "hub_reset_en_gpio",
 					       GPIOD_OUT_HIGH);
-	if (IS_ERR(hisi_hikey_usb->reset))
-		return PTR_ERR(hisi_hikey_usb->reset);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(hisi_hikey_usb->reset);
 }
 
 static int hisi_hikey_usb_probe(struct platform_device *pdev)
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index c12406f610d5546b51c5f662480f58c6b5b44e0e..703d20e83ebd7dd3af3c701a426f300f4b247bca 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -127,13 +127,13 @@ static int isl29003_set_resolution(struct i2c_client *client, int res)
 static int isl29003_get_mode(struct i2c_client *client)
 {
 	return __isl29003_read_reg(client, ISL29003_REG_COMMAND,
-		ISL29003_RES_MASK, ISL29003_RES_SHIFT);
+		ISL29003_MODE_MASK, ISL29003_MODE_SHIFT);
 }
 
 static int isl29003_set_mode(struct i2c_client *client, int mode)
 {
 	return __isl29003_write_reg(client, ISL29003_REG_COMMAND,
-		ISL29003_RES_MASK, ISL29003_RES_SHIFT, mode);
+		ISL29003_MODE_MASK, ISL29003_MODE_SHIFT, mode);
 }
 
 /* power_state */
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 1c4c7aca00266e96046cac025bbdc18737fcd8fc..1ef7888a12b52d4ebe0d38c5861758d802afbc59 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -17,7 +17,7 @@ KCOV_INSTRUMENT_rodata.o	:= n
 
 OBJCOPYFLAGS :=
 OBJCOPYFLAGS_rodata_objcopy.o	:= \
-			--rename-section .text=.rodata,alloc,readonly,load
+			--rename-section .noinstr.text=.rodata,alloc,readonly,load
 targets += rodata.o rodata_objcopy.o
 $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
 	$(call if_changed,objcopy)
diff --git a/drivers/misc/lkdtm/rodata.c b/drivers/misc/lkdtm/rodata.c
index 58d180af72cf0e3afd3f1a9459aef892cbc85eb3..baacb876d1d94a6de80767b58df4848173a570e1 100644
--- a/drivers/misc/lkdtm/rodata.c
+++ b/drivers/misc/lkdtm/rodata.c
@@ -5,7 +5,7 @@
  */
 #include "lkdtm.h"
 
-void notrace lkdtm_rodata_do_nothing(void)
+void noinstr lkdtm_rodata_do_nothing(void)
 {
 	/* Does nothing. We just want an architecture agnostic "return". */
 }
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 4e30fa98fe7d31ec79957f66e17123f290179d05..d8e760b11ae3cb70ec7bbaf77cd3afbcf257cacc 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -33,6 +33,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
 #define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, \
 			      0xA5, 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
 
+#define MEI_UUID_PAVP UUID_LE(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
+			      0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
+
 #define MEI_UUID_ANY NULL_UUID_LE
 
 /**
@@ -148,7 +151,7 @@ static int mei_osver(struct mei_cl_device *cldev)
 	os_ver = (struct mei_os_ver *)fwcaps->data;
 	os_ver->os_type = OSTYPE_LINUX;
 
-	return __mei_cl_send(cldev->cl, buf, size, mode);
+	return __mei_cl_send(cldev->cl, buf, size, 0, mode);
 }
 
 #define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
@@ -169,7 +172,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
 	req.hdr.group_id = MKHI_GEN_GROUP_ID;
 	req.hdr.command = MKHI_GEN_GET_FW_VERSION_CMD;
 
-	ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req),
+	ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0,
 			    MEI_CL_IO_TX_BLOCKING);
 	if (ret < 0) {
 		dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n");
@@ -177,7 +180,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
 	}
 
 	ret = 0;
-	bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), 0,
+	bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0,
 				   MKHI_RCV_TIMEOUT);
 	if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) {
 		/*
@@ -324,13 +327,15 @@ static int mei_nfc_if_version(struct mei_cl *cl,
 	};
 	struct mei_nfc_reply *reply = NULL;
 	size_t if_version_length;
+	u8 vtag;
 	int bytes_recv, ret;
 
 	bus = cl->dev;
 
 	WARN_ON(mutex_is_locked(&bus->device_lock));
 
-	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), MEI_CL_IO_TX_BLOCKING);
+	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0,
+			    MEI_CL_IO_TX_BLOCKING);
 	if (ret < 0) {
 		dev_err(bus->dev, "Could not send IF version cmd\n");
 		return ret;
@@ -344,7 +349,8 @@ static int mei_nfc_if_version(struct mei_cl *cl,
 		return -ENOMEM;
 
 	ret = 0;
-	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0);
+	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag,
+				   0, 0);
 	if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
 		dev_err(bus->dev, "Could not read IF version\n");
 		ret = -EIO;
@@ -488,6 +494,7 @@ static struct mei_fixup {
 	MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
 	MEI_FIXUP(MEI_UUID_HDCP, whitelist),
 	MEI_FIXUP(MEI_UUID_ANY, vt_support),
+	MEI_FIXUP(MEI_UUID_PAVP, whitelist),
 };
 
 /**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 9cdaa7f3af235679b2a4b6ead8905f235ea9441a..2907db260fba5ead002155992ac513e0fca3104a 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -26,11 +26,12 @@
  * @cl: host client
  * @buf: buffer to send
  * @length: buffer length
+ * @vtag: virtual tag
  * @mode: sending mode
  *
  * Return: written size bytes or < 0 on error
  */
-ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag,
 		      unsigned int mode)
 {
 	struct mei_device *bus;
@@ -86,6 +87,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 		rets = -ENOMEM;
 		goto out;
 	}
+	cb->vtag = vtag;
 
 	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
 	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
@@ -106,11 +108,12 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
  * @buf: buffer to receive
  * @length: buffer length
  * @mode: io mode
+ * @vtag: virtual tag
  * @timeout: recv timeout, 0 for infinite timeout
  *
  * Return: read size in bytes of < 0 on error
  */
-ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
+ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
 		      unsigned int mode, unsigned long timeout)
 {
 	struct mei_device *bus;
@@ -196,6 +199,8 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
 	r_length = min_t(size_t, length, cb->buf_idx);
 	memcpy(buf, cb->buf.data, r_length);
 	rets = r_length;
+	if (vtag)
+		*vtag = cb->vtag;
 
 free:
 	mei_cl_del_rd_completed(cl, cb);
@@ -206,40 +211,87 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
 }
 
 /**
- * mei_cldev_send - me device send  (write)
+ * mei_cldev_send_vtag - me device send with vtag  (write)
  *
  * @cldev: me client device
  * @buf: buffer to send
  * @length: buffer length
+ * @vtag: virtual tag
  *
- * Return: written size in bytes or < 0 on error
+ * Return:
+ *  * written size in bytes
+ *  * < 0 on error
  */
-ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+
+ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
+			    u8 vtag)
 {
 	struct mei_cl *cl = cldev->cl;
 
-	return __mei_cl_send(cl, buf, length, MEI_CL_IO_TX_BLOCKING);
+	return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
 }
-EXPORT_SYMBOL_GPL(mei_cldev_send);
+EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
 
 /**
- * mei_cldev_recv_nonblock - non block client receive (read)
+ * mei_cldev_recv_vtag - client receive with vtag (read)
  *
  * @cldev: me client device
  * @buf: buffer to receive
  * @length: buffer length
+ * @vtag: virtual tag
  *
- * Return: read size in bytes of < 0 on error
- *         -EAGAIN if function will block.
+ * Return:
+ * * read size in bytes
+ * *  < 0 on error
  */
-ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
-				size_t length)
+
+ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
+			    u8 *vtag)
 {
 	struct mei_cl *cl = cldev->cl;
 
-	return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK, 0);
+	return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
 }
-EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
+EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
+
+/**
+ * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
+ *
+ * @cldev: me client device
+ * @buf: buffer to receive
+ * @length: buffer length
+ * @vtag: virtual tag
+ *
+ * Return:
+ * * read size in bytes
+ * * -EAGAIN if function will block.
+ * * < 0 on other error
+ */
+ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
+				     size_t length, u8 *vtag)
+{
+	struct mei_cl *cl = cldev->cl;
+
+	return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
+
+/**
+ * mei_cldev_send - me device send  (write)
+ *
+ * @cldev: me client device
+ * @buf: buffer to send
+ * @length: buffer length
+ *
+ * Return:
+ *  * written size in bytes
+ *  * < 0 on error
+ */
+ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+{
+	return mei_cldev_send_vtag(cldev, buf, length, 0);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_send);
 
 /**
  * mei_cldev_recv - client receive (read)
@@ -252,12 +304,27 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
  */
 ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
 {
-	struct mei_cl *cl = cldev->cl;
-
-	return __mei_cl_recv(cl, buf, length, 0, 0);
+	return mei_cldev_recv_vtag(cldev, buf, length, NULL);
 }
 EXPORT_SYMBOL_GPL(mei_cldev_recv);
 
+/**
+ * mei_cldev_recv_nonblock - non block client receive (read)
+ *
+ * @cldev: me client device
+ * @buf: buffer to receive
+ * @length: buffer length
+ *
+ * Return: read size in bytes or < 0 on error
+ *         -EAGAIN if function will block.
+ */
+ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
+				size_t length)
+{
+	return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
+
 /**
  * mei_cl_bus_rx_work - dispatch rx event for a bus device
  *
@@ -276,7 +343,8 @@ static void mei_cl_bus_rx_work(struct work_struct *work)
 		cldev->rx_cb(cldev);
 
 	mutex_lock(&bus->device_lock);
-	mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
+	if (mei_cl_is_connected(cldev->cl))
+		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
 	mutex_unlock(&bus->device_lock);
 }
 
@@ -364,10 +432,16 @@ int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
 	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
 
 	mutex_lock(&bus->device_lock);
-	ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
+	if (mei_cl_is_connected(cldev->cl))
+		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
+	else
+		ret = -ENODEV;
 	mutex_unlock(&bus->device_lock);
-	if (ret && ret != -EBUSY)
+	if (ret && ret != -EBUSY) {
+		cancel_work_sync(&cldev->rx_work);
+		cldev->rx_cb = NULL;
 		return ret;
+	}
 
 	return 0;
 }
@@ -401,8 +475,11 @@ int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
 	mutex_lock(&bus->device_lock);
 	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
 	mutex_unlock(&bus->device_lock);
-	if (ret)
+	if (ret) {
+		cancel_work_sync(&cldev->notif_work);
+		cldev->notif_cb = NULL;
 		return ret;
+	}
 
 	return 0;
 }
@@ -1037,7 +1114,7 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
 }
 
 /**
- * mei_cl_dev_setup - setup me client device
+ * mei_cl_bus_dev_setup - setup me client device
  *    run fix up routines and set the device name
  *
  * @bus: mei device
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index d5c3f7d54634c5ac98391a8841b2250f90f8413d..a56d41321f3290184c022c4962fd710e4483023f 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1306,7 +1306,7 @@ struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
  * mei_cl_fp_by_vtag - obtain the file pointer by vtag
  *
  * @cl: host client
- * @vtag: vm tag
+ * @vtag: virtual tag
  *
  * Return:
  * * A file pointer - on success
@@ -1317,7 +1317,9 @@ const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
 	struct mei_cl_vtag *vtag_l;
 
 	list_for_each_entry(vtag_l, &cl->vtag_map, list)
-		if (vtag_l->vtag == vtag)
+		/* The client on bus has one fixed fp */
+		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
+		    vtag_l->vtag == vtag)
 			return vtag_l->fp;
 
 	return ERR_PTR(-ENOENT);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index a97eb5d47705d2c2cc6873c0dda967f0ccae3fda..686e8b6a4c55eb8367310e79ad21a1d636691920 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1377,7 +1377,6 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 		dev_info(dev->dev, "hbm: stop response: resetting.\n");
 		/* force the reset */
 		return -EPROTO;
-		break;
 
 	case CLIENT_DISCONNECT_REQ_CMD:
 		dev_dbg(dev->dev, "hbm: disconnect request: message received\n");
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 2f4cc1a8aae8ce355e3c2d64b91f1b270635a493..8c395bfdf6f37404f00b6d250d510f24b3301684 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -340,9 +340,9 @@ struct mei_hw_ops {
 /* MEI bus API*/
 void mei_cl_bus_rescan_work(struct work_struct *work);
 void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
-ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag,
 		      unsigned int mode);
-ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
+ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
 		      unsigned int mode, unsigned long timeout);
 bool mei_cl_bus_rx_event(struct mei_cl *cl);
 bool mei_cl_bus_notify_event(struct mei_cl *cl);
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index 4d490b92d951fe32ab0ec455858a24e34e3c0f57..a68738f382521bce16aa79ad1b0653f97e6023fc 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -73,7 +73,7 @@ static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx)
 
 /**
  * get_function_0() - Find a related PCI device (function 0)
- * @device: PCI device to match
+ * @dev: PCI device to match
  *
  * Returns a pointer to the related device, or null if not found
  */
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 146ca6fb3260f32a581ae942da98cddb0dce9baf..eff481ce08ee0e0bcce635419d52a7579f396cb5 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -708,7 +708,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
 	switch (cmd) {
 	case PCITEST_BAR:
 		bar = arg;
-		if (bar < 0 || bar > 5)
+		if (bar > BAR_5)
 			goto ret;
 		if (is_am654_pci_dev(pdev) && bar == BAR_0)
 			goto ret;
@@ -811,8 +811,10 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
-	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
+	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
+		err = -EINVAL;
 		goto err_disable_irq;
+	}
 
 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
 		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
@@ -849,8 +851,10 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
 		goto err_ida_remove;
 	}
 
-	if (!pci_endpoint_test_request_irq(test))
+	if (!pci_endpoint_test_request_irq(test)) {
+		err = -EINVAL;
 		goto err_kfree_test_name;
+	}
 
 	misc_device = &test->miscdev;
 	misc_device->minor = MISC_DYNAMIC_MINOR;
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 71db60edff655254a454a93dd7d93a20dc07f696..225f2bb84e39bb953d85d139cb09372b7d965fd8 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -634,6 +634,7 @@ extern int xpc_setup_rsvd_page(void);
 extern void xpc_teardown_rsvd_page(void);
 extern int xpc_identify_activate_IRQ_sender(void);
 extern int xpc_partition_disengaged(struct xpc_partition *);
+extern int xpc_partition_disengaged_from_timer(struct xpc_partition *part);
 extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
 extern void xpc_mark_partition_inactive(struct xpc_partition *);
 extern void xpc_discovery(void);
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index e5244fc1dab302ff6a8a7fc246d12e9a682ba7a1..84610bbcc131434c0f0088ce095b82c8377187ca 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -179,7 +179,7 @@ xpc_timeout_partition_disengage(struct timer_list *t)
 
 	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));
 
-	(void)xpc_partition_disengaged(part);
+	xpc_partition_disengaged_from_timer(part);
 
 	DBUG_ON(part->disengage_timeout != 0);
 	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 57df06820bae2bc5e7612efd87e5028d6143e10c..1999d02923dee7de578246cc847e138dd072943d 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -262,8 +262,8 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
  * from us. Though we requested the remote partition to deactivate with regard
  * to us, we really only need to wait for the other side to disengage from us.
  */
-int
-xpc_partition_disengaged(struct xpc_partition *part)
+static int __xpc_partition_disengaged(struct xpc_partition *part,
+				      bool from_timer)
 {
 	short partid = XPC_PARTID(part);
 	int disengaged;
@@ -289,9 +289,9 @@ xpc_partition_disengaged(struct xpc_partition *part)
 		}
 		part->disengage_timeout = 0;
 
-		/* cancel the timer function, provided it's not us */
-		if (!in_interrupt())
-			del_singleshot_timer_sync(&part->disengage_timer);
+		/* Cancel the timer function if not called from it */
+		if (!from_timer)
+			del_timer_sync(&part->disengage_timer);
 
 		DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
 			part->act_state != XPC_P_AS_INACTIVE);
@@ -303,6 +303,16 @@ xpc_partition_disengaged(struct xpc_partition *part)
 	return disengaged;
 }
 
+int xpc_partition_disengaged(struct xpc_partition *part)
+{
+	return __xpc_partition_disengaged(part, false);
+}
+
+int xpc_partition_disengaged_from_timer(struct xpc_partition *part)
+{
+	return __xpc_partition_disengaged(part, true);
+}
+
 /*
  * Mark specified partition as active.
  */
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index f4ddd1e6701513af262e30aedf956a8409ac68e2..5a0a5fc3d3abca6536af520e47b9e4505f2ad48d 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -380,7 +380,7 @@ void st_int_recv(void *disc_data,
 			st_gdata->rx_state = ST_W4_HEADER;
 			st_gdata->rx_count = st_gdata->list[type]->hdr_len;
 			pr_debug("rx_count %ld\n", st_gdata->rx_count);
-		};
+		}
 		ptr++;
 		count--;
 	}
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 56dd98ab5a81463584799833611fae48917a42c0..d07af4edfcacf4251d51594ed12fefee27f3de50 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -231,17 +231,6 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
 
 	switch (type) {
 	case UACCE_QFRT_MMIO:
-		if (!uacce->ops->mmap) {
-			ret = -EINVAL;
-			goto out_with_lock;
-		}
-
-		ret = uacce->ops->mmap(q, vma, qfr);
-		if (ret)
-			goto out_with_lock;
-
-		break;
-
 	case UACCE_QFRT_DUS:
 		if (!uacce->ops->mmap) {
 			ret = -EINVAL;
@@ -533,5 +522,5 @@ subsys_initcall(uacce_init);
 module_exit(uacce_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Hisilicon Tech. Co., Ltd.");
+MODULE_AUTHOR("HiSilicon Tech. Co., Ltd.");
 MODULE_DESCRIPTION("Accelerator interface for Userland applications");
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index 16695366ec926dd76d8239e288cc40b9ee408ec4..26ff49fdf0f7d3ccf95e6e803acf922c9ee2bee9 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -743,7 +743,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
 			return VMCI_ERROR_MORE_DATA;
 		}
 
-		dbells = kmalloc(data_size, GFP_ATOMIC);
+		dbells = kzalloc(data_size, GFP_ATOMIC);
 		if (!dbells)
 			return VMCI_ERROR_NO_MEM;
 
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 92291292756a81062877be31f86132ba957ea424..23c8448a9c3b79c71d538ac4085dbadf5cf9542f 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -944,8 +944,8 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
 			     unsigned long data)
 {
 	struct xsdfec_dev *xsdfec;
-	void __user *arg = NULL;
-	int rval = -EINVAL;
+	void __user *arg = (void __user *)data;
+	int rval;
 
 	xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);
 
@@ -956,16 +956,6 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
 		return -EPERM;
 	}
 
-	if (_IOC_TYPE(cmd) != XSDFEC_MAGIC)
-		return -ENOTTY;
-
-	/* check if ioctl argument is present and valid */
-	if (_IOC_DIR(cmd) != _IOC_NONE) {
-		arg = (void __user *)data;
-		if (!arg)
-			return rval;
-	}
-
 	switch (cmd) {
 	case XSDFEC_START_DEV:
 		rval = xsdfec_start(xsdfec);
@@ -1010,20 +1000,12 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
 		rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
 		break;
 	default:
-		/* Should not get here */
+		rval = -ENOTTY;
 		break;
 	}
 	return rval;
 }
 
-#ifdef CONFIG_COMPAT
-static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
-				    unsigned long data)
-{
-	return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data));
-}
-#endif
-
 static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
 {
 	__poll_t mask = 0;
@@ -1054,9 +1036,7 @@ static const struct file_operations xsdfec_fops = {
 	.release = xsdfec_dev_release,
 	.unlocked_ioctl = xsdfec_dev_ioctl,
 	.poll = xsdfec_poll,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = xsdfec_dev_compat_ioctl,
-#endif
+	.compat_ioctl = compat_ptr_ioctl,
 };
 
 static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index eb395e14420717770377b345c41f7f5f2b0b7b5f..a7b5ad17bcf5ab73c327688a0664b5bde96778b8 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -20,6 +20,7 @@
 #include <linux/mmc/card.h>
 #include <linux/rtsx_pci.h>
 #include <asm/unaligned.h>
+#include <linux/pm_runtime.h>
 
 struct realtek_pci_sdmmc {
 	struct platform_device	*pdev;
@@ -1343,6 +1344,7 @@ static void init_extra_caps(struct realtek_pci_sdmmc *host)
 static void realtek_init_host(struct realtek_pci_sdmmc *host)
 {
 	struct mmc_host *mmc = host->mmc;
+	struct rtsx_pcr *pcr = host->pcr;
 
 	mmc->f_min = 250000;
 	mmc->f_max = 208000000;
@@ -1350,6 +1352,8 @@ static void realtek_init_host(struct realtek_pci_sdmmc *host)
 	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
 		MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST |
 		MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
+	if (pcr->rtd3_en)
+		mmc->caps = mmc->caps | MMC_CAP_AGGRESSIVE_PM;
 	mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE;
 	mmc->max_current_330 = 400;
 	mmc->max_current_180 = 800;
@@ -1407,6 +1411,13 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
 
 	realtek_init_host(host);
 
+	if (pcr->rtd3_en) {
+		pm_runtime_set_autosuspend_delay(&pdev->dev, 5000);
+		pm_runtime_use_autosuspend(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
+
+
 	mmc_add_host(mmc);
 
 	return 0;
@@ -1426,6 +1437,11 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
 	pcr->slots[RTSX_SD_CARD].card_event = NULL;
 	mmc = host->mmc;
 
+	if (pcr->rtd3_en) {
+		pm_runtime_dont_use_autosuspend(&pdev->dev);
+		pm_runtime_disable(&pdev->dev);
+	}
+
 	cancel_work_sync(&host->work);
 
 	mutex_lock(&host->host_mutex);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index a09ff8409f600613d51e176b8d3a18aab4dffdad..177f5bf27c6d5d239d2bb96d082dd93b2e4f5b24 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -34,6 +34,8 @@ struct nvmem_device {
 	struct bin_attribute	eeprom;
 	struct device		*base_dev;
 	struct list_head	cells;
+	const struct nvmem_keepout *keepout;
+	unsigned int		nkeepout;
 	nvmem_reg_read_t	reg_read;
 	nvmem_reg_write_t	reg_write;
 	struct gpio_desc	*wp_gpio;
@@ -66,8 +68,8 @@ static LIST_HEAD(nvmem_lookup_list);
 
 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
 
-static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
-			  void *val, size_t bytes)
+static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
+			    void *val, size_t bytes)
 {
 	if (nvmem->reg_read)
 		return nvmem->reg_read(nvmem->priv, offset, val, bytes);
@@ -75,8 +77,8 @@ static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
 	return -EINVAL;
 }
 
-static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
-			   void *val, size_t bytes)
+static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
+			     void *val, size_t bytes)
 {
 	int ret;
 
@@ -90,6 +92,88 @@ static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
 	return -EINVAL;
 }
 
+static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
+				      unsigned int offset, void *val,
+				      size_t bytes, int write)
+{
+
+	unsigned int end = offset + bytes;
+	unsigned int kend, ksize;
+	const struct nvmem_keepout *keepout = nvmem->keepout;
+	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
+	int rc;
+
+	/*
+	 * Skip all keepouts before the range being accessed.
+	 * Keepouts are sorted.
+	 */
+	while ((keepout < keepoutend) && (keepout->end <= offset))
+		keepout++;
+
+	while ((offset < end) && (keepout < keepoutend)) {
+		/* Access the valid portion before the keepout. */
+		if (offset < keepout->start) {
+			kend = min(end, keepout->start);
+			ksize = kend - offset;
+			if (write)
+				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
+			else
+				rc = __nvmem_reg_read(nvmem, offset, val, ksize);
+
+			if (rc)
+				return rc;
+
+			offset += ksize;
+			val += ksize;
+		}
+
+		/*
+		 * Now we're aligned to the start of this keepout zone. Go
+		 * through it.
+		 */
+		kend = min(end, keepout->end);
+		ksize = kend - offset;
+		if (!write)
+			memset(val, keepout->value, ksize);
+
+		val += ksize;
+		offset += ksize;
+		keepout++;
+	}
+
+	/*
+	 * If we ran out of keepouts but there's still stuff to do, send it
+	 * down directly.
+	 */
+	if (offset < end) {
+		ksize = end - offset;
+		if (write)
+			return __nvmem_reg_write(nvmem, offset, val, ksize);
+		else
+			return __nvmem_reg_read(nvmem, offset, val, ksize);
+	}
+
+	return 0;
+}
+
+static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
+			  void *val, size_t bytes)
+{
+	if (!nvmem->nkeepout)
+		return __nvmem_reg_read(nvmem, offset, val, bytes);
+
+	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
+}
+
+static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
+			   void *val, size_t bytes)
+{
+	if (!nvmem->nkeepout)
+		return __nvmem_reg_write(nvmem, offset, val, bytes);
+
+	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
+}
+
 #ifdef CONFIG_NVMEM_SYSFS
 static const char * const nvmem_type_str[] = {
 	[NVMEM_TYPE_UNKNOWN] = "Unknown",
@@ -533,6 +617,59 @@ nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
 	return cell;
 }
 
+static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
+{
+	unsigned int cur = 0;
+	const struct nvmem_keepout *keepout = nvmem->keepout;
+	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
+
+	while (keepout < keepoutend) {
+		/* Ensure keepouts are sorted and don't overlap. */
+		if (keepout->start < cur) {
+			dev_err(&nvmem->dev,
+				"Keepout regions aren't sorted or they overlap.\n");
+
+			return -ERANGE;
+		}
+
+		if (keepout->end < keepout->start) {
+			dev_err(&nvmem->dev,
+				"Invalid keepout region.\n");
+
+			return -EINVAL;
+		}
+
+		/*
+		 * Validate keepouts (and holes between) don't violate
+		 * word_size constraints.
+		 */
+		if ((keepout->end - keepout->start < nvmem->word_size) ||
+		    ((keepout->start != cur) &&
+		     (keepout->start - cur < nvmem->word_size))) {
+
+			dev_err(&nvmem->dev,
+				"Keepout regions violate word_size constraints.\n");
+
+			return -ERANGE;
+		}
+
+		/* Validate keepouts don't violate stride (alignment). */
+		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
+		    !IS_ALIGNED(keepout->end, nvmem->stride)) {
+
+			dev_err(&nvmem->dev,
+				"Keepout regions violate stride.\n");
+
+			return -EINVAL;
+		}
+
+		cur = keepout->end;
+		keepout++;
+	}
+
+	return 0;
+}
+
 static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
 {
 	struct device_node *parent, *child;
@@ -647,6 +784,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 	nvmem->type = config->type;
 	nvmem->reg_read = config->reg_read;
 	nvmem->reg_write = config->reg_write;
+	nvmem->keepout = config->keepout;
+	nvmem->nkeepout = config->nkeepout;
 	if (!config->no_of_node)
 		nvmem->dev.of_node = config->dev->of_node;
 
@@ -671,6 +810,12 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 	nvmem->dev.groups = nvmem_dev_groups;
 #endif
 
+	if (nvmem->nkeepout) {
+		rval = nvmem_validate_keepouts(nvmem);
+		if (rval)
+			goto err_put_device;
+	}
+
 	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
 
 	rval = device_register(&nvmem->dev);
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 7a1ebd6fd08b2af4c9ddefbe8da919fb27f1cac1..08f41328cc71199fd1220343ce64e62f02ffe0e0 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -4,6 +4,8 @@
  *
  * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
  *
+ * Copyright 2019 NXP
+ *
  * Based on the barebox ocotp driver,
  * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>,
  *	Orex Computed Radiography
@@ -158,22 +160,30 @@ static int imx_ocotp_read(void *context, unsigned int offset,
 {
 	struct ocotp_priv *priv = context;
 	unsigned int count;
-	u32 *buf = val;
+	u8 *buf, *p;
 	int i, ret;
-	u32 index;
+	u32 index, num_bytes;
 
 	index = offset >> 2;
-	count = bytes >> 2;
+	num_bytes = round_up((offset % 4) + bytes, 4);
+	count = num_bytes >> 2;
 
 	if (count > (priv->params->nregs - index))
 		count = priv->params->nregs - index;
 
+	p = kzalloc(num_bytes, GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
 	mutex_lock(&ocotp_mutex);
 
+	buf = p;
+
 	ret = clk_prepare_enable(priv->clk);
 	if (ret < 0) {
 		mutex_unlock(&ocotp_mutex);
 		dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
+		kfree(p);
 		return ret;
 	}
 
@@ -184,7 +194,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
 	}
 
 	for (i = index; i < (index + count); i++) {
-		*buf++ = readl(priv->base + IMX_OCOTP_OFFSET_B0W0 +
+		*(u32 *)buf = readl(priv->base + IMX_OCOTP_OFFSET_B0W0 +
 			       i * IMX_OCOTP_OFFSET_PER_WORD);
 
 		/* 47.3.1.2
@@ -193,13 +203,21 @@ static int imx_ocotp_read(void *context, unsigned int offset,
 		 * software before any new write, read or reload access can be
 		 * issued
 		 */
-		if (*(buf - 1) == IMX_OCOTP_READ_LOCKED_VAL)
+		if (*((u32 *)buf) == IMX_OCOTP_READ_LOCKED_VAL)
 			imx_ocotp_clr_err_if_set(priv);
+
+		buf += 4;
 	}
 
+	index = offset % 4;
+	memcpy(val, &p[index], bytes);
+
 read_end:
 	clk_disable_unprepare(priv->clk);
 	mutex_unlock(&ocotp_mutex);
+
+	kfree(p);
+
 	return ret;
 }
 
@@ -447,7 +465,7 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
 	.name = "imx-ocotp",
 	.read_only = false,
 	.word_size = 4,
-	.stride = 4,
+	.stride = 1,
 	.reg_read = imx_ocotp_read,
 	.reg_write = imx_ocotp_write,
 };
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 5e9e60e2e591d3f3227738aa4c6611d53db0b85c..6cace24dfbf734d0917fe06e01eed07d59fdb5cb 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -12,6 +12,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/nvmem-provider.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/regulator/consumer.h>
 
 /* Blow timer clock frequency in Mhz */
@@ -88,6 +89,28 @@ struct qfprom_touched_values {
 	u32 timer_val;
 };
 
+/**
+ * struct qfprom_soc_compatible_data - Data matched against the SoC
+ * compatible string.
+ *
+ * @keepout: Array of keepout regions for this SoC.
+ * @nkeepout: Number of elements in the keepout array.
+ */
+struct qfprom_soc_compatible_data {
+	const struct nvmem_keepout *keepout;
+	unsigned int nkeepout;
+};
+
+static const struct nvmem_keepout sc7180_qfprom_keepout[] = {
+	{.start = 0x128, .end = 0x148},
+	{.start = 0x220, .end = 0x228}
+};
+
+static const struct qfprom_soc_compatible_data sc7180_qfprom = {
+	.keepout = sc7180_qfprom_keepout,
+	.nkeepout = ARRAY_SIZE(sc7180_qfprom_keepout)
+};
+
 /**
  * qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing.
  * @priv: Our driver data.
@@ -281,6 +304,7 @@ static int qfprom_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct resource *res;
 	struct nvmem_device *nvmem;
+	const struct qfprom_soc_compatible_data *soc_data;
 	struct qfprom_priv *priv;
 	int ret;
 
@@ -299,6 +323,11 @@ static int qfprom_probe(struct platform_device *pdev)
 	econfig.priv = priv;
 
 	priv->dev = dev;
+	soc_data = device_get_match_data(dev);
+	if (soc_data) {
+		econfig.keepout = soc_data->keepout;
+		econfig.nkeepout = soc_data->nkeepout;
+	}
 
 	/*
 	 * If more than one region is provided then the OS has the ability
@@ -354,6 +383,7 @@ static int qfprom_probe(struct platform_device *pdev)
 
 static const struct of_device_id qfprom_of_match[] = {
 	{ .compatible = "qcom,qfprom",},
+	{ .compatible = "qcom,sc7180-qfprom", .data = &sc7180_qfprom},
 	{/* sentinel */},
 };
 MODULE_DEVICE_TABLE(of, qfprom_of_match);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 01b53f86004cb2b1ce70a64a617c147a72133f36..00dabe5fab8a0be1ddbb473f4cfb2f6452ff61a0 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -66,6 +66,7 @@ source "drivers/phy/broadcom/Kconfig"
 source "drivers/phy/cadence/Kconfig"
 source "drivers/phy/freescale/Kconfig"
 source "drivers/phy/hisilicon/Kconfig"
+source "drivers/phy/ingenic/Kconfig"
 source "drivers/phy/lantiq/Kconfig"
 source "drivers/phy/marvell/Kconfig"
 source "drivers/phy/mediatek/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 6eb2916773c572acd08182344dba835e32cb640b..32261e164abda782ae86c8b4ba158e00091bf077 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -15,6 +15,7 @@ obj-y					+= allwinner/	\
 					   cadence/	\
 					   freescale/	\
 					   hisilicon/	\
+					   ingenic/	\
 					   intel/	\
 					   lantiq/	\
 					   marvell/	\
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 651d5e2a25ce09a3a25039c83785627c64df79d3..788dd5cdbb7d3efc59932104742fda3becbc6799 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -686,7 +686,6 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct phy_provider *phy_provider;
-	struct resource *res;
 	int i, ret;
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -700,8 +699,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
 	if (!data->cfg)
 		return -EINVAL;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_ctrl");
-	data->base = devm_ioremap_resource(dev, res);
+	data->base = devm_platform_ioremap_resource_byname(pdev, "phy_ctrl");
 	if (IS_ERR(data->base))
 		return PTR_ERR(data->base);
 
@@ -796,9 +794,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
 
 		if (i || data->cfg->phy0_dual_route) { /* No pmu for musb */
 			snprintf(name, sizeof(name), "pmu%d", i);
-			res = platform_get_resource_byname(pdev,
-							IORESOURCE_MEM, name);
-			phy->pmu = devm_ioremap_resource(dev, res);
+			phy->pmu = devm_platform_ioremap_resource_byname(pdev, name);
 			if (IS_ERR(phy->pmu))
 				return PTR_ERR(phy->pmu);
 		}
@@ -969,7 +965,6 @@ static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = {
 	.disc_thresh = 3,
 	.phyctl_offset = REG_PHYCTL_A33,
 	.dedicated_clocks = true,
-	.enable_pmu_unk1 = true,
 	.phy0_dual_route = true,
 	.missing_phys = BIT(1) | BIT(2),
 };
diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
index b1c04f71a31d9e6e5d0c33ad6b13f7c9e5d81159..84055b720016ebe0a511817bbd0c28ec8a948484 100644
--- a/drivers/phy/allwinner/phy-sun50i-usb3.c
+++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
@@ -134,7 +134,6 @@ static int sun50i_usb3_phy_probe(struct platform_device *pdev)
 	struct sun50i_usb3_phy *phy;
 	struct device *dev = &pdev->dev;
 	struct phy_provider *phy_provider;
-	struct resource *res;
 
 	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
 	if (!phy)
@@ -153,8 +152,7 @@ static int sun50i_usb3_phy_probe(struct platform_device *pdev)
 		return PTR_ERR(phy->reset);
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	phy->regs = devm_ioremap_resource(dev, res);
+	phy->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(phy->regs))
 		return PTR_ERR(phy->regs);
 
diff --git a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
index 1fa761ba6cbbadf6f52609f317a111f5e717cb33..f0bc87d654d4a192d7a76f0e1a6a168c4e079a4d 100644
--- a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
+++ b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c
@@ -253,15 +253,13 @@ static int sun6i_dphy_probe(struct platform_device *pdev)
 {
 	struct phy_provider *phy_provider;
 	struct sun6i_dphy *dphy;
-	struct resource *res;
 	void __iomem *regs;
 
 	dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
 	if (!dphy)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	regs = devm_ioremap_resource(&pdev->dev, res);
+	regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(regs)) {
 		dev_err(&pdev->dev, "Couldn't map the DPHY encoder registers\n");
 		return PTR_ERR(regs);
diff --git a/drivers/phy/allwinner/phy-sun9i-usb.c b/drivers/phy/allwinner/phy-sun9i-usb.c
index fc6784dd7fa08209ae38ea7b1c6bf46007f3c77e..2f9e60c188b8f901bfb921279f788a70d8e6686a 100644
--- a/drivers/phy/allwinner/phy-sun9i-usb.c
+++ b/drivers/phy/allwinner/phy-sun9i-usb.c
@@ -117,7 +117,6 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct phy_provider *phy_provider;
-	struct resource *res;
 
 	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
 	if (!phy)
@@ -156,8 +155,7 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev)
 		}
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	phy->pmu = devm_ioremap_resource(dev, res);
+	phy->pmu = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(phy->pmu))
 		return PTR_ERR(phy->pmu);
 
diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig
index 617cf073e9aa4055d7e95f76589ac57d8d2be053..db5d0cd757e3ad9563de6fb7924a419c4297462b 100644
--- a/drivers/phy/amlogic/Kconfig
+++ b/drivers/phy/amlogic/Kconfig
@@ -66,7 +66,20 @@ config PHY_MESON_AXG_MIPI_PCIE_ANALOG
 	depends on OF && (ARCH_MESON || COMPILE_TEST)
 	select GENERIC_PHY
 	select REGMAP_MMIO
+	select GENERIC_PHY_MIPI_DPHY
 	help
 	  Enable this to support the Meson MIPI + PCIE analog PHY
 	  found in Meson AXG SoCs.
 	  If unsure, say N.
+
+config PHY_MESON_AXG_MIPI_DPHY
+	tristate "Meson AXG MIPI DPHY driver"
+	default ARCH_MESON
+	depends on OF && (ARCH_MESON || COMPILE_TEST)
+	select GENERIC_PHY
+	select REGMAP_MMIO
+	select GENERIC_PHY_MIPI_DPHY
+	help
+	  Enable this to support the Meson MIPI DPHY found in Meson AXG
+	  SoCs.
+	  If unsure, say N.
diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile
index 99702a45e9be342e985ec3424e99d554fb73e78b..8fa07fbd0d92e14f7ad23ff0a4a4ae6db1dfe8e7 100644
--- a/drivers/phy/amlogic/Makefile
+++ b/drivers/phy/amlogic/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_PHY_MESON_G12A_USB2)		+= phy-meson-g12a-usb2.o
 obj-$(CONFIG_PHY_MESON_G12A_USB3_PCIE)		+= phy-meson-g12a-usb3-pcie.o
 obj-$(CONFIG_PHY_MESON_AXG_PCIE)		+= phy-meson-axg-pcie.o
 obj-$(CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG)	+= phy-meson-axg-mipi-pcie-analog.o
+obj-$(CONFIG_PHY_MESON_AXG_MIPI_DPHY)		+= phy-meson-axg-mipi-dphy.o
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
new file mode 100644
index 0000000000000000000000000000000000000000..cd2332bf0e31ad8d562120343683541435b1bf88
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Meson AXG MIPI DPHY driver
+ *
+ * Copyright (C) 2018 Amlogic, Inc. All rights reserved
+ * Copyright (C) 2020 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+/* [31] soft reset for the phy.
+ *		1: reset. 0: deassert the reset.
+ * [30] clock lane soft reset.
+ * [29] data byte lane 3 soft reset.
+ * [28] data byte lane 2 soft reset.
+ * [27] data byte lane 1 soft reset.
+ * [26] data byte lane 0 soft reset.
+ * [25] mipi dsi pll clock selection.
+ *		1: clock from fixed 850MHz clock source. 0: from VID2 PLL.
+ * [12] mipi HSbyteclk enable.
+ * [11] mipi divider clk selection.
+ *		1: select the mipi DDRCLKHS from clock divider.
+ *		0: from PLL clock.
+ * [10] mipi clock divider control.
+ *		1: /4. 0: /2.
+ * [9]  mipi divider output enable.
+ * [8]  mipi divider counter enable.
+ * [7]  PLL clock enable.
+ * [5]  LPDT data endian.
+ *		1: transfer the high bit first. 0: transfer the low bit first.
+ * [4]  HS data endian.
+ * [3]  force data byte lane in stop mode.
+ * [2]  force data byte lane 0 in receiver mode.
+ * [1]  write 1 to sync the txclkesc input. the internal logic has to
+ *	use txclkesc to decide Txvalid and Txready.
+ * [0]  enable the MIPI DPHY TxDDRClk.
+ */
+#define MIPI_DSI_PHY_CTRL				0x0
+
+/* [31] clk lane tx_hs_en control selection.
+ *		1: from register. 0: use clk lane state machine.
+ * [30] register bit for clock lane tx_hs_en.
+ * [29] clk lane tx_lp_en control selection.
+ *		1: from register. 0: from clk lane state machine.
+ * [28] register bit for clock lane tx_lp_en.
+ * [27] chan0 tx_hs_en control selection.
+ *		1: from register. 0: from chan0 state machine.
+ * [26] register bit for chan0 tx_hs_en.
+ * [25] chan0 tx_lp_en control selection.
+ *		1: from register. 0: from chan0 state machine.
+ * [24] register bit from chan0 tx_lp_en.
+ * [23] chan0 rx_lp_en control selection.
+ *		1: from register. 0: from chan0 state machine.
+ * [22] register bit from chan0 rx_lp_en.
+ * [21] chan0 contention detection enable control selection.
+ *		1: from register. 0: from chan0 state machine.
+ * [20] register bit from chan0 contention detection enable.
+ * [19] chan1 tx_hs_en control selection.
+ *		1: from register. 0: from chan0 state machine.
+ * [18] register bit for chan1 tx_hs_en.
+ * [17] chan1 tx_lp_en control selection.
+ *		1: from register. 0: from chan0 state machine.
+ * [16] register bit from chan1 tx_lp_en.
+ * [15] chan2 tx_hs_en control selection.
+ *		1: from register. 0: from chan0 state machine.
+ * [14] register bit for chan2 tx_hs_en.
+ * [13] chan2 tx_lp_en control selection.
+ *		1: from register. 0: from chan0 state machine.
+ * [12] register bit from chan2 tx_lp_en.
+ * [11] chan3 tx_hs_en control selection.
+ *		1: from register. 0: from chan0 state machine.
+ * [10] register bit for chan3 tx_hs_en.
+ * [9]  chan3 tx_lp_en control selection.
+ *		1: from register. 0: from chan0 state machine.
+ * [8]  register bit from chan3 tx_lp_en.
+ * [4]  clk chan power down. this bit is also used as the power down
+ *	of the whole MIPI_DSI_PHY.
+ * [3]  chan3 power down.
+ * [2]  chan2 power down.
+ * [1]  chan1 power down.
+ * [0]  chan0 power down.
+ */
+#define MIPI_DSI_CHAN_CTRL				0x4
+
+/* [24]   rx turn watchdog triggered.
+ * [23]   rx esc watchdog triggered.
+ * [22]   mbias ready.
+ * [21]   txclkesc  synced and ready.
+ * [20:17] clk lane state. {mbias_ready, tx_stop, tx_ulps, tx_hs_active}
+ * [16:13] chan3 state. {0, tx_stop, tx_ulps, tx_hs_active}
+ * [12:9]  chan2 state. {0, tx_stop, tx_ulps, tx_hs_active}
+ * [8:5]   chan1 state. {0, tx_stop, tx_ulps, tx_hs_active}
+ * [4:0]   chan0 state. {TX_STOP, tx_ULPS, hs_active, direction, rxulpsesc}
+ */
+#define MIPI_DSI_CHAN_STS				0x8
+
+/* [31:24] TCLK_PREPARE.
+ * [23:16] TCLK_ZERO.
+ * [15:8]  TCLK_POST.
+ * [7:0]   TCLK_TRAIL.
+ */
+#define MIPI_DSI_CLK_TIM				0xc
+
+/* [31:24] THS_PREPARE.
+ * [23:16] THS_ZERO.
+ * [15:8]  THS_TRAIL.
+ * [7:0]   THS_EXIT.
+ */
+#define MIPI_DSI_HS_TIM					0x10
+
+/* [31:24] tTA_GET.
+ * [23:16] tTA_GO.
+ * [15:8]  tTA_SURE.
+ * [7:0]   tLPX.
+ */
+#define MIPI_DSI_LP_TIM					0x14
+
+/* wait time for the MIPI DSI analog to be ready. */
+#define MIPI_DSI_ANA_UP_TIM				0x18
+
+/* TINIT. */
+#define MIPI_DSI_INIT_TIM				0x1c
+
+/* TWAKEUP. */
+#define MIPI_DSI_WAKEUP_TIM				0x20
+
+/* when in RxULPS check state, after the logic enables the analog,
+ *	how long we should wait to check the LP state.
+ */
+#define MIPI_DSI_LPOK_TIM				0x24
+
+/* Watchdog for RX low power state not finished. */
+#define MIPI_DSI_LP_WCHDOG				0x28
+
+/* tMBIAS, after sending power-up signals to the analog,
+ *	how long we should wait for the analog to power up.
+ */
+#define MIPI_DSI_ANA_CTRL				0x2c
+
+/* [31:8]  reserved for future.
+ * [7:0]   tCLK_PRE.
+ */
+#define MIPI_DSI_CLK_TIM1				0x30
+
+/* watchdog for the turnaround waiting time. */
+#define MIPI_DSI_TURN_WCHDOG				0x34
+
+/* When in RxULPS state, how frequently we should check
+ *	whether the TX side is out of the ULPS state.
+ */
+#define MIPI_DSI_ULPS_CHECK				0x38
+#define MIPI_DSI_TEST_CTRL0				0x3c
+#define MIPI_DSI_TEST_CTRL1				0x40
+
+struct phy_meson_axg_mipi_dphy_priv {
+	struct device				*dev;
+	struct regmap				*regmap;
+	struct clk				*clk;
+	struct reset_control			*reset;
+	struct phy				*analog;
+	struct phy_configure_opts_mipi_dphy	config;
+};
+
+static const struct regmap_config phy_meson_axg_mipi_dphy_regmap_conf = {
+	.reg_bits = 8,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = MIPI_DSI_TEST_CTRL1,
+};
+
+static int phy_meson_axg_mipi_dphy_init(struct phy *phy)
+{
+	struct phy_meson_axg_mipi_dphy_priv *priv = phy_get_drvdata(phy);
+	int ret;
+
+	ret = phy_init(priv->analog);
+	if (ret)
+		return ret;
+
+	ret = reset_control_reset(priv->reset);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int phy_meson_axg_mipi_dphy_configure(struct phy *phy,
+					      union phy_configure_opts *opts)
+{
+	struct phy_meson_axg_mipi_dphy_priv *priv = phy_get_drvdata(phy);
+	int ret;
+
+	ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
+	if (ret)
+		return ret;
+
+	ret = phy_configure(priv->analog, opts);
+	if (ret)
+		return ret;
+
+	memcpy(&priv->config, opts, sizeof(priv->config));
+
+	return 0;
+}
+
+static int phy_meson_axg_mipi_dphy_power_on(struct phy *phy)
+{
+	struct phy_meson_axg_mipi_dphy_priv *priv = phy_get_drvdata(phy);
+	int ret;
+	unsigned long temp;
+
+	ret = phy_power_on(priv->analog);
+	if (ret)
+		return ret;
+
+	/* enable phy clock */
+	regmap_write(priv->regmap, MIPI_DSI_PHY_CTRL, 0x1);
+	regmap_write(priv->regmap, MIPI_DSI_PHY_CTRL,
+		     BIT(0) | /* enable the DSI PLL clock */
+		     BIT(7) | /* enable the PLL clock connected to the DDR clock path */
+		     BIT(8)); /* enable the clock divider counter */
+
+	/* enable the divider clock out */
+	regmap_update_bits(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(9), BIT(9));
+
+	/* enable the byte clock generation. */
+	regmap_update_bits(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(12), BIT(12));
+	regmap_update_bits(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(31), BIT(31));
+	regmap_update_bits(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(31), 0);
+
+	/* Calculate lanebyteclk period in ps */
+	temp = (1000000 * 100) / (priv->config.hs_clk_rate / 1000);
+	temp = temp * 8 * 10;
+
+	regmap_write(priv->regmap, MIPI_DSI_CLK_TIM,
+		     DIV_ROUND_UP(priv->config.clk_trail, temp) |
+		     (DIV_ROUND_UP(priv->config.clk_post +
+				   priv->config.hs_trail, temp) << 8) |
+		     (DIV_ROUND_UP(priv->config.clk_zero, temp) << 16) |
+		     (DIV_ROUND_UP(priv->config.clk_prepare, temp) << 24));
+	regmap_write(priv->regmap, MIPI_DSI_CLK_TIM1,
+		     DIV_ROUND_UP(priv->config.clk_pre, temp));
+
+	regmap_write(priv->regmap, MIPI_DSI_HS_TIM,
+		     DIV_ROUND_UP(priv->config.hs_exit, temp) |
+		     (DIV_ROUND_UP(priv->config.hs_trail, temp) << 8) |
+		     (DIV_ROUND_UP(priv->config.hs_zero, temp) << 16) |
+		     (DIV_ROUND_UP(priv->config.hs_prepare, temp) << 24));
+
+	regmap_write(priv->regmap, MIPI_DSI_LP_TIM,
+		     DIV_ROUND_UP(priv->config.lpx, temp) |
+		     (DIV_ROUND_UP(priv->config.ta_sure, temp) << 8) |
+		     (DIV_ROUND_UP(priv->config.ta_go, temp) << 16) |
+		     (DIV_ROUND_UP(priv->config.ta_get, temp) << 24));
+
+	regmap_write(priv->regmap, MIPI_DSI_ANA_UP_TIM, 0x0100);
+	regmap_write(priv->regmap, MIPI_DSI_INIT_TIM,
+		     DIV_ROUND_UP(priv->config.init * NSEC_PER_MSEC, temp));
+	regmap_write(priv->regmap, MIPI_DSI_WAKEUP_TIM,
+		     DIV_ROUND_UP(priv->config.wakeup * NSEC_PER_MSEC, temp));
+	regmap_write(priv->regmap, MIPI_DSI_LPOK_TIM, 0x7C);
+	regmap_write(priv->regmap, MIPI_DSI_ULPS_CHECK, 0x927C);
+	regmap_write(priv->regmap, MIPI_DSI_LP_WCHDOG, 0x1000);
+	regmap_write(priv->regmap, MIPI_DSI_TURN_WCHDOG, 0x1000);
+
+	/* Power up the analog circuit */
+	switch (priv->config.lanes) {
+	case 1:
+		regmap_write(priv->regmap, MIPI_DSI_CHAN_CTRL, 0xe);
+		break;
+	case 2:
+		regmap_write(priv->regmap, MIPI_DSI_CHAN_CTRL, 0xc);
+		break;
+	case 3:
+		regmap_write(priv->regmap, MIPI_DSI_CHAN_CTRL, 0x8);
+		break;
+	case 4:
+	default:
+		regmap_write(priv->regmap, MIPI_DSI_CHAN_CTRL, 0);
+		break;
+	}
+
+	/* Trigger a sync active for esc_clk */
+	regmap_update_bits(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(1), BIT(1));
+
+	return 0;
+}
+
+static int phy_meson_axg_mipi_dphy_power_off(struct phy *phy)
+{
+	struct phy_meson_axg_mipi_dphy_priv *priv = phy_get_drvdata(phy);
+
+	regmap_write(priv->regmap, MIPI_DSI_CHAN_CTRL, 0xf);
+	regmap_write(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(31));
+
+	phy_power_off(priv->analog);
+
+	return 0;
+}
+
+static int phy_meson_axg_mipi_dphy_exit(struct phy *phy)
+{
+	struct phy_meson_axg_mipi_dphy_priv *priv = phy_get_drvdata(phy);
+	int ret;
+
+	ret = phy_exit(priv->analog);
+	if (ret)
+		return ret;
+
+	return reset_control_reset(priv->reset);
+}
+
+static const struct phy_ops phy_meson_axg_mipi_dphy_ops = {
+	.configure	= phy_meson_axg_mipi_dphy_configure,
+	.init		= phy_meson_axg_mipi_dphy_init,
+	.exit		= phy_meson_axg_mipi_dphy_exit,
+	.power_on	= phy_meson_axg_mipi_dphy_power_on,
+	.power_off	= phy_meson_axg_mipi_dphy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+static int phy_meson_axg_mipi_dphy_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy_provider *phy_provider;
+	struct resource *res;
+	struct phy_meson_axg_mipi_dphy_priv *priv;
+	struct phy *phy;
+	void __iomem *base;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	platform_set_drvdata(pdev, priv);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	priv->regmap = devm_regmap_init_mmio(dev, base,
+					&phy_meson_axg_mipi_dphy_regmap_conf);
+	if (IS_ERR(priv->regmap))
+		return PTR_ERR(priv->regmap);
+
+	priv->clk = devm_clk_get(dev, "pclk");
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+	priv->reset = devm_reset_control_get(dev, "phy");
+	if (IS_ERR(priv->reset))
+		return PTR_ERR(priv->reset);
+
+	priv->analog = devm_phy_get(dev, "analog");
+	if (IS_ERR(priv->analog))
+		return PTR_ERR(priv->analog);
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return ret;
+
+	ret = reset_control_deassert(priv->reset);
+	if (ret)
+		return ret;
+
+	phy = devm_phy_create(dev, NULL, &phy_meson_axg_mipi_dphy_ops);
+	if (IS_ERR(phy)) {
+		ret = PTR_ERR(phy);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to create PHY\n");
+
+		return ret;
+	}
+
+	phy_set_drvdata(phy, priv);
+
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+	return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id phy_meson_axg_mipi_dphy_of_match[] = {
+	{ .compatible = "amlogic,axg-mipi-dphy", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, phy_meson_axg_mipi_dphy_of_match);
+
+static struct platform_driver phy_meson_axg_mipi_dphy_driver = {
+	.probe	= phy_meson_axg_mipi_dphy_probe,
+	.driver	= {
+		.name		= "phy-meson-axg-mipi-dphy",
+		.of_match_table	= phy_meson_axg_mipi_dphy_of_match,
+	},
+};
+module_platform_driver(phy_meson_axg_mipi_dphy_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Meson AXG MIPI DPHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
index 1431cbf885e19c7931d87cd2c7939ca7a63d79bd..1027ece6ca123fc81b6a10600c327fa7088edca7 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
@@ -4,9 +4,13 @@
  *
  * Copyright (C) 2019 Remi Pommarel <repk@triplefau.lt>
  */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/phy/phy.h>
 #include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
 #include <linux/platform_device.h>
 #include <dt-bindings/phy/phy.h>
 
@@ -14,10 +18,10 @@
 #define		HHI_MIPI_CNTL0_COMMON_BLOCK	GENMASK(31, 28)
 #define		HHI_MIPI_CNTL0_ENABLE		BIT(29)
 #define		HHI_MIPI_CNTL0_BANDGAP		BIT(26)
-#define		HHI_MIPI_CNTL0_DECODE_TO_RTERM	GENMASK(15, 12)
-#define		HHI_MIPI_CNTL0_OUTPUT_EN	BIT(3)
+#define		HHI_MIPI_CNTL0_DIF_REF_CTL1	GENMASK(25, 16)
+#define		HHI_MIPI_CNTL0_DIF_REF_CTL0	GENMASK(15, 0)
 
-#define HHI_MIPI_CNTL1 0x01
+#define HHI_MIPI_CNTL1 0x04
 #define		HHI_MIPI_CNTL1_CH0_CML_PDR_EN	BIT(12)
 #define		HHI_MIPI_CNTL1_LP_ABILITY	GENMASK(5, 4)
 #define		HHI_MIPI_CNTL1_LP_RESISTER	BIT(3)
@@ -25,100 +29,169 @@
 #define		HHI_MIPI_CNTL1_INPUT_SEL	BIT(1)
 #define		HHI_MIPI_CNTL1_PRBS7_EN		BIT(0)
 
-#define HHI_MIPI_CNTL2 0x02
+#define HHI_MIPI_CNTL2 0x08
 #define		HHI_MIPI_CNTL2_CH_PU		GENMASK(31, 25)
 #define		HHI_MIPI_CNTL2_CH_CTL		GENMASK(24, 19)
 #define		HHI_MIPI_CNTL2_CH0_DIGDR_EN	BIT(18)
 #define		HHI_MIPI_CNTL2_CH_DIGDR_EN	BIT(17)
 #define		HHI_MIPI_CNTL2_LPULPS_EN	BIT(16)
-#define		HHI_MIPI_CNTL2_CH_EN(n)		BIT(15 - (n))
+#define		HHI_MIPI_CNTL2_CH_EN		GENMASK(15, 11)
 #define		HHI_MIPI_CNTL2_CH0_LP_CTL	GENMASK(10, 1)
 
+#define DSI_LANE_0              BIT(4)
+#define DSI_LANE_1              BIT(3)
+#define DSI_LANE_CLK            BIT(2)
+#define DSI_LANE_2              BIT(1)
+#define DSI_LANE_3              BIT(0)
+
 struct phy_axg_mipi_pcie_analog_priv {
 	struct phy *phy;
-	unsigned int mode;
 	struct regmap *regmap;
+	bool dsi_configured;
+	bool dsi_enabled;
+	bool powered;
+	struct phy_configure_opts_mipi_dphy config;
 };
 
-static const struct regmap_config phy_axg_mipi_pcie_analog_regmap_conf = {
-	.reg_bits = 8,
-	.val_bits = 32,
-	.reg_stride = 4,
-	.max_register = HHI_MIPI_CNTL2,
-};
+static void phy_bandgap_enable(struct phy_axg_mipi_pcie_analog_priv *priv)
+{
+	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+			HHI_MIPI_CNTL0_BANDGAP, HHI_MIPI_CNTL0_BANDGAP);
 
-static int phy_axg_mipi_pcie_analog_power_on(struct phy *phy)
+	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+			HHI_MIPI_CNTL0_ENABLE, HHI_MIPI_CNTL0_ENABLE);
+}
+
+static void phy_bandgap_disable(struct phy_axg_mipi_pcie_analog_priv *priv)
 {
-	struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy);
+	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+			HHI_MIPI_CNTL0_BANDGAP, 0);
+	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+			HHI_MIPI_CNTL0_ENABLE, 0);
+}
 
-	/* MIPI not supported yet */
-	if (priv->mode != PHY_TYPE_PCIE)
-		return -EINVAL;
+static void phy_dsi_analog_enable(struct phy_axg_mipi_pcie_analog_priv *priv)
+{
+	u32 reg;
 
 	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
-			   HHI_MIPI_CNTL0_BANDGAP, HHI_MIPI_CNTL0_BANDGAP);
+			   HHI_MIPI_CNTL0_DIF_REF_CTL1,
+			   FIELD_PREP(HHI_MIPI_CNTL0_DIF_REF_CTL1, 0x1b8));
+	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+			   BIT(31), BIT(31));
+	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+			   HHI_MIPI_CNTL0_DIF_REF_CTL0,
+			   FIELD_PREP(HHI_MIPI_CNTL0_DIF_REF_CTL0, 0x8));
+
+	regmap_write(priv->regmap, HHI_MIPI_CNTL1, 0x001e);
+
+	regmap_write(priv->regmap, HHI_MIPI_CNTL2,
+		     (0x26e0 << 16) | (0x459 << 0));
+
+	reg = DSI_LANE_CLK;
+	switch (priv->config.lanes) {
+	case 4:
+		reg |= DSI_LANE_3;
+		fallthrough;
+	case 3:
+		reg |= DSI_LANE_2;
+		fallthrough;
+	case 2:
+		reg |= DSI_LANE_1;
+		fallthrough;
+	case 1:
+		reg |= DSI_LANE_0;
+		break;
+	default:
+		reg = 0;
+	}
+
+	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL2,
+			   HHI_MIPI_CNTL2_CH_EN,
+			   FIELD_PREP(HHI_MIPI_CNTL2_CH_EN, reg));
+
+	priv->dsi_enabled = true;
+}
 
+static void phy_dsi_analog_disable(struct phy_axg_mipi_pcie_analog_priv *priv)
+{
 	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
-			   HHI_MIPI_CNTL0_ENABLE, HHI_MIPI_CNTL0_ENABLE);
-	return 0;
+			HHI_MIPI_CNTL0_DIF_REF_CTL1,
+			FIELD_PREP(HHI_MIPI_CNTL0_DIF_REF_CTL1, 0));
+	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, BIT(31), 0);
+	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
+			HHI_MIPI_CNTL0_DIF_REF_CTL1, 0);
+
+	regmap_write(priv->regmap, HHI_MIPI_CNTL1, 0x6);
+
+	regmap_write(priv->regmap, HHI_MIPI_CNTL2, 0x00200000);
+
+	priv->dsi_enabled = false;
 }
 
-static int phy_axg_mipi_pcie_analog_power_off(struct phy *phy)
+static int phy_axg_mipi_pcie_analog_configure(struct phy *phy,
+					      union phy_configure_opts *opts)
 {
 	struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy);
+	int ret;
 
-	/* MIPI not supported yet */
-	if (priv->mode != PHY_TYPE_PCIE)
-		return -EINVAL;
+	ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy);
+	if (ret)
+		return ret;
+
+	memcpy(&priv->config, opts, sizeof(priv->config));
+
+	priv->dsi_configured = true;
+
+	/* If the PHY was already powered on, set up the DSI analog part */
+	if (priv->powered) {
+		/* If reconfiguring, disable & reconfigure */
+		if (priv->dsi_enabled)
+			phy_dsi_analog_disable(priv);
+
+		usleep_range(100, 200);
+
+		phy_dsi_analog_enable(priv);
+	}
 
-	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
-			   HHI_MIPI_CNTL0_BANDGAP, 0);
-	regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0,
-			   HHI_MIPI_CNTL0_ENABLE, 0);
 	return 0;
 }
 
-static int phy_axg_mipi_pcie_analog_init(struct phy *phy)
+static int phy_axg_mipi_pcie_analog_power_on(struct phy *phy)
 {
+	struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy);
+
+	phy_bandgap_enable(priv);
+
+	if (priv->dsi_configured)
+		phy_dsi_analog_enable(priv);
+
+	priv->powered = true;
+
 	return 0;
 }
 
-static int phy_axg_mipi_pcie_analog_exit(struct phy *phy)
+static int phy_axg_mipi_pcie_analog_power_off(struct phy *phy)
 {
+	struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy);
+
+	phy_bandgap_disable(priv);
+
+	if (priv->dsi_enabled)
+		phy_dsi_analog_disable(priv);
+
+	priv->powered = false;
+
 	return 0;
 }
 
 static const struct phy_ops phy_axg_mipi_pcie_analog_ops = {
-	.init = phy_axg_mipi_pcie_analog_init,
-	.exit = phy_axg_mipi_pcie_analog_exit,
+	.configure = phy_axg_mipi_pcie_analog_configure,
 	.power_on = phy_axg_mipi_pcie_analog_power_on,
 	.power_off = phy_axg_mipi_pcie_analog_power_off,
 	.owner = THIS_MODULE,
 };
 
-static struct phy *phy_axg_mipi_pcie_analog_xlate(struct device *dev,
-						  struct of_phandle_args *args)
-{
-	struct phy_axg_mipi_pcie_analog_priv *priv = dev_get_drvdata(dev);
-	unsigned int mode;
-
-	if (args->args_count != 1) {
-		dev_err(dev, "invalid number of arguments\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	mode = args->args[0];
-
-	/* MIPI mode is not supported yet */
-	if (mode != PHY_TYPE_PCIE) {
-		dev_err(dev, "invalid phy mode select argument\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	priv->mode = mode;
-	return priv->phy;
-}
-
 static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
 {
 	struct phy_provider *phy;
@@ -126,27 +199,20 @@ static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
 	struct phy_axg_mipi_pcie_analog_priv *priv;
 	struct device_node *np = dev->of_node;
 	struct regmap *map;
-	struct resource *res;
-	void __iomem *base;
 	int ret;
 
 	priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(base)) {
-		dev_err(dev, "failed to get regmap base\n");
-		return PTR_ERR(base);
-	}
-
-	map = devm_regmap_init_mmio(dev, base,
-				    &phy_axg_mipi_pcie_analog_regmap_conf);
+	/* Get the regmap from the parent HHI system controller node */
+	map = syscon_node_to_regmap(of_get_parent(dev->of_node));
 	if (IS_ERR(map)) {
-		dev_err(dev, "failed to get HHI regmap\n");
+		dev_err(dev,
+			"failed to get HHI regmap\n");
 		return PTR_ERR(map);
 	}
+
 	priv->regmap = map;
 
 	priv->phy = devm_phy_create(dev, np, &phy_axg_mipi_pcie_analog_ops);
@@ -160,8 +226,7 @@ static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
 	phy_set_drvdata(priv->phy, priv);
 	dev_set_drvdata(dev, priv);
 
-	phy = devm_of_phy_provider_register(dev,
-					    phy_axg_mipi_pcie_analog_xlate);
+	phy = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
 
 	return PTR_ERR_OR_ZERO(phy);
 }
diff --git a/drivers/phy/amlogic/phy-meson-axg-pcie.c b/drivers/phy/amlogic/phy-meson-axg-pcie.c
index 377ed0dcd0d9aa35dd0212a1fc862387c7ee2205..2299bab38e05d403008f16374d59dce5a963f84f 100644
--- a/drivers/phy/amlogic/phy-meson-axg-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-axg-pcie.c
@@ -129,7 +129,6 @@ static int phy_axg_pcie_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct phy_axg_pcie_priv *priv;
 	struct device_node *np = dev->of_node;
-	struct resource *res;
 	void __iomem *base;
 	int ret;
 
@@ -145,8 +144,7 @@ static int phy_axg_pcie_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
@@ -155,7 +153,7 @@ static int phy_axg_pcie_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->regmap))
 		return PTR_ERR(priv->regmap);
 
-	priv->reset = devm_reset_control_array_get(dev, false, false);
+	priv->reset = devm_reset_control_array_get_exclusive(dev);
 	if (IS_ERR(priv->reset))
 		return PTR_ERR(priv->reset);
 
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
index b26e30e1afaf3040401e846915d45385e9c50b9b..9d1efa0d93948383c09ae151d3111d71948a3c7c 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
@@ -292,7 +292,6 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct phy_provider *phy_provider;
-	struct resource *res;
 	struct phy_meson_g12a_usb2_priv *priv;
 	struct phy *phy;
 	void __iomem *base;
@@ -305,8 +304,7 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev)
 	priv->dev = dev;
 	platform_set_drvdata(pdev, priv);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index 08e322789e59c3a56502f9a60cf8e6b560be4fe4..5b471ab80fe289e6b66161292f589077b676672e 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -386,7 +386,6 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct phy_g12a_usb3_pcie_priv *priv;
-	struct resource *res;
 	struct phy_provider *phy_provider;
 	void __iomem *base;
 	int ret;
@@ -395,8 +394,7 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
@@ -418,7 +416,7 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_disable_clk_ref;
 
-	priv->reset = devm_reset_control_array_get(dev, false, false);
+	priv->reset = devm_reset_control_array_get_exclusive(dev);
 	if (IS_ERR(priv->reset))
 		return PTR_ERR(priv->reset);
 
diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb2.c b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
index 43ec9bf24abffd0150c449f29ab23687379bb157..2b3c0d730f20f66b31f0a4a352d9a7c5f8016bf7 100644
--- a/drivers/phy/amlogic/phy-meson-gxl-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
@@ -158,7 +158,8 @@ static int phy_meson_gxl_usb2_set_mode(struct phy *phy,
 				   U2P_R0_DM_PULLDOWN);
 		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_DP_PULLDOWN,
 				   U2P_R0_DP_PULLDOWN);
-		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_ID_PULLUP, 0);
+		regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_ID_PULLUP,
+				   U2P_R0_ID_PULLUP);
 		break;
 
 	case PHY_MODE_USB_DEVICE:
@@ -230,7 +231,6 @@ static int phy_meson_gxl_usb2_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct phy_provider *phy_provider;
-	struct resource *res;
 	struct phy_meson_gxl_usb2_priv *priv;
 	struct phy *phy;
 	void __iomem *base;
@@ -242,8 +242,7 @@ static int phy_meson_gxl_usb2_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, priv);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
diff --git a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
index b074682d9dd88337ffc5672f5de956e4d0d760c1..548e467761008f9b383440a633f2f74f4f9f1eea 100644
--- a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
@@ -126,7 +126,6 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node, *child;
 	struct cygnus_pcie_phy_core *core;
 	struct phy_provider *provider;
-	struct resource *res;
 	unsigned cnt = 0;
 	int ret;
 
@@ -141,8 +140,7 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
 
 	core->dev = dev;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	core->base = devm_ioremap_resource(dev, res);
+	core->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(core->base))
 		return PTR_ERR(core->base);
 
diff --git a/drivers/phy/broadcom/phy-bcm-kona-usb2.c b/drivers/phy/broadcom/phy-bcm-kona-usb2.c
index 6459296d9bf93b2ff81dd8927c0b486ce76f1ae8..e9cc5f2cb89afc64c69f1b15007f356dd42fe8d4 100644
--- a/drivers/phy/broadcom/phy-bcm-kona-usb2.c
+++ b/drivers/phy/broadcom/phy-bcm-kona-usb2.c
@@ -94,7 +94,6 @@ static int bcm_kona_usb2_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct bcm_kona_usb *phy;
-	struct resource *res;
 	struct phy *gphy;
 	struct phy_provider *phy_provider;
 
@@ -102,8 +101,7 @@ static int bcm_kona_usb2_probe(struct platform_device *pdev)
 	if (!phy)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	phy->regs = devm_ioremap_resource(&pdev->dev, res);
+	phy->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(phy->regs))
 		return PTR_ERR(phy->regs);
 
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb2.c b/drivers/phy/broadcom/phy-bcm-ns-usb2.c
index 9f2f84d65dcd32840b8c57cd140d12f3394e2547..4b015b8a71c351c5bc44ad861e983ecf2c59fecb 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb2.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb2.c
@@ -83,7 +83,6 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct bcm_ns_usb2 *usb2;
-	struct resource *res;
 	struct phy_provider *phy_provider;
 
 	usb2 = devm_kzalloc(&pdev->dev, sizeof(*usb2), GFP_KERNEL);
@@ -91,8 +90,7 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	usb2->dev = dev;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmu");
-	usb2->dmu = devm_ioremap_resource(dev, res);
+	usb2->dmu = devm_platform_ioremap_resource_byname(pdev, "dmu");
 	if (IS_ERR(usb2->dmu)) {
 		dev_err(dev, "Failed to map DMU regs\n");
 		return PTR_ERR(usb2->dmu);
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
index 47b029fbebbdca226ef351d91c7939f3aea5adab..eb10ffa13a62eb59843f875fb2615263fa7b67bb 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -22,8 +22,6 @@
 #include <linux/phy/phy.h>
 #include <linux/slab.h>
 
-#define BCM_NS_USB3_MII_MNG_TIMEOUT_US	1000	/* usecs */
-
 #define BCM_NS_USB3_PHY_BASE_ADDR_REG	0x1f
 #define BCM_NS_USB3_PHY_PLL30_BLOCK	0x8000
 #define BCM_NS_USB3_PHY_TX_PMD_BLOCK	0x8040
@@ -51,11 +49,8 @@ struct bcm_ns_usb3 {
 	struct device *dev;
 	enum bcm_ns_family family;
 	void __iomem *dmp;
-	void __iomem *ccb_mii;
 	struct mdio_device *mdiodev;
 	struct phy *phy;
-
-	int (*phy_write)(struct bcm_ns_usb3 *usb3, u16 reg, u16 value);
 };
 
 static const struct of_device_id bcm_ns_usb3_id_table[] = {
@@ -69,13 +64,9 @@ static const struct of_device_id bcm_ns_usb3_id_table[] = {
 	},
 	{},
 };
-MODULE_DEVICE_TABLE(of, bcm_ns_usb3_id_table);
 
 static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
-				      u16 value)
-{
-	return usb3->phy_write(usb3, reg, value);
-}
+				      u16 value);
 
 static int bcm_ns_usb3_phy_init_ns_bx(struct bcm_ns_usb3 *usb3)
 {
@@ -187,8 +178,8 @@ static const struct phy_ops ops = {
  * MDIO driver code
  **************************************************/
 
-static int bcm_ns_usb3_mdiodev_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
-					 u16 value)
+static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
+				      u16 value)
 {
 	struct mdio_device *mdiodev = usb3->mdiodev;
 
@@ -229,8 +220,6 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
 		return PTR_ERR(usb3->dmp);
 	}
 
-	usb3->phy_write = bcm_ns_usb3_mdiodev_phy_write;
-
 	usb3->phy = devm_phy_create(dev, NULL, &ops);
 	if (IS_ERR(usb3->phy)) {
 		dev_err(dev, "Failed to create PHY\n");
@@ -254,145 +243,7 @@ static struct mdio_driver bcm_ns_usb3_mdio_driver = {
 	.probe = bcm_ns_usb3_mdio_probe,
 };
 
-/**************************************************
- * Platform driver code
- **************************************************/
-
-static int bcm_ns_usb3_wait_reg(struct bcm_ns_usb3 *usb3, void __iomem *addr,
-				u32 mask, u32 value, int usec)
-{
-	u32 val;
-	int ret;
-
-	ret = readl_poll_timeout_atomic(addr, val, ((val & mask) == value),
-					10, usec);
-	if (ret)
-		dev_err(usb3->dev, "Timeout waiting for register %p\n", addr);
-
-	return ret;
-}
-
-static inline int bcm_ns_usb3_mii_mng_wait_idle(struct bcm_ns_usb3 *usb3)
-{
-	return bcm_ns_usb3_wait_reg(usb3, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL,
-				    0x0100, 0x0000,
-				    BCM_NS_USB3_MII_MNG_TIMEOUT_US);
-}
-
-static int bcm_ns_usb3_platform_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
-					  u16 value)
-{
-	u32 tmp = 0;
-	int err;
-
-	err = bcm_ns_usb3_mii_mng_wait_idle(usb3);
-	if (err < 0) {
-		dev_err(usb3->dev, "Couldn't write 0x%08x value\n", value);
-		return err;
-	}
-
-	/* TODO: Use a proper MDIO bus layer */
-	tmp |= 0x58020000; /* Magic value for MDIO PHY write */
-	tmp |= reg << 18;
-	tmp |= value;
-	writel(tmp, usb3->ccb_mii + BCMA_CCB_MII_MNG_CMD_DATA);
-
-	return bcm_ns_usb3_mii_mng_wait_idle(usb3);
-}
-
-static int bcm_ns_usb3_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	const struct of_device_id *of_id;
-	struct bcm_ns_usb3 *usb3;
-	struct resource *res;
-	struct phy_provider *phy_provider;
-
-	usb3 = devm_kzalloc(dev, sizeof(*usb3), GFP_KERNEL);
-	if (!usb3)
-		return -ENOMEM;
-
-	usb3->dev = dev;
-
-	of_id = of_match_device(bcm_ns_usb3_id_table, dev);
-	if (!of_id)
-		return -EINVAL;
-	usb3->family = (enum bcm_ns_family)of_id->data;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmp");
-	usb3->dmp = devm_ioremap_resource(dev, res);
-	if (IS_ERR(usb3->dmp)) {
-		dev_err(dev, "Failed to map DMP regs\n");
-		return PTR_ERR(usb3->dmp);
-	}
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ccb-mii");
-	usb3->ccb_mii = devm_ioremap_resource(dev, res);
-	if (IS_ERR(usb3->ccb_mii)) {
-		dev_err(dev, "Failed to map ChipCommon B MII regs\n");
-		return PTR_ERR(usb3->ccb_mii);
-	}
-
-	/* Enable MDIO. Setting MDCDIV as 26  */
-	writel(0x0000009a, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL);
-
-	/* Wait for MDIO? */
-	udelay(2);
-
-	usb3->phy_write = bcm_ns_usb3_platform_phy_write;
-
-	usb3->phy = devm_phy_create(dev, NULL, &ops);
-	if (IS_ERR(usb3->phy)) {
-		dev_err(dev, "Failed to create PHY\n");
-		return PTR_ERR(usb3->phy);
-	}
-
-	phy_set_drvdata(usb3->phy, usb3);
-	platform_set_drvdata(pdev, usb3);
-
-	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-	if (!IS_ERR(phy_provider))
-		dev_info(dev, "Registered Broadcom Northstar USB 3.0 PHY driver\n");
-
-	return PTR_ERR_OR_ZERO(phy_provider);
-}
-
-static struct platform_driver bcm_ns_usb3_driver = {
-	.probe		= bcm_ns_usb3_probe,
-	.driver = {
-		.name = "bcm_ns_usb3",
-		.of_match_table = bcm_ns_usb3_id_table,
-	},
-};
-
-static int __init bcm_ns_usb3_module_init(void)
-{
-	int err;
-
-	/*
-	 * For backward compatibility we register as MDIO and platform driver.
-	 * After getting MDIO binding commonly used (e.g. switching all DT files
-	 * to use it) we should deprecate the old binding and eventually drop
-	 * support for it.
-	 */
-
-	err = mdio_driver_register(&bcm_ns_usb3_mdio_driver);
-	if (err)
-		return err;
-
-	err = platform_driver_register(&bcm_ns_usb3_driver);
-	if (err)
-		mdio_driver_unregister(&bcm_ns_usb3_mdio_driver);
-
-	return err;
-}
-module_init(bcm_ns_usb3_module_init);
-
-static void __exit bcm_ns_usb3_module_exit(void)
-{
-	platform_driver_unregister(&bcm_ns_usb3_driver);
-	mdio_driver_unregister(&bcm_ns_usb3_mdio_driver);
-}
-module_exit(bcm_ns_usb3_module_exit)
+mdio_module_driver(bcm_ns_usb3_mdio_driver);
 
 MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, bcm_ns_usb3_id_table);
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
index 9630ac127366de924b6a5505d213fc6928eafcc9..65a399acc845eff17787ed8eff1ca9867780e958 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
@@ -293,7 +293,6 @@ static int ns2_drd_phy_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct ns2_phy_driver *driver;
 	struct ns2_phy_data *data;
-	struct resource *res;
 	int ret;
 	u32 val;
 
@@ -307,23 +306,19 @@ static int ns2_drd_phy_probe(struct platform_device *pdev)
 	if (!driver->data)
 		return -ENOMEM;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "icfg");
-	driver->icfgdrd_regs = devm_ioremap_resource(dev, res);
+	driver->icfgdrd_regs = devm_platform_ioremap_resource_byname(pdev, "icfg");
 	if (IS_ERR(driver->icfgdrd_regs))
 		return PTR_ERR(driver->icfgdrd_regs);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rst-ctrl");
-	driver->idmdrd_rst_ctrl = devm_ioremap_resource(dev, res);
+	driver->idmdrd_rst_ctrl = devm_platform_ioremap_resource_byname(pdev, "rst-ctrl");
 	if (IS_ERR(driver->idmdrd_rst_ctrl))
 		return PTR_ERR(driver->idmdrd_rst_ctrl);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crmu-ctrl");
-	driver->crmu_usb2_ctrl = devm_ioremap_resource(dev, res);
+	driver->crmu_usb2_ctrl = devm_platform_ioremap_resource_byname(pdev, "crmu-ctrl");
 	if (IS_ERR(driver->crmu_usb2_ctrl))
 		return PTR_ERR(driver->crmu_usb2_ctrl);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usb2-strap");
-	driver->usb2h_strap_reg = devm_ioremap_resource(dev, res);
+	driver->usb2h_strap_reg = devm_platform_ioremap_resource_byname(pdev, "usb2-strap");
 	if (IS_ERR(driver->usb2h_strap_reg))
 		return PTR_ERR(driver->usb2h_strap_reg);
 
diff --git a/drivers/phy/broadcom/phy-bcm-sr-pcie.c b/drivers/phy/broadcom/phy-bcm-sr-pcie.c
index 96a3af126a78d780cc4e212f53bd186200f41f63..8a4aadf166cf9e778af011f235b971d625877215 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-pcie.c
@@ -217,7 +217,6 @@ static int sr_pcie_phy_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct device_node *node = dev->of_node;
 	struct sr_pcie_phy_core *core;
-	struct resource *res;
 	struct phy_provider *provider;
 	unsigned int phy_idx = 0;
 
@@ -226,9 +225,7 @@ static int sr_pcie_phy_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	core->dev = dev;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	core->base = devm_ioremap_resource(core->dev, res);
+	core->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(core->base))
 		return PTR_ERR(core->base);
 
diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c
index c3e99ad174874496ac68978a98bebc55e37c1dc4..0002da3b5b5d7e5ca92d68b6d031730678193a42 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-usb.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c
@@ -300,14 +300,12 @@ static int bcm_usb_phy_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct device_node *dn = dev->of_node;
 	const struct of_device_id *of_id;
-	struct resource *res;
 	void __iomem *regs;
 	int ret;
 	enum bcm_usb_phy_version version;
 	struct phy_provider *phy_provider;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	regs = devm_ioremap_resource(dev, res);
+	regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(regs))
 		return PTR_ERR(regs);
 
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 18251f232172b7c544c823cab82e05ee601ea2b9..3ecf41359591a81b46d55d412c37c20dd1895e88 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -65,6 +65,7 @@ struct brcm_sata_port {
 	bool ssc_en;
 	enum brcm_sata_phy_rxaeq_mode rxaeq_mode;
 	u32 rxaeq_val;
+	u32 tx_amplitude_val;
 };
 
 struct brcm_sata_phy {
@@ -84,6 +85,10 @@ enum sata_phy_regs {
 	BLOCK0_SPARE_OOB_CLK_SEL_MASK		= 0x3,
 	BLOCK0_SPARE_OOB_CLK_SEL_REFBY2		= 0x1,
 
+	BLOCK1_REG_BANK				= 0x10,
+	BLOCK1_TEST_TX				= 0x83,
+	BLOCK1_TEST_TX_AMP_SHIFT		= 12,
+
 	PLL_REG_BANK_0				= 0x050,
 	PLL_REG_BANK_0_PLLCONTROL_0		= 0x81,
 	PLLCONTROL_0_FREQ_DET_RESTART		= BIT(13),
@@ -379,6 +384,29 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
 	brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
 			 ~tmp, RXPMD_MON_CORRECT_EN | value);
 
+	tmp = GENMASK(15, 12);
+	switch (port->tx_amplitude_val) {
+	case 400:
+		value = BIT(12) | BIT(13);
+		break;
+	case 500:
+		value = BIT(13);
+		break;
+	case 600:
+		value = BIT(12);
+		break;
+	case 800:
+		value = 0;
+		break;
+	default:
+		value = tmp;
+		break;
+	}
+
+	if (value != tmp)
+		brcm_sata_phy_wr(port, BLOCK1_REG_BANK, BLOCK1_TEST_TX, ~tmp,
+				 value);
+
 	/* Turn on/off SSC */
 	brcm_sata_phy_wr(port, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN,
 			 port->ssc_en ? TX_ACTRL5_SSC_EN : 0);
@@ -726,7 +754,6 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
 	struct device_node *dn = dev->of_node, *child;
 	const struct of_device_id *of_id;
 	struct brcm_sata_phy *priv;
-	struct resource *res;
 	struct phy_provider *provider;
 	int ret, count = 0;
 
@@ -739,8 +766,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
 	dev_set_drvdata(dev, priv);
 	priv->dev = dev;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
-	priv->phy_base = devm_ioremap_resource(dev, res);
+	priv->phy_base = devm_platform_ioremap_resource_byname(pdev, "phy");
 	if (IS_ERR(priv->phy_base))
 		return PTR_ERR(priv->phy_base);
 
@@ -751,9 +777,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
 		priv->version = BRCM_SATA_PHY_STB_28NM;
 
 	if (priv->version == BRCM_SATA_PHY_IPROC_NS2) {
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						   "phy-ctrl");
-		priv->ctrl_base = devm_ioremap_resource(dev, res);
+		priv->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "phy-ctrl");
 		if (IS_ERR(priv->ctrl_base))
 			return PTR_ERR(priv->ctrl_base);
 	}
@@ -791,6 +815,10 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
 		if (port->rxaeq_mode == RXAEQ_MODE_MANUAL)
 			of_property_read_u32(child, "brcm,rxaeq-value",
 					     &port->rxaeq_val);
+
+		of_property_read_u32(child, "brcm,tx-amplitude-millivolt",
+				     &port->tx_amplitude_val);
+
 		port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
 		if (IS_ERR(port->phy)) {
 			dev_err(dev, "failed to create PHY\n");
diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
index 90c4e9b5aac8307fa6d2d77c5f52c05cf4a48cde..ba042e39cfaf4b3d957fdd197f90eedd74879659 100644
--- a/drivers/phy/cadence/cdns-dphy.c
+++ b/drivers/phy/cadence/cdns-dphy.c
@@ -314,7 +314,6 @@ static int cdns_dphy_probe(struct platform_device *pdev)
 {
 	struct phy_provider *phy_provider;
 	struct cdns_dphy *dphy;
-	struct resource *res;
 	int ret;
 
 	dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
@@ -326,8 +325,7 @@ static int cdns_dphy_probe(struct platform_device *pdev)
 	if (!dphy->ops)
 		return -EINVAL;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	dphy->regs = devm_ioremap_resource(&pdev->dev, res);
+	dphy->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(dphy->regs))
 		return PTR_ERR(dphy->regs);
 
diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c
index 88e239adc3b81424a681d9c5d69edd911b3dca01..51c0b98f5fd7cc546e4936043ccd0112924af0d0 100644
--- a/drivers/phy/cadence/phy-cadence-salvo.c
+++ b/drivers/phy/cadence/phy-cadence-salvo.c
@@ -263,7 +263,6 @@ static int cdns_salvo_phy_probe(struct platform_device *pdev)
 	struct phy_provider *phy_provider;
 	struct device *dev = &pdev->dev;
 	struct cdns_salvo_phy *salvo_phy;
-	struct resource *res;
 	const struct of_device_id *match;
 	struct cdns_salvo_data *data;
 
@@ -281,8 +280,7 @@ static int cdns_salvo_phy_probe(struct platform_device *pdev)
 	if (IS_ERR(salvo_phy->clk))
 		return PTR_ERR(salvo_phy->clk);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	salvo_phy->base = devm_ioremap_resource(dev, res);
+	salvo_phy->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(salvo_phy->base))
 		return PTR_ERR(salvo_phy->base);
 
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 453ef26fa1c7ff0b5b26cd1e7dabe761765908ad..26a0badabe38b77852c03279dbe0ad59bcc8db25 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -479,7 +479,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 	const struct of_device_id *match;
 	struct cdns_sierra_data *data;
 	unsigned int id_value;
-	struct resource *res;
 	int i, ret, node = 0;
 	void __iomem *base;
 	struct clk *clk;
@@ -502,8 +501,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
 	sp->dev = dev;
 	sp->init_data = data;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base)) {
 		dev_err(dev, "missing \"reg\"\n");
 		return PTR_ERR(base);
diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
index 9f2c1da14f5ae782169c2b4afdd369a8e0db80a4..a95572b397ca115122787af2c61d9e890972703e 100644
--- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
@@ -434,7 +434,6 @@ static int mixel_dphy_probe(struct platform_device *pdev)
 	struct device_node *np = dev->of_node;
 	struct phy_provider *phy_provider;
 	struct mixel_dphy_priv *priv;
-	struct resource *res;
 	struct phy *phy;
 	void __iomem *base;
 
@@ -449,8 +448,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
 	if (!priv->devdata)
 		return -EINVAL;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
index 62d6d6849ad609c802e2b5c16f92a1b7a2af4df0..a29b4a6f7c249bae07e86eb466afd8fccb84d155 100644
--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
@@ -131,7 +131,7 @@ static const struct phy_ops imx8mq_usb_phy_ops = {
 	.owner		= THIS_MODULE,
 };
 
-static struct phy_ops imx8mp_usb_phy_ops = {
+static const struct phy_ops imx8mp_usb_phy_ops = {
 	.init		= imx8mp_usb_phy_init,
 	.power_on	= imx8mq_phy_power_on,
 	.power_off	= imx8mq_phy_power_off,
@@ -152,7 +152,6 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev)
 	struct phy_provider *phy_provider;
 	struct device *dev = &pdev->dev;
 	struct imx8mq_usb_phy *imx_phy;
-	struct resource *res;
 	const struct phy_ops *phy_ops;
 
 	imx_phy = devm_kzalloc(dev, sizeof(*imx_phy), GFP_KERNEL);
@@ -165,8 +164,7 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev)
 		return PTR_ERR(imx_phy->clk);
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	imx_phy->base = devm_ioremap_resource(dev, res);
+	imx_phy->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(imx_phy->base))
 		return PTR_ERR(imx_phy->base);
 
diff --git a/drivers/phy/ingenic/Kconfig b/drivers/phy/ingenic/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..f23cc109324b32911fc1dcfc89af78c640b67fef
--- /dev/null
+++ b/drivers/phy/ingenic/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Phy drivers for Ingenic platforms
+#
+config PHY_INGENIC_USB
+	tristate "Ingenic SoCs USB PHY Driver"
+	depends on MIPS || COMPILE_TEST
+	depends on USB_SUPPORT
+	depends on HAS_IOMEM
+	select GENERIC_PHY
+	help
+	  This driver provides USB PHY support for the USB controller found
+	  on the JZ-series and X-series SoCs from Ingenic.
diff --git a/drivers/phy/ingenic/Makefile b/drivers/phy/ingenic/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..65d5ea00fc9d4bb8eac784605f3b15527afdfe16
--- /dev/null
+++ b/drivers/phy/ingenic/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y		+= phy-ingenic-usb.o
diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
new file mode 100644
index 0000000000000000000000000000000000000000..4d1587d8228613593d73f81165d1c7529bdffac2
--- /dev/null
+++ b/drivers/phy/ingenic/phy-ingenic-usb.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic SoCs USB PHY driver
+ * Copyright (c) Paul Cercueil <paul@crapouillou.net>
+ * Copyright (c) 漆鹏振 (Qi Pengzhen) <aric.pzqi@ingenic.com>
+ * Copyright (c) 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+/* OTGPHY register offsets */
+#define REG_USBPCR_OFFSET			0x00
+#define REG_USBRDT_OFFSET			0x04
+#define REG_USBVBFIL_OFFSET			0x08
+#define REG_USBPCR1_OFFSET			0x0c
+
+/* bits within the USBPCR register */
+#define USBPCR_USB_MODE				BIT(31)
+#define USBPCR_AVLD_REG				BIT(30)
+#define USBPCR_COMMONONN			BIT(25)
+#define USBPCR_VBUSVLDEXT			BIT(24)
+#define USBPCR_VBUSVLDEXTSEL		BIT(23)
+#define USBPCR_POR					BIT(22)
+#define USBPCR_SIDDQ				BIT(21)
+#define USBPCR_OTG_DISABLE			BIT(20)
+#define USBPCR_TXPREEMPHTUNE		BIT(6)
+
+#define USBPCR_IDPULLUP_MASK		GENMASK(29, 28)
+#define USBPCR_IDPULLUP_ALWAYS		0x2
+#define USBPCR_IDPULLUP_SUSPEND		0x1
+#define USBPCR_IDPULLUP_OTG			0x0
+
+#define USBPCR_COMPDISTUNE_MASK		GENMASK(19, 17)
+#define USBPCR_COMPDISTUNE_DFT		0x4
+
+#define USBPCR_OTGTUNE_MASK			GENMASK(16, 14)
+#define USBPCR_OTGTUNE_DFT			0x4
+
+#define USBPCR_SQRXTUNE_MASK		GENMASK(13, 11)
+#define USBPCR_SQRXTUNE_DCR_20PCT	0x7
+#define USBPCR_SQRXTUNE_DFT			0x3
+
+#define USBPCR_TXFSLSTUNE_MASK		GENMASK(10, 7)
+#define USBPCR_TXFSLSTUNE_DCR_50PPT	0xf
+#define USBPCR_TXFSLSTUNE_DCR_25PPT	0x7
+#define USBPCR_TXFSLSTUNE_DFT		0x3
+#define USBPCR_TXFSLSTUNE_INC_25PPT	0x1
+#define USBPCR_TXFSLSTUNE_INC_50PPT	0x0
+
+#define USBPCR_TXHSXVTUNE_MASK		GENMASK(5, 4)
+#define USBPCR_TXHSXVTUNE_DFT		0x3
+#define USBPCR_TXHSXVTUNE_DCR_15MV	0x1
+
+#define USBPCR_TXRISETUNE_MASK		GENMASK(5, 4)
+#define USBPCR_TXRISETUNE_DFT		0x3
+
+#define USBPCR_TXVREFTUNE_MASK		GENMASK(3, 0)
+#define USBPCR_TXVREFTUNE_INC_75PPT	0xb
+#define USBPCR_TXVREFTUNE_INC_25PPT	0x7
+#define USBPCR_TXVREFTUNE_DFT		0x5
+
+/* bits within the USBRDT register */
+#define USBRDT_UTMI_RST				BIT(27)
+#define USBRDT_HB_MASK				BIT(26)
+#define USBRDT_VBFIL_LD_EN			BIT(25)
+#define USBRDT_IDDIG_EN				BIT(24)
+#define USBRDT_IDDIG_REG			BIT(23)
+#define USBRDT_VBFIL_EN				BIT(2)
+
+/* bits within the USBPCR1 register */
+#define USBPCR1_BVLD_REG			BIT(31)
+#define USBPCR1_DPPD				BIT(29)
+#define USBPCR1_DMPD				BIT(28)
+#define USBPCR1_USB_SEL				BIT(28)
+#define USBPCR1_PORT_RST			BIT(21)
+#define USBPCR1_WORD_IF_16BIT		BIT(19)
+
+enum ingenic_usb_phy_version {
+	ID_JZ4770,
+	ID_JZ4775,
+	ID_JZ4780,
+	ID_X1000,
+	ID_X1830,
+	ID_X2000,
+};
+
+struct ingenic_soc_info {
+	enum ingenic_usb_phy_version version;
+
+	void (*usb_phy_init)(struct phy *phy);
+};
+
+struct ingenic_usb_phy {
+	const struct ingenic_soc_info *soc_info;
+
+	struct phy *phy;
+	void __iomem *base;
+	struct clk *clk;
+	struct regulator *vcc_supply;
+};
+
+static int ingenic_usb_phy_init(struct phy *phy)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+	int err;
+	u32 reg;
+
+	err = clk_prepare_enable(priv->clk);
+	if (err) {
+		dev_err(&phy->dev, "Unable to start clock: %d\n", err);
+		return err;
+	}
+
+	priv->soc_info->usb_phy_init(phy);
+
+	/* Let the power-on reset settle, then release the POR bit */
+	usleep_range(30, 300);
+	reg = readl(priv->base + REG_USBPCR_OFFSET);
+	writel(reg & ~USBPCR_POR, priv->base + REG_USBPCR_OFFSET);
+	usleep_range(300, 1000);
+
+	return 0;
+}
+
+static int ingenic_usb_phy_exit(struct phy *phy)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+
+	clk_disable_unprepare(priv->clk);
+	regulator_disable(priv->vcc_supply);
+
+	return 0;
+}
+
+static int ingenic_usb_phy_power_on(struct phy *phy)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+	int err;
+
+	err = regulator_enable(priv->vcc_supply);
+	if (err) {
+		dev_err(&phy->dev, "Unable to enable VCC: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int ingenic_usb_phy_power_off(struct phy *phy)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+
+	regulator_disable(priv->vcc_supply);
+
+	return 0;
+}
+
+static int ingenic_usb_phy_set_mode(struct phy *phy,
+				  enum phy_mode mode, int submode)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+	u32 reg;
+
+	switch (mode) {
+	case PHY_MODE_USB_HOST:
+		reg = readl(priv->base + REG_USBPCR_OFFSET);
+		u32p_replace_bits(&reg, 1, USBPCR_USB_MODE);
+		u32p_replace_bits(&reg, 0, USBPCR_VBUSVLDEXT);
+		u32p_replace_bits(&reg, 0, USBPCR_VBUSVLDEXTSEL);
+		u32p_replace_bits(&reg, 0, USBPCR_OTG_DISABLE);
+		writel(reg, priv->base + REG_USBPCR_OFFSET);
+
+		break;
+	case PHY_MODE_USB_DEVICE:
+		reg = readl(priv->base + REG_USBPCR_OFFSET);
+		u32p_replace_bits(&reg, 0, USBPCR_USB_MODE);
+		u32p_replace_bits(&reg, 1, USBPCR_VBUSVLDEXT);
+		u32p_replace_bits(&reg, 1, USBPCR_VBUSVLDEXTSEL);
+		u32p_replace_bits(&reg, 1, USBPCR_OTG_DISABLE);
+		writel(reg, priv->base + REG_USBPCR_OFFSET);
+
+		break;
+	case PHY_MODE_USB_OTG:
+		reg = readl(priv->base + REG_USBPCR_OFFSET);
+		u32p_replace_bits(&reg, 1, USBPCR_USB_MODE);
+		u32p_replace_bits(&reg, 1, USBPCR_VBUSVLDEXT);
+		u32p_replace_bits(&reg, 1, USBPCR_VBUSVLDEXTSEL);
+		u32p_replace_bits(&reg, 0, USBPCR_OTG_DISABLE);
+		writel(reg, priv->base + REG_USBPCR_OFFSET);
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct phy_ops ingenic_usb_phy_ops = {
+	.init		= ingenic_usb_phy_init,
+	.exit		= ingenic_usb_phy_exit,
+	.power_on	= ingenic_usb_phy_power_on,
+	.power_off	= ingenic_usb_phy_power_off,
+	.set_mode	= ingenic_usb_phy_set_mode,
+	.owner		= THIS_MODULE,
+};
+
+static void jz4770_usb_phy_init(struct phy *phy)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+	u32 reg;
+
+	reg = USBPCR_AVLD_REG | USBPCR_COMMONONN | USBPCR_POR |
+		FIELD_PREP(USBPCR_IDPULLUP_MASK, USBPCR_IDPULLUP_ALWAYS) |
+		FIELD_PREP(USBPCR_COMPDISTUNE_MASK, USBPCR_COMPDISTUNE_DFT) |
+		FIELD_PREP(USBPCR_OTGTUNE_MASK, USBPCR_OTGTUNE_DFT) |
+		FIELD_PREP(USBPCR_SQRXTUNE_MASK, USBPCR_SQRXTUNE_DFT) |
+		FIELD_PREP(USBPCR_TXFSLSTUNE_MASK, USBPCR_TXFSLSTUNE_DFT) |
+		FIELD_PREP(USBPCR_TXRISETUNE_MASK, USBPCR_TXRISETUNE_DFT) |
+		FIELD_PREP(USBPCR_TXVREFTUNE_MASK, USBPCR_TXVREFTUNE_DFT);
+	writel(reg, priv->base + REG_USBPCR_OFFSET);
+}
+
+static void jz4775_usb_phy_init(struct phy *phy)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+	u32 reg;
+
+	reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_USB_SEL |
+		USBPCR1_WORD_IF_16BIT;
+	writel(reg, priv->base + REG_USBPCR1_OFFSET);
+
+	reg = USBPCR_COMMONONN | USBPCR_POR |
+		FIELD_PREP(USBPCR_TXVREFTUNE_MASK, USBPCR_TXVREFTUNE_INC_75PPT);
+	writel(reg, priv->base + REG_USBPCR_OFFSET);
+}
+
+static void jz4780_usb_phy_init(struct phy *phy)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+	u32 reg;
+
+	reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_USB_SEL |
+		USBPCR1_WORD_IF_16BIT;
+	writel(reg, priv->base + REG_USBPCR1_OFFSET);
+
+	reg = USBPCR_TXPREEMPHTUNE | USBPCR_COMMONONN | USBPCR_POR;
+	writel(reg, priv->base + REG_USBPCR_OFFSET);
+}
+
+static void x1000_usb_phy_init(struct phy *phy)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+	u32 reg;
+
+	reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_WORD_IF_16BIT;
+	writel(reg, priv->base + REG_USBPCR1_OFFSET);
+
+	reg = USBPCR_TXPREEMPHTUNE | USBPCR_COMMONONN | USBPCR_POR |
+		FIELD_PREP(USBPCR_SQRXTUNE_MASK, USBPCR_SQRXTUNE_DCR_20PCT) |
+		FIELD_PREP(USBPCR_TXHSXVTUNE_MASK, USBPCR_TXHSXVTUNE_DCR_15MV) |
+		FIELD_PREP(USBPCR_TXVREFTUNE_MASK, USBPCR_TXVREFTUNE_INC_25PPT);
+	writel(reg, priv->base + REG_USBPCR_OFFSET);
+}
+
+static void x1830_usb_phy_init(struct phy *phy)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+	u32 reg;
+
+	/* USBRDT: enable the VBUS filter and set the UTMI reset bit */
+	writel(USBRDT_VBFIL_EN | USBRDT_UTMI_RST, priv->base + REG_USBRDT_OFFSET);
+
+	reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_WORD_IF_16BIT |
+		USBPCR1_DMPD | USBPCR1_DPPD;
+	writel(reg, priv->base + REG_USBPCR1_OFFSET);
+
+	reg = USBPCR_VBUSVLDEXT | USBPCR_TXPREEMPHTUNE | USBPCR_COMMONONN | USBPCR_POR |
+		FIELD_PREP(USBPCR_IDPULLUP_MASK, USBPCR_IDPULLUP_OTG);
+	writel(reg, priv->base + REG_USBPCR_OFFSET);
+}
+
+static void x2000_usb_phy_init(struct phy *phy)
+{
+	struct ingenic_usb_phy *priv = phy_get_drvdata(phy);
+	u32 reg;
+
+	reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_DPPD | USBPCR1_DMPD;
+	writel(reg & ~USBPCR1_PORT_RST, priv->base + REG_USBPCR1_OFFSET);
+
+	reg = USBPCR_POR | FIELD_PREP(USBPCR_IDPULLUP_MASK, USBPCR_IDPULLUP_OTG);
+	writel(reg, priv->base + REG_USBPCR_OFFSET);
+}
+
+static const struct ingenic_soc_info jz4770_soc_info = {
+	.version = ID_JZ4770,
+
+	.usb_phy_init = jz4770_usb_phy_init,
+};
+
+static const struct ingenic_soc_info jz4775_soc_info = {
+	.version = ID_JZ4775,
+
+	.usb_phy_init = jz4775_usb_phy_init,
+};
+
+static const struct ingenic_soc_info jz4780_soc_info = {
+	.version = ID_JZ4780,
+
+	.usb_phy_init = jz4780_usb_phy_init,
+};
+
+static const struct ingenic_soc_info x1000_soc_info = {
+	.version = ID_X1000,
+
+	.usb_phy_init = x1000_usb_phy_init,
+};
+
+static const struct ingenic_soc_info x1830_soc_info = {
+	.version = ID_X1830,
+
+	.usb_phy_init = x1830_usb_phy_init,
+};
+
+static const struct ingenic_soc_info x2000_soc_info = {
+	.version = ID_X2000,
+
+	.usb_phy_init = x2000_usb_phy_init,
+};
+
+static int ingenic_usb_phy_probe(struct platform_device *pdev)
+{
+	struct ingenic_usb_phy *priv;
+	struct phy_provider *provider;
+	struct device *dev = &pdev->dev;
+	int err;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->soc_info = device_get_match_data(dev);
+	if (!priv->soc_info) {
+		dev_err(dev, "Error: No device match found\n");
+		return -ENODEV;
+	}
+
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->base)) {
+		dev_err(dev, "Failed to map registers\n");
+		return PTR_ERR(priv->base);
+	}
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		err = PTR_ERR(priv->clk);
+		if (err != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get clock\n");
+		return err;
+	}
+
+	priv->vcc_supply = devm_regulator_get(dev, "vcc");
+	if (IS_ERR(priv->vcc_supply)) {
+		err = PTR_ERR(priv->vcc_supply);
+		if (err != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get regulator\n");
+		return err;
+	}
+
+	priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
+	if (IS_ERR(priv->phy))
+		return PTR_ERR(priv->phy);
+
+	phy_set_drvdata(priv->phy, priv);
+
+	provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+	return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id ingenic_usb_phy_of_matches[] = {
+	{ .compatible = "ingenic,jz4770-phy", .data = &jz4770_soc_info },
+	{ .compatible = "ingenic,jz4775-phy", .data = &jz4775_soc_info },
+	{ .compatible = "ingenic,jz4780-phy", .data = &jz4780_soc_info },
+	{ .compatible = "ingenic,x1000-phy", .data = &x1000_soc_info },
+	{ .compatible = "ingenic,x1830-phy", .data = &x1830_soc_info },
+	{ .compatible = "ingenic,x2000-phy", .data = &x2000_soc_info },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ingenic_usb_phy_of_matches);
+
+static struct platform_driver ingenic_usb_phy_driver = {
+	.probe		= ingenic_usb_phy_probe,
+	.driver		= {
+		.name	= "ingenic-usb-phy",
+		.of_match_table = ingenic_usb_phy_of_matches,
+	},
+};
+module_platform_driver(ingenic_usb_phy_driver);
+
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
+MODULE_AUTHOR("漆鹏振 (Qi Pengzhen) <aric.pzqi@ingenic.com>");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic SoCs USB PHY driver");
+MODULE_LICENSE("GPL");
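The new driver exposes its functionality only through the generic PHY operations above, so a USB controller driver consumes it with the usual phy_*() calls rather than touching USBPCR directly. A hedged sketch of the consumer side (the "usb-phy" lookup name and the error handling are illustrative, not taken from a particular controller driver):

#include <linux/err.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int example_attach_usb_phy(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	/* Resolved through the controller's "phys"/"phy-names" properties. */
	phy = devm_phy_get(&pdev->dev, "usb-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* .init enables the PHY clock and releases the POR bit. */
	ret = phy_init(phy);
	if (ret)
		return ret;

	/* .power_on enables the "vcc" regulator. */
	ret = phy_power_on(phy);
	if (ret) {
		phy_exit(phy);
		return ret;
	}

	/* .set_mode programs USBPCR for host, device or OTG operation. */
	return phy_set_mode(phy, PHY_MODE_USB_HOST);
}
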
diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig
index 62c24764654b2e5d3eb13365e1beb421dd9aa69c..ac42bb2fb394d66f27f9145d6a0aff1cbf833ad5 100644
--- a/drivers/phy/intel/Kconfig
+++ b/drivers/phy/intel/Kconfig
@@ -14,6 +14,18 @@ config PHY_INTEL_KEEMBAY_EMMC
 	  To compile this driver as a module, choose M here: the module
 	  will be called phy-keembay-emmc.ko.
 
+config PHY_INTEL_KEEMBAY_USB
+	tristate "Intel Keem Bay USB PHY driver"
+	depends on ARCH_KEEMBAY || COMPILE_TEST
+	depends on HAS_IOMEM
+	select GENERIC_PHY
+	select REGMAP_MMIO
+	help
+	  Choose this option if you have an Intel Keem Bay SoC.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called phy-keembay-usb.ko.
+
 config PHY_INTEL_LGM_COMBO
 	bool "Intel Lightning Mountain ComboPHY driver"
 	depends on X86 || COMPILE_TEST
diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile
index a5e0af5ccd75f03014137b483ad7fa1749db2fcb..14550981a70740b642dfc400bb7294b40ba78393 100644
--- a/drivers/phy/intel/Makefile
+++ b/drivers/phy/intel/Makefile
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_PHY_INTEL_KEEMBAY_EMMC)	+= phy-intel-keembay-emmc.o
+obj-$(CONFIG_PHY_INTEL_KEEMBAY_USB)	+= phy-intel-keembay-usb.o
 obj-$(CONFIG_PHY_INTEL_LGM_COMBO)	+= phy-intel-lgm-combo.o
 obj-$(CONFIG_PHY_INTEL_LGM_EMMC)	+= phy-intel-lgm-emmc.o
diff --git a/drivers/phy/intel/phy-intel-keembay-usb.c b/drivers/phy/intel/phy-intel-keembay-usb.c
new file mode 100644
index 0000000000000000000000000000000000000000..c8b05f7b2445f391d328977123fc42f53d0e2daf
--- /dev/null
+++ b/drivers/phy/intel/phy-intel-keembay-usb.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay USB PHY driver
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* USS (USB Subsystem) clock control registers */
+#define USS_CPR_CLK_EN		0x00
+#define USS_CPR_CLK_SET		0x04
+#define USS_CPR_CLK_CLR		0x08
+#define USS_CPR_RST_EN		0x10
+#define USS_CPR_RST_SET		0x14
+#define USS_CPR_RST_CLR		0x18
+
+/* USS clock/reset bit fields */
+#define USS_CPR_PHY_TST		BIT(6)
+#define USS_CPR_LOW_JIT		BIT(5)
+#define USS_CPR_CORE		BIT(4)
+#define USS_CPR_SUSPEND		BIT(3)
+#define USS_CPR_ALT_REF		BIT(2)
+#define USS_CPR_REF		BIT(1)
+#define USS_CPR_SYS		BIT(0)
+#define USS_CPR_MASK		GENMASK(6, 0)
+
+/* USS APB slave registers */
+#define USS_USB_CTRL_CFG0		0x10
+#define  VCC_RESET_N_MASK		BIT(31)
+
+#define USS_USB_PHY_CFG0		0x30
+#define  POR_MASK			BIT(15)
+#define  PHY_RESET_MASK			BIT(14)
+#define  PHY_REF_USE_PAD_MASK		BIT(5)
+
+#define USS_USB_PHY_CFG6		0x64
+#define  PHY0_SRAM_EXT_LD_DONE_MASK	BIT(23)
+
+#define USS_USB_PARALLEL_IF_CTRL	0xa0
+#define  USB_PHY_CR_PARA_SEL_MASK	BIT(2)
+
+#define USS_USB_TSET_SIGNALS_AND_GLOB	0xac
+#define  USB_PHY_CR_PARA_CLK_EN_MASK	BIT(7)
+
+#define USS_USB_STATUS_REG		0xb8
+#define  PHY0_SRAM_INIT_DONE_MASK	BIT(3)
+
+#define USS_USB_TIEOFFS_CONSTANTS_REG1	0xc0
+#define  IDDQ_ENABLE_MASK		BIT(10)
+
+struct keembay_usb_phy {
+	struct device *dev;
+	struct regmap *regmap_cpr;
+	struct regmap *regmap_slv;
+};
+
+static const struct regmap_config keembay_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = USS_USB_TIEOFFS_CONSTANTS_REG1,
+};
+
+static int keembay_usb_clocks_on(struct keembay_usb_phy *priv)
+{
+	int ret;
+
+	ret = regmap_update_bits(priv->regmap_cpr, USS_CPR_CLK_SET,
+				 USS_CPR_MASK, USS_CPR_MASK);
+	if (ret) {
+		dev_err(priv->dev, "error clock set: %d\n", ret);
+		return ret;
+	}
+
+	ret = regmap_update_bits(priv->regmap_cpr, USS_CPR_RST_SET,
+				 USS_CPR_MASK, USS_CPR_MASK);
+	if (ret) {
+		dev_err(priv->dev, "error reset set: %d\n", ret);
+		return ret;
+	}
+
+	ret = regmap_update_bits(priv->regmap_slv,
+				 USS_USB_TIEOFFS_CONSTANTS_REG1,
+				 IDDQ_ENABLE_MASK,
+				 FIELD_PREP(IDDQ_ENABLE_MASK, 0));
+	if (ret) {
+		dev_err(priv->dev, "error iddq disable: %d\n", ret);
+		return ret;
+	}
+
+	/* Wait 30us to ensure all analog blocks are powered up. */
+	usleep_range(30, 60);
+
+	ret = regmap_update_bits(priv->regmap_slv, USS_USB_PHY_CFG0,
+				 PHY_REF_USE_PAD_MASK,
+				 FIELD_PREP(PHY_REF_USE_PAD_MASK, 1));
+	if (ret)
+		dev_err(priv->dev, "error ref clock select: %d\n", ret);
+
+	return ret;
+}
+
+static int keembay_usb_core_off(struct keembay_usb_phy *priv)
+{
+	int ret;
+
+	ret = regmap_update_bits(priv->regmap_slv, USS_USB_CTRL_CFG0,
+				 VCC_RESET_N_MASK,
+				 FIELD_PREP(VCC_RESET_N_MASK, 0));
+	if (ret)
+		dev_err(priv->dev, "error core reset: %d\n", ret);
+
+	return ret;
+}
+
+static int keembay_usb_core_on(struct keembay_usb_phy *priv)
+{
+	int ret;
+
+	ret = regmap_update_bits(priv->regmap_slv, USS_USB_CTRL_CFG0,
+				 VCC_RESET_N_MASK,
+				 FIELD_PREP(VCC_RESET_N_MASK, 1));
+	if (ret)
+		dev_err(priv->dev, "error core on: %d\n", ret);
+
+	return ret;
+}
+
+static int keembay_usb_phys_on(struct keembay_usb_phy *priv)
+{
+	int ret;
+
+	ret = regmap_update_bits(priv->regmap_slv, USS_USB_PHY_CFG0,
+				 POR_MASK | PHY_RESET_MASK,
+				 FIELD_PREP(POR_MASK | PHY_RESET_MASK, 0));
+	if (ret)
+		dev_err(priv->dev, "error phys on: %d\n", ret);
+
+	return ret;
+}
+
+static int keembay_usb_phy_init(struct phy *phy)
+{
+	struct keembay_usb_phy *priv = phy_get_drvdata(phy);
+	u32 val;
+	int ret;
+
+	ret = keembay_usb_core_off(priv);
+	if (ret)
+		return ret;
+
+	/*
+	 * According to Keem Bay datasheet, wait minimum 20us after clock
+	 * enable before bringing PHYs out of reset.
+	 */
+	usleep_range(20, 40);
+
+	ret = keembay_usb_phys_on(priv);
+	if (ret)
+		return ret;
+
+	ret = regmap_update_bits(priv->regmap_slv,
+				 USS_USB_TSET_SIGNALS_AND_GLOB,
+				 USB_PHY_CR_PARA_CLK_EN_MASK,
+				 FIELD_PREP(USB_PHY_CR_PARA_CLK_EN_MASK, 0));
+	if (ret) {
+		dev_err(priv->dev, "error cr clock disable: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * According to Keem Bay datasheet, wait 2us after disabling the
+	 * clock into the USB 3.x parallel interface.
+	 */
+	udelay(2);
+
+	ret = regmap_update_bits(priv->regmap_slv,
+				 USS_USB_PARALLEL_IF_CTRL,
+				 USB_PHY_CR_PARA_SEL_MASK,
+				 FIELD_PREP(USB_PHY_CR_PARA_SEL_MASK, 1));
+	if (ret) {
+		dev_err(priv->dev, "error cr select: %d\n", ret);
+		return ret;
+	}
+
+	ret = regmap_update_bits(priv->regmap_slv,
+				 USS_USB_TSET_SIGNALS_AND_GLOB,
+				 USB_PHY_CR_PARA_CLK_EN_MASK,
+				 FIELD_PREP(USB_PHY_CR_PARA_CLK_EN_MASK, 1));
+	if (ret) {
+		dev_err(priv->dev, "error cr clock enable: %d\n", ret);
+		return ret;
+	}
+
+	ret = regmap_read_poll_timeout(priv->regmap_slv, USS_USB_STATUS_REG,
+				       val, val & PHY0_SRAM_INIT_DONE_MASK,
+				       USEC_PER_MSEC, 10 * USEC_PER_MSEC);
+	if (ret) {
+		dev_err(priv->dev, "SRAM init not done: %d\n", ret);
+		return ret;
+	}
+
+	ret = regmap_update_bits(priv->regmap_slv, USS_USB_PHY_CFG6,
+				 PHY0_SRAM_EXT_LD_DONE_MASK,
+				 FIELD_PREP(PHY0_SRAM_EXT_LD_DONE_MASK, 1));
+	if (ret) {
+		dev_err(priv->dev, "error SRAM init done set: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * According to Keem Bay datasheet, wait 20us after setting the
+	 * SRAM load done bit, before releasing the controller reset.
+	 */
+	usleep_range(20, 40);
+
+	return keembay_usb_core_on(priv);
+}
+
+static const struct phy_ops ops = {
+	.init		= keembay_usb_phy_init,
+	.owner		= THIS_MODULE,
+};
+
+static int keembay_usb_phy_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct keembay_usb_phy *priv;
+	struct phy *generic_phy;
+	struct phy_provider *phy_provider;
+	void __iomem *base;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	base = devm_platform_ioremap_resource_byname(pdev, "cpr-apb-base");
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	priv->regmap_cpr = devm_regmap_init_mmio(dev, base,
+						 &keembay_regmap_config);
+	if (IS_ERR(priv->regmap_cpr))
+		return PTR_ERR(priv->regmap_cpr);
+
+	base = devm_platform_ioremap_resource_byname(pdev, "slv-apb-base");
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	priv->regmap_slv = devm_regmap_init_mmio(dev, base,
+						 &keembay_regmap_config);
+	if (IS_ERR(priv->regmap_slv))
+		return PTR_ERR(priv->regmap_slv);
+
+	generic_phy = devm_phy_create(dev, dev->of_node, &ops);
+	if (IS_ERR(generic_phy))
+		return dev_err_probe(dev, PTR_ERR(generic_phy),
+				     "failed to create PHY\n");
+
+	phy_set_drvdata(generic_phy, priv);
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	if (IS_ERR(phy_provider))
+		return dev_err_probe(dev, PTR_ERR(phy_provider),
+				     "failed to register phy provider\n");
+
+	/* Set up the USB subsystem clocks */
+	ret = keembay_usb_clocks_on(priv);
+	if (ret)
+		return ret;
+
+	/* Turn on the DWC3 core, prior to the DWC3 driver init. */
+	return keembay_usb_core_on(priv);
+}
+
+static const struct of_device_id keembay_usb_phy_dt_ids[] = {
+	{ .compatible = "intel,keembay-usb-phy" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, keembay_usb_phy_dt_ids);
+
+static struct platform_driver keembay_usb_phy_driver = {
+	.probe		= keembay_usb_phy_probe,
+	.driver		= {
+		.name	= "keembay-usb-phy",
+		.of_match_table = keembay_usb_phy_dt_ids,
+	},
+};
+module_platform_driver(keembay_usb_phy_driver);
+
+MODULE_AUTHOR("Wan Ahmad Zainie <wan.ahmad.zainie.wan.mohamad@intel.com>");
+MODULE_DESCRIPTION("Intel Keem Bay USB PHY driver");
+MODULE_LICENSE("GPL v2");
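The Keem Bay init sequence above is built almost entirely from two regmap idioms: regmap_update_bits() with FIELD_PREP() to rewrite one field while preserving its neighbours, and regmap_read_poll_timeout() to wait on a status bit. A minimal, self-contained sketch of both (EXAMPLE_REG, EXAMPLE_EN, EXAMPLE_STS and EXAMPLE_READY are hypothetical names, not registers of this PHY):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/regmap.h>

#define EXAMPLE_REG	0x10
#define EXAMPLE_EN	BIT(3)	/* control bit to set */
#define EXAMPLE_STS	0x14
#define EXAMPLE_READY	BIT(0)	/* status bit to poll */

static int example_enable_and_wait(struct regmap *map)
{
	unsigned int val;
	int ret;

	/* Read-modify-write only the EXAMPLE_EN field, under the regmap lock. */
	ret = regmap_update_bits(map, EXAMPLE_REG, EXAMPLE_EN,
				 FIELD_PREP(EXAMPLE_EN, 1));
	if (ret)
		return ret;

	/* Poll EXAMPLE_READY every 1000 us, give up after 10000 us. */
	return regmap_read_poll_timeout(map, EXAMPLE_STS, val,
					val & EXAMPLE_READY, 1000, 10000);
}
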
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index 22c5698123cf0fa9b34d6416869d55b20380973d..ef93bf2cba1053be5196c83a8bb8f6def7460e1f 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -402,7 +402,6 @@ static int ltq_vrx200_pcie_phy_probe(struct platform_device *pdev)
 	struct ltq_vrx200_pcie_phy_priv *priv;
 	struct device *dev = &pdev->dev;
 	struct phy_provider *provider;
-	struct resource *res;
 	void __iomem *base;
 	int ret;
 
@@ -410,8 +409,7 @@ static int ltq_vrx200_pcie_phy_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 8f6273c837ec32885c25b77e18a079eef5b0951a..6c96f2bf52665789ff67fb391b8dfc877e3df544 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -116,3 +116,15 @@ config PHY_MMP3_USB
 	  The PHY driver will be used by Marvell udc/ehci/otg driver.
 
 	  To compile this driver as a module, choose M here.
+
+config PHY_MMP3_HSIC
+	tristate "Marvell MMP3 USB HSIC PHY Driver"
+	depends on MACH_MMP3_DT || COMPILE_TEST
+	select GENERIC_PHY
+	help
+	  Enable this to support the Marvell MMP3 USB HSIC PHY driver for
+	  the Marvell MMP3 SoC. This driver will be used by the Marvell EHCI
+	  driver to initialize the interface to the internal USB HSIC
+	  components on MMP3-based boards.
+
+	  To compile this driver as a module, choose M here.
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile
index 5a106b1549f4108cdca66865837040eb5766b1ab..7f296ef028292d610e71d2918ea6bf62e8a248b7 100644
--- a/drivers/phy/marvell/Makefile
+++ b/drivers/phy/marvell/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY)	+= phy-armada375-usb2.o
 obj-$(CONFIG_PHY_BERLIN_SATA)		+= phy-berlin-sata.o
 obj-$(CONFIG_PHY_BERLIN_USB)		+= phy-berlin-usb.o
 obj-$(CONFIG_PHY_MMP3_USB)		+= phy-mmp3-usb.o
+obj-$(CONFIG_PHY_MMP3_HSIC)		+= phy-mmp3-hsic.o
 obj-$(CONFIG_PHY_MVEBU_A3700_COMPHY)	+= phy-mvebu-a3700-comphy.o
 obj-$(CONFIG_PHY_MVEBU_A3700_UTMI)	+= phy-mvebu-a3700-utmi.o
 obj-$(CONFIG_PHY_MVEBU_A38X_COMPHY)	+= phy-armada38x-comphy.o
diff --git a/drivers/phy/marvell/phy-armada375-usb2.c b/drivers/phy/marvell/phy-armada375-usb2.c
index fa5dc9462d09444c7d87a7468668ea2f7acd8b1f..b141e3cd8a941998ef3140def0edfba331671d0b 100644
--- a/drivers/phy/marvell/phy-armada375-usb2.c
+++ b/drivers/phy/marvell/phy-armada375-usb2.c
@@ -105,15 +105,13 @@ static int armada375_usb_phy_probe(struct platform_device *pdev)
 	struct phy *phy;
 	struct phy_provider *phy_provider;
 	void __iomem *usb_cluster_base;
-	struct resource *res;
 	struct armada375_cluster_phy *cluster_phy;
 
 	cluster_phy = devm_kzalloc(dev, sizeof(*cluster_phy), GFP_KERNEL);
 	if (!cluster_phy)
 		return  -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	usb_cluster_base = devm_ioremap_resource(&pdev->dev, res);
+	usb_cluster_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(usb_cluster_base))
 		return PTR_ERR(usb_cluster_base);
 
diff --git a/drivers/phy/marvell/phy-berlin-usb.c b/drivers/phy/marvell/phy-berlin-usb.c
index a43df63007c5ccdf3186c59b609df4e43a4adda2..78ef6ae72a9a7482bde14e69b72ddd8b3b7a8e23 100644
--- a/drivers/phy/marvell/phy-berlin-usb.c
+++ b/drivers/phy/marvell/phy-berlin-usb.c
@@ -165,7 +165,6 @@ static int phy_berlin_usb_probe(struct platform_device *pdev)
 	const struct of_device_id *match =
 		of_match_device(phy_berlin_usb_of_match, &pdev->dev);
 	struct phy_berlin_usb_priv *priv;
-	struct resource *res;
 	struct phy *phy;
 	struct phy_provider *phy_provider;
 
@@ -173,8 +172,7 @@ static int phy_berlin_usb_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
diff --git a/drivers/phy/marvell/phy-mmp3-hsic.c b/drivers/phy/marvell/phy-mmp3-hsic.c
new file mode 100644
index 0000000000000000000000000000000000000000..47c1e8894939fd53199037fcd723c1c053a2fb48
--- /dev/null
+++ b/drivers/phy/marvell/phy-mmp3-hsic.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#define HSIC_CTRL	0x08
+#define HSIC_ENABLE	BIT(7)
+#define PLL_BYPASS	BIT(4)
+
+static int mmp3_hsic_phy_init(struct phy *phy)
+{
+	void __iomem *base = (void __iomem *)phy_get_drvdata(phy);
+	u32 hsic_ctrl;
+
+	hsic_ctrl = readl_relaxed(base + HSIC_CTRL);
+	hsic_ctrl |= HSIC_ENABLE;
+	hsic_ctrl |= PLL_BYPASS;
+	writel_relaxed(hsic_ctrl, base + HSIC_CTRL);
+
+	return 0;
+}
+
+static const struct phy_ops mmp3_hsic_phy_ops = {
+	.init		= mmp3_hsic_phy_init,
+	.owner		= THIS_MODULE,
+};
+
+static const struct of_device_id mmp3_hsic_phy_of_match[] = {
+	{ .compatible = "marvell,mmp3-hsic-phy", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, mmp3_hsic_phy_of_match);
+
+static int mmp3_hsic_phy_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy_provider *provider;
+	struct resource *resource;
+	void __iomem *base;
+	struct phy *phy;
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, resource);
+	if (IS_ERR(base)) {
+		dev_err(dev, "failed to remap PHY regs\n");
+		return PTR_ERR(base);
+	}
+
+	phy = devm_phy_create(dev, NULL, &mmp3_hsic_phy_ops);
+	if (IS_ERR(phy)) {
+		dev_err(dev, "failed to create PHY\n");
+		return PTR_ERR(phy);
+	}
+
+	phy_set_drvdata(phy, (void *)base);
+	provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	if (IS_ERR(provider)) {
+		dev_err(dev, "failed to register PHY provider\n");
+		return PTR_ERR(provider);
+	}
+
+	return 0;
+}
+
+static struct platform_driver mmp3_hsic_phy_driver = {
+	.probe		= mmp3_hsic_phy_probe,
+	.driver		= {
+		.name	= "mmp3-hsic-phy",
+		.of_match_table = mmp3_hsic_phy_of_match,
+	},
+};
+module_platform_driver(mmp3_hsic_phy_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Marvell MMP3 USB HSIC PHY Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/marvell/phy-mmp3-usb.c b/drivers/phy/marvell/phy-mmp3-usb.c
index 499869595a5825dbab57e06fa265c03d8a2d9214..04c0bada35194630d2f707dcc41d4ae226ebd4cd 100644
--- a/drivers/phy/marvell/phy-mmp3-usb.c
+++ b/drivers/phy/marvell/phy-mmp3-usb.c
@@ -246,7 +246,6 @@ MODULE_DEVICE_TABLE(of, mmp3_usb_phy_of_match);
 static int mmp3_usb_phy_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct resource *resource;
 	struct mmp3_usb_phy *mmp3_usb_phy;
 	struct phy_provider *provider;
 
@@ -254,8 +253,7 @@ static int mmp3_usb_phy_probe(struct platform_device *pdev)
 	if (!mmp3_usb_phy)
 		return -ENOMEM;
 
-	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	mmp3_usb_phy->base = devm_ioremap_resource(dev, resource);
+	mmp3_usb_phy->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(mmp3_usb_phy->base)) {
 		dev_err(dev, "failed to remap PHY regs\n");
 		return PTR_ERR(mmp3_usb_phy->base);
diff --git a/drivers/phy/marvell/phy-mvebu-sata.c b/drivers/phy/marvell/phy-mvebu-sata.c
index 3c01b5dceaae69895c237dabfef963df9c53b66e..51a4646e293357b23d3a2f8071d9627e769a889c 100644
--- a/drivers/phy/marvell/phy-mvebu-sata.c
+++ b/drivers/phy/marvell/phy-mvebu-sata.c
@@ -80,7 +80,6 @@ static const struct phy_ops phy_mvebu_sata_ops = {
 static int phy_mvebu_sata_probe(struct platform_device *pdev)
 {
 	struct phy_provider *phy_provider;
-	struct resource *res;
 	struct priv *priv;
 	struct phy *phy;
 
@@ -88,8 +87,7 @@ static int phy_mvebu_sata_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
diff --git a/drivers/phy/marvell/phy-pxa-28nm-hsic.c b/drivers/phy/marvell/phy-pxa-28nm-hsic.c
index 31b43d2ee39a69fb5af263b86c1e5ebc96bf3f4b..c5c100563f556559ab67db34d969bd07371e9215 100644
--- a/drivers/phy/marvell/phy-pxa-28nm-hsic.c
+++ b/drivers/phy/marvell/phy-pxa-28nm-hsic.c
@@ -162,7 +162,6 @@ static int mv_hsic_phy_probe(struct platform_device *pdev)
 {
 	struct phy_provider *phy_provider;
 	struct mv_hsic_phy *mv_phy;
-	struct resource *r;
 
 	mv_phy = devm_kzalloc(&pdev->dev, sizeof(*mv_phy), GFP_KERNEL);
 	if (!mv_phy)
@@ -176,8 +175,7 @@ static int mv_hsic_phy_probe(struct platform_device *pdev)
 		return PTR_ERR(mv_phy->clk);
 	}
 
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	mv_phy->base = devm_ioremap_resource(&pdev->dev, r);
+	mv_phy->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(mv_phy->base))
 		return PTR_ERR(mv_phy->base);
 
diff --git a/drivers/phy/marvell/phy-pxa-28nm-usb2.c b/drivers/phy/marvell/phy-pxa-28nm-usb2.c
index a175ae915f02f402201d50bb8106dc286529d03a..0b390b9d2ae12f067441dc6f1750c57d32ff72ea 100644
--- a/drivers/phy/marvell/phy-pxa-28nm-usb2.c
+++ b/drivers/phy/marvell/phy-pxa-28nm-usb2.c
@@ -294,7 +294,6 @@ static int mv_usb2_phy_probe(struct platform_device *pdev)
 {
 	struct phy_provider *phy_provider;
 	struct mv_usb2_phy *mv_phy;
-	struct resource *r;
 
 	mv_phy = devm_kzalloc(&pdev->dev, sizeof(*mv_phy), GFP_KERNEL);
 	if (!mv_phy)
@@ -308,8 +307,7 @@ static int mv_usb2_phy_probe(struct platform_device *pdev)
 		return PTR_ERR(mv_phy->clk);
 	}
 
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	mv_phy->base = devm_ioremap_resource(&pdev->dev, r);
+	mv_phy->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(mv_phy->base))
 		return PTR_ERR(mv_phy->base);
 
diff --git a/drivers/phy/marvell/phy-pxa-usb.c b/drivers/phy/marvell/phy-pxa-usb.c
index 87ff7550b9123c0213d4a551045508e7b50659b5..ffe889893ff4a0e5ed9c5290501c74c8690ce935 100644
--- a/drivers/phy/marvell/phy-pxa-usb.c
+++ b/drivers/phy/marvell/phy-pxa-usb.c
@@ -286,7 +286,6 @@ MODULE_DEVICE_TABLE(of, pxa_usb_phy_of_match);
 static int pxa_usb_phy_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct resource *resource;
 	struct pxa_usb_phy *pxa_usb_phy;
 	struct phy_provider *provider;
 	const struct of_device_id *of_id;
@@ -301,8 +300,7 @@ static int pxa_usb_phy_probe(struct platform_device *pdev)
 	else
 		pxa_usb_phy->version = PXA_USB_PHY_MMP2;
 
-	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	pxa_usb_phy->base = devm_ioremap_resource(dev, resource);
+	pxa_usb_phy->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(pxa_usb_phy->base)) {
 		dev_err(dev, "failed to remap PHY regs\n");
 		return PTR_ERR(pxa_usb_phy->base);
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index 01f2118633de48cb5a572d9fb3aca50d9f0217a1..d38def43b1bf6214303deef36d6066b848005f5d 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -5,7 +5,8 @@
 config PHY_MTK_TPHY
 	tristate "MediaTek T-PHY Driver"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
-	depends on OF
+	depends on OF && OF_ADDRESS
+	depends on HAS_IOMEM
 	select GENERIC_PHY
 	help
 	  Say 'Y' here to add support for MediaTek T-PHY driver,
@@ -29,7 +30,8 @@ config PHY_MTK_UFS
 config PHY_MTK_XSPHY
 	tristate "MediaTek XS-PHY Driver"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
-	depends on OF
+	depends on OF && OF_ADDRESS
+	depends on HAS_IOMEM
 	select GENERIC_PHY
 	help
 	  Enable this to support the SuperSpeedPlus XS-PHY transceiver for
@@ -38,7 +40,9 @@ config PHY_MTK_XSPHY
 
 config PHY_MTK_HDMI
 	tristate "MediaTek HDMI-PHY Driver"
-	depends on ARCH_MEDIATEK && OF
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on COMMON_CLK
+	depends on OF
 	select GENERIC_PHY
 	help
 	  Support HDMI PHY for Mediatek SoCs.
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
index 47c029d4b270bbc7b558517374aaffcf5df69feb..45be8aa724f3a69d01b5be01e615ca43d9d9eb42 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
@@ -84,8 +84,9 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
 	    hdmi_phy->conf->hdmi_phy_disable_tmds)
 		return &mtk_hdmi_phy_dev_ops;
 
-	dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
-		return NULL;
+	if (hdmi_phy)
+		dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
+	return NULL;
 }
 
 static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
@@ -201,7 +202,7 @@ static const struct of_device_id mtk_hdmi_phy_match[] = {
 	{},
 };
 
-struct platform_driver mtk_hdmi_phy_driver = {
+static struct platform_driver mtk_hdmi_phy_driver = {
 	.probe = mtk_hdmi_phy_probe,
 	.driver = {
 		.name = "mediatek-hdmi-phy",
diff --git a/drivers/phy/mediatek/phy-mtk-ufs.c b/drivers/phy/mediatek/phy-mtk-ufs.c
index cf94f5c35dc5ee75c42bf6d1db7bdbca90ec4735..769b00b038d83fb1b7f7cc5d020945f28b8b419d 100644
--- a/drivers/phy/mediatek/phy-mtk-ufs.c
+++ b/drivers/phy/mediatek/phy-mtk-ufs.c
@@ -195,7 +195,6 @@ static int ufs_mtk_phy_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct phy *generic_phy;
 	struct phy_provider *phy_provider;
-	struct resource *res;
 	struct ufs_mtk_phy *phy;
 	int ret;
 
@@ -203,8 +202,7 @@ static int ufs_mtk_phy_probe(struct platform_device *pdev)
 	if (!phy)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	phy->mmio = devm_ioremap_resource(dev, res);
+	phy->mmio = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(phy->mmio))
 		return PTR_ERR(phy->mmio);
 
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index b88922e7de1dced4f9f21d81d016afccbe7e4a7c..f4cd590fbde7fa7d11abb5724a9a97f1adba8fb8 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1644,7 +1644,6 @@ static int xgene_phy_probe(struct platform_device *pdev)
 {
 	struct phy_provider *phy_provider;
 	struct xgene_phy_ctx *ctx;
-	struct resource *res;
 	u32 default_spd[] = DEFAULT_SATA_SPD_SEL;
 	u32 default_txboost_gain[] = DEFAULT_SATA_TXBOOST_GAIN;
 	u32 default_txeye_direction[] = DEFAULT_SATA_TXEYEDIRECTION;
@@ -1661,8 +1660,7 @@ static int xgene_phy_probe(struct platform_device *pdev)
 
 	ctx->dev = &pdev->dev;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ctx->sds_base = devm_ioremap_resource(&pdev->dev, res);
+	ctx->sds_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(ctx->sds_base))
 		return PTR_ERR(ctx->sds_base);
 
diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
index ce91ae7f8dbd2e392e803ddea1bf079b581d7d19..d437a249cd7310d166f495b662aaeb23c6b13d86 100644
--- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
@@ -201,7 +201,6 @@ static int qcom_apq8064_sata_phy_probe(struct platform_device *pdev)
 {
 	struct qcom_apq8064_sata_phy *phy;
 	struct device *dev = &pdev->dev;
-	struct resource *res;
 	struct phy_provider *phy_provider;
 	struct phy *generic_phy;
 	int ret;
@@ -210,8 +209,7 @@ static int qcom_apq8064_sata_phy_probe(struct platform_device *pdev)
 	if (!phy)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	phy->mmio = devm_ioremap_resource(dev, res);
+	phy->mmio = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(phy->mmio))
 		return PTR_ERR(phy->mmio);
 
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
index fc7f9df80a7bd00f23ad665c4bb6005920952589..d3e7d5e1d1b6a3786b90b01e959a6a3be299f19a 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
@@ -95,7 +95,6 @@ MODULE_DEVICE_TABLE(of, ipq4019_usb_phy_of_match);
 static int ipq4019_usb_phy_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct resource *res;
 	struct phy_provider *phy_provider;
 	struct ipq4019_usb_phy *phy;
 
@@ -104,8 +103,7 @@ static int ipq4019_usb_phy_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	phy->dev = &pdev->dev;
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	phy->base = devm_ioremap_resource(&pdev->dev, res);
+	phy->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(phy->base)) {
 		dev_err(dev, "failed to remap register memory\n");
 		return PTR_ERR(phy->base);
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c
index 41a69f56b3468941e6d3f89ad154c52032dfcecf..0fc2a1ed39b3ff95d3e5ed8acb45c86dd64780cb 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c
@@ -128,7 +128,6 @@ static int qcom_ipq806x_sata_phy_probe(struct platform_device *pdev)
 {
 	struct qcom_ipq806x_sata_phy *phy;
 	struct device *dev = &pdev->dev;
-	struct resource *res;
 	struct phy_provider *phy_provider;
 	struct phy *generic_phy;
 	int ret;
@@ -137,8 +136,7 @@ static int qcom_ipq806x_sata_phy_probe(struct platform_device *pdev)
 	if (!phy)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	phy->mmio = devm_ioremap_resource(dev, res);
+	phy->mmio = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(phy->mmio))
 		return PTR_ERR(phy->mmio);
 
diff --git a/drivers/phy/qualcomm/phy-qcom-pcie2.c b/drivers/phy/qualcomm/phy-qcom-pcie2.c
index 9dba3594e6d965508aa0a554a1f9c838edac8081..5407e59bb1854a07e42ce8b036c7740466660a1e 100644
--- a/drivers/phy/qualcomm/phy-qcom-pcie2.c
+++ b/drivers/phy/qualcomm/phy-qcom-pcie2.c
@@ -250,7 +250,6 @@ static int qcom_pcie2_phy_probe(struct platform_device *pdev)
 {
 	struct phy_provider *phy_provider;
 	struct qcom_phy *qphy;
-	struct resource *res;
 	struct device *dev = &pdev->dev;
 	struct phy *phy;
 	int ret;
@@ -260,9 +259,7 @@ static int qcom_pcie2_phy_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	qphy->dev = dev;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	qphy->base = devm_ioremap_resource(dev, res);
+	qphy->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(qphy->base))
 		return PTR_ERR(qphy->base);
 
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 0cda16846962594a3a31407559d9893e4adace7a..0939a9e9d4484884105a5bf6939cc255d4eb5e3f 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -217,6 +217,13 @@ static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
 	[QPHY_PCS_READY_STATUS]		= 0x160,
 };
 
+static const unsigned int sm8250_pcie_regs_layout[QPHY_LAYOUT_SIZE] = {
+	[QPHY_SW_RESET]			= 0x00,
+	[QPHY_START_CTRL]		= 0x44,
+	[QPHY_PCS_STATUS]		= 0x14,
+	[QPHY_PCS_POWER_DOWN_CONTROL]	= 0x40,
+};
+
 static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
 	[QPHY_START_CTRL]		= QPHY_V4_PCS_UFS_PHY_START,
 	[QPHY_PCS_READY_STATUS]		= QPHY_V4_PCS_UFS_READY_STATUS,
@@ -1824,6 +1831,149 @@ static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_tbl[] = {
 	QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
 };
 
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x42),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0xb4),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x55),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x68),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xaa),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x34),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_serdes_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x35),
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1b),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0x30),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x07),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xd4),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x54),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xdb),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0x31),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x24),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xe4),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xec),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x36),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RCLK_AUXDATA_SEL, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x00),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x3f),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x14),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x30),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0x77),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_RATE_SLEW_CNTRL1, 0x0b),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x12),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P6_P7_PRE, 0x33),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_tx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_rx_tbl[] = {
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x15),
+	QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x05),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG2, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_misc_tbl[] = {
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d),
+	QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+};
+
 /* struct qmp_phy_cfg - per-PHY initialization config */
 struct qmp_phy_cfg {
 	/* phy-type - PCIE/UFS/USB */
@@ -1834,14 +1984,24 @@ struct qmp_phy_cfg {
 	/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
 	const struct qmp_phy_init_tbl *serdes_tbl;
 	int serdes_tbl_num;
+	const struct qmp_phy_init_tbl *serdes_tbl_sec;
+	int serdes_tbl_num_sec;
 	const struct qmp_phy_init_tbl *tx_tbl;
 	int tx_tbl_num;
+	const struct qmp_phy_init_tbl *tx_tbl_sec;
+	int tx_tbl_num_sec;
 	const struct qmp_phy_init_tbl *rx_tbl;
 	int rx_tbl_num;
+	const struct qmp_phy_init_tbl *rx_tbl_sec;
+	int rx_tbl_num_sec;
 	const struct qmp_phy_init_tbl *pcs_tbl;
 	int pcs_tbl_num;
+	const struct qmp_phy_init_tbl *pcs_tbl_sec;
+	int pcs_tbl_num_sec;
 	const struct qmp_phy_init_tbl *pcs_misc_tbl;
 	int pcs_misc_tbl_num;
+	const struct qmp_phy_init_tbl *pcs_misc_tbl_sec;
+	int pcs_misc_tbl_num_sec;
 
 	/* Init sequence for DP PHY block link rates */
 	const struct qmp_phy_init_tbl *serdes_tbl_rbr;
@@ -2245,6 +2405,83 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
 	.pwrdn_delay_max	= 1005,		/* us */
 };
 
+static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 1,
+
+	.serdes_tbl		= sm8250_qmp_pcie_serdes_tbl,
+	.serdes_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
+	.serdes_tbl_sec		= sm8250_qmp_gen3x1_pcie_serdes_tbl,
+	.serdes_tbl_num_sec	= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl),
+	.tx_tbl			= sm8250_qmp_pcie_tx_tbl,
+	.tx_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
+	.rx_tbl			= sm8250_qmp_pcie_rx_tbl,
+	.rx_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
+	.rx_tbl_sec		= sm8250_qmp_gen3x1_pcie_rx_tbl,
+	.rx_tbl_num_sec		= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl),
+	.pcs_tbl		= sm8250_qmp_pcie_pcs_tbl,
+	.pcs_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
+	.pcs_tbl_sec		= sm8250_qmp_gen3x1_pcie_pcs_tbl,
+	.pcs_tbl_num_sec		= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl),
+	.pcs_misc_tbl		= sm8250_qmp_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num	= ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
+	.pcs_misc_tbl_sec		= sm8250_qmp_gen3x1_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num_sec	= ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl),
+	.clk_list		= sdm845_pciephy_clk_l,
+	.num_clks		= ARRAY_SIZE(sdm845_pciephy_clk_l),
+	.reset_list		= sdm845_pciephy_reset_l,
+	.num_resets		= ARRAY_SIZE(sdm845_pciephy_reset_l),
+	.vreg_list		= qmp_phy_vreg_l,
+	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs			= sm8250_pcie_regs_layout,
+
+	.start_ctrl		= PCS_START | SERDES_START,
+	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+
+	.has_pwrdn_delay	= true,
+	.pwrdn_delay_min	= 995,		/* us */
+	.pwrdn_delay_max	= 1005,		/* us */
+};
+
+static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
+	.type = PHY_TYPE_PCIE,
+	.nlanes = 2,
+
+	.serdes_tbl		= sm8250_qmp_pcie_serdes_tbl,
+	.serdes_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
+	.tx_tbl			= sm8250_qmp_pcie_tx_tbl,
+	.tx_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl),
+	.tx_tbl_sec		= sm8250_qmp_gen3x2_pcie_tx_tbl,
+	.tx_tbl_num_sec		= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl),
+	.rx_tbl			= sm8250_qmp_pcie_rx_tbl,
+	.rx_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl),
+	.rx_tbl_sec		= sm8250_qmp_gen3x2_pcie_rx_tbl,
+	.rx_tbl_num_sec		= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl),
+	.pcs_tbl		= sm8250_qmp_pcie_pcs_tbl,
+	.pcs_tbl_num		= ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl),
+	.pcs_tbl_sec		= sm8250_qmp_gen3x2_pcie_pcs_tbl,
+	.pcs_tbl_num_sec		= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl),
+	.pcs_misc_tbl		= sm8250_qmp_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num	= ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl),
+	.pcs_misc_tbl_sec		= sm8250_qmp_gen3x2_pcie_pcs_misc_tbl,
+	.pcs_misc_tbl_num_sec	= ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl),
+	.clk_list		= sdm845_pciephy_clk_l,
+	.num_clks		= ARRAY_SIZE(sdm845_pciephy_clk_l),
+	.reset_list		= sdm845_pciephy_reset_l,
+	.num_resets		= ARRAY_SIZE(sdm845_pciephy_reset_l),
+	.vreg_list		= qmp_phy_vreg_l,
+	.num_vregs		= ARRAY_SIZE(qmp_phy_vreg_l),
+	.regs			= sm8250_pcie_regs_layout,
+
+	.start_ctrl		= PCS_START | SERDES_START,
+	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
+
+	.is_dual_lane_phy	= true,
+	.has_pwrdn_delay	= true,
+	.pwrdn_delay_min	= 995,		/* us */
+	.pwrdn_delay_max	= 1005,		/* us */
+};
+
 static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
 	.type			= PHY_TYPE_USB3,
 	.nlanes			= 1,
@@ -2629,6 +2866,9 @@ static int qcom_qmp_phy_serdes_init(struct qmp_phy *qphy)
 	int ret;
 
 	qcom_qmp_phy_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+	if (cfg->serdes_tbl_sec)
+		qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl_sec,
+				       cfg->serdes_tbl_num_sec);
 
 	if (cfg->type == PHY_TYPE_DP) {
 		switch (dp_opts->link_rate) {
@@ -3117,10 +3357,19 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
 	/* Tx, Rx, and PCS configurations */
 	qcom_qmp_phy_configure_lane(tx, cfg->regs,
 				    cfg->tx_tbl, cfg->tx_tbl_num, 1);
+	if (cfg->tx_tbl_sec)
+		qcom_qmp_phy_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec,
+					    cfg->tx_tbl_num_sec, 1);
+
 	/* Configuration for other LANE for USB-DP combo PHY */
-	if (cfg->is_dual_lane_phy)
+	if (cfg->is_dual_lane_phy) {
 		qcom_qmp_phy_configure_lane(qphy->tx2, cfg->regs,
 					    cfg->tx_tbl, cfg->tx_tbl_num, 2);
+		if (cfg->tx_tbl_sec)
+			qcom_qmp_phy_configure_lane(qphy->tx2, cfg->regs,
+						    cfg->tx_tbl_sec,
+						    cfg->tx_tbl_num_sec, 2);
+	}
 
 	/* Configure special DP tx tunings */
 	if (cfg->type == PHY_TYPE_DP)
@@ -3128,16 +3377,28 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
 
 	qcom_qmp_phy_configure_lane(rx, cfg->regs,
 				    cfg->rx_tbl, cfg->rx_tbl_num, 1);
+	if (cfg->rx_tbl_sec)
+		qcom_qmp_phy_configure_lane(rx, cfg->regs,
+					    cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
 
-	if (cfg->is_dual_lane_phy)
+	if (cfg->is_dual_lane_phy) {
 		qcom_qmp_phy_configure_lane(qphy->rx2, cfg->regs,
 					    cfg->rx_tbl, cfg->rx_tbl_num, 2);
+		if (cfg->rx_tbl_sec)
+			qcom_qmp_phy_configure_lane(qphy->rx2, cfg->regs,
+						    cfg->rx_tbl_sec,
+						    cfg->rx_tbl_num_sec, 2);
+	}
 
 	/* Configure link rate, swing, etc. */
-	if (cfg->type == PHY_TYPE_DP)
+	if (cfg->type == PHY_TYPE_DP) {
 		qcom_qmp_phy_configure_dp_phy(qphy);
-	else
+	} else {
 		qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+		if (cfg->pcs_tbl_sec)
+			qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl_sec,
+					       cfg->pcs_tbl_num_sec);
+	}
 
 	ret = reset_control_deassert(qmp->ufs_reset);
 	if (ret)
@@ -3145,6 +3406,9 @@ static int qcom_qmp_phy_power_on(struct phy *phy)
 
 	qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
 			       cfg->pcs_misc_tbl_num);
+	if (cfg->pcs_misc_tbl_sec)
+		qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec,
+				       cfg->pcs_misc_tbl_num_sec);
 
 	/*
 	 * Pull out PHY from POWER DOWN state.
@@ -3900,6 +4164,15 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
 	}, {
 		.compatible = "qcom,sm8250-qmp-usb3-uni-phy",
 		.data = &sm8250_usb3_uniphy_cfg,
+	}, {
+		.compatible = "qcom,sm8250-qmp-gen3x1-pcie-phy",
+		.data = &sm8250_qmp_gen3x1_pciephy_cfg,
+	}, {
+		.compatible = "qcom,sm8250-qmp-gen3x2-pcie-phy",
+		.data = &sm8250_qmp_gen3x2_pciephy_cfg,
+	}, {
+		.compatible = "qcom,sm8250-qmp-modem-pcie-phy",
+		.data = &sm8250_qmp_gen3x2_pciephy_cfg,
 	},
 	{ },
 };
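The SM8250 PCIe configs added above layer variant-specific registers on top of the shared tables through the new *_tbl_sec/*_tbl_num_sec fields: each block's base table is written first, then the secondary table, when present, is applied so it can extend or override the shared values. A rough sketch of that pattern using hypothetical names (init_entry, apply_tbl and init_block are illustrative, not the driver's own types or helpers):

#include <linux/io.h>

struct init_entry {
	unsigned int offset;
	unsigned int val;
};

static void apply_tbl(void __iomem *base, const struct init_entry *tbl, int num)
{
	int i;

	for (i = 0; i < num; i++)
		writel(tbl[i].val, base + tbl[i].offset);
}

static void init_block(void __iomem *base,
		       const struct init_entry *tbl, int num,
		       const struct init_entry *tbl_sec, int num_sec)
{
	/* Settings shared by every SM8250 PCIe PHY instance. */
	apply_tbl(base, tbl, num);

	/* Per-variant overrides (gen3x1 vs gen3x2), applied last so they win. */
	if (tbl_sec)
		apply_tbl(base, tbl_sec, num_sec);
}
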
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index b7c530088a6ca41eb4c48cf65efd48624d3b76ac..db92a461dd2ee3767b2d449f6e7c883cdc1cc79f 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -403,6 +403,7 @@
 #define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0		0x028
 #define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1		0x030
 #define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1		0x034
+#define QSERDES_V4_COM_CLK_ENABLE1			0x048
 #define QSERDES_V4_COM_SYSCLK_BUF_ENABLE		0x050
 #define QSERDES_V4_COM_PLL_IVCO				0x058
 #define QSERDES_V4_COM_CMN_IPTRIM			0x060
@@ -432,6 +433,7 @@
 #define QSERDES_V4_COM_VCO_TUNE1_MODE1			0x118
 #define QSERDES_V4_COM_VCO_TUNE2_MODE1			0x11c
 #define QSERDES_V4_COM_VCO_TUNE_INITVAL2		0x124
+#define QSERDES_V4_COM_CLK_SELECT			0x154
 #define QSERDES_V4_COM_HSCLK_SEL			0x158
 #define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL		0x15c
 #define QSERDES_V4_COM_CORECLK_DIV_MODE1		0x16c
@@ -471,12 +473,14 @@
 #define QSERDES_V4_RX_UCDR_SB2_GAIN1			0x054
 #define QSERDES_V4_RX_UCDR_SB2_GAIN2			0x058
 #define QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE			0x060
+#define QSERDES_V4_RX_RCLK_AUXDATA_SEL			0x064
 #define QSERDES_V4_RX_AC_JTAG_ENABLE			0x068
 #define QSERDES_V4_RX_AC_JTAG_MODE			0x078
 #define QSERDES_V4_RX_RX_TERM_BW			0x080
 #define QSERDES_V4_RX_VGA_CAL_CNTRL1			0x0d4
 #define QSERDES_V4_RX_VGA_CAL_CNTRL2			0x0d8
 #define QSERDES_V4_RX_GM_CAL				0x0dc
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1		0x0e8
 #define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2		0x0ec
 #define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3		0x0f0
 #define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4		0x0f4
@@ -485,6 +489,7 @@
 #define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME		0x100
 #define QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1	0x110
 #define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2		0x114
+#define QSERDES_V4_RX_SIGDET_ENABLES			0x118
 #define QSERDES_V4_RX_SIGDET_CNTRL			0x11c
 #define QSERDES_V4_RX_SIGDET_LVL			0x120
 #define QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL		0x124
@@ -806,4 +811,17 @@
 #define QPHY_V4_PCS_MISC_TYPEC_STATUS			0x10
 #define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS		0x14
 
+/* Only for QMP V4 PHY - PCS_PCIE registers (same as PCS_MISC?) */
+#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2		0x0c
+#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4		0x14
+#define QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE		0x1c
+#define QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L	0x40
+#define QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L	0x48
+#define QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1		0x50
+#define QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS		0x90
+#define QPHY_V4_PCS_PCIE_EQ_CONFIG2			0xa4
+#define QPHY_V4_PCS_PCIE_PRESET_P6_P7_PRE		0xb4
+#define QPHY_V4_PCS_PCIE_PRESET_P10_PRE			0xbc
+#define QPHY_V4_PCS_PCIE_PRESET_P10_POST		0xe0
+
 #endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 557547dabfd50eb17086e528502b924213041e65..109792203baf97792289496546c72b03b573b0b5 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -844,7 +844,6 @@ static int qusb2_phy_probe(struct platform_device *pdev)
 	struct qusb2_phy *qphy;
 	struct phy_provider *phy_provider;
 	struct phy *generic_phy;
-	struct resource *res;
 	int ret, i;
 	int num;
 	u32 value;
@@ -855,8 +854,7 @@ static int qusb2_phy_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	or = &qphy->overrides;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	qphy->base = devm_ioremap_resource(dev, res);
+	qphy->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(qphy->base))
 		return PTR_ERR(qphy->base);
 
diff --git a/drivers/phy/ralink/Kconfig b/drivers/phy/ralink/Kconfig
index da982c9cffb337d1a7ca8b1671aea096a8bd3155..ecc309ba9feeb31d6d570dda675a579a563f19e3 100644
--- a/drivers/phy/ralink/Kconfig
+++ b/drivers/phy/ralink/Kconfig
@@ -2,6 +2,14 @@
 #
 # PHY drivers for Ralink platforms.
 #
+config PHY_MT7621_PCI
+	tristate "MediaTek MT7621 PCI PHY Driver"
+	depends on RALINK && OF
+	select GENERIC_PHY
+	select REGMAP_MMIO
+	help
+	  Say 'Y' here to add support for the MediaTek MT7621 PCI PHY driver.
+
 config PHY_RALINK_USB
 	tristate "Ralink USB PHY driver"
 	depends on RALINK || COMPILE_TEST
diff --git a/drivers/phy/ralink/Makefile b/drivers/phy/ralink/Makefile
index d8d3ffcf0a15a02c6e7a82f1ad58436168ca0598..cda2a4a7ca5e91160fbda8bbed90f712a4c560db 100644
--- a/drivers/phy/ralink/Makefile
+++ b/drivers/phy/ralink/Makefile
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PHY_MT7621_PCI)	+= phy-mt7621-pci.o
 obj-$(CONFIG_PHY_RALINK_USB)	+= phy-ralink-usb.o
diff --git a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c b/drivers/phy/ralink/phy-mt7621-pci.c
similarity index 75%
rename from drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c
rename to drivers/phy/ralink/phy-mt7621-pci.c
index 57743fd22be4c824205df2b3ca49c96ef6db6cbb..9a610b414b1fb36f901765134d69e26ad969ead0 100644
--- a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c
+++ b/drivers/phy/ralink/phy-mt7621-pci.c
@@ -5,6 +5,7 @@
  */
 
 #include <dt-bindings/phy/phy.h>
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
@@ -23,12 +24,10 @@
 #define RG_P0_TO_P1_WIDTH			0x100
 #define RG_PE1_H_LCDDS_REG			0x49c
 #define RG_PE1_H_LCDDS_PCW			GENMASK(30, 0)
-#define RG_PE1_H_LCDDS_PCW_VAL(x)		((0x7fffffff & (x)) << 0)
 
 #define RG_PE1_FRC_H_XTAL_REG			0x400
 #define RG_PE1_FRC_H_XTAL_TYPE			BIT(8)
 #define RG_PE1_H_XTAL_TYPE			GENMASK(10, 9)
-#define RG_PE1_H_XTAL_TYPE_VAL(x)		((0x3 & (x)) << 9)
 
 #define RG_PE1_FRC_PHY_REG			0x000
 #define RG_PE1_FRC_PHY_EN			BIT(4)
@@ -36,47 +35,34 @@
 
 #define RG_PE1_H_PLL_REG			0x490
 #define RG_PE1_H_PLL_BC				GENMASK(23, 22)
-#define RG_PE1_H_PLL_BC_VAL(x)			((0x3 & (x)) << 22)
 #define RG_PE1_H_PLL_BP				GENMASK(21, 18)
-#define RG_PE1_H_PLL_BP_VAL(x)			((0xf & (x)) << 18)
 #define RG_PE1_H_PLL_IR				GENMASK(15, 12)
-#define RG_PE1_H_PLL_IR_VAL(x)			((0xf & (x)) << 12)
 #define RG_PE1_H_PLL_IC				GENMASK(11, 8)
-#define RG_PE1_H_PLL_IC_VAL(x)			((0xf & (x)) << 8)
 #define RG_PE1_H_PLL_PREDIV			GENMASK(7, 6)
-#define RG_PE1_H_PLL_PREDIV_VAL(x)		((0x3 & (x)) << 6)
 #define RG_PE1_PLL_DIVEN			GENMASK(3, 1)
-#define RG_PE1_PLL_DIVEN_VAL(x)			((0x7 & (x)) << 1)
 
 #define RG_PE1_H_PLL_FBKSEL_REG			0x4bc
 #define RG_PE1_H_PLL_FBKSEL			GENMASK(5, 4)
-#define RG_PE1_H_PLL_FBKSEL_VAL(x)		((0x3 & (x)) << 4)
 
 #define	RG_PE1_H_LCDDS_SSC_PRD_REG		0x4a4
 #define RG_PE1_H_LCDDS_SSC_PRD			GENMASK(15, 0)
-#define RG_PE1_H_LCDDS_SSC_PRD_VAL(x)		((0xffff & (x)) << 0)
 
 #define RG_PE1_H_LCDDS_SSC_DELTA_REG		0x4a8
 #define RG_PE1_H_LCDDS_SSC_DELTA		GENMASK(11, 0)
-#define RG_PE1_H_LCDDS_SSC_DELTA_VAL(x)		((0xfff & (x)) << 0)
 #define RG_PE1_H_LCDDS_SSC_DELTA1		GENMASK(27, 16)
-#define RG_PE1_H_LCDDS_SSC_DELTA1_VAL(x)	((0xff & (x)) << 16)
 
 #define RG_PE1_LCDDS_CLK_PH_INV_REG		0x4a0
 #define RG_PE1_LCDDS_CLK_PH_INV			BIT(5)
 
 #define RG_PE1_H_PLL_BR_REG			0x4ac
 #define RG_PE1_H_PLL_BR				GENMASK(18, 16)
-#define RG_PE1_H_PLL_BR_VAL(x)			((0x7 & (x)) << 16)
 
 #define	RG_PE1_MSTCKDIV_REG			0x414
 #define RG_PE1_MSTCKDIV				GENMASK(7, 6)
-#define RG_PE1_MSTCKDIV_VAL(x)			((0x3 & (x)) << 6)
 
 #define RG_PE1_FRC_MSTCKDIV			BIT(5)
 
-#define XTAL_MODE_SEL_SHIFT			6
-#define XTAL_MODE_SEL_MASK			0x7
+#define XTAL_MASK				GENMASK(7, 6)
 
 #define MAX_PHYS	2
 
@@ -99,28 +85,22 @@ struct mt7621_pci_phy {
 	bool bypass_pipe_rst;
 };
 
-static inline u32 phy_read(struct mt7621_pci_phy *phy, u32 reg)
-{
-	u32 val;
-
-	regmap_read(phy->regmap, reg, &val);
-
-	return val;
-}
-
-static inline void phy_write(struct mt7621_pci_phy *phy, u32 val, u32 reg)
-{
-	regmap_write(phy->regmap, reg, val);
-}
-
 static inline void mt7621_phy_rmw(struct mt7621_pci_phy *phy,
 				  u32 reg, u32 clr, u32 set)
 {
-	u32 val = phy_read(phy, reg);
+	u32 val;
 
+	/*
+	 * We cannot use 'regmap_write_bits' here because it masks 'set'
+	 * with 'clr' before writing the value to the register, which
+	 * results in an unreliable PCI setup. Writing the unmasked 'set'
+	 * value into 'val' completely avoids the problem.
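+	 * For example, regmap_write_bits(map, reg, mask, val) effectively
+	 * writes (orig & ~mask) | (val & mask), so any bits of 'set' that
+	 * fall outside 'clr' would be dropped.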
+	 */
+	regmap_read(phy->regmap, reg, &val);
 	val &= ~clr;
 	val |= set;
-	phy_write(phy, val, reg);
+	regmap_write(phy->regmap, reg, val);
 }
 
 static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy)
@@ -141,18 +121,18 @@ static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy)
 	struct device *dev = phy->dev;
 	u32 xtal_mode;
 
-	xtal_mode = (rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0)
-		     >> XTAL_MODE_SEL_SHIFT) & XTAL_MODE_SEL_MASK;
+	xtal_mode = FIELD_GET(XTAL_MASK, rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0));
 
 	/* Set PCIe Port PHY to disable SSC */
 	/* Debug Xtal Type */
 	mt7621_phy_rmw(phy, RG_PE1_FRC_H_XTAL_REG,
 		       RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE,
-		       RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE_VAL(0x00));
+		       RG_PE1_FRC_H_XTAL_TYPE |
+		       FIELD_PREP(RG_PE1_H_XTAL_TYPE, 0x00));
 
 	/* disable port */
-	mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG,
-		       RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN);
+	mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG, RG_PE1_PHY_EN,
+		       RG_PE1_FRC_PHY_EN);
 
 	if (phy->has_dual_port) {
 		mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH,
@@ -161,39 +141,42 @@ static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy)
 
 	if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */
 		/* Set Pre-divider ratio (for host mode) */
-		mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG,
-			       RG_PE1_H_PLL_PREDIV,
-			       RG_PE1_H_PLL_PREDIV_VAL(0x01));
-		dev_info(dev, "Xtal is 40MHz\n");
+		mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV,
+			       FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x01));
+
+		dev_dbg(dev, "Xtal is 40MHz\n");
 	} else if (xtal_mode >= 6) { /* 25MHz Xal */
-		mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG,
-			       RG_PE1_H_PLL_PREDIV,
-			       RG_PE1_H_PLL_PREDIV_VAL(0x00));
+		mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV,
+			       FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x00));
+
 		/* Select feedback clock */
 		mt7621_phy_rmw(phy, RG_PE1_H_PLL_FBKSEL_REG,
 			       RG_PE1_H_PLL_FBKSEL,
-			       RG_PE1_H_PLL_FBKSEL_VAL(0x01));
+			       FIELD_PREP(RG_PE1_H_PLL_FBKSEL, 0x01));
+
 		/* DDS NCPO PCW (for host mode) */
 		mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG,
 			       RG_PE1_H_LCDDS_SSC_PRD,
-			       RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18000000));
+			       FIELD_PREP(RG_PE1_H_LCDDS_SSC_PRD, 0x00));
+
 		/* DDS SSC dither period control */
 		mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG,
 			       RG_PE1_H_LCDDS_SSC_PRD,
-			       RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18d));
+			       FIELD_PREP(RG_PE1_H_LCDDS_SSC_PRD, 0x18d));
+
 		/* DDS SSC dither amplitude control */
 		mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_DELTA_REG,
 			       RG_PE1_H_LCDDS_SSC_DELTA |
 			       RG_PE1_H_LCDDS_SSC_DELTA1,
-			       RG_PE1_H_LCDDS_SSC_DELTA_VAL(0x4a) |
-			       RG_PE1_H_LCDDS_SSC_DELTA1_VAL(0x4a));
-		dev_info(dev, "Xtal is 25MHz\n");
+			       FIELD_PREP(RG_PE1_H_LCDDS_SSC_DELTA, 0x4a) |
+			       FIELD_PREP(RG_PE1_H_LCDDS_SSC_DELTA1, 0x4a));
+
+		dev_dbg(dev, "Xtal is 25MHz\n");
 	} else { /* 20MHz Xtal */
-		mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG,
-			       RG_PE1_H_PLL_PREDIV,
-			       RG_PE1_H_PLL_PREDIV_VAL(0x00));
+		mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV,
+			       FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x00));
 
-		dev_info(dev, "Xtal is 20MHz\n");
+		dev_dbg(dev, "Xtal is 20MHz\n");
 	}
 
 	/* DDS clock inversion */
@@ -204,18 +187,21 @@ static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy)
 	mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG,
 		       RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR |
 		       RG_PE1_H_PLL_IC | RG_PE1_PLL_DIVEN,
-		       RG_PE1_H_PLL_BC_VAL(0x02) | RG_PE1_H_PLL_BP_VAL(0x06) |
-		       RG_PE1_H_PLL_IR_VAL(0x02) | RG_PE1_H_PLL_IC_VAL(0x01) |
-		       RG_PE1_PLL_DIVEN_VAL(0x02));
+		       FIELD_PREP(RG_PE1_H_PLL_BC, 0x02) |
+		       FIELD_PREP(RG_PE1_H_PLL_BP, 0x06) |
+		       FIELD_PREP(RG_PE1_H_PLL_IR, 0x02) |
+		       FIELD_PREP(RG_PE1_H_PLL_IC, 0x01) |
+		       FIELD_PREP(RG_PE1_PLL_DIVEN, 0x02));
 
-	mt7621_phy_rmw(phy, RG_PE1_H_PLL_BR_REG,
-		       RG_PE1_H_PLL_BR, RG_PE1_H_PLL_BR_VAL(0x00));
+	mt7621_phy_rmw(phy, RG_PE1_H_PLL_BR_REG, RG_PE1_H_PLL_BR,
+		       FIELD_PREP(RG_PE1_H_PLL_BR, 0x00));
 
 	if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */
 		/* set force mode enable of da_pe1_mstckdiv */
 		mt7621_phy_rmw(phy, RG_PE1_MSTCKDIV_REG,
 			       RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV,
-			       RG_PE1_MSTCKDIV_VAL(0x01) | RG_PE1_FRC_MSTCKDIV);
+			       FIELD_PREP(RG_PE1_MSTCKDIV, 0x01) |
+			       RG_PE1_FRC_MSTCKDIV);
 	}
 }
 
@@ -309,7 +295,6 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
 	const struct soc_device_attribute *attr;
 	struct phy_provider *provider;
 	struct mt7621_pci_phy *phy;
-	struct resource *res;
 
 	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
 	if (!phy)
@@ -322,13 +307,7 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
 	phy->dev = dev;
 	platform_set_drvdata(pdev, phy);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "failed to get address resource\n");
-		return -ENXIO;
-	}
-
-	phy->port_base = devm_ioremap_resource(dev, res);
+	phy->port_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(phy->port_base)) {
 		dev_err(dev, "failed to remap phy regs\n");
 		return PTR_ERR(phy->port_base);
@@ -356,7 +335,7 @@ static const struct of_device_id mt7621_pci_phy_ids[] = {
 	{ .compatible = "mediatek,mt7621-pci-phy" },
 	{},
 };
-MODULE_DEVICE_TABLE(of, mt7621_pci_ids);
+MODULE_DEVICE_TABLE(of, mt7621_pci_phy_ids);
 
 static struct platform_driver mt7621_pci_phy_driver = {
 	.probe = mt7621_pci_phy_probe,
diff --git a/drivers/phy/ralink/phy-ralink-usb.c b/drivers/phy/ralink/phy-ralink-usb.c
index 95dfa9fd284daf60935731e1f60924d044f609d6..2bd8ad2e76eda05f349d6c4dc5cee27a5a7eb605 100644
--- a/drivers/phy/ralink/phy-ralink-usb.c
+++ b/drivers/phy/ralink/phy-ralink-usb.c
@@ -170,7 +170,6 @@ MODULE_DEVICE_TABLE(of, ralink_usb_phy_of_match);
 static int ralink_usb_phy_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct resource *res;
 	struct phy_provider *phy_provider;
 	const struct of_device_id *match;
 	struct ralink_usb_phy *phy;
@@ -194,8 +193,7 @@ static int ralink_usb_phy_probe(struct platform_device *pdev)
 
 	/* The MT7628 and MT7688 require extra setup of PHY registers. */
 	if (of_device_is_compatible(dev->of_node, "mediatek,mt7628-usbphy")) {
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-		phy->base = devm_ioremap_resource(&pdev->dev, res);
+		phy->base = devm_platform_ioremap_resource(pdev, 0);
 		if (IS_ERR(phy->base)) {
 			dev_err(dev, "failed to remap register memory\n");
 			return PTR_ERR(phy->base);
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 2e279ac0fa4d68b624a2cf0a0db547165a9e1d86..c375a4676a3dc57f6f1f248faf8a39575e886d33 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -339,7 +339,6 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
 	struct rcar_gen2_phy_driver *drv;
 	struct phy_provider *provider;
 	struct device_node *np;
-	struct resource *res;
 	void __iomem *base;
 	struct clk *clk;
 	const struct rcar_gen2_phy_data *data;
@@ -357,8 +356,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
 		return PTR_ERR(clk);
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
diff --git a/drivers/phy/renesas/phy-rcar-gen3-pcie.c b/drivers/phy/renesas/phy-rcar-gen3-pcie.c
index c4e4aa21693659dc658a3a0ca2e5ea701a212ca6..4dc721eb9577a04bdb66ef1c513638c31ce0229e 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-pcie.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-pcie.c
@@ -76,7 +76,6 @@ static int rcar_gen3_phy_pcie_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct phy_provider *provider;
 	struct rcar_gen3_phy *phy;
-	struct resource *res;
 	void __iomem *base;
 	int error;
 
@@ -86,8 +85,7 @@ static int rcar_gen3_phy_pcie_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index e34e4475027cae20c07387b925972c0fa9d044c4..fbc55232120e6c10e616dd6879c0564c5fe26d41 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -611,7 +611,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct rcar_gen3_chan *channel;
 	struct phy_provider *provider;
-	struct resource *res;
 	const struct phy_ops *phy_usb2_ops;
 	int ret = 0, i;
 
@@ -624,8 +623,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
 	if (!channel)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	channel->base = devm_ioremap_resource(dev, res);
+	channel->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(channel->base))
 		return PTR_ERR(channel->base);
 
@@ -656,8 +654,10 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
 	 */
 	pm_runtime_enable(dev);
 	phy_usb2_ops = of_device_get_match_data(dev);
-	if (!phy_usb2_ops)
-		return -EINVAL;
+	if (!phy_usb2_ops) {
+		ret = -EINVAL;
+		goto error;
+	}
 
 	mutex_init(&channel->lock);
 	for (i = 0; i < NUM_OF_PHYS; i++) {
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb3.c b/drivers/phy/renesas/phy-rcar-gen3-usb3.c
index 566b4cf4ff383b64b97a0ae482f2c1bdf44e1741..f27d6f4716296e9ddd651b2f5e28dcceb5f75f42 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb3.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb3.c
@@ -133,7 +133,6 @@ static int rcar_gen3_phy_usb3_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct rcar_gen3_usb3 *r;
 	struct phy_provider *provider;
-	struct resource *res;
 	int ret = 0;
 	struct clk *clk;
 
@@ -146,8 +145,7 @@ static int rcar_gen3_phy_usb3_probe(struct platform_device *pdev)
 	if (!r)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	r->base = devm_ioremap_resource(dev, res);
+	r->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(r->base))
 		return PTR_ERR(r->base);
 
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index c2f22f90736c419471e503710240608390d17f44..159285f42e5ce15b46afe877da6742a8e625c897 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -32,6 +32,7 @@ config PHY_ROCKCHIP_INNO_HDMI
 	tristate "Rockchip INNO HDMI PHY Driver"
 	depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
 	depends on COMMON_CLK
+	depends on HAS_IOMEM
 	select GENERIC_PHY
 	help
 	  Enable this to support the Rockchip Innosilicon HDMI PHY.
diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
index 2dc19ddd120f5adb54807c48edae48133fcee018..1e424f263e7ab32ea3d22cfaadd8f0c2fb339e37 100644
--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
+++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
@@ -65,8 +65,14 @@
 #define PHYCTRL_OTAPDLYENA		0x1
 #define PHYCTRL_OTAPDLYENA_MASK		0x1
 #define PHYCTRL_OTAPDLYENA_SHIFT	0xb
+#define PHYCTRL_OTAPDLYSEL_DEFAULT	0x4
+#define PHYCTRL_OTAPDLYSEL_MAXVALUE	0xf
 #define PHYCTRL_OTAPDLYSEL_MASK		0xf
 #define PHYCTRL_OTAPDLYSEL_SHIFT	0x7
+#define PHYCTRL_REN_STRB_DISABLE	0x0
+#define PHYCTRL_REN_STRB_ENABLE		0x1
+#define PHYCTRL_REN_STRB_MASK		0x1
+#define PHYCTRL_REN_STRB_SHIFT		0x9
 
 #define PHYCTRL_IS_CALDONE(x) \
 	((((x) >> PHYCTRL_CALDONE_SHIFT) & \
@@ -80,6 +86,8 @@ struct rockchip_emmc_phy {
 	struct regmap	*reg_base;
 	struct clk	*emmcclk;
 	unsigned int drive_impedance;
+	unsigned int enable_strobe_pulldown;
+	unsigned int output_tapdelay_select;
 };
 
 static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
@@ -291,10 +299,17 @@ static int rockchip_emmc_phy_power_on(struct phy *phy)
 	/* Output tap delay */
 	regmap_write(rk_phy->reg_base,
 		     rk_phy->reg_offset + GRF_EMMCPHY_CON0,
-		     HIWORD_UPDATE(4,
+		     HIWORD_UPDATE(rk_phy->output_tapdelay_select,
 				   PHYCTRL_OTAPDLYSEL_MASK,
 				   PHYCTRL_OTAPDLYSEL_SHIFT));
 
+	/* Internal pull-down for strobe line */
+	regmap_write(rk_phy->reg_base,
+		     rk_phy->reg_offset + GRF_EMMCPHY_CON2,
+		     HIWORD_UPDATE(rk_phy->enable_strobe_pulldown,
+				   PHYCTRL_REN_STRB_MASK,
+				   PHYCTRL_REN_STRB_SHIFT));
+
 	/* Power up emmc phy analog blocks */
 	return rockchip_emmc_phy_power(phy, PHYCTRL_PDB_PWR_ON);
 }
@@ -359,10 +374,22 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
 	rk_phy->reg_offset = reg_offset;
 	rk_phy->reg_base = grf;
 	rk_phy->drive_impedance = PHYCTRL_DR_50OHM;
+	rk_phy->enable_strobe_pulldown = PHYCTRL_REN_STRB_DISABLE;
+	rk_phy->output_tapdelay_select = PHYCTRL_OTAPDLYSEL_DEFAULT;
 
 	if (!of_property_read_u32(dev->of_node, "drive-impedance-ohm", &val))
 		rk_phy->drive_impedance = convert_drive_impedance_ohm(pdev, val);
 
+	if (of_property_read_bool(dev->of_node, "enable-strobe-pulldown"))
+		rk_phy->enable_strobe_pulldown = PHYCTRL_REN_STRB_ENABLE;
+
+	if (!of_property_read_u32(dev->of_node, "output-tapdelay-select", &val)) {
+		if (val <= PHYCTRL_OTAPDLYSEL_MAXVALUE)
+			rk_phy->output_tapdelay_select = val;
+		else
+			dev_err(dev, "output-tapdelay-select exceeds limit, apply default\n");
+	}
+
 	generic_phy = devm_phy_create(dev, dev->of_node, &ops);
 	if (IS_ERR(generic_phy)) {
 		dev_err(dev, "failed to create PHY\n");
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
index 9ca20c947283de2f5d03866aa404c0411b2eb72c..a37f3f342642ce55e57aab73f38ef5b1b0e5ad34 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -1144,7 +1144,6 @@ static int inno_hdmi_phy_probe(struct platform_device *pdev)
 {
 	struct inno_hdmi_phy *inno;
 	struct phy_provider *phy_provider;
-	struct resource *res;
 	void __iomem *regs;
 	int ret;
 
@@ -1158,8 +1157,7 @@ static int inno_hdmi_phy_probe(struct platform_device *pdev)
 	if (!inno->plat_data || !inno->plat_data->ops)
 		return -EINVAL;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	regs = devm_ioremap_resource(inno->dev, res);
+	regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(regs))
 		return PTR_ERR(regs);
 
diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c
index 7e28b1aea0d1988873c1803f32093e9f7e38d78a..578cfe07d07abf790628a2ea3b345d0a05874792 100644
--- a/drivers/phy/samsung/phy-exynos-pcie.c
+++ b/drivers/phy/samsung/phy-exynos-pcie.c
@@ -4,70 +4,41 @@
  *
  * Phy provider for PCIe controller on Exynos SoC series
  *
- * Copyright (C) 2017 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2017-2020 Samsung Electronics Co., Ltd.
  * Jaehoon Chung <jh80.chung@samsung.com>
  */
 
-#include <linux/delay.h>
 #include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/init.h>
 #include <linux/mfd/syscon.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/regmap.h>
 
-/* PCIe Purple registers */
-#define PCIE_PHY_GLOBAL_RESET		0x000
-#define PCIE_PHY_COMMON_RESET		0x004
-#define PCIE_PHY_CMN_REG		0x008
-#define PCIE_PHY_MAC_RESET		0x00c
-#define PCIE_PHY_PLL_LOCKED		0x010
-#define PCIE_PHY_TRSVREG_RESET		0x020
-#define PCIE_PHY_TRSV_RESET		0x024
-
-/* PCIe PHY registers */
-#define PCIE_PHY_IMPEDANCE		0x004
-#define PCIE_PHY_PLL_DIV_0		0x008
-#define PCIE_PHY_PLL_BIAS		0x00c
-#define PCIE_PHY_DCC_FEEDBACK		0x014
-#define PCIE_PHY_PLL_DIV_1		0x05c
-#define PCIE_PHY_COMMON_POWER		0x064
-#define PCIE_PHY_COMMON_PD_CMN		BIT(3)
-#define PCIE_PHY_TRSV0_EMP_LVL		0x084
-#define PCIE_PHY_TRSV0_DRV_LVL		0x088
-#define PCIE_PHY_TRSV0_RXCDR		0x0ac
-#define PCIE_PHY_TRSV0_POWER		0x0c4
-#define PCIE_PHY_TRSV0_PD_TSV		BIT(7)
-#define PCIE_PHY_TRSV0_LVCC		0x0dc
-#define PCIE_PHY_TRSV1_EMP_LVL		0x144
-#define PCIE_PHY_TRSV1_RXCDR		0x16c
-#define PCIE_PHY_TRSV1_POWER		0x184
-#define PCIE_PHY_TRSV1_PD_TSV		BIT(7)
-#define PCIE_PHY_TRSV1_LVCC		0x19c
-#define PCIE_PHY_TRSV2_EMP_LVL		0x204
-#define PCIE_PHY_TRSV2_RXCDR		0x22c
-#define PCIE_PHY_TRSV2_POWER		0x244
-#define PCIE_PHY_TRSV2_PD_TSV		BIT(7)
-#define PCIE_PHY_TRSV2_LVCC		0x25c
-#define PCIE_PHY_TRSV3_EMP_LVL		0x2c4
-#define PCIE_PHY_TRSV3_RXCDR		0x2ec
-#define PCIE_PHY_TRSV3_POWER		0x304
-#define PCIE_PHY_TRSV3_PD_TSV		BIT(7)
-#define PCIE_PHY_TRSV3_LVCC		0x31c
-
-struct exynos_pcie_phy_data {
-	const struct phy_ops	*ops;
-};
+#define PCIE_PHY_OFFSET(x)		((x) * 0x4)
+
+/* Sysreg FSYS register offsets and bits for Exynos5433 */
+#define PCIE_EXYNOS5433_PHY_MAC_RESET		0x0208
+#define PCIE_MAC_RESET_MASK			0xFF
+#define PCIE_MAC_RESET				BIT(4)
+#define PCIE_EXYNOS5433_PHY_L1SUB_CM_CON	0x1010
+#define PCIE_REFCLK_GATING_EN			BIT(0)
+#define PCIE_EXYNOS5433_PHY_COMMON_RESET	0x1020
+#define PCIE_PHY_RESET				BIT(0)
+#define PCIE_EXYNOS5433_PHY_GLOBAL_RESET	0x1040
+#define PCIE_GLOBAL_RESET			BIT(0)
+#define PCIE_REFCLK				BIT(1)
+#define PCIE_REFCLK_MASK			0x16
+#define PCIE_APP_REQ_EXIT_L1_MODE		BIT(5)
+
+/* PMU PCIE PHY isolation control */
+#define EXYNOS5433_PMU_PCIE_PHY_OFFSET		0x730
 
 /* For Exynos pcie phy */
 struct exynos_pcie_phy {
-	const struct exynos_pcie_phy_data *drv_data;
-	void __iomem *phy_base;
-	void __iomem *blk_base; /* For exynos5440 */
+	void __iomem *base;
+	struct regmap *pmureg;
+	struct regmap *fsysreg;
 };
 
 static void exynos_pcie_phy_writel(void __iomem *base, u32 val, u32 offset)
@@ -75,153 +46,103 @@ static void exynos_pcie_phy_writel(void __iomem *base, u32 val, u32 offset)
 	writel(val, base + offset);
 }
 
-static u32 exynos_pcie_phy_readl(void __iomem *base, u32 offset)
-{
-	return readl(base + offset);
-}
-
-/* For Exynos5440 specific functions */
-static int exynos5440_pcie_phy_init(struct phy *phy)
+/* Exynos5433 specific functions */
+static int exynos5433_pcie_phy_init(struct phy *phy)
 {
 	struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
 
-	/* DCC feedback control off */
-	exynos_pcie_phy_writel(ep->phy_base, 0x29, PCIE_PHY_DCC_FEEDBACK);
-
-	/* set TX/RX impedance */
-	exynos_pcie_phy_writel(ep->phy_base, 0xd5, PCIE_PHY_IMPEDANCE);
-
-	/* set 50Mhz PHY clock */
-	exynos_pcie_phy_writel(ep->phy_base, 0x14, PCIE_PHY_PLL_DIV_0);
-	exynos_pcie_phy_writel(ep->phy_base, 0x12, PCIE_PHY_PLL_DIV_1);
-
-	/* set TX Differential output for lane 0 */
-	exynos_pcie_phy_writel(ep->phy_base, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);
-
-	/* set TX Pre-emphasis Level Control for lane 0 to minimum */
-	exynos_pcie_phy_writel(ep->phy_base, 0x0, PCIE_PHY_TRSV0_EMP_LVL);
-
-	/* set RX clock and data recovery bandwidth */
-	exynos_pcie_phy_writel(ep->phy_base, 0xe7, PCIE_PHY_PLL_BIAS);
-	exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV0_RXCDR);
-	exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV1_RXCDR);
-	exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV2_RXCDR);
-	exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV3_RXCDR);
-
-	/* change TX Pre-emphasis Level Control for lanes */
-	exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
-	exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
-	exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
-	exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV3_EMP_LVL);
-
-	/* set LVCC */
-	exynos_pcie_phy_writel(ep->phy_base, 0x20, PCIE_PHY_TRSV0_LVCC);
-	exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV1_LVCC);
-	exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV2_LVCC);
-	exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV3_LVCC);
-
-	/* pulse for common reset */
-	exynos_pcie_phy_writel(ep->blk_base, 1, PCIE_PHY_COMMON_RESET);
-	udelay(500);
-	exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_COMMON_RESET);
-
+	regmap_update_bits(ep->fsysreg,	PCIE_EXYNOS5433_PHY_COMMON_RESET,
+			   PCIE_PHY_RESET, 1);
+	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET,
+			   PCIE_MAC_RESET, 0);
+
+	/* PHY refclk 24MHz */
+	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
+			   PCIE_REFCLK_MASK, PCIE_REFCLK);
+	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
+			   PCIE_GLOBAL_RESET, 0);
+
+
+
+	/* band gap reference on */
+	exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x20));
+	exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x4b));
+
+	/* jitter tuning */
+	exynos_pcie_phy_writel(ep->base, 0x34, PCIE_PHY_OFFSET(0x4));
+	exynos_pcie_phy_writel(ep->base, 0x02, PCIE_PHY_OFFSET(0x7));
+	exynos_pcie_phy_writel(ep->base, 0x41, PCIE_PHY_OFFSET(0x21));
+	exynos_pcie_phy_writel(ep->base, 0x7F, PCIE_PHY_OFFSET(0x14));
+	exynos_pcie_phy_writel(ep->base, 0xC0, PCIE_PHY_OFFSET(0x15));
+	exynos_pcie_phy_writel(ep->base, 0x61, PCIE_PHY_OFFSET(0x36));
+
+	/* D0 uninit.. */
+	exynos_pcie_phy_writel(ep->base, 0x44, PCIE_PHY_OFFSET(0x3D));
+
+	/* 24MHz */
+	exynos_pcie_phy_writel(ep->base, 0x94, PCIE_PHY_OFFSET(0x8));
+	exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x9));
+	exynos_pcie_phy_writel(ep->base, 0x93, PCIE_PHY_OFFSET(0xA));
+	exynos_pcie_phy_writel(ep->base, 0x6B, PCIE_PHY_OFFSET(0xC));
+	exynos_pcie_phy_writel(ep->base, 0xA5, PCIE_PHY_OFFSET(0xF));
+	exynos_pcie_phy_writel(ep->base, 0x34, PCIE_PHY_OFFSET(0x16));
+	exynos_pcie_phy_writel(ep->base, 0xA3, PCIE_PHY_OFFSET(0x17));
+	exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x1A));
+	exynos_pcie_phy_writel(ep->base, 0x71, PCIE_PHY_OFFSET(0x23));
+	exynos_pcie_phy_writel(ep->base, 0x4C, PCIE_PHY_OFFSET(0x24));
+
+	exynos_pcie_phy_writel(ep->base, 0x0E, PCIE_PHY_OFFSET(0x26));
+	exynos_pcie_phy_writel(ep->base, 0x14, PCIE_PHY_OFFSET(0x7));
+	exynos_pcie_phy_writel(ep->base, 0x48, PCIE_PHY_OFFSET(0x43));
+	exynos_pcie_phy_writel(ep->base, 0x44, PCIE_PHY_OFFSET(0x44));
+	exynos_pcie_phy_writel(ep->base, 0x03, PCIE_PHY_OFFSET(0x45));
+	exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x48));
+	exynos_pcie_phy_writel(ep->base, 0x13, PCIE_PHY_OFFSET(0x54));
+	exynos_pcie_phy_writel(ep->base, 0x04, PCIE_PHY_OFFSET(0x31));
+	exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x32));
+
+	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET,
+			   PCIE_PHY_RESET, 0);
+	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET,
+			   PCIE_MAC_RESET_MASK, PCIE_MAC_RESET);
 	return 0;
 }
 
-static int exynos5440_pcie_phy_power_on(struct phy *phy)
+static int exynos5433_pcie_phy_power_on(struct phy *phy)
 {
 	struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
-	u32 val;
-
-	exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_COMMON_RESET);
-	exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_CMN_REG);
-	exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_TRSVREG_RESET);
-	exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_TRSV_RESET);
-
-	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_COMMON_POWER);
-	val &= ~PCIE_PHY_COMMON_PD_CMN;
-	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_COMMON_POWER);
-
-	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV0_POWER);
-	val &= ~PCIE_PHY_TRSV0_PD_TSV;
-	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV0_POWER);
-
-	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV1_POWER);
-	val &= ~PCIE_PHY_TRSV1_PD_TSV;
-	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV1_POWER);
-
-	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV2_POWER);
-	val &= ~PCIE_PHY_TRSV2_PD_TSV;
-	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV2_POWER);
-
-	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV3_POWER);
-	val &= ~PCIE_PHY_TRSV3_PD_TSV;
-	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV3_POWER);
 
+	regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
+			   BIT(0), 1);
+	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET,
+			   PCIE_APP_REQ_EXIT_L1_MODE, 0);
+	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
+			   PCIE_REFCLK_GATING_EN, 0);
 	return 0;
 }
 
-static int exynos5440_pcie_phy_power_off(struct phy *phy)
+static int exynos5433_pcie_phy_power_off(struct phy *phy)
 {
 	struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
-	u32 val;
-
-	if (readl_poll_timeout(ep->phy_base + PCIE_PHY_PLL_LOCKED, val,
-				(val != 0), 1, 500)) {
-		dev_err(&phy->dev, "PLL Locked: 0x%x\n", val);
-		return -ETIMEDOUT;
-	}
-
-	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_COMMON_POWER);
-	val |= PCIE_PHY_COMMON_PD_CMN;
-	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_COMMON_POWER);
-
-	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV0_POWER);
-	val |= PCIE_PHY_TRSV0_PD_TSV;
-	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV0_POWER);
-
-	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV1_POWER);
-	val |= PCIE_PHY_TRSV1_PD_TSV;
-	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV1_POWER);
-
-	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV2_POWER);
-	val |= PCIE_PHY_TRSV2_PD_TSV;
-	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV2_POWER);
-
-	val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV3_POWER);
-	val |= PCIE_PHY_TRSV3_PD_TSV;
-	exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV3_POWER);
 
+	regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON,
+			   PCIE_REFCLK_GATING_EN, PCIE_REFCLK_GATING_EN);
+	regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET,
+			   BIT(0), 0);
 	return 0;
 }
 
-static int exynos5440_pcie_phy_reset(struct phy *phy)
-{
-	struct exynos_pcie_phy *ep = phy_get_drvdata(phy);
-
-	exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_MAC_RESET);
-	exynos_pcie_phy_writel(ep->blk_base, 1, PCIE_PHY_GLOBAL_RESET);
-	exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_GLOBAL_RESET);
-
-	return 0;
-}
-
-static const struct phy_ops exynos5440_phy_ops = {
-	.init		= exynos5440_pcie_phy_init,
-	.power_on	= exynos5440_pcie_phy_power_on,
-	.power_off	= exynos5440_pcie_phy_power_off,
-	.reset		= exynos5440_pcie_phy_reset,
+static const struct phy_ops exynos5433_phy_ops = {
+	.init		= exynos5433_pcie_phy_init,
+	.power_on	= exynos5433_pcie_phy_power_on,
+	.power_off	= exynos5433_pcie_phy_power_off,
 	.owner		= THIS_MODULE,
 };
 
-static const struct exynos_pcie_phy_data exynos5440_pcie_phy_data = {
-	.ops		= &exynos5440_phy_ops,
-};
-
 static const struct of_device_id exynos_pcie_phy_match[] = {
 	{
-		.compatible = "samsung,exynos5440-pcie-phy",
-		.data = &exynos5440_pcie_phy_data,
+		.compatible = "samsung,exynos5433-pcie-phy",
 	},
 	{},
 };
@@ -232,30 +153,30 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev)
 	struct exynos_pcie_phy *exynos_phy;
 	struct phy *generic_phy;
 	struct phy_provider *phy_provider;
-	struct resource *res;
-	const struct exynos_pcie_phy_data *drv_data;
-
-	drv_data = of_device_get_match_data(dev);
-	if (!drv_data)
-		return -ENODEV;
 
 	exynos_phy = devm_kzalloc(dev, sizeof(*exynos_phy), GFP_KERNEL);
 	if (!exynos_phy)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	exynos_phy->phy_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(exynos_phy->phy_base))
-		return PTR_ERR(exynos_phy->phy_base);
+	exynos_phy->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(exynos_phy->base))
+		return PTR_ERR(exynos_phy->base);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	exynos_phy->blk_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(exynos_phy->blk_base))
-		return PTR_ERR(exynos_phy->blk_base);
+	exynos_phy->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
+							"samsung,pmu-syscon");
+	if (IS_ERR(exynos_phy->pmureg)) {
+		dev_err(&pdev->dev, "PMU regmap lookup failed.\n");
+		return PTR_ERR(exynos_phy->pmureg);
+	}
 
-	exynos_phy->drv_data = drv_data;
+	exynos_phy->fsysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+							 "samsung,fsys-sysreg");
+	if (IS_ERR(exynos_phy->fsysreg)) {
+		dev_err(&pdev->dev, "FSYS sysreg regmap lookup failed.\n");
+		return PTR_ERR(exynos_phy->fsysreg);
+	}
 
-	generic_phy = devm_phy_create(dev, dev->of_node, drv_data->ops);
+	generic_phy = devm_phy_create(dev, dev->of_node, &exynos5433_phy_ops);
 	if (IS_ERR(generic_phy)) {
 		dev_err(dev, "failed to create PHY\n");
 		return PTR_ERR(generic_phy);
@@ -275,5 +196,4 @@ static struct platform_driver exynos_pcie_phy_driver = {
 		.suppress_bind_attrs = true,
 	}
 };
-
 builtin_platform_driver(exynos_pcie_phy_driver);
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index cfa9b8b7e5ac2d590d8ab2f90857d040cb34eb91..ee0848fe8432473a63ff195e1825361bfb0f5cb9 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -829,7 +829,6 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct exynos5_usbdrd_phy *phy_drd;
 	struct phy_provider *phy_provider;
-	struct resource *res;
 	const struct exynos5_usbdrd_phy_drvdata *drv_data;
 	struct regmap *reg_pmu;
 	u32 pmu_offset;
@@ -843,8 +842,7 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
 	dev_set_drvdata(dev, phy_drd);
 	phy_drd->dev = dev;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	phy_drd->reg_phy = devm_ioremap_resource(dev, res);
+	phy_drd->reg_phy = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(phy_drd->reg_phy))
 		return PTR_ERR(phy_drd->reg_phy);
 
diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c
index 4dd7324d91b26469ee1461fe9847bca7486cd417..9ec234243f7c6fd29ca82a4b1c4464c58430340f 100644
--- a/drivers/phy/samsung/phy-exynos5250-sata.c
+++ b/drivers/phy/samsung/phy-exynos5250-sata.c
@@ -162,7 +162,6 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
 {
 	struct exynos_sata_phy *sata_phy;
 	struct device *dev = &pdev->dev;
-	struct resource *res;
 	struct phy_provider *phy_provider;
 	struct device_node *node;
 	int ret = 0;
@@ -171,9 +170,7 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
 	if (!sata_phy)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-	sata_phy->regs = devm_ioremap_resource(dev, res);
+	sata_phy->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(sata_phy->regs))
 		return PTR_ERR(sata_phy->regs);
 
diff --git a/drivers/phy/samsung/phy-exynos5250-usb2.c b/drivers/phy/samsung/phy-exynos5250-usb2.c
index 4f53b711fd6f51869591832753b4cbcfb73e9ee1..e198010e1bfdb2dcfb536a835bcafc2a6128b46b 100644
--- a/drivers/phy/samsung/phy-exynos5250-usb2.c
+++ b/drivers/phy/samsung/phy-exynos5250-usb2.c
@@ -117,9 +117,9 @@
 
 /* Isolation, configured in the power management unit */
 #define EXYNOS_5250_USB_ISOL_OTG_OFFSET		0x704
-#define EXYNOS_5250_USB_ISOL_OTG		BIT(0)
 #define EXYNOS_5250_USB_ISOL_HOST_OFFSET	0x708
-#define EXYNOS_5250_USB_ISOL_HOST		BIT(0)
+#define EXYNOS_5420_USB_ISOL_HOST_OFFSET	0x70C
+#define EXYNOS_5250_USB_ISOL_ENABLE		BIT(0)
 
 /* Mode swtich register */
 #define EXYNOS_5250_MODE_SWITCH_OFFSET		0x230
@@ -132,7 +132,6 @@ enum exynos4x12_phy_id {
 	EXYNOS5250_HOST,
 	EXYNOS5250_HSIC0,
 	EXYNOS5250_HSIC1,
-	EXYNOS5250_NUM_PHYS,
 };
 
 /*
@@ -176,20 +175,19 @@ static void exynos5250_isol(struct samsung_usb2_phy_instance *inst, bool on)
 {
 	struct samsung_usb2_phy_driver *drv = inst->drv;
 	u32 offset;
-	u32 mask;
+	u32 mask = EXYNOS_5250_USB_ISOL_ENABLE;
 
-	switch (inst->cfg->id) {
-	case EXYNOS5250_DEVICE:
+	if (drv->cfg == &exynos5250_usb2_phy_config &&
+	    inst->cfg->id == EXYNOS5250_DEVICE)
 		offset = EXYNOS_5250_USB_ISOL_OTG_OFFSET;
-		mask = EXYNOS_5250_USB_ISOL_OTG;
-		break;
-	case EXYNOS5250_HOST:
+	else if (drv->cfg == &exynos5250_usb2_phy_config &&
+		 inst->cfg->id == EXYNOS5250_HOST)
 		offset = EXYNOS_5250_USB_ISOL_HOST_OFFSET;
-		mask = EXYNOS_5250_USB_ISOL_HOST;
-		break;
-	default:
+	else if (drv->cfg == &exynos5420_usb2_phy_config &&
+		 inst->cfg->id == EXYNOS5250_HOST)
+		offset = EXYNOS_5420_USB_ISOL_HOST_OFFSET;
+	else
 		return;
-	}
 
 	regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
 }
@@ -390,9 +388,31 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = {
 	},
 };
 
+static const struct samsung_usb2_common_phy exynos5420_phys[] = {
+	{
+		.label		= "host",
+		.id		= EXYNOS5250_HOST,
+		.power_on	= exynos5250_power_on,
+		.power_off	= exynos5250_power_off,
+	},
+	{
+		.label		= "hsic",
+		.id		= EXYNOS5250_HSIC0,
+		.power_on	= exynos5250_power_on,
+		.power_off	= exynos5250_power_off,
+	},
+};
+
 const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = {
 	.has_mode_switch	= 1,
-	.num_phys		= EXYNOS5250_NUM_PHYS,
+	.num_phys		= ARRAY_SIZE(exynos5250_phys),
 	.phys			= exynos5250_phys,
 	.rate_to_clk		= exynos5250_rate_to_clk,
 };
+
+const struct samsung_usb2_phy_config exynos5420_usb2_phy_config = {
+	.has_mode_switch	= 1,
+	.num_phys		= ARRAY_SIZE(exynos5420_phys),
+	.phys			= exynos5420_phys,
+	.rate_to_clk		= exynos5250_rate_to_clk,
+};
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index a3ed3ff04690caa9fba940d786444adca0c1b77c..ec2befabeea6b5214226775303fc3149e3d12808 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -127,6 +127,10 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = {
 		.compatible = "samsung,exynos5250-usb2-phy",
 		.data = &exynos5250_usb2_phy_config,
 	},
+	{
+		.compatible = "samsung,exynos5420-usb2-phy",
+		.data = &exynos5420_usb2_phy_config,
+	},
 #endif
 #ifdef CONFIG_PHY_S5PV210_USB2
 	{
@@ -143,7 +147,6 @@ static int samsung_usb2_phy_probe(struct platform_device *pdev)
 	const struct samsung_usb2_phy_config *cfg;
 	struct device *dev = &pdev->dev;
 	struct phy_provider *phy_provider;
-	struct resource *mem;
 	struct samsung_usb2_phy_driver *drv;
 	int i, ret;
 
@@ -167,8 +170,7 @@ static int samsung_usb2_phy_probe(struct platform_device *pdev)
 	drv->cfg = cfg;
 	drv->dev = dev;
 
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	drv->reg_phy = devm_ioremap_resource(dev, mem);
+	drv->reg_phy = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(drv->reg_phy)) {
 		dev_err(dev, "Failed to map register memory (phy)\n");
 		return PTR_ERR(drv->reg_phy);
diff --git a/drivers/phy/samsung/phy-samsung-usb2.h b/drivers/phy/samsung/phy-samsung-usb2.h
index 77fb23bc218f154f132aaf7da8be449f2ae657a0..ebaf43bfc5a2d84eae5aacccf183e23d4a0e6266 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.h
+++ b/drivers/phy/samsung/phy-samsung-usb2.h
@@ -66,5 +66,6 @@ extern const struct samsung_usb2_phy_config exynos3250_usb2_phy_config;
 extern const struct samsung_usb2_phy_config exynos4210_usb2_phy_config;
 extern const struct samsung_usb2_phy_config exynos4x12_usb2_phy_config;
 extern const struct samsung_usb2_phy_config exynos5250_usb2_phy_config;
+extern const struct samsung_usb2_phy_config exynos5420_usb2_phy_config;
 extern const struct samsung_usb2_phy_config s5pv210_usb2_phy_config;
 #endif
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 2b3639cba51aa77bce3b30c6757ca44fc6cfb344..a54317e96c41144e3d8f2a4e5157327743519279 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -311,7 +311,6 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
 	struct stm32_usbphyc *usbphyc;
 	struct device *dev = &pdev->dev;
 	struct device_node *child, *np = dev->of_node;
-	struct resource *res;
 	struct phy_provider *phy_provider;
 	u32 version;
 	int ret, port = 0;
@@ -322,17 +321,13 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
 	usbphyc->dev = dev;
 	dev_set_drvdata(dev, usbphyc);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	usbphyc->base = devm_ioremap_resource(dev, res);
+	usbphyc->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(usbphyc->base))
 		return PTR_ERR(usbphyc->base);
 
 	usbphyc->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(usbphyc->clk)) {
-		ret = PTR_ERR(usbphyc->clk);
-		dev_err(dev, "clk get failed: %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(usbphyc->clk))
+		return dev_err_probe(dev, PTR_ERR(usbphyc->clk), "clk get failed\n");
 
 	ret = clk_prepare_enable(usbphyc->clk);
 	if (ret) {
@@ -345,6 +340,10 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
 		reset_control_assert(usbphyc->rst);
 		udelay(2);
 		reset_control_deassert(usbphyc->rst);
+	} else {
+		ret = PTR_ERR(usbphyc->rst);
+		if (ret == -EPROBE_DEFER)
+			goto clk_disable;
 	}
 
 	usbphyc->switch_setup = -EINVAL;
diff --git a/drivers/phy/tegra/phy-tegra194-p2u.c b/drivers/phy/tegra/phy-tegra194-p2u.c
index 7042bed9feaaf0ed7f3ef60df65dc0d1b322751f..3ee02b9eb04fd670fab734400cd7df1556c81a1d 100644
--- a/drivers/phy/tegra/phy-tegra194-p2u.c
+++ b/drivers/phy/tegra/phy-tegra194-p2u.c
@@ -72,14 +72,12 @@ static int tegra_p2u_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct phy *generic_phy;
 	struct tegra_p2u *phy;
-	struct resource *res;
 
 	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
 	if (!phy)
 		return -ENOMEM;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctl");
-	phy->base = devm_ioremap_resource(dev, res);
+	phy->base = devm_platform_ioremap_resource_byname(pdev, "ctl");
 	if (IS_ERR(phy->base))
 		return PTR_ERR(phy->base);
 
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index ad88d74c18842c5cc9d962f185d9c03524bff529..941006f503e4ccce12b28448234116314c4323be 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -146,7 +146,7 @@ static void tegra_xusb_pad_release(struct device *dev)
 	pad->soc->ops->remove(pad);
 }
 
-static struct device_type tegra_xusb_pad_type = {
+static const struct device_type tegra_xusb_pad_type = {
 	.release = tegra_xusb_pad_release,
 };
 
@@ -513,7 +513,7 @@ static void tegra_xusb_port_release(struct device *dev)
 		port->ops->release(port);
 }
 
-static struct device_type tegra_xusb_port_type = {
+static const struct device_type tegra_xusb_port_type = {
 	.release = tegra_xusb_port_release,
 };
 
@@ -688,7 +688,7 @@ static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port)
 	 * reference to retrieve usb-phy details.
 	 */
 	port->usb_phy.dev = &lane->pad->lanes[port->index]->dev;
-	port->usb_phy.dev->driver = port->padctl->dev->driver;
+	port->usb_phy.dev->driver = port->dev.driver;
 	port->usb_phy.otg->usb_phy = &port->usb_phy;
 	port->usb_phy.otg->set_peripheral = tegra_xusb_set_peripheral;
 	port->usb_phy.otg->set_host = tegra_xusb_set_host;
@@ -1148,7 +1148,6 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
 	const struct tegra_xusb_padctl_soc *soc;
 	struct tegra_xusb_padctl *padctl;
 	const struct of_device_id *match;
-	struct resource *res;
 	int err;
 
 	/* for backwards compatibility with old device trees */
@@ -1173,8 +1172,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&padctl->pads);
 	mutex_init(&padctl->lock);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	padctl->regs = devm_ioremap_resource(&pdev->dev, res);
+	padctl->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(padctl->regs)) {
 		err = PTR_ERR(padctl->regs);
 		goto remove;
@@ -1200,7 +1198,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
 	err = devm_regulator_bulk_get(&pdev->dev, padctl->soc->num_supplies,
 				      padctl->supplies);
 	if (err < 0) {
-		dev_err(&pdev->dev, "failed to get regulators: %d\n", err);
+		dev_err_probe(&pdev->dev, err, "failed to get regulators\n");
 		goto remove;
 	}
 
diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c
index ccd0e4e00451a408fb9001c90abc2f8ac38a9599..47482f106fab34ed0a0438baf6580c07b848374f 100644
--- a/drivers/phy/ti/phy-omap-control.c
+++ b/drivers/phy/ti/phy-omap-control.c
@@ -268,7 +268,6 @@ MODULE_DEVICE_TABLE(of, omap_control_phy_id_table);
 
 static int omap_control_phy_probe(struct platform_device *pdev)
 {
-	struct resource	*res;
 	const struct of_device_id *of_id;
 	struct omap_control_phy *control_phy;
 
@@ -285,16 +284,13 @@ static int omap_control_phy_probe(struct platform_device *pdev)
 	control_phy->type = *(enum omap_control_phy_type *)of_id->data;
 
 	if (control_phy->type == OMAP_CTRL_TYPE_OTGHS) {
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-			"otghs_control");
-		control_phy->otghs_control = devm_ioremap_resource(
-			&pdev->dev, res);
+		control_phy->otghs_control =
+			devm_platform_ioremap_resource_byname(pdev, "otghs_control");
 		if (IS_ERR(control_phy->otghs_control))
 			return PTR_ERR(control_phy->otghs_control);
 	} else {
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-				"power");
-		control_phy->power = devm_ioremap_resource(&pdev->dev, res);
+		control_phy->power =
+			devm_platform_ioremap_resource_byname(pdev, "power");
 		if (IS_ERR(control_phy->power)) {
 			dev_err(&pdev->dev, "Couldn't get power register\n");
 			return PTR_ERR(control_phy->power);
@@ -312,9 +308,8 @@ static int omap_control_phy_probe(struct platform_device *pdev)
 	}
 
 	if (control_phy->type == OMAP_CTRL_TYPE_PCIE) {
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-						   "pcie_pcs");
-		control_phy->pcie_pcs = devm_ioremap_resource(&pdev->dev, res);
+		control_phy->pcie_pcs =
+			devm_platform_ioremap_resource_byname(pdev, "pcie_pcs");
 		if (IS_ERR(control_phy->pcie_pcs))
 			return PTR_ERR(control_phy->pcie_pcs);
 	}
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index 4fec90d2624ff3d108ab824a771c79992dcaaa7f..ebceb1520ce88ade401a7c4e97a35806a90efb48 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -366,7 +366,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
 {
 	struct omap_usb	*phy;
 	struct phy *generic_phy;
-	struct resource *res;
 	struct phy_provider *phy_provider;
 	struct usb_otg *otg;
 	struct device_node *node = pdev->dev.of_node;
@@ -403,8 +402,7 @@ static int omap_usb2_probe(struct platform_device *pdev)
 
 	omap_usb2_init_errata(phy);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
+	phy->phy_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(phy->phy_base))
 		return PTR_ERR(phy->phy_base);
 
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index e9332c90f75f58caced353b789cde515a8b9f36e..2cbc91e535d465bcf7e4772ca29f40438bed5357 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -745,35 +745,28 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
 
 static int ti_pipe3_get_tx_rx_base(struct ti_pipe3 *phy)
 {
-	struct resource *res;
 	struct device *dev = phy->dev;
 	struct platform_device *pdev = to_platform_device(dev);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-					   "phy_rx");
-	phy->phy_rx = devm_ioremap_resource(dev, res);
+	phy->phy_rx = devm_platform_ioremap_resource_byname(pdev, "phy_rx");
 	if (IS_ERR(phy->phy_rx))
 		return PTR_ERR(phy->phy_rx);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-					   "phy_tx");
-	phy->phy_tx = devm_ioremap_resource(dev, res);
+	phy->phy_tx = devm_platform_ioremap_resource_byname(pdev, "phy_tx");
 
 	return PTR_ERR_OR_ZERO(phy->phy_tx);
 }
 
 static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy)
 {
-	struct resource *res;
 	struct device *dev = phy->dev;
 	struct platform_device *pdev = to_platform_device(dev);
 
 	if (phy->mode == PIPE3_MODE_PCIE)
 		return 0;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-					   "pll_ctrl");
-	phy->pll_ctrl_base = devm_ioremap_resource(dev, res);
+	phy->pll_ctrl_base =
+		devm_platform_ioremap_resource_byname(pdev, "pll_ctrl");
 	return PTR_ERR_OR_ZERO(phy->pll_ctrl_base);
 }
 
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
index f8c08fb9891d7495fbfcf9705413727d554c3733..1794ff0106bc6df0e9a31dc87b6f2075e734044a 100644
--- a/drivers/siox/siox-core.c
+++ b/drivers/siox/siox-core.c
@@ -512,41 +512,47 @@ static int siox_match(struct device *dev, struct device_driver *drv)
 	return 1;
 }
 
-static struct bus_type siox_bus_type = {
-	.name = "siox",
-	.match = siox_match,
-};
-
-static int siox_driver_probe(struct device *dev)
+static int siox_probe(struct device *dev)
 {
 	struct siox_driver *sdriver = to_siox_driver(dev->driver);
 	struct siox_device *sdevice = to_siox_device(dev);
-	int ret;
 
-	ret = sdriver->probe(sdevice);
-	return ret;
+	return sdriver->probe(sdevice);
 }
 
-static int siox_driver_remove(struct device *dev)
+static int siox_remove(struct device *dev)
 {
 	struct siox_driver *sdriver =
 		container_of(dev->driver, struct siox_driver, driver);
 	struct siox_device *sdevice = to_siox_device(dev);
-	int ret;
 
-	ret = sdriver->remove(sdevice);
-	return ret;
+	if (sdriver->remove)
+		sdriver->remove(sdevice);
+
+	return 0;
 }
 
-static void siox_driver_shutdown(struct device *dev)
+static void siox_shutdown(struct device *dev)
 {
-	struct siox_driver *sdriver =
-		container_of(dev->driver, struct siox_driver, driver);
 	struct siox_device *sdevice = to_siox_device(dev);
+	struct siox_driver *sdriver;
+
+	if (!dev->driver)
+		return;
 
-	sdriver->shutdown(sdevice);
+	sdriver = container_of(dev->driver, struct siox_driver, driver);
+	if (sdriver->shutdown)
+		sdriver->shutdown(sdevice);
 }
 
+static struct bus_type siox_bus_type = {
+	.name = "siox",
+	.match = siox_match,
+	.probe = siox_probe,
+	.remove = siox_remove,
+	.shutdown = siox_shutdown,
+};
+
 static ssize_t active_show(struct device *dev,
 			   struct device_attribute *attr, char *buf)
 {
@@ -882,7 +888,8 @@ int __siox_driver_register(struct siox_driver *sdriver, struct module *owner)
 	if (unlikely(!siox_is_registered))
 		return -EPROBE_DEFER;
 
-	if (!sdriver->set_data && !sdriver->get_data) {
+	if (!sdriver->probe ||
+	    (!sdriver->set_data && !sdriver->get_data)) {
 		pr_err("Driver %s doesn't provide needed callbacks\n",
 		       sdriver->driver.name);
 		return -EINVAL;
@@ -891,13 +898,6 @@ int __siox_driver_register(struct siox_driver *sdriver, struct module *owner)
 	sdriver->driver.owner = owner;
 	sdriver->driver.bus = &siox_bus_type;
 
-	if (sdriver->probe)
-		sdriver->driver.probe = siox_driver_probe;
-	if (sdriver->remove)
-		sdriver->driver.remove = siox_driver_remove;
-	if (sdriver->shutdown)
-		sdriver->driver.shutdown = siox_driver_shutdown;
-
 	ret = driver_register(&sdriver->driver);
 	if (ret)
 		pr_err("Failed to register siox driver %s (%d)\n",
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
index 8cd595148d178a4dcca7a29d565b0dd36e3c480e..1235b7dc8496cc33a9234a1b6d62e9f0900f540b 100644
--- a/drivers/slimbus/Kconfig
+++ b/drivers/slimbus/Kconfig
@@ -22,9 +22,10 @@ config SLIM_QCOM_CTRL
 
 config SLIM_QCOM_NGD_CTRL
 	tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
-	depends on HAS_IOMEM && DMA_ENGINE && NET
+	depends on HAS_IOMEM && DMA_ENGINE && NET && QCOM_RPROC_COMMON
 	depends on ARCH_QCOM || COMPILE_TEST
 	select QCOM_QMI_HELPERS
+	select QCOM_PDR_HELPERS
 	help
 	  Select driver if Qualcomm's SLIMbus Satellite Non-Generic Device
 	  Component is programmed using Linux kernel.
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index d5879142dbef1c4903f0e7279c46cf2859af80ea..f2b5d347d227bbc07b7190c53f4ac4e941fa8177 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -258,6 +258,7 @@ int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg,
 	case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
 	case SLIM_MSG_MC_CLEAR_INFORMATION:
 		txn->rl += msg->num_bytes;
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
index 4aad2566f52d275f546b196dcec571b71ba95c56..f04b961b96cd4de5513a46fc2b24d27f9ef22e35 100644
--- a/drivers/slimbus/qcom-ctrl.c
+++ b/drivers/slimbus/qcom-ctrl.c
@@ -472,15 +472,10 @@ static void qcom_slim_rxwq(struct work_struct *work)
 static void qcom_slim_prg_slew(struct platform_device *pdev,
 				struct qcom_slim_ctrl *ctrl)
 {
-	struct resource	*slew_mem;
-
 	if (!ctrl->slew_reg) {
 		/* SLEW RATE register for this SLIMbus */
-		slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-				"slew");
-		ctrl->slew_reg = devm_ioremap(&pdev->dev, slew_mem->start,
-				resource_size(slew_mem));
-		if (!ctrl->slew_reg)
+		ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew");
+		if (IS_ERR(ctrl->slew_reg))
 			return;
 	}
 
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 218aefc3531cd0d8e534a264b029b176c80acc66..c054e83ab63615564e7db8f405582a13785c3ebf 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -13,9 +13,13 @@
 #include <linux/slimbus.h>
 #include <linux/delay.h>
 #include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/remoteproc/qcom_rproc.h>
 #include <linux/of.h>
 #include <linux/io.h>
 #include <linux/soc/qcom/qmi.h>
+#include <linux/soc/qcom/pdr.h>
 #include <net/sock.h>
 #include "slimbus.h"
 
@@ -155,8 +159,15 @@ struct qcom_slim_ngd_ctrl {
 	struct qcom_slim_ngd_dma_desc txdesc[QCOM_SLIM_NGD_DESC_NUM];
 	struct completion reconf;
 	struct work_struct m_work;
+	struct work_struct ngd_up_work;
 	struct workqueue_struct *mwq;
+	struct completion qmi_up;
 	spinlock_t tx_buf_lock;
+	struct mutex tx_lock;
+	struct mutex ssr_lock;
+	struct notifier_block nb;
+	void *notifier;
+	struct pdr_handle *pdr;
 	enum qcom_slim_ngd_state state;
 	dma_addr_t rx_phys_base;
 	dma_addr_t tx_phys_base;
@@ -423,7 +434,7 @@ static int qcom_slim_qmi_send_power_request(struct qcom_slim_ngd_ctrl *ctrl,
 	return 0;
 }
 
-static struct qmi_msg_handler qcom_slim_qmi_msg_handlers[] = {
+static const struct qmi_msg_handler qcom_slim_qmi_msg_handlers[] = {
 	{
 		.type = QMI_RESPONSE,
 		.msg_id = SLIMBUS_QMI_POWER_RESP_V01,
@@ -678,7 +689,6 @@ static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
 	ctrl->rx_base = dma_alloc_coherent(dev, size, &ctrl->rx_phys_base,
 					   GFP_KERNEL);
 	if (!ctrl->rx_base) {
-		dev_err(dev, "dma_alloc_coherent failed\n");
 		ret = -ENOMEM;
 		goto rel_rx;
 	}
@@ -717,7 +727,6 @@ static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
 	ctrl->tx_base = dma_alloc_coherent(dev, size, &ctrl->tx_phys_base,
 					   GFP_KERNEL);
 	if (!ctrl->tx_base) {
-		dev_err(dev, "dma_alloc_coherent failed\n");
 		ret = -EINVAL;
 		goto rel_tx;
 	}
@@ -868,14 +877,18 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
 	if (txn->msg && txn->msg->wbuf)
 		memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
 
+	mutex_lock(&ctrl->tx_lock);
 	ret = qcom_slim_ngd_tx_msg_post(ctrl, pbuf, txn->rl);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&ctrl->tx_lock);
 		return ret;
+	}
 
 	timeout = wait_for_completion_timeout(&tx_sent, HZ);
 	if (!timeout) {
 		dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
 					txn->mt);
+		mutex_unlock(&ctrl->tx_lock);
 		return -ETIMEDOUT;
 	}
 
@@ -884,10 +897,12 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
 		if (!timeout) {
 			dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x",
 				txn->mc, txn->mt);
+			mutex_unlock(&ctrl->tx_lock);
 			return -ETIMEDOUT;
 		}
 	}
 
+	mutex_unlock(&ctrl->tx_lock);
 	return 0;
 }
 
@@ -1200,11 +1215,21 @@ static void qcom_slim_ngd_master_worker(struct work_struct *work)
 	}
 }
 
+static int qcom_slim_ngd_update_device_status(struct device *dev, void *null)
+{
+	slim_report_absent(to_slim_device(dev));
+
+	return 0;
+}
+
 static int qcom_slim_ngd_runtime_resume(struct device *dev)
 {
 	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
 	int ret = 0;
 
+	if (!ctrl->qmi.handle)
+		return 0;
+
 	if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
 		ret = qcom_slim_ngd_power_up(ctrl);
 	if (ret) {
@@ -1267,7 +1292,7 @@ static int qcom_slim_ngd_qmi_new_server(struct qmi_handle *hdl,
 	qmi->svc_info.sq_node = service->node;
 	qmi->svc_info.sq_port = service->port;
 
-	qcom_slim_ngd_enable(ctrl, true);
+	complete(&ctrl->qmi_up);
 
 	return 0;
 }
@@ -1280,13 +1305,12 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
 	struct qcom_slim_ngd_ctrl *ctrl =
 		container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
 
+	reinit_completion(&ctrl->qmi_up);
 	qmi->svc_info.sq_node = 0;
 	qmi->svc_info.sq_port = 0;
-
-	qcom_slim_ngd_enable(ctrl, false);
 }
 
-static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
+static const struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
 	.new_server = qcom_slim_ngd_qmi_new_server,
 	.del_server = qcom_slim_ngd_qmi_del_server,
 };
@@ -1333,6 +1357,72 @@ static const struct of_device_id qcom_slim_ngd_dt_match[] = {
 
 MODULE_DEVICE_TABLE(of, qcom_slim_ngd_dt_match);
 
+static void qcom_slim_ngd_down(struct qcom_slim_ngd_ctrl *ctrl)
+{
+	mutex_lock(&ctrl->ssr_lock);
+	device_for_each_child(ctrl->ctrl.dev, NULL,
+			      qcom_slim_ngd_update_device_status);
+	qcom_slim_ngd_enable(ctrl, false);
+	mutex_unlock(&ctrl->ssr_lock);
+}
+
+static void qcom_slim_ngd_up_worker(struct work_struct *work)
+{
+	struct qcom_slim_ngd_ctrl *ctrl;
+
+	ctrl = container_of(work, struct qcom_slim_ngd_ctrl, ngd_up_work);
+
+	/* Make sure qmi service is up before continuing */
+	wait_for_completion_interruptible(&ctrl->qmi_up);
+
+	mutex_lock(&ctrl->ssr_lock);
+	qcom_slim_ngd_enable(ctrl, true);
+	mutex_unlock(&ctrl->ssr_lock);
+}
+
+static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl,
+					unsigned long action)
+{
+	switch (action) {
+	case QCOM_SSR_BEFORE_SHUTDOWN:
+	case SERVREG_SERVICE_STATE_DOWN:
+		/* Make sure the last dma xfer is finished */
+		mutex_lock(&ctrl->tx_lock);
+		if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) {
+			pm_runtime_get_noresume(ctrl->dev);
+			ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
+			qcom_slim_ngd_down(ctrl);
+			qcom_slim_ngd_exit_dma(ctrl);
+		}
+		mutex_unlock(&ctrl->tx_lock);
+		break;
+	case QCOM_SSR_AFTER_POWERUP:
+	case SERVREG_SERVICE_STATE_UP:
+		schedule_work(&ctrl->ngd_up_work);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int qcom_slim_ngd_ssr_notify(struct notifier_block *nb,
+				    unsigned long action,
+				    void *data)
+{
+	struct qcom_slim_ngd_ctrl *ctrl = container_of(nb,
+					       struct qcom_slim_ngd_ctrl, nb);
+
+	return qcom_slim_ngd_ssr_pdr_notify(ctrl, action);
+}
+
+static void slim_pd_status(int state, char *svc_path, void *priv)
+{
+	struct qcom_slim_ngd_ctrl *ctrl = (struct qcom_slim_ngd_ctrl *)priv;
+
+	qcom_slim_ngd_ssr_pdr_notify(ctrl, state);
+}
 static int of_qcom_slim_ngd_register(struct device *parent,
 				     struct qcom_slim_ngd_ctrl *ctrl)
 {
@@ -1397,6 +1487,7 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev)
 	}
 
 	INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker);
+	INIT_WORK(&ctrl->ngd_up_work, qcom_slim_ngd_up_worker);
 	ctrl->mwq = create_singlethread_workqueue("ngd_master");
 	if (!ctrl->mwq) {
 		dev_err(&pdev->dev, "Failed to start master worker\n");
@@ -1419,6 +1510,7 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
 	struct qcom_slim_ngd_ctrl *ctrl;
 	struct resource *res;
 	int ret;
+	struct pdr_service *pds;
 
 	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
 	if (!ctrl)
@@ -1444,6 +1536,11 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	ctrl->nb.notifier_call = qcom_slim_ngd_ssr_notify;
+	ctrl->notifier = qcom_register_ssr_notifier("lpass", &ctrl->nb);
+	if (IS_ERR(ctrl->notifier))
+		return PTR_ERR(ctrl->notifier);
+
 	ctrl->dev = dev;
 	ctrl->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
 	ctrl->framer.superfreq =
@@ -1457,9 +1554,24 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
 	ctrl->ctrl.wakeup = NULL;
 	ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
 
+	mutex_init(&ctrl->tx_lock);
+	mutex_init(&ctrl->ssr_lock);
 	spin_lock_init(&ctrl->tx_buf_lock);
 	init_completion(&ctrl->reconf);
 	init_completion(&ctrl->qmi.qmi_comp);
+	init_completion(&ctrl->qmi_up);
+
+	ctrl->pdr = pdr_handle_alloc(slim_pd_status, ctrl);
+	if (IS_ERR(ctrl->pdr)) {
+		dev_err(dev, "Failed to init PDR handle\n");
+		return PTR_ERR(ctrl->pdr);
+	}
+
+	pds = pdr_add_lookup(ctrl->pdr, "avs/audio", "msm/adsp/audio_pd");
+	if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
+		dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
+		return PTR_ERR(pds);
+	}
 
 	platform_driver_register(&qcom_slim_ngd_driver);
 	return of_qcom_slim_ngd_register(dev, ctrl);
@@ -1477,6 +1589,8 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev)
 	struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
 
 	pm_runtime_disable(&pdev->dev);
+	pdr_handle_release(ctrl->pdr);
+	qcom_unregister_ssr_notifier(ctrl->notifier, &ctrl->nb);
 	qcom_slim_ngd_enable(ctrl, false);
 	qcom_slim_ngd_exit_dma(ctrl);
 	qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
@@ -1503,6 +1617,9 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
 	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
 	int ret = 0;
 
+	if (!ctrl->qmi.handle)
+		return 0;
+
 	ret = qcom_slim_qmi_power_request(ctrl, false);
 	if (ret && ret != -EBUSY)
 		dev_info(ctrl->dev, "slim resource not idle:%d\n", ret);
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h
index c73035915f1d4363044bdd3161eb486c4a97a46c..00a7f112574b53ea2cbc9b5419834a0e008a0338 100644
--- a/drivers/slimbus/slimbus.h
+++ b/drivers/slimbus/slimbus.h
@@ -244,7 +244,7 @@ enum slim_ch_data_fmt {
 };
 
 /**
- * enum slim_ch_aux_fmt: SLIMbus channel Aux Field format IDs according to
+ * enum slim_ch_aux_bit_fmt: SLIMbus channel Aux Field format IDs according to
  *	Table 63 of SLIMbus Spec 2.0
  * @SLIM_CH_AUX_FMT_NOT_APPLICABLE: Undefined
  * @SLIM_CH_AUX_FMT_ZCUV_TUNNEL_IEC60958: ZCUV for tunneling IEC60958
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index ffe4600fd95bbcbe414cd98ebde9664a829a696d..d1e8c3a54976bbaf62f7032ba4f310bd71fa50b9 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -1280,7 +1280,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
 
 static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
 {
-	u8 clear = 0, impl_int_mask;
+	u8 clear, impl_int_mask;
 	int status, status2, ret, count = 0;
 
 	status = sdw_read(slave, SDW_DP0_INT);
@@ -1291,6 +1291,8 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
 	}
 
 	do {
+		clear = status & ~SDW_DP0_INTERRUPTS;
+
 		if (status & SDW_DP0_INT_TEST_FAIL) {
 			dev_err(&slave->dev, "Test fail for port 0\n");
 			clear |= SDW_DP0_INT_TEST_FAIL;
@@ -1319,7 +1321,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
 			*slave_status = clear;
 		}
 
-		/* clear the interrupt */
+		/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
 		ret = sdw_write(slave, SDW_DP0_INT, clear);
 		if (ret < 0) {
 			dev_err(slave->bus->dev,
@@ -1334,12 +1336,13 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
 				"SDW_DP0_INT read failed:%d\n", status2);
 			return status2;
 		}
+		/* filter to limit loop to interrupts identified in the first status read */
 		status &= status2;
 
 		count++;
 
 		/* we can get alerts while processing so keep retrying */
-	} while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
+	} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
 
 	if (count == SDW_READ_INTR_CLEAR_RETRY)
 		dev_warn(slave->bus->dev, "Reached MAX_RETRY on DP0 read\n");
@@ -1350,7 +1353,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
 static int sdw_handle_port_interrupt(struct sdw_slave *slave,
 				     int port, u8 *slave_status)
 {
-	u8 clear = 0, impl_int_mask;
+	u8 clear, impl_int_mask;
 	int status, status2, ret, count = 0;
 	u32 addr;
 
@@ -1367,6 +1370,8 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
 	}
 
 	do {
+		clear = status & ~SDW_DPN_INTERRUPTS;
+
 		if (status & SDW_DPN_INT_TEST_FAIL) {
 			dev_err(&slave->dev, "Test fail for port:%d\n", port);
 			clear |= SDW_DPN_INT_TEST_FAIL;
@@ -1389,7 +1394,7 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
 			*slave_status = clear;
 		}
 
-		/* clear the interrupt */
+		/* clear the interrupt but don't touch reserved fields */
 		ret = sdw_write(slave, addr, clear);
 		if (ret < 0) {
 			dev_err(slave->bus->dev,
@@ -1404,12 +1409,13 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
 				"SDW_DPN_INT read failed:%d\n", status2);
 			return status2;
 		}
+		/* filter to limit loop to interrupts identified in the first status read */
 		status &= status2;
 
 		count++;
 
 		/* we can get alerts while processing so keep retrying */
-	} while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
+	} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
 
 	if (count == SDW_READ_INTR_CLEAR_RETRY)
 		dev_warn(slave->bus->dev, "Reached MAX_RETRY on port read");
@@ -1423,7 +1429,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
 	u8 clear = 0, bit, port_status[15] = {0};
 	int port_num, stat, ret, count = 0;
 	unsigned long port;
-	bool slave_notify = false;
+	bool slave_notify;
 	u8 sdca_cascade = 0;
 	u8 buf, buf2[2], _buf, _buf2[2];
 	bool parity_check;
@@ -1465,6 +1471,8 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
 	}
 
 	do {
+		slave_notify = false;
+
 		/*
 		 * Check parity, bus clash and Slave (impl defined)
 		 * interrupt
@@ -1589,7 +1597,10 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
 			sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
 		}
 
-		/* Make sure no interrupts are pending */
+		/*
+		 * Make sure no interrupts are pending, but filter to limit loop
+		 * to interrupts identified in the first status read
+		 */
 		buf &= _buf;
 		buf2[0] &= _buf2[0];
 		buf2[1] &= _buf2[1];
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 6a1e862b16c38c901d27a5dddecb89810c12c20b..66adb258a425825476d8232c9424fa44cae832d0 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -1585,8 +1585,6 @@ int intel_master_process_wakeen_event(struct platform_device *pdev)
  * PM calls
  */
 
-#ifdef CONFIG_PM
-
 static int __maybe_unused intel_suspend(struct device *dev)
 {
 	struct sdw_cdns *cdns = dev_get_drvdata(dev);
@@ -1641,7 +1639,7 @@ static int __maybe_unused intel_suspend(struct device *dev)
 	return 0;
 }
 
-static int intel_suspend_runtime(struct device *dev)
+static int __maybe_unused intel_suspend_runtime(struct device *dev)
 {
 	struct sdw_cdns *cdns = dev_get_drvdata(dev);
 	struct sdw_intel *sdw = cdns_to_intel(cdns);
@@ -1796,7 +1794,7 @@ static int __maybe_unused intel_resume(struct device *dev)
 	return ret;
 }
 
-static int intel_resume_runtime(struct device *dev)
+static int __maybe_unused intel_resume_runtime(struct device *dev)
 {
 	struct sdw_cdns *cdns = dev_get_drvdata(dev);
 	struct sdw_intel *sdw = cdns_to_intel(cdns);
@@ -1969,8 +1967,6 @@ static int intel_resume_runtime(struct device *dev)
 	return ret;
 }
 
-#endif
-
 static const struct dev_pm_ops intel_pm = {
 	SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
 	SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL)
diff --git a/drivers/soundwire/master.c b/drivers/soundwire/master.c
index 3488bb824e845f1ae18d1b2af7fc72c1a4b973fe..9b05c9e25ebe48a7d135ff45ad4b3af0fb1cb2aa 100644
--- a/drivers/soundwire/master.c
+++ b/drivers/soundwire/master.c
@@ -8,6 +8,15 @@
 #include <linux/soundwire/sdw_type.h>
 #include "bus.h"
 
+/*
+ * The 3s value for autosuspend will only be used if there are no
+ * devices physically attached on a bus segment. In practice, enabling
+ * the bus operation will result in the child devices becoming active,
+ * and the master device will only suspend when all of its children
+ * are no longer active.
+ */
+#define SDW_MASTER_SUSPEND_DELAY_MS 3000
+
 /*
  * The sysfs for properties reflects the MIPI description as given
  * in the MIPI DisCo spec
@@ -154,7 +163,12 @@ int sdw_master_device_add(struct sdw_bus *bus, struct device *parent,
 	bus->dev = &md->dev;
 	bus->md = md;
 
+	pm_runtime_set_autosuspend_delay(&bus->md->dev, SDW_MASTER_SUSPEND_DELAY_MS);
+	pm_runtime_use_autosuspend(&bus->md->dev);
+	pm_runtime_mark_last_busy(&bus->md->dev);
+	pm_runtime_set_active(&bus->md->dev);
 	pm_runtime_enable(&bus->md->dev);
+	pm_runtime_idle(&bus->md->dev);
 device_register_err:
 	return ret;
 }
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index fbca4ebf63e9278d22f2ca84ec7c2d1086e01cfa..6d22df01f35471e8a8803c09d6ea074a756e9734 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -799,7 +799,7 @@ static int qcom_swrm_probe(struct platform_device *pdev)
 	data = of_device_get_match_data(dev);
 	ctrl->rows_index = sdw_find_row_index(data->default_rows);
 	ctrl->cols_index = sdw_find_col_index(data->default_cols);
-#if IS_ENABLED(CONFIG_SLIMBUS)
+#if IS_REACHABLE(CONFIG_SLIMBUS)
 	if (dev->parent->bus == &slimbus_bus) {
 #else
 	if (false) {
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index c16b60f645a4de6100a043143fb42386cbc65a7a..51f5aeb65b3b20dd60bbf8cdf7852a973a180b8a 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -23,6 +23,7 @@ static DEFINE_IDA(ctrl_ida);
 static void spmi_dev_release(struct device *dev)
 {
 	struct spmi_device *sdev = to_spmi_device(dev);
+
 	kfree(sdev);
 }
 
@@ -33,6 +34,7 @@ static const struct device_type spmi_dev_type = {
 static void spmi_ctrl_release(struct device *dev)
 {
 	struct spmi_controller *ctrl = to_spmi_controller(dev);
+
 	ida_simple_remove(&ctrl_ida, ctrl->nr);
 	kfree(ctrl);
 }
@@ -357,6 +359,14 @@ static int spmi_drv_remove(struct device *dev)
 	return 0;
 }
 
+static void spmi_drv_shutdown(struct device *dev)
+{
+	const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
+
+	if (sdrv && sdrv->shutdown)
+		sdrv->shutdown(to_spmi_device(dev));
+}
+
 static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
 	int ret;
@@ -373,6 +383,7 @@ static struct bus_type spmi_bus_type = {
 	.match		= spmi_device_match,
 	.probe		= spmi_drv_probe,
 	.remove		= spmi_drv_remove,
+	.shutdown	= spmi_drv_shutdown,
 	.uevent		= spmi_drv_uevent,
 };
 
@@ -487,7 +498,7 @@ static void of_spmi_register_devices(struct spmi_controller *ctrl)
 			continue;
 
 		sdev->dev.of_node = node;
-		sdev->usid = (u8) reg[0];
+		sdev->usid = (u8)reg[0];
 
 		err = spmi_device_add(sdev);
 		if (err) {
@@ -531,6 +542,7 @@ EXPORT_SYMBOL_GPL(spmi_controller_add);
 static int spmi_ctrl_remove_device(struct device *dev, void *data)
 {
 	struct spmi_device *spmidev = to_spmi_device(dev);
+
 	if (dev->type == &spmi_dev_type)
 		spmi_device_remove(spmidev);
 	return 0;
@@ -545,13 +557,10 @@ static int spmi_ctrl_remove_device(struct device *dev, void *data)
  */
 void spmi_controller_remove(struct spmi_controller *ctrl)
 {
-	int dummy;
-
 	if (!ctrl)
 		return;
 
-	dummy = device_for_each_child(&ctrl->dev, NULL,
-				      spmi_ctrl_remove_device);
+	device_for_each_child(&ctrl->dev, NULL, spmi_ctrl_remove_device);
 	device_del(&ctrl->dev);
 }
 EXPORT_SYMBOL_GPL(spmi_controller_remove);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 443ca3f3cdf09b15f58d92cbe86a729b89c2abc0..4d7a5ddf999290c9f1c68d85801d992d231c91bd 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -94,8 +94,6 @@ source "drivers/staging/pi433/Kconfig"
 
 source "drivers/staging/mt7621-pci/Kconfig"
 
-source "drivers/staging/mt7621-pci-phy/Kconfig"
-
 source "drivers/staging/mt7621-pinctrl/Kconfig"
 
 source "drivers/staging/mt7621-dma/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index dc45128ef5255b68765018d201aa63afe4d9d76a..89bde2370eee8f1e917620e57b4b48925a45cf0b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -37,7 +37,6 @@ obj-$(CONFIG_GREYBUS)		+= greybus/
 obj-$(CONFIG_BCM2835_VCHIQ)	+= vc04_services/
 obj-$(CONFIG_PI433)		+= pi433/
 obj-$(CONFIG_PCI_MT7621)	+= mt7621-pci/
-obj-$(CONFIG_PCI_MT7621_PHY)	+= mt7621-pci-phy/
 obj-$(CONFIG_PINCTRL_RT2880)	+= mt7621-pinctrl/
 obj-$(CONFIG_SOC_MT7621)	+= mt7621-dma/
 obj-$(CONFIG_DMA_RALINK)	+= ralink-gdma/
diff --git a/drivers/staging/mt7621-pci-phy/Kconfig b/drivers/staging/mt7621-pci-phy/Kconfig
deleted file mode 100644
index 263e0a91c424b5b7a41993f58d19a110091e07f1..0000000000000000000000000000000000000000
--- a/drivers/staging/mt7621-pci-phy/Kconfig
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config PCI_MT7621_PHY
-	tristate "MediaTek MT7621 PCI PHY Driver"
-	depends on RALINK && OF
-	select GENERIC_PHY
-	help
-	  Say 'Y' here to add support for MediaTek MT7621 PCI PHY driver,
-
diff --git a/drivers/staging/mt7621-pci-phy/Makefile b/drivers/staging/mt7621-pci-phy/Makefile
deleted file mode 100644
index b4d99b9119e08dbdd750fa8f02a3dbaad4f3cf9e..0000000000000000000000000000000000000000
--- a/drivers/staging/mt7621-pci-phy/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PCI_MT7621_PHY)       += pci-mt7621-phy.o
diff --git a/drivers/staging/mt7621-pci-phy/TODO b/drivers/staging/mt7621-pci-phy/TODO
deleted file mode 100644
index a255e8f753eb10edad78dc2ab83ba917e7394e65..0000000000000000000000000000000000000000
--- a/drivers/staging/mt7621-pci-phy/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-
-- general code review and cleanup
-
-Cc:  NeilBrown <neil@brown.name> and Sergio Paracuellos <sergio.paracuellos@gmail.com>
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index be06f1a961c2c2a3dc8aa0f2ca44dadc117026c1..ea96e319c8a0ef2971deb53d9e03f618c6ef2239 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -906,7 +906,7 @@ static void uio_device_release(struct device *dev)
 }
 
 /**
- * uio_register_device - register a new userspace IO device
+ * __uio_register_device - register a new userspace IO device
  * @owner:	module that creates the new device
  * @parent:	parent device
  * @info:	UIO device capabilities
@@ -1002,7 +1002,7 @@ static void devm_uio_unregister_device(struct device *dev, void *res)
 }
 
 /**
- * devm_uio_register_device - Resource managed uio_register_device()
+ * __devm_uio_register_device - Resource managed uio_register_device()
  * @owner:	module that creates the new device
  * @parent:	parent device
  * @info:	UIO device capabilities
diff --git a/drivers/uio/uio_aec.c b/drivers/uio/uio_aec.c
index 381a26dfac46a96a53f00ee3b9e71ee8c8a4b5d7..32357f8a92b527b91cf8981d6557eb6d809c6168 100644
--- a/drivers/uio/uio_aec.c
+++ b/drivers/uio/uio_aec.c
@@ -71,12 +71,12 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct uio_info *info;
 	int ret;
 
-	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
+	info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
 	if (pci_enable_device(pdev))
-		goto out_free;
+		return -ENODEV;
 
 	if (pci_request_regions(pdev, "aectc"))
 		goto out_disable;
@@ -117,8 +117,6 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_release_regions(pdev);
 out_disable:
 	pci_disable_device(pdev);
-out_free:
-	kfree(info);
 	return -ENODEV;
 }
 
@@ -136,8 +134,6 @@ static void remove(struct pci_dev *pdev)
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	iounmap(info->priv);
-
-	kfree(info);
 }
 
 static struct pci_driver pci_driver = {
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c
index ab60186f97593c112eece63b3ad72bde4864343a..653f842a14915605c8a5acf2f1faa09d09021244 100644
--- a/drivers/uio/uio_cif.c
+++ b/drivers/uio/uio_cif.c
@@ -43,12 +43,12 @@ static int hilscher_pci_probe(struct pci_dev *dev,
 {
 	struct uio_info *info;
 
-	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
+	info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
 	if (pci_enable_device(dev))
-		goto out_free;
+		return -ENODEV;
 
 	if (pci_request_regions(dev, "hilscher"))
 		goto out_disable;
@@ -92,8 +92,6 @@ static int hilscher_pci_probe(struct pci_dev *dev,
 	pci_release_regions(dev);
 out_disable:
 	pci_disable_device(dev);
-out_free:
-	kfree (info);
 	return -ENODEV;
 }
 
@@ -105,8 +103,6 @@ static void hilscher_pci_remove(struct pci_dev *dev)
 	pci_release_regions(dev);
 	pci_disable_device(dev);
 	iounmap(info->mem[0].internal_addr);
-
-	kfree (info);
 }
 
 static struct pci_device_id hilscher_pci_ids[] = {
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index ec7f66f4555a60f8630d3f3bd02afcf7265fb135..6b5cfa5b06733c572d3e5d47dd68e2d6473892ef 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -143,6 +143,13 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
 	return 0;
 }
 
+static void uio_dmem_genirq_pm_disable(void *data)
+{
+	struct device *dev = data;
+
+	pm_runtime_disable(dev);
+}
+
 static int uio_dmem_genirq_probe(struct platform_device *pdev)
 {
 	struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev);
@@ -154,11 +161,10 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
 
 	if (pdev->dev.of_node) {
 		/* alloc uioinfo for one device */
-		uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
+		uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo), GFP_KERNEL);
 		if (!uioinfo) {
-			ret = -ENOMEM;
 			dev_err(&pdev->dev, "unable to kmalloc\n");
-			goto bad2;
+			return -ENOMEM;
 		}
 		uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
 					       pdev->dev.of_node);
@@ -167,20 +173,19 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
 
 	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
 		dev_err(&pdev->dev, "missing platform_data\n");
-		goto bad0;
+		return -EINVAL;
 	}
 
 	if (uioinfo->handler || uioinfo->irqcontrol ||
 	    uioinfo->irq_flags & IRQF_SHARED) {
 		dev_err(&pdev->dev, "interrupt configuration error\n");
-		goto bad0;
+		return -EINVAL;
 	}
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
-		ret = -ENOMEM;
 		dev_err(&pdev->dev, "unable to kmalloc\n");
-		goto bad0;
+		return -ENOMEM;
 	}
 
 	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
@@ -197,7 +202,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
 		if (ret == -ENXIO && pdev->dev.of_node)
 			ret = UIO_IRQ_NONE;
 		else if (ret < 0)
-			goto bad1;
+			return ret;
 		uioinfo->irq = ret;
 	}
 
@@ -282,41 +287,11 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
 	 */
 	pm_runtime_enable(&pdev->dev);
 
-	ret = uio_register_device(&pdev->dev, priv->uioinfo);
-	if (ret) {
-		dev_err(&pdev->dev, "unable to register uio device\n");
-		pm_runtime_disable(&pdev->dev);
-		goto bad1;
-	}
-
-	platform_set_drvdata(pdev, priv);
-	return 0;
- bad1:
-	kfree(priv);
- bad0:
-	/* kfree uioinfo for OF */
-	if (pdev->dev.of_node)
-		kfree(uioinfo);
- bad2:
-	return ret;
-}
-
-static int uio_dmem_genirq_remove(struct platform_device *pdev)
-{
-	struct uio_dmem_genirq_platdata *priv = platform_get_drvdata(pdev);
-
-	uio_unregister_device(priv->uioinfo);
-	pm_runtime_disable(&pdev->dev);
+	ret = devm_add_action_or_reset(&pdev->dev, uio_dmem_genirq_pm_disable, &pdev->dev);
+	if (ret)
+		return ret;
 
-	priv->uioinfo->handler = NULL;
-	priv->uioinfo->irqcontrol = NULL;
-
-	/* kfree uioinfo for OF */
-	if (pdev->dev.of_node)
-		kfree(priv->uioinfo);
-
-	kfree(priv);
-	return 0;
+	return devm_uio_register_device(&pdev->dev, priv->uioinfo);
 }
 
 static int uio_dmem_genirq_runtime_nop(struct device *dev)
@@ -350,7 +325,6 @@ MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
 
 static struct platform_driver uio_dmem_genirq = {
 	.probe = uio_dmem_genirq_probe,
-	.remove = uio_dmem_genirq_remove,
 	.driver = {
 		.name = DRIVER_NAME,
 		.pm = &uio_dmem_genirq_dev_pm_ops,
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index be8a6905f507fa9d00d4468fbf7cdedb485d1b55..7d8eb9dc20681938354c4e320a7600f6cb06dbb2 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -299,7 +299,7 @@ static int get_of_data(struct fsl_elbc_gpcm *priv, struct device_node *node,
 	/* get optional uio name */
 	if (of_property_read_string(node, "uio_name", &dt_name) != 0)
 		dt_name = "eLBC_GPCM";
-	*name = kstrdup(dt_name, GFP_KERNEL);
+	*name = devm_kstrdup(priv->dev, dt_name, GFP_KERNEL);
 	if (!*name)
 		return -ENOMEM;
 
@@ -324,7 +324,7 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	/* allocate private data */
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 	priv->dev = &pdev->dev;
@@ -334,14 +334,12 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
 	ret = get_of_data(priv, node, &res, &reg_br_new, &reg_or_new,
 			  &irq, &uio_name);
 	if (ret)
-		goto out_err0;
+		return ret;
 
 	/* allocate UIO structure */
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info) {
-		ret = -ENOMEM;
-		goto out_err0;
-	}
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
 
 	/* get current BR/OR values */
 	reg_br_cur = in_be32(&priv->lbc->bank[priv->bank].br);
@@ -354,8 +352,7 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
 		     != fsl_lbc_addr(res.start)) {
 			dev_err(priv->dev,
 				"bank in use by another peripheral\n");
-			ret = -ENODEV;
-			goto out_err1;
+			return -ENODEV;
 		}
 
 		/* warn if behavior settings changing */
@@ -382,12 +379,11 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
 	info->mem[0].internal_addr = ioremap(res.start, resource_size(&res));
 	if (!info->mem[0].internal_addr) {
 		dev_err(priv->dev, "failed to map chip region\n");
-		ret = -ENODEV;
-		goto out_err1;
+		return -ENODEV;
 	}
 
 	/* set all UIO data */
-	info->mem[0].name = kasprintf(GFP_KERNEL, "%pOFn", node);
+	info->mem[0].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn", node);
 	info->mem[0].addr = res.start;
 	info->mem[0].size = resource_size(&res);
 	info->mem[0].memtype = UIO_MEM_PHYS;
@@ -428,12 +424,6 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
 	if (priv->shutdown)
 		priv->shutdown(info, true);
 	iounmap(info->mem[0].internal_addr);
-out_err1:
-	kfree(info->mem[0].name);
-	kfree(info);
-out_err0:
-	kfree(uio_name);
-	kfree(priv);
 	return ret;
 }
 
@@ -447,10 +437,6 @@ static int uio_fsl_elbc_gpcm_remove(struct platform_device *pdev)
 	if (priv->shutdown)
 		priv->shutdown(info, false);
 	iounmap(info->mem[0].internal_addr);
-	kfree(info->mem[0].name);
-	kfree(info->name);
-	kfree(info);
-	kfree(priv);
 
 	return 0;
 
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 4dae2320b103e2d03eeb7117cb0d263ed8a8a591..0330ba99730e2b2a21054c52b72cdf14bebfea9d 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -247,14 +247,14 @@ hv_uio_probe(struct hv_device *dev,
 		return -ENOTSUPP;
 	}
 
-	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
 
 	ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE,
 			       HV_RING_SIZE * PAGE_SIZE);
 	if (ret)
-		goto fail;
+		return ret;
 
 	set_channel_read_mode(channel, HV_CALL_ISR);
 
@@ -347,8 +347,6 @@ hv_uio_probe(struct hv_device *dev,
 
 fail_close:
 	hv_uio_cleanup(dev, pdata);
-fail:
-	kfree(pdata);
 
 	return ret;
 }
@@ -364,10 +362,8 @@ hv_uio_remove(struct hv_device *dev)
 	sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
 	uio_unregister_device(&pdata->info);
 	hv_uio_cleanup(dev, pdata);
-	hv_set_drvdata(dev, NULL);
 
 	vmbus_free_ring(dev->channel);
-	kfree(pdata);
 	return 0;
 }
 
diff --git a/drivers/uio/uio_mf624.c b/drivers/uio/uio_mf624.c
index b6a406986667ccf3f17039f9055143d18de4f7c2..5065c6a073a835621e74af0ae0b84e75534734ff 100644
--- a/drivers/uio/uio_mf624.c
+++ b/drivers/uio/uio_mf624.c
@@ -136,12 +136,12 @@ static int mf624_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	struct uio_info *info;
 
-	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
+	info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
 	if (pci_enable_device(dev))
-		goto out_free;
+		return -ENODEV;
 
 	if (pci_request_regions(dev, "mf624"))
 		goto out_disable;
@@ -189,8 +189,6 @@ static int mf624_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 out_disable:
 	pci_disable_device(dev);
 
-out_free:
-	kfree(info);
 	return -ENODEV;
 }
 
@@ -207,8 +205,6 @@ static void mf624_pci_remove(struct pci_dev *dev)
 	iounmap(info->mem[0].internal_addr);
 	iounmap(info->mem[1].internal_addr);
 	iounmap(info->mem[2].internal_addr);
-
-	kfree(info);
 }
 
 static const struct pci_device_id mf624_pci_id[] = {
diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c
index 9ae29ffde4100c215ce0c617a316ee4bde7de5f2..2319d6de8d0421dc433a2ba2ed4aa4e1c0ce9160 100644
--- a/drivers/uio/uio_netx.c
+++ b/drivers/uio/uio_netx.c
@@ -53,12 +53,12 @@ static int netx_pci_probe(struct pci_dev *dev,
 	struct uio_info *info;
 	int bar;
 
-	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
+	info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
 	if (pci_enable_device(dev))
-		goto out_free;
+		return -ENODEV;
 
 	if (pci_request_regions(dev, "netx"))
 		goto out_disable;
@@ -112,8 +112,6 @@ static int netx_pci_probe(struct pci_dev *dev,
 	pci_release_regions(dev);
 out_disable:
 	pci_disable_device(dev);
-out_free:
-	kfree(info);
 	return -ENODEV;
 }
 
@@ -127,8 +125,6 @@ static void netx_pci_remove(struct pci_dev *dev)
 	pci_release_regions(dev);
 	pci_disable_device(dev);
 	iounmap(info->mem[0].internal_addr);
-
-	kfree(info);
 }
 
 static struct pci_device_id netx_pci_ids[] = {
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index dde5cbb2717876b59d4d1393eced78d6cddcf193..b8e44d16279fbc0f77c3bf41346eb83d2b163c18 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -74,23 +74,19 @@ static int probe(struct pci_dev *pdev,
 	struct uio_pci_generic_dev *gdev;
 	int err;
 
-	err = pci_enable_device(pdev);
+	err = pcim_enable_device(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "%s: pci_enable_device failed: %d\n",
 			__func__, err);
 		return err;
 	}
 
-	if (pdev->irq && !pci_intx_mask_supported(pdev)) {
-		err = -ENODEV;
-		goto err_verify;
-	}
+	if (pdev->irq && !pci_intx_mask_supported(pdev))
+		return -ENODEV;
 
-	gdev = kzalloc(sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
-	if (!gdev) {
-		err = -ENOMEM;
-		goto err_alloc;
-	}
+	gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
+	if (!gdev)
+		return -ENOMEM;
 
 	gdev->info.name = "uio_pci_generic";
 	gdev->info.version = DRIVER_VERSION;
@@ -105,34 +101,13 @@ static int probe(struct pci_dev *pdev,
 			 "no support for interrupts?\n");
 	}
 
-	err = uio_register_device(&pdev->dev, &gdev->info);
-	if (err)
-		goto err_register;
-	pci_set_drvdata(pdev, gdev);
-
-	return 0;
-err_register:
-	kfree(gdev);
-err_alloc:
-err_verify:
-	pci_disable_device(pdev);
-	return err;
-}
-
-static void remove(struct pci_dev *pdev)
-{
-	struct uio_pci_generic_dev *gdev = pci_get_drvdata(pdev);
-
-	uio_unregister_device(&gdev->info);
-	pci_disable_device(pdev);
-	kfree(gdev);
+	return devm_uio_register_device(&pdev->dev, &gdev->info);
 }
 
 static struct pci_driver uio_pci_driver = {
 	.name = "uio_pci_generic",
 	.id_table = NULL, /* only dynamic id's */
 	.probe = probe,
-	.remove = remove,
 };
 
 module_pci_driver(uio_pci_driver);
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 1cc175d3c25c58db0a1d8f0f7a4657962122cb1e..e9096f53b4cc6c2dd1fce5724c037ad208815b39 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -99,7 +99,6 @@ static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev)
 
 	for (cnt = 0; cnt < MAX_PRUSS_EVT; cnt++, p++) {
 		uio_unregister_device(p);
-		kfree(p->name);
 	}
 	iounmap(gdev->prussio_vaddr);
 	if (gdev->ddr_vaddr) {
@@ -110,10 +109,7 @@ static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev)
 		gen_pool_free(gdev->sram_pool,
 			      gdev->sram_vaddr,
 			      sram_pool_sz);
-	kfree(gdev->info);
 	clk_disable(gdev->pruss_clk);
-	clk_put(gdev->pruss_clk);
-	kfree(gdev);
 }
 
 static int pruss_probe(struct platform_device *pdev)
@@ -125,28 +121,25 @@ static int pruss_probe(struct platform_device *pdev)
 	int ret, cnt, i, len;
 	struct uio_pruss_pdata *pdata = dev_get_platdata(dev);
 
-	gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
+	gdev = devm_kzalloc(dev, sizeof(struct uio_pruss_dev), GFP_KERNEL);
 	if (!gdev)
 		return -ENOMEM;
 
-	gdev->info = kcalloc(MAX_PRUSS_EVT, sizeof(*p), GFP_KERNEL);
-	if (!gdev->info) {
-		ret = -ENOMEM;
-		goto err_free_gdev;
-	}
+	gdev->info = devm_kcalloc(dev, MAX_PRUSS_EVT, sizeof(*p), GFP_KERNEL);
+	if (!gdev->info)
+		return -ENOMEM;
 
 	/* Power on PRU in case its not done as part of boot-loader */
-	gdev->pruss_clk = clk_get(dev, "pruss");
+	gdev->pruss_clk = devm_clk_get(dev, "pruss");
 	if (IS_ERR(gdev->pruss_clk)) {
 		dev_err(dev, "Failed to get clock\n");
-		ret = PTR_ERR(gdev->pruss_clk);
-		goto err_free_info;
+		return PTR_ERR(gdev->pruss_clk);
 	}
 
 	ret = clk_enable(gdev->pruss_clk);
 	if (ret) {
 		dev_err(dev, "Failed to enable clock\n");
-		goto err_clk_put;
+		return ret;
 	}
 
 	regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -206,7 +199,7 @@ static int pruss_probe(struct platform_device *pdev)
 		p->mem[2].size = extram_pool_sz;
 		p->mem[2].memtype = UIO_MEM_PHYS;
 
-		p->name = kasprintf(GFP_KERNEL, "pruss_evt%d", cnt);
+		p->name = devm_kasprintf(dev, GFP_KERNEL, "pruss_evt%d", cnt);
 		p->version = DRV_VERSION;
 
 		/* Register PRUSS IRQ lines */
@@ -215,10 +208,8 @@ static int pruss_probe(struct platform_device *pdev)
 		p->priv = gdev;
 
 		ret = uio_register_device(dev, p);
-		if (ret < 0) {
-			kfree(p->name);
+		if (ret < 0)
 			goto err_unloop;
-		}
 	}
 
 	platform_set_drvdata(pdev, gdev);
@@ -227,7 +218,6 @@ static int pruss_probe(struct platform_device *pdev)
 err_unloop:
 	for (i = 0, p = gdev->info; i < cnt; i++, p++) {
 		uio_unregister_device(p);
-		kfree(p->name);
 	}
 	iounmap(gdev->prussio_vaddr);
 err_free_ddr_vaddr:
@@ -238,12 +228,6 @@ static int pruss_probe(struct platform_device *pdev)
 		gen_pool_free(gdev->sram_pool, gdev->sram_vaddr, sram_pool_sz);
 err_clk_disable:
 	clk_disable(gdev->pruss_clk);
-err_clk_put:
-	clk_put(gdev->pruss_clk);
-err_free_info:
-	kfree(gdev->info);
-err_free_gdev:
-	kfree(gdev);
 
 	return ret;
 }
diff --git a/drivers/uio/uio_sercos3.c b/drivers/uio/uio_sercos3.c
index 9658a0887feeb9c8a56ff6de65039d51d6aad6db..b93a5f8f4cba2e065925cc004c1d38fb3525756e 100644
--- a/drivers/uio/uio_sercos3.c
+++ b/drivers/uio/uio_sercos3.c
@@ -124,16 +124,16 @@ static int sercos3_pci_probe(struct pci_dev *dev,
 	struct sercos3_priv *priv;
 	int i;
 
-	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
+	info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 
-	priv = kzalloc(sizeof(struct sercos3_priv), GFP_KERNEL);
+	priv = devm_kzalloc(&dev->dev, sizeof(struct sercos3_priv), GFP_KERNEL);
 	if (!priv)
-		goto out_free;
+		return -ENOMEM;
 
 	if (pci_enable_device(dev))
-		goto out_free_priv;
+		return -ENODEV;
 
 	if (pci_request_regions(dev, "sercos3"))
 		goto out_disable;
@@ -174,10 +174,6 @@ static int sercos3_pci_probe(struct pci_dev *dev,
 	pci_release_regions(dev);
 out_disable:
 	pci_disable_device(dev);
-out_free_priv:
-	kfree(priv);
-out_free:
-	kfree(info);
 	return -ENODEV;
 }
 
@@ -193,8 +189,6 @@ static void sercos3_pci_remove(struct pci_dev *dev)
 		if (info->mem[i].internal_addr)
 			iounmap(info->mem[i].internal_addr);
 	}
-	kfree(info->priv);
-	kfree(info);
 }
 
 static struct pci_device_id sercos3_pci_ids[] = {
diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c
index f6d3731581ebdeac4a53c2a1f5b611115896ff32..4025da20b3fdbbe30ce029dc45e09d699bd2c77f 100644
--- a/drivers/usb/phy/phy-jz4770.c
+++ b/drivers/usb/phy/phy-jz4770.c
@@ -350,7 +350,7 @@ static struct platform_driver ingenic_phy_driver = {
 	.probe		= jz4770_phy_probe,
 	.driver		= {
 		.name	= "jz4770-phy",
-		.of_match_table = of_match_ptr(ingenic_usb_phy_of_matches),
+		.of_match_table = ingenic_usb_phy_of_matches,
 	},
 };
 module_platform_driver(ingenic_phy_driver);
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index ea938dc29c5e314e756e37940bc711630f1277c4..439b0edeca0897a0f5e40d9a369cad3bd06d65df 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -1510,7 +1510,7 @@ static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
 	/* Find pci_dev container of dev */
 	pdev = to_pci_dev(parent);
 
-	return pci_alloc_consistent(pdev, size, dma);
+	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
 }
 
 static void ca91cx42_free_consistent(struct device *parent, size_t size,
@@ -1521,7 +1521,7 @@ static void ca91cx42_free_consistent(struct device *parent, size_t size,
 	/* Find pci_dev container of dev */
 	pdev = to_pci_dev(parent);
 
-	pci_free_consistent(pdev, size, vaddr, dma);
+	dma_free_coherent(&pdev->dev, size, vaddr, dma);
 }
 
 /*
@@ -1555,8 +1555,9 @@ static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
 	}
 
 	/* Allocate mem for CR/CSR image */
-	bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
-						     &bridge->crcsr_bus);
+	bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
+						  VME_CRCSR_BUF_SIZE,
+						  &bridge->crcsr_bus, GFP_KERNEL);
 	if (!bridge->crcsr_kernel) {
 		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
 			"image\n");
@@ -1589,8 +1590,8 @@ static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
 	/* Free image */
 	iowrite32(0, bridge->base + VCSR_TO);
 
-	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
-		bridge->crcsr_bus);
+	dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
+			  bridge->crcsr_kernel, bridge->crcsr_bus);
 }
 
 static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 50ae26977a0277596e51ebd55480f0bf58596126..be9051b02f24cf2bad4427fae5e693d6b1b54492 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -506,7 +506,6 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
 	default:
 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
 		return -EINVAL;
-		break;
 	}
 
 	/* Convert 64-bit variables to 2x 32-bit variables */
@@ -995,7 +994,6 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
 		retval = -EINVAL;
 		goto err_aspace;
-		break;
 	}
 
 	temp_ctl &= ~(3<<4);
@@ -1503,7 +1501,6 @@ static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
 	default:
 		dev_err(dev, "Invalid address space\n");
 		return -EINVAL;
-		break;
 	}
 
 	if (cycle & VME_SUPER)
@@ -1603,7 +1600,6 @@ static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
 	default:
 		dev_err(dev, "Invalid address space\n");
 		return -EINVAL;
-		break;
 	}
 
 	if (cycle & VME_SUPER)
@@ -1701,7 +1697,6 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
 		dev_err(tsi148_bridge->parent, "Invalid source type\n");
 		retval = -EINVAL;
 		goto err_source;
-		break;
 	}
 
 	/* Assume last link - this will be over-written by adding another */
@@ -1738,7 +1733,6 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
 		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
 		retval = -EINVAL;
 		goto err_dest;
-		break;
 	}
 
 	/* Fill out count */
@@ -1964,7 +1958,6 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
 		mutex_unlock(&lm->mtx);
 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
 		return -EINVAL;
-		break;
 	}
 
 	if (cycle & VME_SUPER)
@@ -2162,7 +2155,7 @@ static void *tsi148_alloc_consistent(struct device *parent, size_t size,
 	/* Find pci_dev container of dev */
 	pdev = to_pci_dev(parent);
 
-	return pci_alloc_consistent(pdev, size, dma);
+	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
 }
 
 static void tsi148_free_consistent(struct device *parent, size_t size,
@@ -2173,7 +2166,7 @@ static void tsi148_free_consistent(struct device *parent, size_t size,
 	/* Find pci_dev container of dev */
 	pdev = to_pci_dev(parent);
 
-	pci_free_consistent(pdev, size, vaddr, dma);
+	dma_free_coherent(&pdev->dev, size, vaddr, dma);
 }
 
 /*
@@ -2199,8 +2192,9 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
 	bridge = tsi148_bridge->driver_priv;
 
 	/* Allocate mem for CR/CSR image */
-	bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
-						     &bridge->crcsr_bus);
+	bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
+						  VME_CRCSR_BUF_SIZE,
+						  &bridge->crcsr_bus, GFP_KERNEL);
 	if (!bridge->crcsr_kernel) {
 		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
 			"CR/CSR image\n");
@@ -2268,8 +2262,8 @@ static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
 	iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
 	iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
 
-	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
-		bridge->crcsr_bus);
+	dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
+			  bridge->crcsr_kernel, bridge->crcsr_bus);
 }
 
 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index b398293980b66329c2e7ed7097ea10f70ccc0c42..54d7963c10782fed64d0b5a7147b52037da2c123 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -52,28 +52,23 @@ static struct vme_bridge *find_bridge(struct vme_resource *resource)
 	case VME_MASTER:
 		return list_entry(resource->entry, struct vme_master_resource,
 			list)->parent;
-		break;
 	case VME_SLAVE:
 		return list_entry(resource->entry, struct vme_slave_resource,
 			list)->parent;
-		break;
 	case VME_DMA:
 		return list_entry(resource->entry, struct vme_dma_resource,
 			list)->parent;
-		break;
 	case VME_LM:
 		return list_entry(resource->entry, struct vme_lm_resource,
 			list)->parent;
-		break;
 	default:
 		printk(KERN_ERR "Unknown resource type\n");
 		return NULL;
-		break;
 	}
 }
 
 /**
- * vme_free_consistent - Allocate contiguous memory.
+ * vme_alloc_consistent - Allocate contiguous memory.
  * @resource: Pointer to VME resource.
  * @size: Size of allocation required.
  * @dma: Pointer to variable to store physical address of allocation.
@@ -179,7 +174,6 @@ size_t vme_get_size(struct vme_resource *resource)
 			return 0;
 
 		return size;
-		break;
 	case VME_SLAVE:
 		retval = vme_slave_get(resource, &enabled, &base, &size,
 			&buf_base, &aspace, &cycle);
@@ -187,14 +181,11 @@ size_t vme_get_size(struct vme_resource *resource)
 			return 0;
 
 		return size;
-		break;
 	case VME_DMA:
 		return 0;
-		break;
 	default:
 		printk(KERN_ERR "Unknown resource type\n");
 		return 0;
-		break;
 	}
 }
 EXPORT_SYMBOL(vme_get_size);
@@ -647,7 +638,7 @@ int vme_master_get(struct vme_resource *resource, int *enabled,
 EXPORT_SYMBOL(vme_master_get);
 
 /**
- * vme_master_write - Read data from VME space into a buffer.
+ * vme_master_read - Read data from VME space into a buffer.
  * @resource: Pointer to VME master resource.
  * @buf: Pointer to buffer where data should be transferred.
  * @count: Number of bytes to transfer.
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index cddf60b7309cafc8ddbd767712ad4a201d470a9a..3712b1e6dc71e41700c6177e3c6e27c8b7d135c7 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -315,7 +315,7 @@ static ssize_t resolution_show(struct device *device,
 static ssize_t resolution_store(struct device *device,
 	struct device_attribute *attr, const char *buf, size_t size);
 
-static ssize_t eeprom_store(struct device *device,
+static ssize_t eeprom_cmd_store(struct device *device,
 	struct device_attribute *attr, const char *buf, size_t size);
 
 static ssize_t alarms_store(struct device *device,
@@ -350,7 +350,7 @@ static DEVICE_ATTR_RO(w1_seq);
 static DEVICE_ATTR_RO(temperature);
 static DEVICE_ATTR_RO(ext_power);
 static DEVICE_ATTR_RW(resolution);
-static DEVICE_ATTR_WO(eeprom);
+static DEVICE_ATTR_WO(eeprom_cmd);
 static DEVICE_ATTR_RW(alarms);
 static DEVICE_ATTR_RW(conv_time);
 static DEVICE_ATTR_RW(features);
@@ -386,7 +386,7 @@ static struct attribute *w1_therm_attrs[] = {
 	&dev_attr_temperature.attr,
 	&dev_attr_ext_power.attr,
 	&dev_attr_resolution.attr,
-	&dev_attr_eeprom.attr,
+	&dev_attr_eeprom_cmd.attr,
 	&dev_attr_alarms.attr,
 	&dev_attr_conv_time.attr,
 	&dev_attr_features.attr,
@@ -397,7 +397,7 @@ static struct attribute *w1_ds18s20_attrs[] = {
 	&dev_attr_w1_slave.attr,
 	&dev_attr_temperature.attr,
 	&dev_attr_ext_power.attr,
-	&dev_attr_eeprom.attr,
+	&dev_attr_eeprom_cmd.attr,
 	&dev_attr_alarms.attr,
 	&dev_attr_conv_time.attr,
 	&dev_attr_features.attr,
@@ -410,7 +410,7 @@ static struct attribute *w1_ds28ea00_attrs[] = {
 	&dev_attr_temperature.attr,
 	&dev_attr_ext_power.attr,
 	&dev_attr_resolution.attr,
-	&dev_attr_eeprom.attr,
+	&dev_attr_eeprom_cmd.attr,
 	&dev_attr_alarms.attr,
 	&dev_attr_conv_time.attr,
 	&dev_attr_features.attr,
@@ -1740,7 +1740,7 @@ static ssize_t resolution_store(struct device *device,
 	return size;
 }
 
-static ssize_t eeprom_store(struct device *device,
+static ssize_t eeprom_cmd_store(struct device *device,
 	struct device_attribute *attr, const char *buf, size_t size)
 {
 	struct w1_slave *sl = dev_to_w1_slave(device);
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index e8ca62b2cb5bef57b41f2ecacf60ff20a63eca72..2bc3030a69e54b4ea609a37b8718fd1c2931467a 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -198,6 +198,8 @@ void fpga_mgr_free(struct fpga_manager *mgr);
 int fpga_mgr_register(struct fpga_manager *mgr);
 void fpga_mgr_unregister(struct fpga_manager *mgr);
 
+int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr);
+
 struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
 					  const struct fpga_manager_ops *mops,
 					  void *priv);
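A hedged sketch of how the newly declared devm_fpga_mgr_register() pairs with the existing devm_fpga_mgr_create(): registration is undone automatically on driver unbind, so no explicit fpga_mgr_unregister() call is needed in .remove. The foo_* names are assumptions for illustration, and a real ops table must provide the mandatory callbacks.

#include <linux/fpga/fpga-mgr.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static const struct fpga_manager_ops foo_fpga_ops = {
	/* a real driver fills in .state, .write_init, .write, .write_complete, ... */
};

static int foo_fpga_probe(struct platform_device *pdev)
{
	struct fpga_manager *mgr;

	mgr = devm_fpga_mgr_create(&pdev->dev, "Foo FPGA Manager",
				   &foo_fpga_ops, NULL);
	if (!mgr)
		return -ENOMEM;

	/* unregistration happens automatically when the device is unbound */
	return devm_fpga_mgr_register(&pdev->dev, mgr);
}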
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 52aa4821093aa9db56273c51fb32bbce79f16754..959ad7d850b4efe9580cf5651aabce65958c9eb8 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -95,6 +95,12 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
 ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
 ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
 				size_t length);
+ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
+			    u8 vtag);
+ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
+			    u8 *vtag);
+ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
+				     size_t length, u8 *vtag);
 
 int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
 int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
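To illustrate the vtag variants declared above, a hypothetical MEI bus client could tag its traffic as in the sketch below; the foo_xfer() helper and the 4-byte command layout are made up for the example.

#include <linux/device.h>
#include <linux/mei_cl_bus.h>

static int foo_xfer(struct mei_cl_device *cldev)
{
	u8 cmd[4] = { 0x01, 0x00, 0x00, 0x00 };
	u8 resp[64];
	u8 rx_vtag;
	ssize_t ret;

	/* send the request on virtual tag 1 */
	ret = mei_cldev_send_vtag(cldev, cmd, sizeof(cmd), 1);
	if (ret < 0)
		return ret;

	/* blocking receive; the tag the firmware answered on is returned in rx_vtag */
	ret = mei_cldev_recv_vtag(cldev, resp, sizeof(resp), &rx_vtag);
	if (ret < 0)
		return ret;

	dev_dbg(&cldev->dev, "received %zd bytes on vtag %u\n", ret, rx_vtag);
	return 0;
}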
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index cb7cd54c5e636f2ca62f68bbe2a1271657c461b6..562862ff819c4316d20fa8ca86f98ae6aeab1e61 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -335,6 +335,7 @@ struct mhi_controller_config {
  * @wlock: Lock for protecting device wakeup
  * @mhi_link_info: Device bandwidth info
  * @st_worker: State transition worker
+ * @hiprio_wq: High priority workqueue for MHI work such as state transitions
  * @state_event: State change event
  * @status_cb: CB function to notify power states of the device (required)
  * @wake_get: CB function to assert device wake (optional)
@@ -347,6 +348,7 @@ struct mhi_controller_config {
  * @read_reg: Read a MHI register via the physical link (required)
  * @write_reg: Write a MHI register via the physical link (required)
  * @buffer_len: Bounce buffer length
+ * @index: Index of the MHI controller instance
  * @bounce_buf: Use of bounce buffer
  * @fbc_download: MHI host needs to do complete image transfer (optional)
  * @pre_init: MHI host needs to do pre-initialization before power up
@@ -417,6 +419,7 @@ struct mhi_controller {
 	spinlock_t wlock;
 	struct mhi_link_info mhi_link_info;
 	struct work_struct st_worker;
+	struct workqueue_struct *hiprio_wq;
 	wait_queue_head_t state_event;
 
 	void (*status_cb)(struct mhi_controller *mhi_cntrl,
@@ -436,6 +439,7 @@ struct mhi_controller {
 			  u32 val);
 
 	size_t buffer_len;
+	int index;
 	bool bounce_buf;
 	bool fbc_download;
 	bool pre_init;
@@ -643,12 +647,12 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
 int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
 
 /**
- * mhi_download_rddm_img - Download ramdump image from device for
- *                         debugging purpose.
+ * mhi_download_rddm_image - Download ramdump image from device for
+ *                           debugging purposes.
  * @mhi_cntrl: MHI controller
  * @in_panic: Download rddm image during kernel panic
  */
-int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
+int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic);
 
 /**
  * mhi_force_rddm_mode - Force device into rddm mode
@@ -656,6 +660,12 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
  */
 int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
 
+/**
+ * mhi_get_exec_env - Get BHI execution environment of the device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+
 /**
  * mhi_get_mhi_state - Get MHI state of the device
  * @mhi_cntrl: MHI controller
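A small sketch of how a controller driver might combine the renamed mhi_download_rddm_image() with the newly exposed mhi_get_exec_env(); foo_collect_rddm() is a hypothetical helper, not an API introduced by this patch.

#include <linux/mhi.h>

static int foo_collect_rddm(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* if the device is not already executing RDDM, force it there first */
	if (mhi_get_exec_env(mhi_cntrl) != MHI_EE_RDDM) {
		ret = mhi_force_rddm_mode(mhi_cntrl);
		if (ret)
			return ret;
	}

	/* pull the ramdump from the device; pass true instead when in panic context */
	return mhi_download_rddm_image(mhi_cntrl, false);
}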
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 06409a6c40bcb2a0ca7e8ba9408fce159b0d9b6a..e162b757b6d547715d0c2d4497475e72adcc830c 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -30,6 +30,19 @@ enum nvmem_type {
 #define NVMEM_DEVID_NONE	(-1)
 #define NVMEM_DEVID_AUTO	(-2)
 
+/**
+ * struct nvmem_keepout - NVMEM register keepout range.
+ *
+ * @start:	The first byte offset to avoid.
+ * @end:	One beyond the last byte offset to avoid.
+ * @value:	The byte to fill reads with for this region.
+ */
+struct nvmem_keepout {
+	unsigned int start;
+	unsigned int end;
+	unsigned char value;
+};
+
 /**
  * struct nvmem_config - NVMEM device configuration
  *
@@ -39,6 +52,8 @@ enum nvmem_type {
  * @owner:	Pointer to exporter module. Used for refcounting.
  * @cells:	Optional array of pre-defined NVMEM cells.
  * @ncells:	Number of elements in cells.
+ * @keepout:	Optional array of keepout ranges (sorted ascending by start).
+ * @nkeepout:	Number of elements in the keepout array.
  * @type:	Type of the nvmem storage
  * @read_only:	Device is read-only.
  * @root_only:	Device is accessibly to root only.
@@ -66,6 +81,8 @@ struct nvmem_config {
 	struct gpio_desc	*wp_gpio;
 	const struct nvmem_cell_info	*cells;
 	int			ncells;
+	const struct nvmem_keepout *keepout;
+	unsigned int		nkeepout;
 	enum nvmem_type		type;
 	bool			read_only;
 	bool			root_only;
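To illustrate the new keepout fields, a provider might describe a factory-locked window as in the sketch below; the foo_* names, offsets and fill byte are invented for the example.

#include <linux/kernel.h>
#include <linux/nvmem-provider.h>

/* byte offsets 0x20..0x2f must not be touched; reads in this window are filled with 0xff */
static const struct nvmem_keepout foo_keepouts[] = {
	{ .start = 0x20, .end = 0x30, .value = 0xff },
};

static struct nvmem_config foo_nvmem_config = {
	.name		= "foo-efuse",
	.read_only	= true,
	.keepout	= foo_keepouts,
	.nkeepout	= ARRAY_SIZE(foo_keepouts),
	/* .dev, .size, .reg_read, ... are filled in as usual before nvmem_register() */
};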
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 745f5e73f99acdd4f41b2e5dd3e6048df86c83a2..f895ccabbe2950624a8ca9d439e77f89deba7ae1 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1174,6 +1174,7 @@ struct rtsx_pcr {
 
 	struct delayed_work		carddet_work;
 	struct delayed_work		idle_work;
+	struct delayed_work		rtd3_work;
 
 	spinlock_t			lock;
 	struct mutex			pcr_mutex;
@@ -1183,6 +1184,7 @@ struct rtsx_pcr {
 	unsigned int			cur_clock;
 	bool				remove_pci;
 	bool				msi_en;
+	bool				is_runtime_suspended;
 
 #define EXTRA_CAPS_SD_SDR50		(1 << 0)
 #define EXTRA_CAPS_SD_SDR104		(1 << 1)
diff --git a/include/linux/siox.h b/include/linux/siox.h
index da7225bf1877e2494688eb77cf4daf0ded6b789f..6bfbda3f634c2d7cb48fd057656e8e2c5b2d6f43 100644
--- a/include/linux/siox.h
+++ b/include/linux/siox.h
@@ -36,7 +36,7 @@ bool siox_device_connected(struct siox_device *sdevice);
 
 struct siox_driver {
 	int (*probe)(struct siox_device *sdevice);
-	int (*remove)(struct siox_device *sdevice);
+	void (*remove)(struct siox_device *sdevice);
 	void (*shutdown)(struct siox_device *sdevice);
 
 	/*
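With remove() now returning void, a minimal client driver sketch looks as follows; the foo_* names are illustrative only.

#include <linux/module.h>
#include <linux/siox.h>

static int foo_siox_probe(struct siox_device *sdevice)
{
	return 0;
}

static void foo_siox_remove(struct siox_device *sdevice)
{
	/* release resources; there is no error code to propagate any more */
}

static struct siox_driver foo_siox_driver = {
	.probe	= foo_siox_probe,
	.remove	= foo_siox_remove,
	.driver	= {
		.name = "foo-siox",
	},
};
module_siox_driver(foo_siox_driver);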
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
index e14dff9a9c7f3180e470ab45f14c1750e7574b2c..138bec908c40c590269ca7faf4a6ff8688e9223b 100644
--- a/include/linux/soundwire/sdw_registers.h
+++ b/include/linux/soundwire/sdw_registers.h
@@ -41,6 +41,12 @@
 #define SDW_DP0_INT_IMPDEF1			BIT(5)
 #define SDW_DP0_INT_IMPDEF2			BIT(6)
 #define SDW_DP0_INT_IMPDEF3			BIT(7)
+#define SDW_DP0_INTERRUPTS			(SDW_DP0_INT_TEST_FAIL | \
+						 SDW_DP0_INT_PORT_READY | \
+						 SDW_DP0_INT_BRA_FAILURE | \
+						 SDW_DP0_INT_IMPDEF1 | \
+						 SDW_DP0_INT_IMPDEF2 | \
+						 SDW_DP0_INT_IMPDEF3)
 
 #define SDW_DP0_PORTCTRL_DATAMODE		GENMASK(3, 2)
 #define SDW_DP0_PORTCTRL_NXTINVBANK		BIT(4)
@@ -241,6 +247,11 @@
 #define SDW_DPN_INT_IMPDEF1			BIT(5)
 #define SDW_DPN_INT_IMPDEF2			BIT(6)
 #define SDW_DPN_INT_IMPDEF3			BIT(7)
+#define SDW_DPN_INTERRUPTS			(SDW_DPN_INT_TEST_FAIL | \
+						 SDW_DPN_INT_PORT_READY | \
+						 SDW_DPN_INT_IMPDEF1 | \
+						 SDW_DPN_INT_IMPDEF2 | \
+						 SDW_DPN_INT_IMPDEF3)
 
 #define SDW_DPN_PORTCTRL_FLOWMODE		GENMASK(1, 0)
 #define SDW_DPN_PORTCTRL_DATAMODE		GENMASK(3, 2)
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 394a3f68bad5df36a1d0ad15c179e9b88adca7e6..729bcbf9f5ad1197818728b8546668fb64addd0e 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -138,6 +138,7 @@ struct spmi_driver {
 	struct device_driver driver;
 	int	(*probe)(struct spmi_device *sdev);
 	void	(*remove)(struct spmi_device *sdev);
+	void	(*shutdown)(struct spmi_device *sdev);
 };
 
 static inline struct spmi_driver *to_spmi_driver(struct device_driver *d)
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 54bf6b1184010bce6581104158160773574d8834..47c5962b876b027e2ea935c4003b37f7a1bfdca3 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -117,6 +117,14 @@ extern int __must_check
 			      struct uio_info *info);
 
 /* use a define to avoid include chaining to get THIS_MODULE */
+
+/**
+ * uio_register_device - register a new userspace IO device
+ * @parent:	parent device
+ * @info:	UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
 #define uio_register_device(parent, info) \
 	__uio_register_device(THIS_MODULE, parent, info)
 
@@ -129,6 +137,14 @@ extern int __must_check
 				   struct uio_info *info);
 
 /* use a define to avoid include chaining to get THIS_MODULE */
+
+/**
+ * devm_uio_register_device - Resource managed uio_register_device()
+ * @parent:	parent device
+ * @info:	UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
 #define devm_uio_register_device(parent, info) \
 	__devm_uio_register_device(THIS_MODULE, parent, info)
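
Both helpers return zero or a negative errno as documented above, so a probe routine can hand the result straight back. A sketch with a hypothetical platform driver:

#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static int foo_uio_probe(struct platform_device *pdev)
{
	struct uio_info *info;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->name = "foo-uio";
	info->version = "1.0";

	/* managed variant: automatically unregistered on driver detach */
	return devm_uio_register_device(&pdev->dev, info);
}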
 
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index f1ce2c4c077e2fe1667db4c75e1d605a0ed2ec76..ec84ad10656834d690089c0556110ad09005c2b1 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -248,6 +248,7 @@ enum transaction_flags {
 	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */
 	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */
 	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */
+	TF_CLEAR_BUF	= 0x20,	/* clear buffer on txn complete */
 };
 
 struct binder_transaction_data {
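
From userspace the flag is OR-ed into binder_transaction_data.flags before the transaction is queued, telling the kernel to zero the buffer once the transaction completes. A small fragment (the surrounding BC_TRANSACTION plumbing is omitted):

#include <linux/android/binder.h>

/* Mark a transaction whose payload carries sensitive data so its buffer
 * is wiped by the kernel after completion. */
static void foo_mark_sensitive(struct binder_transaction_data *tr)
{
	tr->flags |= TF_CLEAR_BUF;
}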
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 9705b8adb60cc3a7263c3ca4e9761b048793536d..8c15a7d336a0d1889d821f3f5ddeb583fee5f212 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -18,8 +18,18 @@
 #define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START		0x8000	/* 32KB */
 #define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START	0x80	/* 128 bytes */
 
-#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT		48
-#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR		24
+/*
+ * 128 SOBs reserved for collective wait
+ * 16 SOBs reserved for sync stream
+ */
+#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT		144
+
+/*
+ * 64 monitors reserved for collective wait
+ * 8 monitors reserved for sync stream
+ */
+#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR		72
+
 /*
  * Goya queue Numbering
  *
@@ -76,10 +86,10 @@ enum gaudi_queue_id {
 	GAUDI_QUEUE_ID_DMA_4_1 = 18,	/* internal */
 	GAUDI_QUEUE_ID_DMA_4_2 = 19,	/* internal */
 	GAUDI_QUEUE_ID_DMA_4_3 = 20,	/* internal */
-	GAUDI_QUEUE_ID_DMA_5_0 = 21,	/* external */
-	GAUDI_QUEUE_ID_DMA_5_1 = 22,	/* external */
-	GAUDI_QUEUE_ID_DMA_5_2 = 23,	/* external */
-	GAUDI_QUEUE_ID_DMA_5_3 = 24,	/* external */
+	GAUDI_QUEUE_ID_DMA_5_0 = 21,	/* internal */
+	GAUDI_QUEUE_ID_DMA_5_1 = 22,	/* internal */
+	GAUDI_QUEUE_ID_DMA_5_2 = 23,	/* internal */
+	GAUDI_QUEUE_ID_DMA_5_3 = 24,	/* internal */
 	GAUDI_QUEUE_ID_DMA_6_0 = 25,	/* internal */
 	GAUDI_QUEUE_ID_DMA_6_1 = 26,	/* internal */
 	GAUDI_QUEUE_ID_DMA_6_2 = 27,	/* internal */
@@ -232,7 +242,8 @@ enum gaudi_engine_id {
 enum hl_device_status {
 	HL_DEVICE_STATUS_OPERATIONAL,
 	HL_DEVICE_STATUS_IN_RESET,
-	HL_DEVICE_STATUS_MALFUNCTION
+	HL_DEVICE_STATUS_MALFUNCTION,
+	HL_DEVICE_STATUS_NEEDS_RESET
 };
 
 /* Opcode for management ioctl
@@ -284,6 +295,7 @@ enum hl_device_status {
 #define HL_INFO_CLK_THROTTLE_REASON	13
 #define HL_INFO_SYNC_MANAGER		14
 #define HL_INFO_TOTAL_ENERGY		15
+#define HL_INFO_PLL_FREQUENCY		16
 
 #define HL_INFO_VERSION_MAX_LEN	128
 #define HL_INFO_CARD_NAME_MAX_LEN	16
@@ -385,6 +397,12 @@ struct hl_info_energy {
 	__u64 total_energy_consumption;
 };
 
+#define HL_PLL_NUM_OUTPUTS 4
+
+struct hl_pll_frequency_info {
+	__u16 output[HL_PLL_NUM_OUTPUTS];
+};
+
 /**
  * struct hl_info_sync_manager - sync manager information
  * @first_available_sync_object: first available sob
@@ -397,23 +415,28 @@ struct hl_info_sync_manager {
 
 /**
  * struct hl_info_cs_counters - command submission counters
- * @out_of_mem_drop_cnt: dropped due to memory allocation issue
- * @parsing_drop_cnt: dropped due to error in packet parsing
- * @queue_full_drop_cnt: dropped due to queue full
- * @device_in_reset_drop_cnt: dropped due to device in reset
- * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
+ * @total_out_of_mem_drop_cnt: total dropped due to memory allocation issue
+ * @ctx_out_of_mem_drop_cnt: context dropped due to memory allocation issue
+ * @total_parsing_drop_cnt: total dropped due to error in packet parsing
+ * @ctx_parsing_drop_cnt: context dropped due to error in packet parsing
+ * @total_queue_full_drop_cnt: total dropped due to queue full
+ * @ctx_queue_full_drop_cnt: context dropped due to queue full
+ * @total_device_in_reset_drop_cnt: total dropped due to device in reset
+ * @ctx_device_in_reset_drop_cnt: context dropped due to device in reset
+ * @total_max_cs_in_flight_drop_cnt: total dropped due to maximum CS in-flight
+ * @ctx_max_cs_in_flight_drop_cnt: context dropped due to maximum CS in-flight
  */
-struct hl_cs_counters {
-	__u64 out_of_mem_drop_cnt;
-	__u64 parsing_drop_cnt;
-	__u64 queue_full_drop_cnt;
-	__u64 device_in_reset_drop_cnt;
-	__u64 max_cs_in_flight_drop_cnt;
-};
-
 struct hl_info_cs_counters {
-	struct hl_cs_counters cs_counters;
-	struct hl_cs_counters ctx_cs_counters;
+	__u64 total_out_of_mem_drop_cnt;
+	__u64 ctx_out_of_mem_drop_cnt;
+	__u64 total_parsing_drop_cnt;
+	__u64 ctx_parsing_drop_cnt;
+	__u64 total_queue_full_drop_cnt;
+	__u64 ctx_queue_full_drop_cnt;
+	__u64 total_device_in_reset_drop_cnt;
+	__u64 ctx_device_in_reset_drop_cnt;
+	__u64 total_max_cs_in_flight_drop_cnt;
+	__u64 ctx_max_cs_in_flight_drop_cnt;
 };
 
 enum gaudi_dcores {
@@ -449,6 +472,8 @@ struct hl_info_args {
 		 * resolution.
 		 */
 		__u32 period_ms;
+		/* PLL frequency retrieval */
+		__u32 pll_index;
 	};
 
 	__u32 pad;
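
Userspace selects a PLL with the new pll_index field and gets a struct hl_pll_frequency_info back through the INFO ioctl. A hedged sketch, assuming the HL_IOCTL_INFO entry point and the return_pointer/return_size/op members of struct hl_info_args (they are not visible in the hunk above):

#include <stdint.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

/* Query the four outputs of PLL "idx" on an open device fd. */
static int foo_query_pll(int fd, __u32 idx, struct hl_pll_frequency_info *freq)
{
	struct hl_info_args args = {
		.op		= HL_INFO_PLL_FREQUENCY,
		.pll_index	= idx,
		.return_pointer	= (__u64)(uintptr_t)freq,
		.return_size	= sizeof(*freq),
	};

	return ioctl(fd, HL_IOCTL_INFO, &args);
}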
@@ -458,6 +483,8 @@ struct hl_info_args {
 #define HL_CB_OP_CREATE		0
 /* Opcode to destroy previously created command buffer */
 #define HL_CB_OP_DESTROY	1
+/* Opcode to retrieve information about a command buffer */
+#define HL_CB_OP_INFO		2
 
 /* 2MB minus 32 bytes for 2xMSG_PROT */
 #define HL_MAX_CB_SIZE		(0x200000 - 32)
@@ -481,8 +508,17 @@ struct hl_cb_in {
 };
 
 struct hl_cb_out {
-	/* Handle of CB */
-	__u64 cb_handle;
+	union {
+		/* Handle of CB */
+		__u64 cb_handle;
+
+		/* Information about CB */
+		struct {
+			/* Usage count of CB */
+			__u32 usage_cnt;
+			__u32 pad;
+		};
+	};
 };
 
 union hl_cb_args {
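
With HL_CB_OP_INFO, the command-buffer ioctl can also report a CB's usage count through the new anonymous struct in hl_cb_out. A hedged sketch, assuming HL_IOCTL_CB and the cb_handle/op members of struct hl_cb_in (not shown in this hunk):

#include <sys/ioctl.h>
#include <misc/habanalabs.h>

/* Read the usage count of an existing command buffer. */
static int foo_cb_usage(int fd, __u64 handle, __u32 *usage_cnt)
{
	union hl_cb_args args = {
		.in = {
			.cb_handle	= handle,
			.op		= HL_CB_OP_INFO,
		},
	};
	int rc;

	rc = ioctl(fd, HL_IOCTL_CB, &args);
	if (!rc)
		*usage_cnt = args.out.usage_cnt;

	return rc;
}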
@@ -490,6 +526,22 @@ union hl_cb_args {
 	struct hl_cb_out out;
 };
 
+/* HL_CS_CHUNK_FLAGS_ values
+ *
+ * HL_CS_CHUNK_FLAGS_USER_ALLOC_CB:
+ *      Indicates if the CB was allocated and mapped by userspace.
+ *      A user-allocated CB is a command buffer allocated by the user, via
+ *      malloc (or similar). After allocating the CB, the user invokes the
+ *      "memory ioctl" to map the user memory into a device virtual address.
+ *      The user provides this address via the cb_handle field. The interface
+ *      makes it possible to create large CBs, which aren't limited to
+ *      HL_MAX_CB_SIZE, and therefore increases the throughput of the PCI-DMA
+ *      queues. This CB allocation method also reduces the use of the Linux
+ *      DMA-able memory pool, which is limited and used by other Linux
+ *      sub-systems.
+ */
+#define HL_CS_CHUNK_FLAGS_USER_ALLOC_CB 0x1
+
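
Tying the comment above to the chunk layout: after mapping the malloc'ed buffer with the memory ioctl, the returned device virtual address goes into cb_handle and the new flag into cs_chunk_flags. A sketch of just the chunk setup (the memory-ioctl call itself is omitted):

#include <string.h>
#include <misc/habanalabs.h>

/* "device_va" is the device virtual address the memory ioctl returned for
 * the user-allocated buffer; "size" may exceed HL_MAX_CB_SIZE. */
static void foo_fill_user_cb_chunk(struct hl_cs_chunk *chunk,
				   __u64 device_va, __u32 size)
{
	memset(chunk, 0, sizeof(*chunk));
	chunk->cb_handle = device_va;
	chunk->cb_size = size;
	chunk->cs_chunk_flags = HL_CS_CHUNK_FLAGS_USER_ALLOC_CB;
}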
 /*
  * This structure size must always be fixed to 64-bytes for backward
  * compatibility
@@ -507,7 +559,8 @@ struct hl_cs_chunk {
 		 */
 		__u64 cb_handle;
 
-		/* Relevant only when HL_CS_FLAGS_WAIT is set.
+		/* Relevant only when HL_CS_FLAGS_WAIT or
+		 * HL_CS_FLAGS_COLLECTIVE_WAIT is set.
 		 * This holds address of array of u64 values that contain
 		 * signal CS sequence numbers. The wait described by this job
 		 * will listen on all those signals (wait event per signal)
@@ -525,7 +578,8 @@ struct hl_cs_chunk {
 		 */
 		__u32 cb_size;
 
-		/* Relevant only when HL_CS_FLAGS_WAIT is set.
+		/* Relevant only when HL_CS_FLAGS_WAIT or
+		 * HL_CS_FLAGS_COLLECTIVE_WAIT is set.
 		 * Number of entries in signal_seq_arr
 		 */
 		__u32 num_signal_seq_arr;
@@ -534,14 +588,22 @@ struct hl_cs_chunk {
 	/* HL_CS_CHUNK_FLAGS_* */
 	__u32 cs_chunk_flags;
 
+	/* Relevant only when HL_CS_FLAGS_COLLECTIVE_WAIT is set.
+	 * This holds the collective engine ID. The wait described by this job
+	 * will sync with this engine and with all NICs before completion.
+	 */
+	__u32 collective_engine_id;
+
 	/* Align structure to 64 bytes */
-	__u32 pad[11];
+	__u32 pad[10];
 };
 
-/* SIGNAL and WAIT flags are mutually exclusive */
+/* SIGNAL and WAIT/COLLECTIVE_WAIT flags are mutually exclusive */
 #define HL_CS_FLAGS_FORCE_RESTORE	0x1
 #define HL_CS_FLAGS_SIGNAL		0x2
 #define HL_CS_FLAGS_WAIT		0x4
+#define HL_CS_FLAGS_COLLECTIVE_WAIT	0x8
+#define HL_CS_FLAGS_TIMESTAMP		0x20
 
 #define HL_CS_STATUS_SUCCESS		0
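
For a collective wait, the chunk carries both the signal sequence array and the engine to sync with, and the submission itself must set HL_CS_FLAGS_COLLECTIVE_WAIT. A hedged sketch of the chunk setup, assuming the union member that holds the array address is named signal_seq_arr (only its comment appears in the hunk above):

#include <stdint.h>
#include <string.h>
#include <misc/habanalabs.h>

/* Describe a wait on "n" signal CS sequence numbers, synced against the
 * given collective engine before completion. */
static void foo_fill_collective_wait(struct hl_cs_chunk *chunk,
				     const __u64 *seq_arr, __u32 n,
				     __u32 engine_id)
{
	memset(chunk, 0, sizeof(*chunk));
	chunk->signal_seq_arr = (__u64)(uintptr_t)seq_arr;
	chunk->num_signal_seq_arr = n;
	chunk->collective_engine_id = engine_id;
}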
 
@@ -612,10 +674,16 @@ struct hl_wait_cs_in {
 #define HL_WAIT_CS_STATUS_ABORTED	3
 #define HL_WAIT_CS_STATUS_INTERRUPTED	4
 
+#define HL_WAIT_CS_STATUS_FLAG_GONE		0x1
+#define HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD	0x2
+
 struct hl_wait_cs_out {
 	/* HL_WAIT_CS_STATUS_* */
 	__u32 status;
-	__u32 pad;
+	/* HL_WAIT_CS_STATUS_FLAG* */
+	__u32 flags;
+	/* valid only if HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set */
+	__s64 timestamp_nsec;
 };
 
 union hl_wait_cs_args {
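
Since the flags/timestamp pair replaces the old pad word, callers should check the VLD flag before trusting timestamp_nsec. A minimal sketch of consuming the output half:

#include <stdbool.h>
#include <misc/habanalabs.h>

/* Returns true and fills *ts only when the kernel marked the timestamp of
 * the completed CS as valid. */
static bool foo_cs_timestamp(const struct hl_wait_cs_out *out, __s64 *ts)
{
	if (!(out->flags & HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD))
		return false;

	*ts = out->timestamp_nsec;
	return true;
}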